blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 to 10.2M) | authors (sequencelengths 1-1) | author_id (stringlengths 1-132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
db802391bcd03b8b2e12e805475b0d5c95ca0008 | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /blend4_pdep/pdep/network269_1.py | d12a2a0c2cbdc47b265da5b522cbfd7932b3bb86 | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 24,141 | py | species(
label = '[CH2]CO[CH2](3406)',
structure = SMILES('[CH2]CO[CH2]'),
E0 = (163.919,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,415.335,2171.22],'cm^-1')),
HinderedRotor(inertia=(0.0766174,'amu*angstrom^2'), symmetry=1, barrier=(9.18605,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.105119,'amu*angstrom^2'), symmetry=1, barrier=(2.41688,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.0760886,'amu*angstrom^2'), symmetry=1, barrier=(9.18796,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.3362,0.0379301,-3.71701e-05,2.20963e-08,-5.55876e-12,19773.7,16.104], Tmin=(100,'K'), Tmax=(945.827,'K')), NASAPolynomial(coeffs=[6.70523,0.0194525,-7.86561e-06,1.44049e-09,-9.89181e-14,18947.2,-4.73055], Tmin=(945.827,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(163.919,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CsJOCC) + radical(CJCO)"""),
)
species(
label = 'CH2O(13)(14)',
structure = SMILES('C=O'),
E0 = (-119.055,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4140.62,'J/mol'), sigma=(3.59,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.79372,-0.00990833,3.7322e-05,-3.79285e-08,1.31773e-11,-14379.2,0.602798], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16953,0.00619321,-2.25056e-06,3.65976e-10,-2.20149e-14,-14548.7,6.04208], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-119.055,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH2O""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = 'C2H4(19)(20)',
structure = SMILES('C=C'),
E0 = (42.0619,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3000,3050,3100,1330,1430,900,1050,1000,1050,1600,1700],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2334.71,'J/mol'), sigma=(3.971,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=1.5, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.9592,-0.00757051,5.7099e-05,-6.91588e-08,2.69884e-11,5089.78,4.0973], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.99183,0.0104834,-3.71721e-06,5.94628e-10,-3.5363e-14,4268.66,-0.269082], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(42.0619,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(133.032,'J/(mol*K)'), label="""C2H4""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = 'H(3)(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'C3H5O(135)(134)',
structure = SMILES('[CH2]OC=C'),
E0 = (76.6924,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,3010,987.5,1337.5,450,1655,3000,3100,440,815,1455,1000,319.986,320.005],'cm^-1')),
HinderedRotor(inertia=(0.0016457,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.277681,'amu*angstrom^2'), symmetry=1, barrier=(20.1806,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (57.0712,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(2900.74,'J/mol'), sigma=(5.09846,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with Tc=453.09 K, Pc=49.66 bar (from Joback method)"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.59643,0.0244142,9.3695e-07,-1.83877e-08,8.69513e-12,9280.22,14.7231], Tmin=(100,'K'), Tmax=(988.882,'K')), NASAPolynomial(coeffs=[9.07407,0.0132332,-4.8876e-06,8.99482e-10,-6.42037e-14,7264.66,-20.1688], Tmin=(988.882,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(76.6924,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), label="""CH2OCHCH2""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'C2H4(T)(899)',
structure = SMILES('[CH2][CH2]'),
E0 = (318.146,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,180,1436.54,1437.15,2688.96,2689.16],'cm^-1')),
HinderedRotor(inertia=(0.0257549,'amu*angstrom^2'), symmetry=1, barrier=(17.2441,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (28.0532,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.40736,0.0100312,6.40927e-06,-1.41291e-08,5.92671e-12,38288.2,6.11703], Tmin=(100,'K'), Tmax=(954.26,'K')), NASAPolynomial(coeffs=[5.52249,0.00856173,-2.90743e-06,5.02353e-10,-3.44572e-14,37547.8,-5.75276], Tmin=(954.26,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(318.146,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), label="""C2H4(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = '[CH2]O[CH]C(4894)',
structure = SMILES('[CH2]O[CH]C'),
E0 = (132.786,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.07912,0.0410978,-4.15576e-05,2.34499e-08,-5.32469e-12,16040.6,15.834], Tmin=(100,'K'), Tmax=(1069.75,'K')), NASAPolynomial(coeffs=[8.97105,0.0153272,-5.42175e-06,9.29844e-10,-6.16823e-14,14566.1,-17.8801], Tmin=(1069.75,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(132.786,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CsJOCC) + radical(CCsJOCs)"""),
)
species(
label = '[CH2][CH]OC(4895)',
structure = SMILES('[CH2][CH]OC'),
E0 = (156.604,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[1.90614,0.0490487,-6.79679e-05,5.41328e-08,-1.72812e-11,18907.6,15.3999], Tmin=(100,'K'), Tmax=(841.187,'K')), NASAPolynomial(coeffs=[7.45112,0.0190454,-7.98275e-06,1.45438e-09,-9.80969e-14,18103.4,-9.6279], Tmin=(841.187,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(156.604,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(270.22,'J/(mol*K)'), comment="""Thermo library: DFT_QCI_thermo + radical(CJCO) + radical(CCsJOCs)"""),
)
species(
label = '[CH2][O](1408)',
structure = SMILES('[CH2][O]'),
E0 = (192.903,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.88409,-0.00363885,3.28543e-05,-4.13611e-08,1.59631e-11,23210.8,7.47983], Tmin=(100,'K'), Tmax=(933.06,'K')), NASAPolynomial(coeffs=[6.69335,0.000289989,8.61416e-07,-1.56351e-10,7.33778e-15,21991.3,-9.6043], Tmin=(933.06,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.903,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(H3COJ) + radical(CsJOH)"""),
)
species(
label = 'C=COC(2832)',
structure = SMILES('C=COC'),
E0 = (-147.959,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650,2750,2800,2850,1350,1500,750,1050,1375,1000,3010,987.5,1337.5,450,1655,180,180],'cm^-1')),
HinderedRotor(inertia=(0.962677,'amu*angstrom^2'), symmetry=1, barrier=(22.1338,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.964727,'amu*angstrom^2'), symmetry=1, barrier=(22.181,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.38888,0.0203564,3.95656e-05,-7.42349e-08,3.2473e-11,-17723.4,12.6367], Tmin=(100,'K'), Tmax=(922.779,'K')), NASAPolynomial(coeffs=[14.7891,0.00555506,3.11097e-07,-1.33471e-10,5.00883e-15,-21670.3,-55.1761], Tmin=(922.779,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-147.959,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(224.491,'J/(mol*K)'), comment="""Thermo group additivity estimation: group(O2s-Cs(Cds-Cd)) + group(Cs-OsHHH) + group(Cds-CdsOsH) + group(Cds-CdsHH)"""),
)
species(
label = 'oxetane(3407)',
structure = SMILES('C1COC1'),
E0 = (-91.7532,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (58.0791,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.47569,-0.00369706,8.7065e-05,-1.05609e-07,3.87161e-11,-11002.7,11.3085], Tmin=(100,'K'), Tmax=(960.537,'K')), NASAPolynomial(coeffs=[8.01392,0.016513,-5.56902e-06,1.07256e-09,-8.24746e-14,-13678.7,-19.7945], Tmin=(960.537,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-91.7532,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(232.805,'J/(mol*K)'), label="""oxetane""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'CH2(17)(18)',
structure = SMILES('[CH2]'),
E0 = (381.08,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([971.045,2816.03,3444.23],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.71758,0.00127391,2.17347e-06,-3.48858e-09,1.65209e-12,45872.4,1.75298], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.14632,0.00303671,-9.96474e-07,1.50484e-10,-8.57336e-15,46041.3,4.72342], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(381.08,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2(T)""", comment="""Thermo library: FFCM1(-)"""),
)
species(
label = '[CH2]C[O](1195)',
structure = SMILES('[CH2]C[O]'),
E0 = (188.892,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2850,1437.5,1250,1305,750,350,3000,3100,440,815,1455,1000,1398.33],'cm^-1')),
HinderedRotor(inertia=(0.00547724,'amu*angstrom^2'), symmetry=1, barrier=(7.58298,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.57171,0.0102136,5.90913e-06,-7.99869e-09,2.07078e-12,22733,11.7517], Tmin=(100,'K'), Tmax=(1490.84,'K')), NASAPolynomial(coeffs=[4.741,0.01502,-6.91914e-06,1.31179e-09,-8.9824e-14,21501.6,2.68291], Tmin=(1490.84,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(188.892,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(CJCO) + radical(CCOJ)"""),
)
species(
label = '[CH2]O[CH2](1203)',
structure = SMILES('[CH2]O[CH2]'),
E0 = (177.918,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3033.33,3066.67,3100,415,465,780,850,1435,1475,900,1100,393.523],'cm^-1')),
HinderedRotor(inertia=(0.00108835,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
HinderedRotor(inertia=(0.00108835,'amu*angstrom^2'), symmetry=1, barrier=(0.119627,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (44.0526,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.05159,0.0165784,-1.00609e-07,-1.04835e-08,4.7882e-12,21436.5,11.5123], Tmin=(100,'K'), Tmax=(1037.89,'K')), NASAPolynomial(coeffs=[7.39316,0.00943956,-3.64811e-06,7.00835e-10,-5.09569e-14,20018.6,-12.084], Tmin=(1037.89,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(177.918,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(199.547,'J/(mol*K)'), comment="""Thermo library: FFCM1(-) + radical(CsJOCH3) + radical(CsJOCH3)"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ar(8)',
structure = SMILES('[Ar]'),
E0 = (-6.19426,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (39.348,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1134.93,'J/mol'), sigma=(3.33,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,-745,4.3663], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,-745,4.3663], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-6.19426,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ar""", comment="""Thermo library: BurkeH2O2"""),
)
transitionState(
label = 'TS1',
E0 = (163.919,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (295.254,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (227.125,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (322.283,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (280.978,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (511.05,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (227.319,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (172.203,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS9',
E0 = (569.971,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS10',
E0 = (558.998,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction1',
reactants = ['[CH2]CO[CH2](3406)'],
products = ['CH2O(13)(14)', 'C2H4(19)(20)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(5e+12,'s^-1'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Exact match found for rate rule [RJJ]
Euclidian distance = 0
family: 1,4_Linear_birad_scission"""),
)
reaction(
label = 'reaction2',
reactants = ['H(3)(3)', 'C3H5O(135)(134)'],
products = ['[CH2]CO[CH2](3406)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(2.182e+10,'cm^3/(mol*s)'), n=0.859, Ea=(6.76971,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [Cds-OsH_Cds;HJ]
Euclidian distance = 0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction3',
reactants = ['CH2O(13)(14)', 'C2H4(T)(899)'],
products = ['[CH2]CO[CH2](3406)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(4660,'cm^3/(mol*s)'), n=3.17, Ea=(28.0328,'kJ/mol'), T0=(1,'K'), Tmin=(300,'K'), Tmax=(1500,'K'), comment="""Estimated using template [Od_CO-HH;YJ] for rate rule [Od_CO-HH;CJ]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: R_Addition_MultipleBond"""),
)
reaction(
label = 'reaction4',
reactants = ['[CH2]CO[CH2](3406)'],
products = ['[CH2]O[CH]C(4894)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(3.7e+13,'s^-1','+|-',2), n=-0.1, Ea=(158.364,'kJ/mol'), T0=(1,'K'), Tmin=(700,'K'), Tmax=(1800,'K'), comment="""From training reaction 347 used for R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO
Exact match found for rate rule [R2H_S;C_rad_out_2H;Cs_H_out_H/NonDeO]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction5',
reactants = ['[CH2]CO[CH2](3406)'],
products = ['[CH2][CH]OC(4895)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(4.61991,'s^-1'), n=3.5644, Ea=(117.059,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [R3H_SS;C_rad_out_2H;XH_out] for rate rule [R3H_SS_O;C_rad_out_2H;XH_out]
Euclidian distance = 1.0
Multiplied by reaction path degeneracy 2.0
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['C2H4(T)(899)', '[CH2][O](1408)'],
products = ['[CH2]CO[CH2](3406)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.47003e+07,'m^3/(mol*s)'), n=0.0284742, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;Y_rad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: R_Recombination
Ea raised from -14.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction7',
reactants = ['[CH2]CO[CH2](3406)'],
products = ['C=COC(2832)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(1.4874e+09,'s^-1'), n=1.045, Ea=(63.4002,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [R3radExo;Y_rad;XH_Rrad]
Euclidian distance = 0
Multiplied by reaction path degeneracy 2.0
family: Intra_Disproportionation"""),
)
reaction(
label = 'reaction8',
reactants = ['[CH2]CO[CH2](3406)'],
products = ['oxetane(3407)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(1.62e+12,'s^-1'), n=-0.305, Ea=(8.28432,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""From training reaction 2 used for R4_SSS;C_rad_out_2H;Cpri_rad_out_2H
Exact match found for rate rule [R4_SSS;C_rad_out_2H;Cpri_rad_out_2H]
Euclidian distance = 0
family: Birad_recombination"""),
)
reaction(
label = 'reaction9',
reactants = ['CH2(17)(18)', '[CH2]C[O](1195)'],
products = ['[CH2]CO[CH2](3406)'],
transitionState = 'TS9',
kinetics = Arrhenius(A=(54738.4,'m^3/(mol*s)'), n=0.884925, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using an average for rate rule [O_rad/NonDe;Birad]
Euclidian distance = 0
family: Birad_R_Recombination
Ea raised from -2.9 to 0 kJ/mol."""),
)
reaction(
label = 'reaction10',
reactants = ['CH2(17)(18)', '[CH2]O[CH2](1203)'],
products = ['[CH2]CO[CH2](3406)'],
transitionState = 'TS10',
kinetics = Arrhenius(A=(4.4725e+06,'m^3/(mol*s)'), n=0.36814, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [Y_rad;Birad] for rate rule [C_rad/H2/O;Birad]
Euclidian distance = 3.0
Multiplied by reaction path degeneracy 2.0
family: Birad_R_Recombination
Ea raised from -1.7 to 0 kJ/mol."""),
)
network(
label = '269',
isomers = [
'[CH2]CO[CH2](3406)',
],
reactants = [
('CH2O(13)(14)', 'C2H4(19)(20)'),
('H(3)(3)', 'C3H5O(135)(134)'),
],
bathGas = {
'Ne': 0.333333,
'N2': 0.333333,
'Ar(8)': 0.333333,
},
)
pressureDependence(
label = '269',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"[email protected]"
] | |
653ca533eb3f53b29e27b7eb8e5b17df34a8b2eb | 6203105c774913bbb3dc7e2d9bb99f739d9d24fa | /discrete_autoencoder/layers/activation.py | 249418a2662f3838a89576ab80e8e36e9d2b2c95 | [] | no_license | bstriner/discrete_autoencoder | f5eae31e155b2c4c440c9fe89b060c3be61de888 | baca5d23964b08ff7e3062c07d74b0ff9a631e98 | refs/heads/master | 2021-07-23T02:12:36.405089 | 2017-11-03T09:37:02 | 2017-11-03T09:37:02 | 108,048,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | from .layer import Layer
class ActivationLayer(Layer):
def __init__(self, activation=None):
self.activation = activation
params = []
non_trainable_weights = []
super(ActivationLayer, self).__init__(params=params, non_trainable_weights=non_trainable_weights)
def call(self, x):
out = self.activation(x)
return out, []
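

# Minimal usage sketch (not part of the original module; `relu` stands for any
# callable tensor activation you choose to pass in):
#   layer = ActivationLayer(activation=relu)
#   out, extras = layer.call(x)   # extras == []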
| [
"[email protected]"
] | |
c03a75610d9c6359e5b0034a66cbf001dc6401f2 | 2c94c322b5e1f53e131f7d680bcd4413ff11a10b | /bubble_s_ascending.py | dd23b2b99c326a2660e536c4be43b9aee8db5ab4 | [] | no_license | Nehanavgurukul/list | 66996ad6f30183f8d6c758ab824fd5a7840ba4dd | e4aa2686c0f007477e147c733ac98708773570cb | refs/heads/main | 2023-01-11T23:38:15.450749 | 2020-10-29T16:10:29 | 2020-10-29T16:10:29 | 308,381,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | num=[23,50,56,20,11,70]
index=0
while(index<len(num)):
j=0
while(j<len(num)):
if(num[index]<num[j]):
tamp=num[index]
num[index]=num[j]
num[j]=tamp
j=j+1
index=index+1
print(num) | [
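# Expected output for the list above: [11, 20, 23, 50, 56, 70]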
"[email protected]"
] | |
2cb1b08ed78ba1f7af4b2d62d28e84f0291436db | b8498a35832f5d14bd15332a3dd2b93d64351448 | /fluent-python/15-context-mngr/else_block.py | adac5f3d0a2da378b4e488ca68369bc802379035 | [] | no_license | luzzyzhang/my-python-cookbook | 8b3daf4c4354a98ff305375c8a3a35551eee67e7 | 8d160f6d6d18b7a9801d433f6e3868d054432bde | refs/heads/master | 2021-09-06T20:25:39.026278 | 2018-02-11T04:22:13 | 2018-02-11T04:22:13 | 46,793,714 | 2 | 1 | null | 2017-06-17T13:57:26 | 2015-11-24T13:35:26 | Python | UTF-8 | Python | false | false | 527 | py | # -*- coding: utf-8 -*-
"""This is just demo code
"""
# for ... else ...
for item in my_list:
if item.flavor == 'bananas':
break
else:
raise ValueError('No banana flavor found')
try:
dangerous_call()
after_call()
except OSError:
log('OSError ...')
# VS
# For clarity and correctness, the body of a try block should only have the
# statements that may generate the expected exceptions. This is much better:
try:
dangerous_call()
except OSError:
log('OSError ...')
else:
after_call()
| [
"[email protected]"
] | |
230586f9d6953a471e7d734543104ead21c8cefe | 41bea39563c74621924d79723f8ba84889958365 | /nkamg_pcap/server/pcap/pcapanalysis.py | f35fbc521427fc37b36571488b62756327ed850d | [
"MIT"
] | permissive | NKQiuKF/pcap_update | abee0c13cb583fddb89eb9e86a487279bdc18f1d | 679e3f116367394a5f58eb4f95b5318e80fee331 | refs/heads/master | 2022-10-21T17:49:30.706480 | 2019-09-02T09:22:06 | 2019-09-02T09:22:06 | 205,816,421 | 1 | 0 | null | 2022-10-06T18:33:32 | 2019-09-02T08:55:55 | JavaScript | UTF-8 | Python | false | false | 22,425 | py | #coding:utf-8
#from superset.data import *
"""Each record in conn.log describes one complete connection, which may span multiple packets."""
import sys
#sys.path.append("../../")
import os
import pandas as pd
from superset.data import *
from geoip import geolite2
import hashlib
from get_province import get_province_code
FilePath = os.path.join('/'.join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-2]),'web/file')
BroPath = os.path.join(os.path.dirname(os.path.abspath(__file__)),'bro')
os.chdir(BroPath)
def sha256(filename):
sha256Obj = hashlib.sha256()
with open(filename,'rb') as f:
sha256Obj.update(f.read())
return sha256Obj.hexdigest()
def get_pcap_info(f):
pcap_analysis=[]
pcap = os.path.join(FilePath,f)
slug = sha256(pcap)
os.popen('mv '+pcap+' '+BroPath)
os.popen('bro -C -r '+f)
df_conn = pd.read_csv("conn.log",skiprows=8,skipfooter=1,
sep=r"\t",engine='python')
df_conn.drop(df_conn.columns[-1],axis=1,inplace=True)
df_conn.columns = ['time', 'id', 'orig_h',
'orig_p', 'resp_h', 'resp_p',
'proto', 'service', 'duration',
'orig_bytes', 'resp_bytes',
'conn_state', 'local_orig',
'local_resp', 'missed_bytes',
'history','orig_pkts',
'orig_ip_bytes',
'resp_pkts',
'tunnel_parents']
df_conn.sort_values(['time'],ascending=True,inplace=True)
df_conn['temporary'] = df_conn['resp_h'].apply(geolite2.lookup)
#CCA2
df_conn['country'] = df_conn['temporary'].apply(
lambda x:'local' if not x else x.country
)
df_conn['continent'] = df_conn['temporary'].apply(
lambda x:'local' if not x else x.continent
)
df_conn['Lat'] = df_conn['temporary'].apply(
lambda x:'None' if not x else x.location[0]
)
df_conn['Lng'] = df_conn['temporary'].apply(
lambda x:'None'if not x else x.location[1]
)
del df_conn['temporary']
f = os.path.splitext(f)[0]
f = f.replace('-','_')
for col in df_conn.columns[1:]:
df_conn[col] = df_conn[col].apply(lambda x: x if x!='-' else 0)
df_conn['service'] = df_conn['service'].apply(lambda x:x if x!=0 else 'Unknown')
df_conn.time = pd.to_datetime(df_conn['time'],unit='s')
df_conn['date'] = df_conn['time']
df_conn['allbytes']=df_conn['orig_bytes']+df_conn['resp_bytes']
df_conn.to_sql(
f,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'time':DateTime(),
'date':Date(),
'id':String(),
'orig_p':String(),
'orig_h':String(),
'resp_h':String(),
'resp_p':String(),
'proto':String(),
'service':String(),
'duration':Float(),
'orig_bytes':Float(),
'resp_bytes':Float(),
'conn_state':String(),
'local_orig':String(),
'local_resp':String(),
'missed_bytes':Float(),
'history':String(),
'orig_pkts':BigInteger(),
'orig_ip_bytes':BigInteger(),
'resp_pkts':BigInteger(),
'tunnel_parents':BigInteger(),
'country':String(),
'allbytes':Float(),
},
index=False
)
tbl = db.session.query(TBL).filter_by(table_name=f).first()
if not tbl:
tbl = TBL(table_name=f)
tbl.database = get_or_create_main_db()
tbl.description = "Pcap Connection Info"
tbl.filter_select_enabled = True
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
defaults = {
"bottom_margin":"auto",
"viz_type":'table',
"since":"100 years ago",
"until":'',
"show_controls":True,
}
slc=Slice(
slice_name=f+u'_table',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
page_length=50,
row_limit=None,
table_filter=True,
metrics=[],
all_columns=[u'time', u'id', u'orig_h', u'orig_p', u'resp_h', u'resp_p', u'proto',
u'service', u'duration', u'orig_bytes', u'resp_bytes', u'conn_state',
u'local_orig', u'local_resp', u'missed_bytes', u'history', u'orig_pkts',
u'orig_ip_bytes', u'resp_pkts', u'tunnel_parents','country'],
include_search=True,
)
)
pcap_analysis.append(slc.slice_name)
merge_slice(slc)
slc=Slice(
        slice_name=f+u'_目的端口',  # destination port
viz_type='pie',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pie',
groupby=['resp_p'],
metrics=['count'],
donut=True,
show_legend=False,
labels_outside=False,
)
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_发送流量',  # outbound traffic
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"having": "",
"slice_id": 432,
"viz_type": "sankey",
"row_limit": 50000,
"metric": "sum__orig_bytes",
"since": "100 years ago",
"until": "",
"where": "",
"datasource": "33__table",
"filters": [],
"color_scheme": "bnbColors",
"granularity_sqla": "time",
"time_grain_sqla": "Time Column",
"groupby": ["service", "country"]
}
""")
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_对应日期',  # corresponding dates
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"having": "",
"slice_id": 432,
"viz_type": "sankey",
"row_limit": 50000,
"metric": "sum__orig_bytes",
"since": "100 years ago",
"until": "",
"where": "",
"datasource": "33__table",
"filters": [],
"color_scheme": "bnbColors",
"granularity_sqla": "time",
"time_grain_sqla": "Time Column",
"groupby": ["service", "date"]
}
"""),
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_源端口_发送与接收',  # source port: sent and received
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"bottom_margin": "auto", "y_axis_label": "", "row_limit": 10, "show_legend": true, "filters": [], "show_controls": true, "granularity_sqla": "time", "viz_type": "dist_bar", "since": "100 years ago", "x_axis_label": "", "order_bars": false, "color_scheme": "d3Category10", "until": "", "columns": ["proto"], "show_bar_value": false, "y_axis_format": ".3s", "metrics": ["sum__orig_bytes", "sum__resp_bytes"], "slice_id": 458, "where": "", "reduce_x_ticks": false, "groupby": ["orig_h"], "datasource": "34__table", "contribution": false, "time_grain_sqla": null, "having": "", "bar_stacked": true}
"""
))
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_目的端口_发送与接收',  # destination port: sent and received
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"bottom_margin": "auto", "y_axis_label": "", "row_limit": 10, "show_legend": true, "filters": [], "show_controls": true, "granularity_sqla": "time", "viz_type": "dist_bar", "since": "100 years ago", "x_axis_label": "", "order_bars": false, "color_scheme": "d3Category10", "until": "", "columns": ["proto"], "show_bar_value": false, "y_axis_format": ".3s", "metrics": ["sum__orig_bytes", "sum__resp_bytes"], "slice_id": 458, "where": "", "reduce_x_ticks": false, "groupby": ["resp_h"], "datasource": "34__table", "contribution": false, "time_grain_sqla": null, "having": "", "bar_stacked": true}
"""
))
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_服务_发送与接收',  # service: sent and received
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"bottom_margin": "auto", "y_axis_label": "", "row_limit": 10, "show_legend": true, "filters": [], "show_controls": true, "granularity_sqla": "time", "viz_type": "dist_bar", "since": "100 years ago", "x_axis_label": "", "order_bars": false, "color_scheme": "d3Category10", "until": "", "columns": [], "show_bar_value": false, "y_axis_format": ".3s", "metrics": ["sum__orig_bytes", "sum__resp_bytes"], "slice_id": 463, "where": "", "reduce_x_ticks": false, "groupby": ["service"], "datasource": "34__table", "contribution": false, "time_grain_sqla": null, "having": "", "bar_stacked": true}
"""
))
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_国家_发送与接收',  # country: sent and received
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"bottom_margin": "auto", "y_axis_label": "", "row_limit": 10, "show_legend": true, "filters": [], "show_controls": true, "granularity_sqla": "time", "viz_type": "dist_bar", "since": "100 years ago", "x_axis_label": "", "order_bars": false, "color_scheme": "d3Category10", "until": "", "columns": [], "show_bar_value": false, "y_axis_format": ".3s", "metrics": ["sum__orig_bytes", "sum__resp_bytes"], "slice_id": 463, "where": "", "reduce_x_ticks": false, "groupby": ["country"], "datasource": "34__table", "contribution": false, "time_grain_sqla": null, "having": "", "bar_stacked": true}
"""
))
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_Duration时间',  # connection duration
viz_type='treemap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"having": "", "slice_id": 463, "viz_type": "treemap", "where": "", "since": "100 years ago", "until": "", "metrics": ["sum__duration"], "datasource": "34__table", "filters": [], "color_scheme": "bnbColors", "granularity_sqla": "time", "treemap_ratio": 1.618033988749895, "time_grain_sqla": "Time Column", "groupby": ["service"], "number_format": ".3s"}
"""
))
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_气泡图',  # bubble chart
viz_type='bubble',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"bottom_margin": "auto", "y_axis_label": "\u63a5\u6536\u5305", "series": "proto", "entity": "service", "show_legend": true, "filters": [], "granularity_sqla": "time", "size": "sum__allbytes", "viz_type": "bubble", "since": "100 years ago", "x_axis_label": "\u53d1\u9001\u5305", "color_scheme": "bnbColors", "y_axis_format": ".3s", "y_axis_showminmax": true, "x_axis_format": ".3s", "left_margin": "auto", "where": "", "until": "", "y_log_scale": false, "datasource": "34__table", "x_axis_showminmax": true, "y": "sum__resp_pkts", "x": "sum__orig_pkts", "x_log_scale": false, "time_grain_sqla": "Time Column", "having": "", "max_bubble_size": "100"}
"""
)
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_流量力导向图',  # traffic force-directed graph
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"link_length": "150", "slice_id": 2, "viz_type": "directed_force", "row_limit": 50, "metric": "sum__allbytes", "since": "", "until": "now", "where": "", "charge": "-500", "groupby": ["orig_h", "resp_h"], "datasource": "34__table", "filters": [], "granularity_sqla": "time", "time_grain_sqla": "Time Column", "having": ""}
"""
)
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
slc=Slice(
        slice_name=f+u'_过滤器',  # filter box
viz_type='filter_box',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"having": "", "slice_id": 406, "show_sqla_time_granularity": false, "viz_type": "filter_box", "where": "", "metric": "sum__duration", "since": "100 years ago", "until": "", "show_druid_time_origin": false, "groupby": ["orig_h", "orig_p", "resp_h", "resp_p", "proto", "service", "country"], "datasource": "35__table", "filters": [], "show_druid_time_granularity": false, "granularity_sqla": "time", "show_sqla_time_column": false, "time_grain_sqla": "Time Column", "date_filter": true, "instant_filtering": true}
"""
)
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
def draw_map():
df_map = df_conn[df_conn['country']!='local']
df_map.to_sql(
f+'_map',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'time':DateTime(),
'date':Date(),
'id':String(),
'allbytes':Float(),
'orig_p':String(),
'orig_h':String(),
'resp_h':String(),
'resp_p':String(),
'proto':String(),
'service':String(),
'duration':Float(),
'orig_bytes':Float(),
'resp_bytes':Float(),
'conn_state':String(),
'local_orig':String(),
'local_resp':String(),
'missed_bytes':Float(),
'history':String(),
'orig_pkts':BigInteger(),
'orig_ip_bytes':BigInteger(),
'resp_pkts':BigInteger(),
'tunnel_parents':BigInteger(),
'country':String(),
'Lat':Float(),
'Lng':Float()
},
index=False
)
tbl = db.session.query(TBL).filter_by(table_name=f+'_map').first()
if not tbl:
tbl = TBL(table_name=f+'_map')
tbl.database = get_or_create_main_db()
tbl.description = "Pcap Connection Map Info"
tbl.filter_select_enabled = True
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc=Slice(
slice_name=f+u'_WorldMap',
viz_type='world_map',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{"since": "100 years ago", "having": "", "viz_type": "world_map", "slice_id": 531, "where": "", "metric": "sum__duration", "show_bubbles": true, "entity": "country", "country_fieldtype": "cca2", "datasource": "35__table", "filters": [], "secondary_metric": "sum__allbytes", "granularity_sqla": "time", "time_grain_sqla": "Time Column", "until": "", "max_bubble_size": "25"}
"""
)
)
merge_slice(slc)
pcap_analysis.append(slc.slice_name)
draw_map()
print("Creating a Pcap Analysis dashboard")
dash_name = f+"_Analysis"
dash = db.session.query(Dash).filter_by(slug=slug).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
[
{
"col": 1,
"row": 97,
"size_x": 47,
"size_y": 19,
"slice_id": "433"
},
{
"col": 37,
"row": 0,
"size_x": 12,
"size_y": 17,
"slice_id": "434"
},
{
"col": 34,
"row": 116,
"size_x": 15,
"size_y": 28,
"slice_id": "435"
},
{
"col": 33,
"row": 144,
"size_x": 16,
"size_y": 16,
"slice_id": "436"
},
{
"col": 17,
"row": 129,
"size_x": 16,
"size_y": 14,
"slice_id": "437"
},
{
"col": 1,
"row": 129,
"size_x": 16,
"size_y": 14,
"slice_id": "438"
},
{
"col": 15,
"row": 116,
"size_x": 17,
"size_y": 13,
"slice_id": "439"
},
{
"col": 1,
"row": 116,
"size_x": 14,
"size_y": 12,
"slice_id": "440"
},
{
"col": 33,
"row": 160,
"size_x": 16,
"size_y": 16,
"slice_id": "441"
},
{
"col": 1,
"row": 143,
"size_x": 32,
"size_y": 16,
"slice_id": "442"
},
{
"col": 1,
"row": 159,
"size_x": 32,
"size_y": 16,
"slice_id": "443"
},
{
"col": 1,
"row": 0,
"size_x": 9,
"size_y": 15,
"slice_id": "444"
},
{
"col": 10,
"row": 0,
"size_x": 27,
"size_y": 18,
"slice_id": "445"
}
]
""")
l = json.loads(js)
slices = (
db.session.query(Slice).filter(Slice.slice_name.in_(
pcap_analysis)).all()
)
# slices = sorted(slices,key=lambda x:x.id)
for i,pos in enumerate(l):
pos['slice_id'] = str(slices[i].id)
dash.dashboard_title = dash_name
dash.position_json = json.dumps(l, indent=4)
dash.slug = slug
dash.slices = slices
db.session.merge(dash)
db.session.commit()
def main():
    # Poll the upload directory indefinitely: analyze every .pcap file found,
    # then clear Bro's generated logs before the next pass.
    while True:
        file_list = os.listdir(FilePath)
        for f in file_list:
            if os.path.splitext(f)[-1]!='.pcap':
                continue
            else:
                get_pcap_info(f)
        os.popen("rm -f "+BroPath+'/*.log')
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
54691aec62e64eee0903528551d1bc0d23f22069 | 1b57d2f689903d9937f77e26be40784af2ff2669 | /view_helpers/home.py | f7682825ddec4cb03015fe1ee82b08065034f3d6 | [] | no_license | FMularski/passwordkeeper | b1ef31c04bcfa7f012f28852fd8ae1f33efeff98 | 5859fbef89a3f80b27d52fd124971180e12e4fef | refs/heads/main | 2023-07-16T21:09:34.938196 | 2021-09-06T21:05:01 | 2021-09-06T21:05:01 | 347,174,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | def encode_password(password, pin):
pin_sum = 0
for char in pin:
pin_sum += ord(char)
encoded = ''
for char in password:
encoded += chr(ord(char) + pin_sum)
return encoded
def decode_password(encoded, pin):
pin_sum = 0
for char in pin:
pin_sum += ord(char)
decoded = ''
for char in encoded:
decoded += chr(ord(char) - pin_sum)
return decoded
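

# Minimal round-trip sketch (hypothetical values, not part of the original
# views): decoding with the same PIN recovers the original password.
if __name__ == '__main__':
    secret = encode_password('hunter2', '1234')
    assert decode_password(secret, '1234') == 'hunter2'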
| [
"[email protected]"
] | |
9459f7c0aa601f412b8a975d03fa86c2914ac96c | 797761aeb37a8de4288696875f1a3c6c0eff3824 | /mlfromscratch/unsupervised_learning/partitioning_around_medoids.py | 6fdab2470e836fd1065b261519fafc06d73dad7f | [
"MIT"
] | permissive | kauziishere/ML-From-Scratch | 27df8a6565e964309d25729fe373746845b57b6b | a5040b84102fc2259b925d7337e7ff87080b6e5f | refs/heads/master | 2021-05-14T17:06:48.691808 | 2017-08-30T09:27:19 | 2017-08-30T09:27:19 | 116,040,355 | 1 | 1 | null | 2018-01-02T17:38:06 | 2018-01-02T17:31:24 | Python | UTF-8 | Python | false | false | 5,357 | py | import sys
import os
import math
import random
from sklearn import datasets
import numpy as np
# Import helper functions
from mlfromscratch.utils.data_manipulation import normalize
from mlfromscratch.utils.data_operation import euclidean_distance
from mlfromscratch.unsupervised_learning import PCA
from mlfromscratch.utils import Plot
class PAM():
"""A simple clustering method that forms k clusters by first assigning
samples to the closest medoids, and then swapping medoids with non-medoid
samples if the total distance (cost) between the cluster members and their medoid
is smaller than prevoisly.
Parameters:
-----------
k: int
The number of clusters the algorithm will form.
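
    Example (sketch):
        clf = PAM(k=3)
        labels = clf.predict(X)  # one cluster index per row of X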
"""
def __init__(self, k=2):
self.k = k
# Initialize the medoids as random samples
def _init_random_medoids(self, X):
n_samples, n_features = np.shape(X)
medoids = np.zeros((self.k, n_features))
for i in range(self.k):
medoid = X[np.random.choice(range(n_samples))]
medoids[i] = medoid
return medoids
# Return the index of the closest medoid to the sample
def _closest_medoid(self, sample, medoids):
closest_i = None
closest_distance = float("inf")
for i, medoid in enumerate(medoids):
distance = euclidean_distance(sample, medoid)
if distance < closest_distance:
closest_i = i
closest_distance = distance
return closest_i
# Assign the samples to the closest medoids to create clusters
def _create_clusters(self, X, medoids):
clusters = [[] for _ in range(self.k)]
for sample_i, sample in enumerate(X):
medoid_i = self._closest_medoid(sample, medoids)
clusters[medoid_i].append(sample_i)
return clusters
# Calculate the cost (total distance between samples and their medoids)
def _calculate_cost(self, X, clusters, medoids):
cost = 0
# For each cluster
for i, cluster in enumerate(clusters):
medoid = medoids[i]
for sample_i in cluster:
# Add distance between sample and medoid as cost
cost += euclidean_distance(X[sample_i], medoid)
return cost
# Returns a list of all samples that are not currently medoids
def _get_non_medoids(self, X, medoids):
non_medoids = []
for sample in X:
if not sample in medoids:
non_medoids.append(sample)
return non_medoids
# Classify samples as the index of their clusters
def _get_cluster_labels(self, clusters, X):
# One prediction for each sample
y_pred = np.zeros(np.shape(X)[0])
for cluster_i in range(len(clusters)):
cluster = clusters[cluster_i]
for sample_i in cluster:
y_pred[sample_i] = cluster_i
return y_pred
# Do Partitioning Around Medoids and return the cluster labels
def predict(self, X):
# Initialize medoids randomly
medoids = self._init_random_medoids(X)
# Assign samples to closest medoids
clusters = self._create_clusters(X, medoids)
# Calculate the initial cost (total distance between samples and
# corresponding medoids)
cost = self._calculate_cost(X, clusters, medoids)
# Iterate until we no longer have a cheaper cost
while True:
best_medoids = medoids
lowest_cost = cost
for medoid in medoids:
# Get all non-medoid samples
non_medoids = self._get_non_medoids(X, medoids)
# Calculate the cost when swapping medoid and samples
for sample in non_medoids:
# Swap sample with the medoid
new_medoids = medoids.copy()
new_medoids[medoids == medoid] = sample
# Assign samples to new medoids
new_clusters = self._create_clusters(X, new_medoids)
# Calculate the cost with the new set of medoids
new_cost = self._calculate_cost(
X, new_clusters, new_medoids)
# If the swap gives us a lower cost we save the medoids and cost
if new_cost < lowest_cost:
lowest_cost = new_cost
best_medoids = new_medoids
            # If there was a swap that resulted in a lower cost, we save the
# resulting medoids from the best swap and the new cost
if lowest_cost < cost:
cost = lowest_cost
medoids = best_medoids
# Else finished
else:
break
final_clusters = self._create_clusters(X, medoids)
# Return the samples cluster indices as labels
return self._get_cluster_labels(final_clusters, X)
def main():
# Load the dataset
X, y = datasets.make_blobs()
# Cluster the data using K-Medoids
clf = PAM(k=3)
y_pred = clf.predict(X)
# Project the data onto the 2 primary principal components
p = Plot()
p.plot_in_2d(X, y_pred, title="PAM Clustering")
p.plot_in_2d(X, y, title="Actual Clustering")
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
97a3f89a8219b377de059f311121c2a9894553ab | 38c10c01007624cd2056884f25e0d6ab85442194 | /v8/tools/release/releases.py | 5b826fccba089d27afc2f0cab0fefc209ae033af | [
"BSD-3-Clause",
"bzip2-1.0.6"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 19,503 | py | #!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script retrieves the history of all V8 branches and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
# gclient sync --with_branch_heads
# gclient fetch
import argparse
import csv
import itertools
import json
import os
import re
import sys
from common_includes import *
CONFIG = {
"BRANCHNAME": "retrieve-v8-releases",
"PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}
# Expression for retrieving the bleeding edge revision from a commit message.
PUSH_MSG_SVN_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
PUSH_MSG_GIT_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
# Expression for retrieving the merged patches from a merge commit message
# (old and new format).
MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M)
CHERRY_PICK_TITLE_GIT_RE = re.compile(r"^.* \(cherry\-pick\)\.?$")
# New git message for cherry-picked CLs. One message per line.
MERGE_MESSAGE_GIT_RE = re.compile(r"^Merged ([a-fA-F0-9]+)\.?$")
# Expression for retrieving reverted patches from a commit message (old and
# new format).
ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M)
# New git message for reverted CLs. One message per line.
ROLLBACK_MESSAGE_GIT_RE = re.compile(r"^Rollback of ([a-fA-F0-9]+)\.?$")
# Expression for retrieving the code review link.
REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)
# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
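# For reference, the modern form this matches looks roughly like the
# (hypothetical) DEPS line:  "v8_revision": "0f1e2d3c4b5a...",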
DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
"""|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
"""|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
"""([^"']+)["'].*$""", re.M)
# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
BLEEDING_EDGE_TAGS_RE = re.compile(
r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
OMAHA_PROXY_URL = "http://omahaproxy.appspot.com/"
def SortBranches(branches):
"""Sort branches with version number names."""
return sorted(branches, key=SortingKey, reverse=True)
def FilterDuplicatesAndReverse(cr_releases):
"""Returns the chromium releases in reverse order filtered by v8 revision
duplicates.
cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
"""
last = ""
result = []
for release in reversed(cr_releases):
if last == release[1]:
continue
last = release[1]
result.append(release)
return result
def BuildRevisionRanges(cr_releases):
"""Returns a mapping of v8 revision -> chromium ranges.
The ranges are comma-separated, each range has the form R1:R2. The newest
entry is the only one of the form R1, as there is no end range.
cr_releases is a list of [cr_rev, v8_hsh] reverse-sorted by cr_rev.
cr_rev either refers to a chromium commit position or a chromium branch
number.
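
  For example (hypothetical data), [["450", "B"], ["420", "A"]] maps to
  {"A": "420:449", "B": "450"}.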
"""
range_lists = {}
cr_releases = FilterDuplicatesAndReverse(cr_releases)
# Visit pairs of cr releases from oldest to newest.
for cr_from, cr_to in itertools.izip(
cr_releases, itertools.islice(cr_releases, 1, None)):
# Assume the chromium revisions are all different.
assert cr_from[0] != cr_to[0]
ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)
# Collect the ranges in lists per revision.
range_lists.setdefault(cr_from[1], []).append(ran)
# Add the newest revision.
if cr_releases:
range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])
# Stringify and comma-separate the range lists.
return dict((hsh, ", ".join(ran)) for hsh, ran in range_lists.iteritems())
def MatchSafe(match):
if match:
return match.group(1)
else:
return ""
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
self.CommonPrepare()
self.PrepareBranch()
class RetrieveV8Releases(Step):
MESSAGE = "Retrieve all V8 releases."
def ExceedsMax(self, releases):
return (self._options.max_releases > 0
and len(releases) > self._options.max_releases)
def GetMasterHashFromPush(self, title):
return MatchSafe(PUSH_MSG_GIT_RE.match(title))
def GetMergedPatches(self, body):
patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
if not patches:
patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body))
if patches:
# Indicate reverted patches with a "-".
patches = "-%s" % patches
return patches
def GetMergedPatchesGit(self, body):
patches = []
for line in body.splitlines():
patch = MatchSafe(MERGE_MESSAGE_GIT_RE.match(line))
if patch:
patches.append(patch)
patch = MatchSafe(ROLLBACK_MESSAGE_GIT_RE.match(line))
if patch:
patches.append("-%s" % patch)
return ", ".join(patches)
def GetReleaseDict(
self, git_hash, master_position, master_hash, branch, version,
patches, cl_body):
revision = self.GetCommitPositionNumber(git_hash)
return {
# The cr commit position number on the branch.
"revision": revision,
# The git revision on the branch.
"revision_git": git_hash,
# The cr commit position number on master.
"master_position": master_position,
# The same for git.
"master_hash": master_hash,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
"version": version,
# The date of the commit.
"date": self.GitLog(n=1, format="%ci", git_hash=git_hash),
# Merged patches if available in the form 'r1234, r2345'.
"patches_merged": patches,
# Default for easier output formatting.
"chromium_revision": "",
# Default for easier output formatting.
"chromium_branch": "",
      # Link to the CL on code review. Candidate pushes are not uploaded,
# so this field will be populated below with the recent roll CL link.
"review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
% revision),
}
def GetRelease(self, git_hash, branch):
self.ReadAndPersistVersion()
base_version = [self["major"], self["minor"], self["build"]]
version = ".".join(base_version)
body = self.GitLog(n=1, format="%B", git_hash=git_hash)
patches = ""
if self["patch"] != "0":
version += ".%s" % self["patch"]
if CHERRY_PICK_TITLE_GIT_RE.match(body.splitlines()[0]):
patches = self.GetMergedPatchesGit(body)
else:
patches = self.GetMergedPatches(body)
if SortingKey("4.2.69") <= SortingKey(version):
master_hash = self.GetLatestReleaseBase(version=version)
else:
# Legacy: Before version 4.2.69, the master revision was determined
# by commit message.
title = self.GitLog(n=1, format="%s", git_hash=git_hash)
master_hash = self.GetMasterHashFromPush(title)
master_position = ""
if master_hash:
master_position = self.GetCommitPositionNumber(master_hash)
return self.GetReleaseDict(
git_hash, master_position, master_hash, branch, version,
patches, body), self["patch"]
def GetReleasesFromBranch(self, branch):
self.GitReset(self.vc.RemoteBranch(branch))
if branch == self.vc.MasterBranch():
return self.GetReleasesFromMaster()
releases = []
try:
for git_hash in self.GitLog(format="%H").splitlines():
if VERSION_FILE not in self.GitChangedFiles(git_hash):
continue
if self.ExceedsMax(releases):
break # pragma: no cover
if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
break # pragma: no cover
release, patch_level = self.GetRelease(git_hash, branch)
releases.append(release)
# Follow branches only until their creation point.
# TODO(machenbach): This omits patches if the version file wasn't
# manipulated correctly. Find a better way to detect the point where
# the parent of the branch head leads to the trunk branch.
if branch != self.vc.CandidateBranch() and patch_level == "0":
break
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Clean up checked-out version file.
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
def GetReleaseFromRevision(self, revision):
releases = []
try:
if (VERSION_FILE not in self.GitChangedFiles(revision) or
not self.GitCheckoutFileSafe(VERSION_FILE, revision)):
print "Skipping revision %s" % revision
return [] # pragma: no cover
branches = map(
str.strip,
self.Git("branch -r --contains %s" % revision).strip().splitlines(),
)
branch = ""
for b in branches:
if b.startswith("origin/"):
branch = b.split("origin/")[1]
break
if b.startswith("branch-heads/"):
branch = b.split("branch-heads/")[1]
break
else:
print "Could not determine branch for %s" % revision
release, _ = self.GetRelease(revision, branch)
releases.append(release)
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Clean up checked-out version file.
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
def RunStep(self):
self.GitCreateBranch(self._config["BRANCHNAME"])
releases = []
if self._options.branch == 'recent':
# List every release from the last 7 days.
revisions = self.GetRecentReleases(max_age=7 * DAY_IN_SECONDS)
for revision in revisions:
releases += self.GetReleaseFromRevision(revision)
elif self._options.branch == 'all': # pragma: no cover
# Retrieve the full release history.
for branch in self.vc.GetBranches():
releases += self.GetReleasesFromBranch(branch)
releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
else: # pragma: no cover
# Retrieve history for a specified branch.
assert self._options.branch in (self.vc.GetBranches() +
[self.vc.CandidateBranch(), self.vc.MasterBranch()])
releases += self.GetReleasesFromBranch(self._options.branch)
self["releases"] = sorted(releases,
key=lambda r: SortingKey(r["version"]),
reverse=True)
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the chromium checkout."
def RunStep(self):
cwd = self._options.chromium
self.GitFetchOrigin("+refs/heads/*:refs/remotes/origin/*",
"+refs/branch-heads/*:refs/remotes/branch-heads/*",
cwd=cwd)
# Update v8 checkout in chromium.
self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
def ConvertToCommitNumber(step, revision):
# A short, purely numeric revision is already a commit position number and is returned as-is; anything else (e.g. a git hash) is converted below.
if revision.isdigit() and len(revision) < 8:
return revision
return step.GetCommitPositionNumber(
revision, cwd=os.path.join(step._options.chromium, "v8"))
class RetrieveChromiumV8Releases(Step):
MESSAGE = "Retrieve V8 releases from Chromium DEPS."
def RunStep(self):
cwd = self._options.chromium
# All v8 revisions we are interested in.
releases_dict = dict((r["revision_git"], r) for r in self["releases"])
cr_releases = []
count_past_last_v8 = 0
try:
for git_hash in self.GitLog(
format="%H", grep="V8", branch="origin/master",
path="DEPS", cwd=cwd).splitlines():
deps = self.GitShowFile(git_hash, "DEPS", cwd=cwd)
match = DEPS_RE.search(deps)
if match:
cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
if cr_rev:
v8_hsh = match.group(1)
cr_releases.append([cr_rev, v8_hsh])
if count_past_last_v8:
count_past_last_v8 += 1 # pragma: no cover
if count_past_last_v8 > 20:
break # pragma: no cover
# Stop as soon as we find a v8 revision that we didn't fetch in the
# v8-revision-retrieval part above (i.e. a revision that's too old).
# Just iterate a few more times in case there were reverts.
if v8_hsh not in releases_dict:
count_past_last_v8 += 1 # pragma: no cover
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Add the chromium ranges to the v8 candidates and master releases.
all_ranges = BuildRevisionRanges(cr_releases)
for hsh, ranges in all_ranges.iteritems():
releases_dict.get(hsh, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
class RetrieveChromiumBranches(Step):
MESSAGE = "Retrieve Chromium branch information."
def RunStep(self):
cwd = self._options.chromium
# All v8 revisions we are interested in.
releases_dict = dict((r["revision_git"], r) for r in self["releases"])
# Filter out irrelevant branches.
branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
self.GitRemotes(cwd=cwd))
# Transform into pure branch numbers.
branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
branches)
branches = sorted(branches, reverse=True)
cr_branches = []
count_past_last_v8 = 0
try:
for branch in branches:
deps = self.GitShowFile(
"refs/branch-heads/%d" % branch, "DEPS", cwd=cwd)
match = DEPS_RE.search(deps)
if match:
v8_hsh = match.group(1)
cr_branches.append([str(branch), v8_hsh])
if count_past_last_v8:
count_past_last_v8 += 1 # pragma: no cover
if count_past_last_v8 > 20:
break # pragma: no cover
# Stop as soon as we find a v8 revision that we didn't fetch in the
# v8-revision-retrieval part above (i.e. a revision that's too old).
# Just iterate a few more times in case there were reverts.
if v8_hsh not in releases_dict:
count_past_last_v8 += 1 # pragma: no cover
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Add the chromium branches to the v8 candidate releases.
all_ranges = BuildRevisionRanges(cr_branches)
for revision, ranges in all_ranges.iteritems():
releases_dict.get(revision, {})["chromium_branch"] = ranges
class RetrieveInformationOnChromeReleases(Step):
MESSAGE = 'Retrieves relevant information on the latest Chrome releases'
def Run(self):
params = None
result_raw = self.ReadURL(
OMAHA_PROXY_URL + "all.json",
params,
wait_plan=[5, 20]
)
recent_releases = json.loads(result_raw)
canaries = []
for current_os in recent_releases:
for current_version in current_os["versions"]:
if current_version["channel"] != "canary":
continue
current_candidate = self._CreateCandidate(current_version)
canaries.append(current_candidate)
chrome_releases = {"canaries": canaries}
self["chrome_releases"] = chrome_releases
def _GetGitHashForV8Version(self, v8_version):
if v8_version == "N/A":
return ""
if v8_version.split(".")[3] == "0":
return self.GitGetHashOfTag(v8_version[:-2])
return self.GitGetHashOfTag(v8_version)
def _CreateCandidate(self, current_version):
params = None
url_to_call = (OMAHA_PROXY_URL + "v8.json?version="
+ current_version["previous_version"])
result_raw = self.ReadURL(
url_to_call,
params,
wait_plan=[5, 20]
)
previous_v8_version = json.loads(result_raw)["v8_version"]
v8_previous_version_hash = self._GetGitHashForV8Version(previous_v8_version)
current_v8_version = current_version["v8_version"]
v8_version_hash = self._GetGitHashForV8Version(current_v8_version)
current_candidate = {
"chrome_version": current_version["version"],
"os": current_version["os"],
"release_date": current_version["current_reldate"],
"v8_version": current_v8_version,
"v8_version_hash": v8_version_hash,
"v8_previous_version": previous_v8_version,
"v8_previous_version_hash": v8_previous_version_hash,
}
return current_candidate
class CleanUp(Step):
MESSAGE = "Clean up."
def RunStep(self):
self.CommonCleanup()
class WriteOutput(Step):
MESSAGE = "Print output."
def Run(self):
output = {
"releases": self["releases"],
"chrome_releases": self["chrome_releases"],
}
if self._options.csv:
with open(self._options.csv, "w") as f:
writer = csv.DictWriter(f,
["version", "branch", "revision",
"chromium_revision", "patches_merged"],
restval="",
extrasaction="ignore")
for release in self["releases"]:
writer.writerow(release)
if self._options.json:
with open(self._options.json, "w") as f:
f.write(json.dumps(output))
if not self._options.csv and not self._options.json:
print output # pragma: no cover
class Releases(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-b", "--branch", default="recent",
help=("The branch to analyze. If 'all' is specified, "
"analyze all branches. If 'recent' (default) "
"is specified, track beta, stable and "
"candidates."))
parser.add_argument("-c", "--chromium",
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("--csv", help="Path to a CSV file for export.")
parser.add_argument("-m", "--max-releases", type=int, default=0,
help="The maximum number of releases to track.")
parser.add_argument("--json", help="Path to a JSON file for export.")
def _ProcessOptions(self, options): # pragma: no cover
options.force_readline_defaults = True
return True
def _Config(self):
return {
"BRANCHNAME": "retrieve-v8-releases",
"PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}
def _Steps(self):
return [
Preparation,
RetrieveV8Releases,
UpdateChromiumCheckout,
RetrieveChromiumV8Releases,
RetrieveChromiumBranches,
RetrieveInformationOnChromeReleases,
CleanUp,
WriteOutput,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(Releases().Run())
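# Example invocation (illustrative paths; the flags are the ones defined in
# Releases._PrepareOptions above):
#   python releases.py -c /path/to/chromium/src -b recent --json releases.json
# With --json given, WriteOutput dumps the collected V8 releases and the
# Chrome canary information to that file instead of printing the raw dict.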
| [
"[email protected]"
] | |
bfce55234939a14e100bb118db3fb7d4ddba9508 | 9c5e09b4f048a13961c0f4a1370a7bf01a421d92 | /gym/envs/robotics/fetch_env.py | cb9f04f8d4a2bc9dfb685158d68bd8e3fcbf1435 | [
"MIT"
] | permissive | StanfordVL/Gym | daa8c780f5ace3e33c3bf0f7109f40a0a820d59e | 5e14d19e57d8ba318b97a5edda0ab2ea591dea08 | refs/heads/master | 2023-02-03T02:44:40.185713 | 2020-12-17T14:10:16 | 2020-12-17T14:10:16 | 280,579,514 | 9 | 4 | null | null | null | null | UTF-8 | Python | false | false | 8,359 | py | import numpy as np
from . import rotations, robot_env, utils
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
class FetchEnv(robot_env.RobotEnv):
"""Superclass for all Fetch environments.
"""
def __init__(
self, model_path, n_substeps, gripper_extra_height, block_gripper,
has_object, target_in_the_air, target_offset, obj_range, target_range,
distance_threshold, initial_qpos, reward_type,
):
"""Initializes a new Fetch environment.
Args:
model_path (string): path to the environments XML file
n_substeps (int): number of substeps the simulation runs on every call to step
gripper_extra_height (float): additional height above the table when positioning the gripper
block_gripper (boolean): whether or not the gripper is blocked (i.e. not movable) or not
has_object (boolean): whether or not the environment has an object
target_in_the_air (boolean): whether or not the target should be in the air above the table or on the table surface
target_offset (float or array with 3 elements): offset of the target
obj_range (float): range of a uniform distribution for sampling initial object positions
target_range (float): range of a uniform distribution for sampling a target
distance_threshold (float): the threshold after which a goal is considered achieved
initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
"""
self.gripper_extra_height = gripper_extra_height
self.block_gripper = block_gripper
self.has_object = has_object
self.target_in_the_air = target_in_the_air
self.target_offset = target_offset
self.obj_range = obj_range
self.target_range = target_range
self.distance_threshold = distance_threshold
self.reward_type = reward_type
super(FetchEnv, self).__init__(
model_path=model_path, n_substeps=n_substeps, n_actions=4,
initial_qpos=initial_qpos)
# GoalEnv methods
# ----------------------------
def compute_reward(self, achieved_goal, goal, info):
# Compute distance between goal and the achieved goal.
d = goal_distance(achieved_goal, goal)
if self.reward_type == 'sparse':
return -(d > self.distance_threshold).astype(np.float32)
else:
return -d
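    # Worked example for compute_reward (illustrative numbers): with
    # distance_threshold = 0.05 and goal_distance(achieved_goal, goal) = 0.2,
    # the 'sparse' reward is -1.0 (goal not reached yet) while the 'dense'
    # reward is -0.2; once the distance drops to or below 0.05 the sparse
    # reward becomes 0.0.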
# RobotEnv methods
# ----------------------------
def _step_callback(self):
if self.block_gripper:
self.sim.data.set_joint_qpos('robot0:l_gripper_finger_joint', 0.)
self.sim.data.set_joint_qpos('robot0:r_gripper_finger_joint', 0.)
self.sim.forward()
def _set_action(self, action):
assert action.shape == (4,)
action = action.copy() # ensure that we don't change the action outside of this scope
pos_ctrl, gripper_ctrl = action[:3], action[3]
pos_ctrl *= 0.05 # limit maximum change in position
rot_ctrl = [1., 0., 1., 0.] # fixed rotation of the end effector, expressed as a quaternion
gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
assert gripper_ctrl.shape == (2,)
if self.block_gripper:
gripper_ctrl = np.zeros_like(gripper_ctrl)
action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
# Apply action to simulation.
utils.ctrl_set_action(self.sim, action)
utils.mocap_set_action(self.sim, action)
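    # Illustrative example (treat as a sketch; the exact sign conventions
    # depend on the MuJoCo actuator setup): action = np.array([1., 0., 0., -1.])
    # requests the maximum per-step Cartesian displacement of 0.05 along x
    # (after the 0.05 scaling above), keeps the fixed end-effector rotation,
    # and drives both gripper finger joints with -1.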
def _get_obs(self):
# positions
grip_pos = self.sim.data.get_site_xpos('robot0:grip')
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
grip_velp = self.sim.data.get_site_xvelp('robot0:grip') * dt
robot_qpos, robot_qvel = utils.robot_get_obs(self.sim)
if self.has_object:
object_pos = self.sim.data.get_site_xpos('object0')
# rotations
object_rot = rotations.mat2euler(self.sim.data.get_site_xmat('object0'))
# velocities
object_velp = self.sim.data.get_site_xvelp('object0') * dt
object_velr = self.sim.data.get_site_xvelr('object0') * dt
# gripper state
object_rel_pos = object_pos - grip_pos
object_velp -= grip_velp
else:
object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.zeros(0)
gripper_state = robot_qpos[-2:]
gripper_vel = robot_qvel[-2:] * dt # change to a scalar if the gripper is made symmetric
if not self.has_object:
achieved_goal = grip_pos.copy()
else:
achieved_goal = np.squeeze(object_pos.copy())
obs = np.concatenate([
grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(),
object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel,
])
return {
'observation': obs.copy(),
'achieved_goal': achieved_goal.copy(),
'desired_goal': self.goal.copy(),
}
def _viewer_setup(self):
body_id = self.sim.model.body_name2id('robot0:gripper_link')
lookat = self.sim.data.body_xpos[body_id]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 2.5
self.viewer.cam.azimuth = 132.
self.viewer.cam.elevation = -14.
def _render_callback(self):
# Visualize target.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
site_id = self.sim.model.site_name2id('target0')
self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
self.sim.forward()
def _reset_sim(self):
self.sim.set_state(self.initial_state)
# Randomize start position of object.
if self.has_object:
object_xpos = self.initial_gripper_xpos[:2]
while np.linalg.norm(object_xpos - self.initial_gripper_xpos[:2]) < 0.1:
object_xpos = self.initial_gripper_xpos[:2] + self.np_random.uniform(-self.obj_range, self.obj_range, size=2)
object_qpos = self.sim.data.get_joint_qpos('object0:joint')
assert object_qpos.shape == (7,)
object_qpos[:2] = object_xpos
self.sim.data.set_joint_qpos('object0:joint', object_qpos)
self.sim.forward()
return True
def _sample_goal(self):
if self.has_object:
goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-self.target_range, self.target_range, size=3)
goal += self.target_offset
goal[2] = self.height_offset
if self.target_in_the_air and self.np_random.uniform() < 0.5:
goal[2] += self.np_random.uniform(0, 0.45)
else:
goal = self.initial_gripper_xpos[:3] + self.np_random.uniform(-0.15, 0.15, size=3)
return goal.copy()
def _is_success(self, achieved_goal, desired_goal):
d = goal_distance(achieved_goal, desired_goal)
return (d < self.distance_threshold).astype(np.float32)
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
utils.reset_mocap_welds(self.sim)
self.sim.forward()
# Move end effector into position.
gripper_target = np.array([-0.498, 0.005, -0.431 + self.gripper_extra_height]) + self.sim.data.get_site_xpos('robot0:grip')
gripper_rotation = np.array([1., 0., 1., 0.])
self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
for _ in range(10):
self.sim.step()
# Extract information for sampling goals.
self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
if self.has_object:
self.height_offset = self.sim.data.get_site_xpos('object0')[2]
def render(self, mode='human', width=500, height=500):
return super(FetchEnv, self).render(mode, width, height)
| [
"[email protected]"
] | |
aeedf79574fb645c330f2c9285caa73a7e7cc84a | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200703_python1/day18_py200830/if_ex_2_b.py | 86d87ce269e727558d85bff7d25188b797e7433c | [] | no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | """
ex 2.
write a program to find the smallest number among 3 given numbers
"""
a = 24
b = 15
c = 6
d = 78
max = a
# 1st round
if max < a:
max = a
# 2nd round
if max < b:
max = b
# 3rd round
if max < c:
max = c
# 4th round
if max < d:
max = d
print("The max number is {}".format(max))
#
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
num3 = float(input("Enter third number: "))
min = num1
if num1<min:
min = num1
if num2<min:
min = num2
if num3<min:
min = num3
print("The smallest number is {}".format(min))
| [
"[email protected]"
] | |
655612c5cb4bfd720ce79722d13902b46883a0db | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/distributed/fleet/metrics/metric.py | 12a24292e5a3ad9ea838d9451fdf72e7e846a528 | [
"Apache-2.0"
] | permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 14,010 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fleet Metrics"""
import paddle.fluid as fluid
import math
import numpy as np
from paddle.fluid.framework import Variable
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
def sum(input, scope=None):
"""
distributed sum in fleet
Args:
input(numpy.array|Variable|string): output of a layer
scope(Scope): specific scope
Returns:
global_metric(numpy.array): sum array
Example:
.. code-block:: python
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = fluid.layers.reduce_sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_add(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("sum array: ", paddle.distributed.fleet.sum(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(input, Variable):
input = np.array(scope.find_var(input.name).get_tensor())
elif isinstance(input, str):
input = np.array(scope.find_var(input).get_tensor())
old_shape = np.array(input.shape)
output = np.copy(input) * 0
fleet._role_maker._all_reduce(input, output, mode="sum")
output = output.reshape(old_shape)
return output
def max(input, scope=None):
"""
distributed max in fleet
Args:
input(numpy.array|Variable|string): output of a layer
scope(Scope): specific scope
Returns:
global_metric(numpy.array): max array
Example:
.. code-block:: python
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = fluid.layers.reduce_sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_max(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("max array: ", paddle.distributed.fleet.max(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(input, Variable):
input = np.array(scope.find_var(input.name).get_tensor())
elif isinstance(input, str):
input = np.array(scope.find_var(input).get_tensor())
old_shape = np.array(input.shape)
output = np.copy(input) * 0
fleet._role_maker._all_reduce(input, output, mode="max")
output = output.reshape(old_shape)
return output
def min(input, scope=None):
"""
distributed min in fleet
Args:
input(numpy.array|Variable|string): output of a layer
scope(Scope): specific scope
Returns:
global_metric(numpy.array): min array
Example:
.. code-block:: python
# in model.py
input = fluid.layers.cast(some_input, dtype='float32')
cnt = fluid.layers.reduce_sum(input)
global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp = fluid.layers.elementwise_min(cnt, global_cnt)
fluid.layers.assign(tmp, global_cnt)
# in train.py, after train or infer
res = np.array(scope.find_var(global_cnt.name).get_tensor())
print("min array: ", paddle.distributed.fleet.min(res))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(input, Variable):
input = np.array(scope.find_var(input.name).get_tensor())
elif isinstance(input, str):
input = np.array(scope.find_var(input).get_tensor())
old_shape = np.array(input.shape)
output = np.copy(input) * 0
fleet._role_maker._all_reduce(input, output, mode="min")
output = output.reshape(old_shape)
return output
def auc(stat_pos, stat_neg, scope=None):
"""
distributed auc in fleet
Args:
stat_pos(numpy.array|Variable|string): stat_pos in output of fluid.layers.auc
stat_neg(numpy.array|Variable|string): stat_neg in output of fluid.layers.auc
scope(Scope): specific scope
Returns:
auc_value(float): auc value
Example:
.. code-block:: python
# in model.py
similarity_norm = fluid.layers.sigmoid(fluid.layers.clip(output, min=-15.0, max=15.0))
binary_predict = fluid.layers.concat(
input=[fluid.layers.elementwise_sub(fluid.layers.ceil(similarity_norm), similarity_norm), similarity_norm], axis=1)
self.auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg] =
fluid.layers.auc(input=binary_predict, label=label, curve='ROC', num_thresholds=4096)
# in train.py, after train or infer
pos = np.array(scope.find_var(stat_pos.name).get_tensor())
neg = np.array(scope.find_var(stat_neg.name).get_tensor())
print("auc: ", paddle.distributed.fleet.auc(pos, neg))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(stat_pos, Variable):
stat_pos = np.array(scope.find_var(stat_pos.name).get_tensor())
elif isinstance(stat_pos, str):
stat_pos = np.array(scope.find_var(stat_pos).get_tensor())
if isinstance(stat_neg, Variable):
stat_neg = np.array(scope.find_var(stat_neg.name).get_tensor())
elif isinstance(stat_neg, str):
stat_neg = np.array(scope.find_var(stat_neg).get_tensor())
# auc pos bucket shape
old_pos_shape = np.array(stat_pos.shape)
# reshape to one dim
stat_pos = stat_pos.reshape(-1)
global_pos = np.copy(stat_pos) * 0
# mpi allreduce
fleet._role_maker._all_reduce(stat_pos, global_pos)
# reshape to its original shape
global_pos = global_pos.reshape(old_pos_shape)
# auc neg bucket
old_neg_shape = np.array(stat_neg.shape)
stat_neg = stat_neg.reshape(-1)
global_neg = np.copy(stat_neg) * 0
fleet._role_maker._all_reduce(stat_neg, global_neg)
global_neg = global_neg.reshape(old_neg_shape)
# calculate auc
num_bucket = len(global_pos[0])
area = 0.0
pos = 0.0
neg = 0.0
new_pos = 0.0
new_neg = 0.0
total_ins_num = 0
for i in range(num_bucket):
index = num_bucket - 1 - i
new_pos = pos + global_pos[0][index]
total_ins_num += global_pos[0][index]
new_neg = neg + global_neg[0][index]
total_ins_num += global_neg[0][index]
area += (new_neg - neg) * (pos + new_pos) / 2
pos = new_pos
neg = new_neg
auc_value = None
if pos * neg == 0 or total_ins_num == 0:
auc_value = 0.5
else:
auc_value = area / (pos * neg)
fleet._role_maker._barrier_worker()
return auc_value
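# Worked example of the bucket arithmetic above (illustrative): with
# num_bucket = 2, global_pos = [[1, 1]] and global_neg = [[1, 1]], the loop
# accumulates area = (1-0)*(0+1)/2 + (2-1)*(1+2)/2 = 2.0 with pos = neg = 2,
# so auc_value = 2.0 / (2 * 2) = 0.5, i.e. an uninformative score
# distribution comes out at 0.5 as expected.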
def mae(abserr, total_ins_num, scope=None):
"""
distributed mae in fleet
Args:
abserr(numpy.array|Variable|string): abserr in output of fluid.contrib.layers.ctr_metric_bundle
total_ins_num(int|float): total train/infer instance count
scope(Scope): specific scope
Returns:
mae(float): mae value
Example:
.. code-block:: python
# in model.py
sqrerr, abserr, prob, q, pos, total = fluid.contrib.layers.ctr_metric_bundle(similarity_norm, fluid.layers.cast(x=label, dtype='float32'))
# in train.py, after train or infer
res = np.array(scope.find_var(abserr.name).get_tensor())
print("mae: ", paddle.distributed.fleet.mae(res, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(abserr, Variable):
abserr = np.array(scope.find_var(abserr.name).get_tensor())
elif isinstance(abserr, str):
abserr = np.array(scope.find_var(abserr).get_tensor())
old_metric_shape = np.array(abserr.shape)
abserr = abserr.reshape(-1)
global_metric = np.copy(abserr) * 0
fleet._role_maker._all_reduce(abserr, global_metric)
global_metric = global_metric.reshape(old_metric_shape)
mae_value = global_metric[0] / total_ins_num
return mae_value
def rmse(sqrerr, total_ins_num, scope=None):
"""
distributed rmse in fleet
Args:
sqrerr(numpy.array|Variable|string): sqrerr in output of fluid.contrib.layers.ctr_metric_bundle
total_ins_num(int|float): total train/infer instance count
scope(Scope): specific scope
Returns:
rmse(float): rmse value
Example:
.. code-block:: python
# in model.py
sqrerr, abserr, prob, q, pos, total = fluid.contrib.layers.ctr_metric_bundle(similarity_norm, fluid.layers.cast(x=label, dtype='float32'))
# in train.py, after train or infer
res = np.array(scope.find_var(sqrerr.name).get_tensor())
print("rmse: ", paddle.distributed.fleet.rmse(res, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(sqrerr, Variable):
sqrerr = np.array(scope.find_var(sqrerr.name).get_tensor())
elif isinstance(sqrerr, str):
sqrerr = np.array(scope.find_var(sqrerr).get_tensor())
old_metric_shape = np.array(sqrerr.shape)
sqrerr = sqrerr.reshape(-1)
global_metric = np.copy(sqrerr) * 0
fleet._role_maker._all_reduce(sqrerr, global_metric)
global_metric = global_metric.reshape(old_metric_shape)
rmse_value = math.sqrt(global_metric[0] / total_ins_num)
return rmse_value
def mse(sqrerr, total_ins_num, scope=None):
"""
distributed mse in fleet
Args:
sqrerr(numpy.array|Variable|string): sqrerr in output of fluid.contrib.layers.ctr_metric_bundle
total_ins_num(int|float): total train/infer instance count
scope(Scope): specific scope
Returns:
mse(float): mse value
Example:
.. code-block:: python
# in model.py
sqrerr, abserr, prob, q, pos, total = fluid.contrib.layers.ctr_metric_bundle(similarity_norm, fluid.layers.cast(x=label, dtype='float32'))
# in train.py, after train or infer
metric = np.array(scope.find_var(sqrerr.name).get_tensor())
print("mse: ", paddle.distributed.fleet.mse(metric, total_ins_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(sqrerr, Variable):
sqrerr = np.array(scope.find_var(sqrerr.name).get_tensor())
elif isinstance(sqrerr, str):
sqrerr = np.array(scope.find_var(sqrerr).get_tensor())
old_metric_shape = np.array(sqrerr.shape)
sqrerr = sqrerr.reshape(-1)
global_metric = np.copy(sqrerr) * 0
fleet._role_maker._all_reduce(sqrerr, global_metric)
global_metric = global_metric.reshape(old_metric_shape)
mse_value = global_metric[0] / total_ins_num
return mse_value
def acc(correct, total, scope=None):
"""
distributed accuracy in fleet
Args:
correct(numpy.array|Variable|string): correct Variable
total(numpy.array|Variable): total Variable
scope(Scope): specific scope
Returns:
acc(float): accuracy value
Example:
.. code-block:: python
# in model.py
correct = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
total = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
acc = fluid.layers.acc(predict, label, k=1, correct=correct, total=total)
global_correct = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp1 = fluid.layers.elementwise_min(correct, global_correct)
fluid.layers.assign(tmp1, global_correct)
global_total = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
tmp2 = fluid.layers.elementwise_min(total, global_total)
fluid.layers.assign(tmp2, global_total)
# in train.py, after train or infer
correct_num = np.array(scope.find_var(correct.name).get_tensor())
total_num = np.array(scope.find_var(total.name).get_tensor())
print("accuracy: ", paddle.distributed.fleet.acc(correct_num, total_num))
"""
fleet._role_maker._barrier_worker()
if scope is None:
scope = fluid.global_scope()
if isinstance(correct, Variable):
correct = np.array(scope.find_var(correct.name).get_tensor())
elif isinstance(correct, str):
correct = np.array(scope.find_var(correct).get_tensor())
if isinstance(total, Variable):
total = np.array(scope.find_var(total.name).get_tensor())
elif isinstance(total, str):
total = np.array(scope.find_var(total).get_tensor())
global_correct_num = np.copy(correct) * 0
global_total_num = np.copy(total) * 0
fleet._role_maker._all_reduce(correct, global_correct_num)
fleet._role_maker._all_reduce(total, global_total_num)
return float(global_correct_num[0]) / float(global_total_num[0])
| [
"[email protected]"
] | |
9e274569e9a69cde12005a4f9cbda430d664a352 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/Trigger/TrigHypothesis/TrigBphysHypo/python/TrigMultiTrkFexConfig.py | 7ff7c6851f577e90bd81bf44b82f5324d00b7fa9 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,996 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
# include the python fragment to set up the default bphysics vertex fitter
from TrigBphysHypo import TrigBphysVertexingConfig
from TrigBphysHypo.TrigBphysHypoConf import TrigMultiTrkFex
from AthenaCommon.AppMgr import ToolSvc
from TrigTimeMonitor.TrigTimeHistToolConfig import TrigTimeHistToolConfig
from TrigBphysHypo.TrigMultiTrkFexMonitoring import TrigMultiTrkFexValidationMonitoring
from TrigBphysHypo.TrigMultiTrkFexMonitoring import TrigMultiTrkFexOnlineMonitoring
class TrigMultiTrkFexPy (TrigMultiTrkFex):
__slots__ = []
def __init__(self, name = "MultiTrkFexPy"):
super( TrigMultiTrkFexPy, self ).__init__( name )
# AcceptAll flag: if true take events regardless of cuts
self.AcceptAll = False
def setTrackThresholds(self, thresholds) :
self.ptTrkMin = [] # reset, use thresholds from trigger name
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append( thr )
while len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append(900.)
def setNumberAndTrackThresholds(self, nTrk, thresholds) :
self.ptTrkMin = [] # reset, use thresholds from trigger name
self.nTrk = nTrk
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append( thr )
while len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append(900.)
def setEFMuonThresholds(self, thresholds) :
self.ptMuonMin = [] # reset, use thresholds from trigger name
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptMuonMin) < self.nEfMuon :
self.ptMuonMin.append( thr )
while len(self.ptMuonMin) < self.nEfMuon :
self.ptMuonMin.append(2000.) # lower pt cut makes no sense at trigger
def setElectronTrackThresholds(self, thresholds) :
self.ptTrkMin = [] # reset, use thresholds from trigger name
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append( thr )
while len(self.ptTrkMin) < self.nTrk :
self.ptTrkMin.append(4500.)
def setL2CombMuonThresholds(self, thresholds) :
self.ptMuonMin = [] # reset, use thresholds from trigger name
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptMuonMin) < self.nL2CombMuon :
self.ptMuonMin.append( thr )
while len(self.ptMuonMin) < self.nL2CombMuon :
self.ptMuonMin.append(2000.) # lower pt cut makes no sense at trigger
def setL2SAMuonThresholds(self, thresholds) :
# in case of L2SA limits, take only 1 muon with lowest threshold
self.ptMuonMin = [] # reset, use thresholds from trigger name
for thr in sorted(thresholds) : # sorted() puts the lowest pt threshold first, which is what we want
if len(self.ptMuonMin) < self.nL2SAMuon :
self.ptMuonMin.append( thr )
while len(self.ptMuonMin) < self.nL2SAMuon :
self.ptMuonMin.append(2000.) # lower pt cut makes no sense at trigger
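    # Worked example of the helpers above (illustrative, not a real menu entry):
    # for a 3-track chain, setNumberAndTrackThresholds(3, [6000., 4000.]) sorts
    # the thresholds ascending and pads with the 900 MeV default, leaving
    # nTrk = 3 and ptTrkMin = [4000., 6000., 900.].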
######################################
# trkPhi selects 2 OS tracks pt>X,X GeV with M in [0.840,1.240] GeV
######################################
class TrigMultiTrkFex_trkPhi (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_trkPhi"):
super( TrigMultiTrkFex_trkPhi, self ).__init__( name )
#self.trackCollectionKey = "'
self.nTrk = 2
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkCharge = 0
self.nTrkMassMin = [840.]
self.nTrkMassMax = [1240.]
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for Phi+Pi
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = -1
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
###################################################################################
# trkPhiX selects 3 tracks pt>X,X,1 GeV M<2.9GeV and OS pair, M in [0.840,1.240] GeV
#####################################################################################
class TrigMultiTrkFex_trkPhiX (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_trkPhiX"):
super( TrigMultiTrkFex_trkPhiX, self ).__init__( name )
#self.trackCollectionKey = "'
self.nTrk = 3
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkCharge = 1
self.ptTrkMin = [3600., 3600., 1000. ] # set minimal pt of tracks for Phi+Pi
self.diTrkMassMin = [840.] # phi window
self.diTrkMassMax = [1240.]
self.diTrkCharge = 0
self.nTrkMassMin = [0.]
self.nTrkMassMax = [2900.] # cut away J/psi
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
###################################################################################
# trkPhiXTight selects 3 tracks pt>X,X,1 GeV M [1.5,2.5]GeV and OS pair, M in [0.840,1.240] GeV
#####################################################################################
class TrigMultiTrkFex_trkPhiXTight (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_trkPhiXTight"):
super( TrigMultiTrkFex_trkPhiXTight, self ).__init__( name )
self.nTrk = 3
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkCharge = 1
self.nTrkMassMin = [1500.]
self.nTrkMassMax = [2500.]
self.ptTrkMin = [3600., 3600., 1000. ] # set minimal pt of tracks for Phi+Pi
self.diTrkMassMin = [840.] # phi window
self.diTrkMassMax = [1240.]
self.diTrkCharge = 0
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
###################################################################################
class TrigMultiTrkFex_trkTau (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_trkTau"):
super( TrigMultiTrkFex_trkTau, self ).__init__( name )
# AcceptAll flag: if true take events regardless of cuts
self.AcceptAll = False
#self.trackCollectionKey = "'
self.maxNOutputObject = -1
self.trkMass = 105.6583745 # looking for di-muon
self.nTrk = 2
self.nTrkMassMin = [0.]
self.nTrkMassMax = [2900.] # cut away J/psi
self.nTrkCharge = -1
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for Phi+Pi
self.diTrkMassMin = [] # no sub-resonances
self.diTrkMassMax = []
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0
self.ptMuonMin = [] #3600., 3600.]
self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
###################################################################################
class TrigMultiTrkFex_bNmu (TrigMultiTrkFexPy):
__slots__ = []
# lets force name setting, as it needs to match pt cuts
def __init__(self, name, ptMuonMin ):
super( TrigMultiTrkFex_bNmu, self ).__init__( name )
# AcceptAll flag: if true take events regardless of cuts
self.AcceptAll = False
#self.trackCollectionKey = "'
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrk = -1 # no cut
self.nTrkMassMin = []
self.nTrkMassMax = [] # cut away J/psi
self.nTrkCharge = -1
self.ptTrkMin = [] # set minimal pt of tracks for Phi+Pi
self.diTrkMassMin = [] # no sub-resonances
self.diTrkMassMax = []
# muons are not matched to tracks, but still could be required to be present in TE
self.nL2CombMuon = 0
self.nL2SAMuon = 0
# these are 2 cuts that matters. Set to the softest
self.nEfMuon = len(ptMuonMin)
self.ptMuonMin = []
for thr in ptMuonMin :
self.ptMuonMin.append(thr)
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
###################################################################################
# B+ -> mu mu K+
# Bd -> mu mu K*(K+ Pi-)
# Bs -> mu mu Phi(K+ K-)
# Lambda_b -> mu mu Lambda(P Pi)
# Bc -> mu mu Ds(Phi pi)
# Bc -> mu mu D+(K pi pi)
# Bc -> mu mu D*(D0 pi), D0 -> K- pi+
# Bc -> mu mu D0(K pi)
# BcD selects following channels
# - B_c+ -> J/psi D_s+, D_s+ -> phi pi+, phi -> K+ K-
# - B_c+ -> J/psi D+, D_+ -> K- pi+ pi+
# - B_c+ -> J/psi D*+, D*+ -> D0 pi+_s, D0 -> K- pi+
# - the same decay without reconstruction of pi+_s
#####################################################################################
class TrigMultiTrkFex_B_2mu1trk (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_B_2mu1trk"):
super( TrigMultiTrkFex_B_2mu1trk, self ).__init__( name )
#self.trackCollectionKey = "'
# disable any parameter by setting it to -1 or giving an empty list
self.trkMass = 139.57018 # take pion mass for all tracks, adjust mass accordingly, if you need Kaons
self.nTrk = 3
self.nTrkCharge = 1
self.nTrkVertexChi2 = 20
self.ptTrkMin = [1000., 1000., 1000. ] # set minimal pt of tracks; first 2 thresholds will be replaced by muon thresholds in the menu
self.diTrkMassMin = [100.] # di-muon
self.diTrkMassMax = [5500.] #
self.diTrkCharge = 0 # set to -1 to disable
self.nTrkMassMin = [4040] # lower as we replaced Kaon with pion mass
self.nTrkMassMax = [5440.] # default cut is
# muons are not matched to tracks, but still could be required to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
#####################################################################################
# K*mumu
class TrigMultiTrkFex_B_2mu2trk (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_B_2mu2trk"):
super( TrigMultiTrkFex_B_2mu2trk, self ).__init__( name )
#self.trackCollectionKey = "'
# disable any parameter by setting it to -1 or giving an empty list
self.trkMass = 139.57018 # take pion mass for all tracks, adjust mass accordingly, if you need Kaons
self.nTrk = 4
self.nTrkCharge = 0
self.nTrkVertexChi2 = 60
self.ptTrkMin = [1000., 1000., 1000. ] # set minimal pt of tracks; first 2 thresholds will be replaced by muon thresholds in the menu
#self.diTrkMassMin = [100., 359] # di-muon , K* (600-1500) using pion hypo
#self.diTrkMassMax = [5500., 1421.] #
self.diTrkMassMin = [100., 300] # di-muon , K* (600-1500) using pion hypo
self.diTrkMassMax = [5500., 1400.] #
self.diTrkCharge = 0 # set to -1 to disable
self.nTrkMassMin = [4500] # 4600-6300 if we would use Kaon mass
self.nTrkMassMax = [6280.] #
# muons are not matched to tracks, but still could be required to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
#############################################################################
class TrigMultiTrkFex_DiMu (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_DiMu"):
super( TrigMultiTrkFex_DiMu, self ).__init__( name )
self.nTrk = 2
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkVertexChi2 = 20
self.nTrkCharge = 0
self.nTrkMassMin = [100.]
self.nTrkMassMax = [15000.]
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for 2mu passing L1
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = -1
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
class TrigMultiTrkFex_DiMu_noCut (TrigMultiTrkFex_DiMu):
__slots__ = []
def __init__(self, name = "MultiTrkFex_DiMu_noCut"):
super( TrigMultiTrkFex_DiMu_noCut, self ).__init__( name )
self.nTrkCharge = -1
self.nTrkVertexChi2 = -1
self.nTrkMassMin = [0.]
self.nTrkMassMax = [1e+8] # should be safe at LHC, no?
class TrigMultiTrkFex_DiMu_noVtx_noOS (TrigMultiTrkFex_DiMu):
__slots__ = []
def __init__(self, name = "MultiTrkFex_DiMu_noVtx_noOS"):
super( TrigMultiTrkFex_DiMu_noVtx_noOS, self ).__init__( name )
self.nTrkCharge = -1
self.nTrkVertexChi2 = -1
class TrigMultiTrkFex_DiMu_noVtx_noM_SS (TrigMultiTrkFex_DiMu):
__slots__ = []
def __init__(self, name = "MultiTrkFex_DiMu_noVtx_noM_SS"):
super( TrigMultiTrkFex_DiMu_noVtx_noM_SS, self ).__init__( name )
self.nTrkCharge = 2
self.nTrkVertexChi2 = -1
self.nTrkMassMin = [0.] # OI not sure if this will work...
self.nTrkMassMax = [1e+8] # should be safe at LHC, no?
#############################################################################
class TrigMultiTrkFex_Vtx2 (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "TrigMultiTrkFex_Vtx2"):
super( TrigMultiTrkFex_Vtx2, self ).__init__( name )
self.nTrk = 2
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkVertexChi2 = 100 # very loose here, tighter at Hypo
self.nTrkCharge = -1
self.nTrkMassMin = [0.]
self.nTrkMassMax = [1e+8] # should be safe at LHC, no?
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for 2mu passing L1
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = -1
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
class TrigMultiTrkFex_Vtx3 (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "TrigMultiTrkFex_Vtx3"):
super( TrigMultiTrkFex_Vtx3, self ).__init__( name )
self.nTrk = 3
self.ptTrkMin = [3600., 3600., 3600. ] # set minimal pt of tracks for 3mu passing L1
#############################################################################
class TrigMultiTrkFex_Jpsi (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_Jpsi"):
super( TrigMultiTrkFex_Jpsi, self ).__init__( name )
self.nTrk = 2
self.trkMass = 105.6583745 # looking for di-muon resonances
self.nTrkVertexChi2 = 20
self.nTrkCharge = 0
self.nTrkMassMin = [2600.]
self.nTrkMassMax = [3600.]
self.ptTrkMin = [3500., 3500. ] # set minimal pt of tracks for 2mu passing L1
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = -1
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
class TrigMultiTrkFex_EMu (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_EMu"):
super( TrigMultiTrkFex_EMu, self ).__init__( name )
self.nTrk = 2
self.trkMass = 0.5 # looking for electron-muon resonances ; ignore muon mass
self.nTrkVertexChi2 = 20
self.nTrkCharge = 0
self.nTrkMassMin = [100.]
self.nTrkMassMax = [7000.]
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for 2mu passing L1
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = 0
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
#self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
#############################################################################
class TrigMultiTrkFex_DiE (TrigMultiTrkFexPy):
__slots__ = []
def __init__(self, name = "MultiTrkFex_DiE"):
super( TrigMultiTrkFex_DiE, self ).__init__( name )
self.nTrk = 2
self.trkMass = 0.511 # looking for di-electron resonances
self.nTrkVertexChi2 = 20 # set twice worse than for muons
self.nTrkCharge = 0
self.nTrkMassMin = [10.]
self.nTrkMassMax = [9000.]
self.ptTrkMin = [3600., 3600. ] # set minimal pt of tracks for 2mu passing L1 - changed in generateBphysicsChainDefs
self.diTrkMassMin = [] # phi window
self.diTrkMassMax = []
self.diTrkCharge = -1
self.outputTrackCollectionKey = "MultiTrkFex_DiE"
self.bphysCollectionKey = "MultiTrkFex_DiE"
self.maxNOutputObject = 10
# muons are not matched to tracks, but still require to be present in TE
self.nEfMuon = 0
self.nL2CombMuon = 0
self.nL2SAMuon = 0 # as we run on muon RoIs all necessary muons are already requested.
self.ptMuonMin = [] #[3600.]
self.overlapdR = 0.005
time = TrigTimeHistToolConfig("Time")
validation = TrigMultiTrkFexValidationMonitoring()
online = TrigMultiTrkFexOnlineMonitoring()
self.AthenaMonTools = [ validation, online, time ]
| [
"[email protected]"
] | |
e5a530fc5cf0c4e945782e013924a8ad746bce15 | 933376c11498a6567da8d7eb7d2675100895c3ba | /pyzoo/zoo/chronos/examples/auto_model/autoprophet_nyc_taxi.py | db4282b6e4d0bc5f4e012b570aa603848f11ebcc | [
"Apache-2.0"
] | permissive | intel-analytics/analytics-zoo | 320a461765f86d41dd456b598b1cf1d51d57f4c4 | 7cc3e2849057d6429d03b1af0db13caae57960a5 | refs/heads/master | 2023-08-13T20:47:58.621714 | 2023-07-06T00:49:11 | 2023-07-06T00:49:11 | 90,328,920 | 3,104 | 996 | Apache-2.0 | 2023-09-06T01:51:18 | 2017-05-05T02:27:30 | Jupyter Notebook | UTF-8 | Python | false | false | 3,730 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pandas as pd
import argparse
from zoo.chronos.forecaster.prophet_forecaster import ProphetForecaster
from zoo.chronos.autots.model.auto_prophet import AutoProphet
from zoo.orca.common import init_orca_context, stop_orca_context
def get_data(args):
dataset = args.datadir if args.datadir else args.url
df = pd.read_csv(dataset, parse_dates=[0])
return df
if __name__ == '__main__':
# arguments
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers', type=int, default=2,
help="The number of nodes to be used in the cluster. "
"You can change it depending on your own cluster setting.")
parser.add_argument('--cluster_mode', type=str, default='local',
help="The mode for the Spark cluster.")
parser.add_argument('--cores', type=int, default=4,
help="The number of cpu cores you want to use on each node."
"You can change it depending on your own cluster setting.")
parser.add_argument('--memory', type=str, default="10g",
help="The memory you want to use on each node."
"You can change it depending on your own cluster setting.")
parser.add_argument('--cpus_per_trial', type=int, default=1,
help="Int. Number of cpus for each trial")
parser.add_argument('--n_sampling', type=int, default=20,
help="Number of times to sample from the search_space.")
parser.add_argument('--datadir', type=str,
help="Use local csv file by default.")
parser.add_argument('--url', type=str, default="https://raw.githubusercontent.com/numenta/NAB"
"/v1.0/data/realKnownCause/nyc_taxi.csv",
help="Download link of dataset.")
args = parser.parse_args()
# data prepare
df = get_data(args)
df = df.rename(columns={'timestamp': 'ds', 'value': 'y'})
# train/test split
end_date = '2015-1-28' # split at 1-28, which keeps the last 3 days as the test horizon
df_train = df[df['ds'] <= end_date]
df_test = df[df['ds'] > end_date]
# use prophet forecaster
prophet = ProphetForecaster()
prophet.fit(df_train, validation_data=df_test)
# use autoprophet for HPO
num_nodes = 1 if args.cluster_mode == "local" else args.num_workers
init_orca_context(cluster_mode=args.cluster_mode, cores=args.cores,
memory=args.memory, num_nodes=num_nodes, init_ray_on_spark=True)
autoprophet = AutoProphet(cpus_per_trial=args.cpus_per_trial)
autoprophet.fit(df_train, n_sampling=args.n_sampling)
stop_orca_context()
# evaluate
auto_searched_mse = autoprophet.evaluate(df_test, metrics=['mse'])[0]
nonauto_searched_mse = prophet.evaluate(df_test, metrics=['mse'])[0]
print("Autoprophet improve the mse by",
str(((nonauto_searched_mse - auto_searched_mse)/nonauto_searched_mse)*100), '%')
print("auto_searched_mse:", auto_searched_mse)
print("nonauto_searched_mse:", nonauto_searched_mse)
| [
"[email protected]"
] | |
a0a3b9b675a2be13b66e294e41e133e675115ea0 | a424742e3e784c33625bf29295483469d2a8962a | /eval.py | 0056f8bcb5514f1b0642a7c179a71a5194665ab7 | [] | no_license | eeyrw/PyTorch_YOLOv2 | 82b3b8bf6c9562a43367f7cfcbdea8954c685dd5 | 4ccf8fb8a61d484e70af611f021013fe53133178 | refs/heads/main | 2023-04-27T03:20:27.680365 | 2021-05-20T12:28:14 | 2021-05-20T12:28:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,541 | py | import torch
import torch.nn as nn
from data import *
import argparse
from utils.vocapi_evaluator import VOCAPIEvaluator
from utils.cocoapi_evaluator import COCOAPIEvaluator
parser = argparse.ArgumentParser(description='YOLOv2 Detector Evaluation')
parser.add_argument('-v', '--version', default='yolov2',
help='yolov2.')
parser.add_argument('-d', '--dataset', default='voc',
help='voc, coco-val, coco-test.')
parser.add_argument('--trained_model', type=str,
default='weights/yolov2/',
help='Trained state_dict file path to open')
parser.add_argument('-size', '--input_size', default=416, type=int,
help='input_size')
parser.add_argument('--cuda', action='store_true', default=False,
help='Use cuda')
args = parser.parse_args()
def voc_test(model, device, input_size):
evaluator = VOCAPIEvaluator(data_root=VOC_ROOT,
img_size=input_size,
device=device,
transform=BaseTransform(input_size),
labelmap=VOC_CLASSES,
display=True
)
# VOC evaluation
evaluator.evaluate(model)
def coco_test(model, device, input_size, test=False):
if test:
# test-dev
print('test on test-dev 2017')
evaluator = COCOAPIEvaluator(
data_dir=coco_root,
img_size=input_size,
device=device,
testset=True,
transform=BaseTransform(input_size)
)
else:
# eval
evaluator = COCOAPIEvaluator(
data_dir=coco_root,
img_size=input_size,
device=device,
testset=False,
transform=BaseTransform(input_size)
)
# COCO evaluation
evaluator.evaluate(model)
if __name__ == '__main__':
# dataset
if args.dataset == 'voc':
print('eval on voc ...')
num_classes = 20
elif args.dataset == 'coco-val':
print('eval on coco-val ...')
num_classes = 80
elif args.dataset == 'coco-test':
print('eval on coco-test-dev ...')
num_classes = 80
else:
print('unknow dataset !! we only support voc, coco-val, coco-test !!!')
exit(0)
# cuda
if args.cuda:
print('use cuda')
torch.backends.cudnn.benchmark = True
device = torch.device("cuda")
else:
device = torch.device("cpu")
# input size
input_size = args.input_size
# load net
if args.version == 'yolov2':
from models.yolov2 import YOLOv2
anchor_size = ANCHOR_SIZE if args.dataset == 'voc' else ANCHOR_SIZE_COCO
net = YOLOv2(device, input_size=input_size, num_classes=num_classes, anchor_size=anchor_size)
# load net
net.load_state_dict(torch.load(args.trained_model, map_location=device)) # map to the selected device so CPU-only evaluation also works
net.eval()
print('Finished loading model!')
net = net.to(device)
# evaluation
with torch.no_grad():
if args.dataset == 'voc':
voc_test(net, device, input_size)
elif args.dataset == 'coco-val':
coco_test(net, device, input_size, test=False)
elif args.dataset == 'coco-test':
coco_test(net, device, input_size, test=True)
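# Example invocation (illustrative; the weights file name is a placeholder):
#   python eval.py -v yolov2 -d voc --trained_model weights/yolov2/yolov2_final.pth -size 416 --cuda
# Drop --cuda to evaluate on CPU.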
| [
"[email protected]"
] | |
1572e5579606a9957144da853c20f0c8c39c58bf | 520fcbe076fb1e04187512ddd33802b5c30b2f1a | /blender/nodes/converter/seratate_vector.py | 2fd1c13aae88a4b65d1eb496d93390704478ad49 | [
"MIT"
] | permissive | LewisOrton/taichi_elements_houdini | c604fa85c662369ee3db94224c5e0166482b0512 | 50ef3232f080030213bcb7578a48d03647a9445b | refs/heads/master | 2022-04-24T08:11:42.173539 | 2020-04-25T08:12:29 | 2020-04-25T08:12:29 | 259,212,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | from .. import base
import bpy # needed below for bpy.context.scene
def get_out_value_x(socket):
node = socket.node
vectors = node.inputs['Vector'].get_value()
x = node.outputs['X']
# scene
scn = bpy.context.scene
key = '{0}.{1}'.format(node.name, x.name)
res = []
for vector in vectors:
res.append(vector[0])
scn.elements_sockets[key] = res
def get_out_value_y(socket):
node = socket.node
vectors = node.inputs['Vector'].get_value()
y = node.outputs['Y']
# scene
scn = bpy.context.scene
key = '{0}.{1}'.format(node.name, y.name)
res = []
for vector in vectors:
res.append(vector[1])
scn.elements_sockets[key] = res
def get_out_value_z(socket):
node = socket.node
vectors = node.inputs['Vector'].get_value()
z = node.outputs['Z']
# scene
scn = bpy.context.scene
key = '{0}.{1}'.format(node.name, z.name)
res = []
for vector in vectors:
res.append(vector[2])
scn.elements_sockets[key] = res
class ElementsSeparateVectorNode(base.BaseNode):
bl_idname = 'elements_separate_vector_node'
bl_label = 'Separate Vector'
category = base.CONVERTER
get_value = {
'X': get_out_value_x,
'Y': get_out_value_y,
'Z': get_out_value_z
}
def init(self, context):
# x, y, z outputs
x = self.outputs.new('elements_float_socket', 'X')
x.text = 'X'
x.hide_value = True
y = self.outputs.new('elements_float_socket', 'Y')
y.text = 'Y'
y.hide_value = True
z = self.outputs.new('elements_float_socket', 'Z')
z.text = 'Z'
z.hide_value = True
# input vector
vector_in = self.inputs.new('elements_vector_socket', 'Vector')
vector_in.text = ''
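# Note on the get_value mapping above (an assumption about the BaseNode
# contract, not verified here): when the 'X', 'Y' or 'Z' output socket is
# evaluated, the matching get_out_value_* helper is expected to be called and
# to cache the list of per-vector components in
# bpy.context.scene.elements_sockets under the key '<node name>.<socket name>'.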
| [
"[email protected]"
] | |
eed3afbb97e48fae80a14a977c246d46fc89030a | 0c469c4100fe9d352e83731688e388062a3c55c7 | /Binary_Search/374. Guess Number Higher or Lower.py | 48760947821ff471c3bde8720687f3f3cb131280 | [] | no_license | asperaa/back_to_grind | 9e055c7e6561384e5b7ae52f01063e4beb34a298 | 5ea1976b9d5c6d04800e296e45e8ff90fdde5001 | refs/heads/master | 2022-12-16T18:32:01.443743 | 2020-09-05T13:29:39 | 2020-09-05T13:29:39 | 254,910,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | """We are the captains of our ships, and we stay 'till the end. We see our stories through.
"""
"""374. Guess Number Higher or Lower
"""
def guess(num):
pass
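# Note: on LeetCode the guess(num) API above is supplied by the judge (it reports
# whether the picked number is equal to, higher than, or lower than num); the
# empty stub here is only a placeholder so this file can be run on its own.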
class Solution:
def guessNumber(self, n):
left, right = 0, n
while left <= right:
mid = left + (right - left) // 2
result = guess(mid)
if result == 0:
return mid
elif result > 0:
left = mid + 1
else:
right = mid - 1
return -1 | [
"[email protected]"
] | |
2d8f1205dd887ce96442a59c6b67c95c73400344 | 70b5c70d67dfed37c0317c605821c747516a3b13 | /browser/sarafi/views.py | 618372a1c7abb02e788df944d7bf05c7d9389ac6 | [] | no_license | jaxman020/browser | ff153106ba12d0e74daa6cec388e283914e1cd41 | fff8dff28a6437f8bde739e2bd91f86ad767d1e6 | refs/heads/master | 2021-01-10T10:36:12.154569 | 2015-12-15T16:18:09 | 2015-12-15T16:18:09 | 47,606,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | from django.shortcuts import render
def sarafi(request):
context = {'message':'Django 很棒'}
return render(request, 'sarafi/sarafi.html', context) | [
"m516@m516"
] | m516@m516 |
32874edb86c1dd5846853da3cc4de69aabcc9f6a | 8d24ade82e14cca109da4b535ba08e77dc29b6ae | /Ex_DSA/ex_cap3a.py | c1ff816913f8375be3c5855f59dba0fe857c9953 | [
"MIT"
] | permissive | LuanGermano/DSAexercicios | 882f6ece355723d101cd5a7b1a39db58857ffa14 | 2de12be75fda2d113f302e8f1272e00e2d8622af | refs/heads/main | 2023-07-23T12:43:13.898367 | 2021-08-30T01:42:04 | 2021-08-30T01:42:04 | 397,451,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,943 | py | # Exercício 1 - Crie uma estrutura que pergunte ao usuário qual o dia da semana. Se o dia for igual a Domingo ou
# igual a sábado, imprima na tela "Hoje é dia de descanso", caso contrário imprima na tela "Você precisa trabalhar!"
"""
dia = str(input("Qual dia da semana é hoje? ")).lower()
if dia == "sabado" or dia == "domingo":
print('Hoje é dia de descanso')
else:
print('Voce precisa trabalhar!')"""
# Exercise 2 - Create a list of 5 fruits and check whether the fruit 'Morango' (strawberry) is in the list
cont3 = 0
lista1 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'goiaba']
lista2 = ['Abacaxi', 'Laranja', 'Pera', 'maçã', 'morango']
for i in lista2:
if i.lower() == "morango":
cont3 += 1
if cont3 == 0:
print('Não tem morango!')
else:
print('Existe morango na lista')
# Exercise 3 - Create a tuple with 4 elements, multiply each element of the tuple by 2 and store the results in a list
tupla = (1,2,3,4)
lista3 = []
for i in tupla:
lista3.append(i*2)
print(lista3)
# Exercise 4 - Create a sequence of even numbers between 100 and 150 and print it on the screen
for c in range(100,151,2):
if c == 150:
print(c)
else:
print(c, end=', ')
print()
# Exercise 5 - Create a variable called temperatura and assign it the value 40. While temperatura is greater than 35,
# print the temperatures on the screen
temperatura = 40
while temperatura > 35:
print(temperatura, end=', ')
temperatura -= 1
print()
# Exercise 6 - Create a variable contador = 0. While the counter is less than 100, print the values on the screen,
# but when the value 23 is reached, stop the execution of the program
contador = 0
while contador < 100:
print(contador, end=', ')
contador += 1
if contador == 23:
break
print()
# Exercise 7 - Create an empty list and a variable with value 4. While the value of the variable is less than or equal to 20,
# append only the even values to the list, then print the list
lista7 = []
var7 = 4
while var7 <= 20:
if var7 % 2 == 0:
lista7.append(var7)
var7 +=1
else:
var7 +=1
print(lista7)
# Exercise 8 - Turn the result of this range call into a list: range(5, 45, 2)
nums = range(5, 45, 2)
print(list(nums))
# Exercise 9 - Fix the errors in the code below and run the program. Hint: there are 3 errors.
temperatura = float(input('Qual a temperatura? '))
if temperatura > 30:
print('Vista roupas leves.')
else:
print('Busque seus casacos.')
# Exercise 10 - Write a program that counts how many times the letter "r" appears in the sentence below. Use a placeholder in your print statement
frase = "É melhor, muito melhor, contentar-se com a realidade; se ela não é tão brilhante como os sonhos, tem pelo menos a vantagem de existir."
contador = 0  # reset the counter (it still holds 23 from exercise 6)
for letra in range(0, len(frase)):
if frase[letra] == 'r':
contador += 1
print(f'Foram contados {contador} letras "r"')
| [
"[email protected]"
] | |
53782ebdaec5105beb96a0ea327db79cdcb20e31 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/MinWindow_20200713232140.py | da9aae373aa9bf562d26a1905143fca258c24897 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | # pseducode
# "ADOBECODEBANC"
# "ABC"
# if n(string) is less than k then return false
# have a left and right pointer where both start from the beginning and right increases the window size and left reduces the window size
# make a dictionary for the chars in k and the count of unique chars in k
def small(n,k):
uniqueChar = {}
uniqueCount = 0
minCount = 0
for i in k:
if i not in uniqueChar:
uniqueChar[i] = 1
else:
uniqueChar[i] +=1
for i in uniqueChar:
if uniqueChar[i] > 1:
uniqueCount += uniqueChar[i]
if len(k) > len(n):
return 'false'
left = 0
right = 1
while left < len(n) and right < len(n):
if n[left:right] in uniqueChar:
print(n[left:right])
right +=1
small("ADOBECODEBANC","ABCC")
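# --- Added sketch (not part of the original history snapshot) ---
# The function above only grows the window and prints candidates; a complete
# minimum-window-substring solution also shrinks the window from the left.
# Minimal sketch of that two-pointer approach (names are illustrative):
from collections import Counter
def min_window_sketch(n, k):
    if not n or not k or len(k) > len(n):
        return ""
    need = Counter(k)          # chars of k still missing from the window, with multiplicity
    missing = len(k)           # how many required chars are not yet covered
    best = ""
    left = 0
    for right in range(1, len(n) + 1):
        ch = n[right - 1]
        if need[ch] > 0:
            missing -= 1
        need[ch] -= 1
        if missing == 0:       # window n[left:right] covers all of k
            while need[n[left]] < 0:   # drop surplus chars from the left
                need[n[left]] += 1
                left += 1
            if not best or right - left < len(best):
                best = n[left:right]
            need[n[left]] += 1         # give up the leftmost required char
            missing += 1
            left += 1
    return best
# Example: min_window_sketch("ADOBECODEBANC", "ABC") should give "BANC".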
| [
"[email protected]"
] | |
834e0bfb683d36df04fe7b02bd8bdcaf5a952402 | 2b2be9649489d124579e7510c6dc1d8e2bc310f6 | /syn/types/a/base.py | 58dcbe8554dcde048a35361c43702d158ef0bf7d | [
"MIT"
] | permissive | pombredanne/syn-1 | b744d67addc434300bf54ba2d3fdf973dba48d1e | aeaa3ad8a49bac8f50cf89b6f1fe97ad43d1d258 | refs/heads/master | 2021-06-16T07:34:30.950848 | 2017-04-21T06:02:48 | 2017-04-21T06:02:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,391 | py | import six
import operator as op
from functools import wraps
from collections import Iterable
from syn.base_utils import nearest_base, is_hashable, tuple_prepend, \
get_fullname, get_mod, get_typename, AttrDict, hasmethod, import_module, \
quote_string, iteration_length, escape_for_eval, compose, safe_vars
#-------------------------------------------------------------------------------
# Type registry
TYPE_REGISTRY = {}
#-------------------------------------------------------------------------------
# Serialization Information
SER_KEYS = AttrDict(name = '___name',
mod = '___mod',
args = '___args',
kwargs = '___kwargs',
attrs = '___attrs',
is_type = '___is_type')
SER_IDEMPOTENT = {int, float, bool, type(None)}
SER_BUILTINS = list(vars(six.moves.builtins).values())
#-------------------------------------------------------------------------------
# Utilities
class return_if(object):
def __init__(self, check_func):
self.check_func = check_func
def __call__(self, f):
@wraps(f)
def func(self_):
if self.check_func(self_.obj):
return self_.obj
return f(self_)
return func
#-------------------------------------------------------------------------------
# TypeMeta
class TypeMeta(type):
def __init__(self, *args):
super(TypeMeta, self).__init__(*args)
# Prevent erroneous type registrations
if self.type is object and get_typename(self) != 'Type':
self.type = None
# Register type
if self.type is not None:
TYPE_REGISTRY[self.type] = self
# Populate ser_kwargmap as needed
if self.ser_kwargs and not self.ser_kwargmap:
for kwarg in self.ser_kwargs:
if kwarg not in self.ser_kwargmap.values():
self.ser_kwargmap[kwarg] = kwarg
#-------------------------------------------------------------------------------
# Type
@six.add_metaclass(TypeMeta)
class Type(object):
type = object
gen_type = None
gen_types = None
ser_args = ()
ser_kwargs = ()
ser_kwargmap = {} # kwarg: attr
ser_attrs = None
def __init__(self, obj):
self.obj = obj
def attrs(self, **kwargs):
ret = sorted(safe_vars(self.obj).keys())
return ret
def _collect(self, func, **kwargs):
if not self.attrs():
return func(self.obj)
ret = {attr: collect(val, func, **kwargs)
for attr, val in self.pairs(**kwargs)}
return func(ret, **kwargs)
def collect(self, func, **kwargs):
if hasmethod(self.obj, '_collect'):
return self.obj._collect(func, **kwargs)
return self._collect(func, **kwargs)
@classmethod
def dispatch(cls, obj):
return cls.type_dispatch(type(obj))(obj)
@classmethod
def deserialize_dispatch(cls, obj):
if not isinstance(obj, dict):
return cls.dispatch(obj)
if SER_KEYS.name not in obj or SER_KEYS.mod not in obj:
return cls.dispatch(obj)
mod = import_module(obj[SER_KEYS.mod])
return cls.type_dispatch(getattr(mod, obj[SER_KEYS.name]))
@classmethod
def type_dispatch(cls, typ):
if typ in TYPE_REGISTRY:
return TYPE_REGISTRY[typ]
base = nearest_base(typ, TYPE_REGISTRY.keys())
ret = TYPE_REGISTRY[base]
TYPE_REGISTRY[typ] = ret # cache the result to avoid future searches
return ret
@classmethod
def deserialize(cls, dct, **kwargs_):
if not isinstance(dct, dict):
return dct
name = dct[SER_KEYS.name]
mod = import_module(dct[SER_KEYS.mod])
args = dct.get(SER_KEYS.args, [])
kwargs = dct.get(SER_KEYS.kwargs, {})
attrs = dct.get(SER_KEYS.attrs, {})
if args:
args = deserialize(args, **kwargs_)
if kwargs:
kwargs = deserialize(kwargs, **kwargs_)
if attrs:
attrs = deserialize(attrs, **kwargs_)
typ = getattr(mod, name)
if dct.get(SER_KEYS.is_type, False):
return typ
if args and kwargs:
obj = typ(*args, **kwargs)
elif args:
obj = typ(*args)
elif kwargs:
obj = typ(**kwargs)
else:
obj = typ()
for attr, val in attrs.items():
setattr(obj, attr, val)
if hasmethod(obj, '_deserialize'):
obj._deserialize(dct)
return obj
@classmethod
def enumerate(cls, **kwargs):
start = kwargs.get('start', 0)
step = kwargs.get('step', 1)
max_enum = kwargs.get('max_enum', None)
k = 0
x = start
while True:
if k >= max_enum:
break
yield cls.enumeration_value(x, **kwargs)
x += step
k += 1
@classmethod
def _enumeration_value(cls, x, **kwargs):
raise NotImplementedError
@classmethod
def enumeration_value(cls, x, **kwargs):
if hasmethod(cls.type, '_enumeration_value'):
return cls.type._enumeration_value(x, **kwargs)
return cls._enumeration_value(x, **kwargs)
def estr(self, **kwargs):
'''Should return a string that can eval into an equivalent object'''
if hasmethod(self.obj, '_estr'):
return escape_for_eval(self.obj._estr(**kwargs))
objstr = escape_for_eval(quote_string(str(self.obj)))
return '{}({})'.format(get_typename(self.obj), objstr)
def _find_ne(self, other, func, **kwargs):
from .ne import DiffersAtAttribute, NotEqual
for attr in self.attrs():
if not func(getattr(self.obj, attr),
getattr(other, attr)):
return DiffersAtAttribute(self.obj, other, attr)
return NotEqual(self.obj, other)
def find_ne(self, other, func=op.eq, **kwargs):
if func(self.obj, other):
return
if type(self.obj) is not type(other):
from .ne import DifferentTypes
return DifferentTypes(self.obj, other)
if hasmethod(self.obj, '_find_ne'):
return self.obj._find_ne(other, func, **kwargs)
return self._find_ne(other, func, **kwargs)
@classmethod
def _generate(cls, **kwargs):
if hasmethod(cls.type, '_generate'):
return cls.type._generate(**kwargs)
raise NotImplementedError
@classmethod
def generate(cls, **kwargs):
if cls.gen_type is None and cls.gen_types is None:
return cls._generate(**kwargs)
elif cls.gen_type:
return cls.type(generate(cls.gen_type, **kwargs))
return cls.type(*[generate(typ, **kwargs) for typ in cls.gen_types])
def _hashable(self, **kwargs):
return hashable(serialize(self.obj))
@return_if(is_hashable)
def hashable(self, **kwargs):
if hasmethod(self.obj, '_hashable'):
return self.obj._hashable(**kwargs)
return self._hashable(**kwargs)
def pairs(self, **kwargs):
ret = [(attr, getattr(self.obj, attr)) for attr in self.attrs(**kwargs)]
return ret
def _primitive_form(self, **kwargs):
return collect(self.obj, **kwargs)
def primitive_form(self, **kwargs):
if hasattr(self.obj, '_primitive_form'):
return self.obj._primitive_form(**kwargs)
return self._primitive_form(**kwargs)
def _rstr(self, **kwargs):
return str(self.obj)
def rstr(self, **kwargs):
        '''The idea is something like a recursive str().'''
if hasmethod(self.obj, '_rstr'):
return self.obj._rstr(**kwargs)
return self._rstr(**kwargs)
def _serialize(self, dct, **kwargs):
if (self.ser_args or self.ser_kwargs) and self.ser_attrs is None:
ser_attrs = False
elif self.ser_attrs is None:
ser_attrs = True
else:
ser_attrs = bool(self.ser_attrs)
if self.ser_args:
dct[SER_KEYS.args] = serialize([getattr(self.obj, arg)
for arg in self.ser_args])
if self.ser_kwargs:
dct[SER_KEYS.kwargs] = \
serialize({kwarg: getattr(self.obj, self.ser_kwargmap[kwarg])
for kwarg in self.ser_kwargs})
if ser_attrs:
dct[SER_KEYS.attrs] = serialize(dict(self.pairs(**kwargs)), **kwargs)
return dct
@classmethod
def _serialize_dict(cls, typ, **kwargs):
if typ in SER_BUILTINS:
mod = 'six.moves.builtins'
else:
mod = get_mod(typ)
return {SER_KEYS.name: get_typename(typ),
SER_KEYS.mod: mod}
def serialize(self, **kwargs):
# TODO: option for custom idempotent types (may be different
# for different serialization methods)
if type(self.obj) in SER_IDEMPOTENT:
return self.obj
dct = self._serialize_dict(type(self.obj), **kwargs)
if hasmethod(self.obj, '_serialize'):
return self.obj._serialize(dct, **kwargs)
self._serialize(dct, **kwargs)
return dct
@classmethod
def serialize_type(cls, typ, **kwargs):
dct = cls._serialize_dict(typ, **kwargs)
dct[SER_KEYS.is_type] = True
return dct
def _visit(self, k, **kwargs):
if self.is_primitive:
return self.obj
attr = self._attrs[k]
val = getattr(self.obj, attr)
return attr, val
def visit(self, k, **kwargs):
step = kwargs.get('step', 1)
enum = kwargs.get('enumerate', False)
self._attrs = self.attrs(**kwargs)
self.is_primitive = not bool(self._attrs)
N = self.visit_len(**kwargs)
count = 0
limit = iteration_length(N, k, step)
while True:
if count >= limit:
raise StopIteration
if hasmethod(self.obj, '_visit'):
item = self.obj._visit(k, **kwargs)
else:
item = self._visit(k, **kwargs)
if enum:
yield k, item
else:
yield item
k += step
count += 1
def _visit_len(self, **kwargs):
if self.is_primitive:
return 1
return len(self._attrs)
def visit_len(self, **kwargs):
if hasmethod(self.obj, '_visit_len'):
return self.obj._visit_len(**kwargs)
return self._visit_len(**kwargs)
#-------------------------------------------------------------------------------
# TypeType
class TypeType(Type):
type = type
def attrs(self, **kwargs):
return []
#-------------------------------------------------------------------------------
# Utilities
def attrs(obj, **kwargs):
return Type.dispatch(obj).attrs(**kwargs)
identity = lambda x, **kwargs: x
def collect(obj, func=identity, **kwargs):
return Type.dispatch(obj).collect(func, **kwargs)
def deserialize(obj, **kwargs):
return Type.deserialize_dispatch(obj).deserialize(obj, **kwargs)
def enumerate(typ, **kwargs):
for item in Type.type_dispatch(typ).enumerate(**kwargs):
yield item
def enumeration_value(typ, x, **kwargs):
return Type.type_dispatch(typ).enumeration_value(x, **kwargs)
def estr(obj, **kwargs):
'''Return a string that can evaluate into an equivalent object.
NOTE: this function is experimental and not fully supported.
'''
return Type.dispatch(obj).estr(**kwargs)
def find_ne(a, b, func=op.eq, **kwargs):
return Type.dispatch(a).find_ne(b, func, **kwargs)
def generate(typ, **kwargs):
return Type.type_dispatch(typ).generate(**kwargs)
def hashable(obj, **kwargs):
return Type.dispatch(obj).hashable(**kwargs)
def pairs(obj, **kwargs):
return Type.dispatch(obj).pairs(**kwargs)
def primitive_form(obj, **kwargs):
'''Return obj, if possible, in a form composed of primitive or builtin objects.'''
if isinstance(obj, type):
return obj
return Type.dispatch(obj).primitive_form(**kwargs)
def rstr(obj, **kwargs):
return Type.dispatch(obj).rstr(**kwargs)
def serialize(obj, **kwargs):
if isinstance(obj, type):
return Type.type_dispatch(obj).serialize_type(obj, **kwargs)
return Type.dispatch(obj).serialize(**kwargs)
def visit(obj, k=0, **kwargs):
for item in Type.dispatch(obj).visit(k, **kwargs):
yield item
def safe_sorted(obj, **kwargs):
if not isinstance(obj, Iterable):
return obj
try:
return sorted(obj, **kwargs)
except (TypeError, UnicodeDecodeError):
kwargs['key'] = kwargs.get('key', compose(hash, hashable))
return sorted(obj, **kwargs)
#-------------------------------------------------------------------------------
# __all__
__all__ = ('TYPE_REGISTRY', 'SER_KEYS', 'Type', 'TypeType',
'deserialize', 'enumerate', 'estr', 'find_ne', 'generate', 'attrs',
'hashable', 'rstr', 'serialize', 'visit', 'safe_sorted', 'pairs',
'enumeration_value', 'primitive_form', 'collect')
#-------------------------------------------------------------------------------
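# --- Added usage sketch (not part of the original module) ---
# New handlers are registered by subclassing Type and setting `type`; TypeMeta
# then records the subclass in TYPE_REGISTRY so the module-level helpers
# (serialize, deserialize, find_ne, rstr, ...) dispatch to it.  Hypothetical
# example -- `Point` is not part of this package:
#     class Point(object):
#         def __init__(self, x=0, y=0):
#             self.x, self.y = x, y
#     class PointType(Type):
#         type = Point
#         ser_kwargs = ('x', 'y')   # rebuilt via Point(x=..., y=...) on deserialize
# After this, Type.dispatch(Point(1, 2)) resolves to PointType, and
# serialize(Point(1, 2)) produces a dict that deserialize() can turn back into a
# Point (container handling relies on the Type subclasses registered elsewhere
# in syn.types).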
| [
"[email protected]"
] | |
81c14dc95fe92cb71fd3ab3323a7d30f3c721564 | 45d6b7739ef7e61779d778b16e2d2cb9b92a08c0 | /test/run_in_parallel-200PU-grow/submit-49.py | dc64b6143eac91b619db28129379d724ba08a4f9 | [] | no_license | isobelojalvo/phase2L1TauAnalyzer | 40b545baec97bf287a8d8ab26bea70546bf9f6f8 | 98ef6d31a523698ba0de48763cadee1d5b2ce695 | refs/heads/master | 2021-01-22T08:38:17.965156 | 2019-07-25T17:25:51 | 2019-07-25T17:25:51 | 92,623,686 | 0 | 1 | null | 2019-07-23T19:43:55 | 2017-05-27T20:56:25 | Python | UTF-8 | Python | false | false | 302 | py |
process.source.secondaryFileNames = cms.untracked.vstring(
"/store/relval/CMSSW_9_3_7/RelValZTT_14TeV/GEN-SIM-DIGI-RAW/PU25ns_93X_upgrade2023_realistic_v5_2023D17PU200-v1/10000/6E64C932-2E2D-E811-86D5-0242AC130002.root")
process.source.lumisToProcess = cms.untracked.VLuminosityBlockRange("1:24")
| [
"[email protected]"
] | |
55b2bdc54e83600514c0cd6d09240c467148e50a | 381b75fe68a4da258e2e60a97105b66ac47214e4 | /qa/rpc-tests/mempool_packages.py | 03e130496fdfc82e4e2f6487723ce8dc062aefde | [
"MIT"
] | permissive | lipcoin/lipcoin | 3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3 | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | refs/heads/master | 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,344 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test descendant package tracking code
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(LipCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = False
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-limitancestorcount=5", "-debug"]))
connect_nodes(self.nodes[0], 1)
self.is_network_split = False
self.sync_all()
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(chain[-1], 0, 1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
try:
self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
except JSONRPCException as e:
print("too-long-ancestor-chain successfully rejected")
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(chain[-1], 0, 2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
for i in range(MAX_DESCENDANTS):
utxo = transaction_package.pop(0)
try:
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
if i == MAX_DESCENDANTS - 2:
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
except JSONRPCException as e:
print(e.error['message'])
assert_equal(i, MAX_DESCENDANTS - 1)
print("tx that would create too large descendant package successfully rejected")
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
(tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
| [
"[email protected]"
] | |
809dbdaf35c9f1d9c579f7c054c3957ee204aa1e | 4e163aa4aa0f4c4ddc22f74ae21b6fb1c85a7a09 | /238. 除自身以外数组的乘积.py | 2e458c88617acdbc6d2816c027b1d9510b858e13 | [] | no_license | dxc19951001/Everyday_LeetCode | 72f46a0ec2fc651168129720ad0b1e7b5c372b0b | 3f7b2ea959308eb80f4c65be35aaeed666570f80 | refs/heads/master | 2023-08-03T09:22:08.467100 | 2023-07-23T17:08:27 | 2023-07-23T17:08:27 | 270,723,436 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,634 | py | class Solution(object):
def productExceptSelf_0(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        # Core idea:
        # Two nested for loops solve this easily, but the O(n^2) time does not meet the problem's requirement
        # Key point: use list slicing to drop the element that must not take part in each product
output= [0] * len(nums)
for i in range(len(nums)):
j = 1
news = nums[:i] + nums[i+1 :]
for k in news:
j *= k
output[i] = j
return output
def productExceptSelf_1(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        # Core idea:
        # Build a left-products array, a right-products array and an answer array
        # For each index i, multiply the product of everything on its left by the product of everything on its right
length = len(nums)
        # L and R are the running product lists for the left and right sides
L, R, answer = [0]*length, [0]*length, [0]*length
        # L[i] is the product of all elements to the left of index i
        # Index '0' has nothing on its left, so L[0] = 1
L[0] = 1
for i in range(1, length):
L[i] = nums[i - 1] * L[i - 1]
        # R[i] is the product of all elements to the right of index i
        # Index 'length-1' has nothing on its right, so R[length-1] = 1
R[length - 1] = 1
for i in reversed(range(length - 1)):
            # equivalent to looping from (length-2) down to 0
R[i] = nums[i + 1] * R[i + 1]
        # For index i, the product of everything except nums[i] is the left-side product times the right-side product
for i in range(length):
answer[i] = L[i] * R[i]
return answer
def productExceptSelf_2(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        # Core idea:
        # Save space: answer doubles as the left-side running product array
        # answer[i] is the product of all elements to the left of index i
        # Index '0' has nothing on its left, so answer[0] = 1
length = len(nums)
answer = [0]*length
answer[0] = 1
for i in range(1, length):
answer[i] = nums[i - 1] * answer[i - 1]
        # R is the running product of the elements on the right
        # At the start there is nothing on the right, so R = 1
R = 1
for i in reversed(range(length)):
            # For index i, the left-side product is answer[i] and the right-side product is R
answer[i] = answer[i] * R
            # R must include everything on the right, so fold the current value into R before the next iteration
R *= nums[i]
return answer
def productExceptSelf(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
        # Use two scalar accumulators for the left and right running products
left = 1
right = 1
result = [1] * len(nums)
        # accumulate the left-side products
for i in range(len(nums)):
result[i] *= left
left *= nums[i]
        # accumulate the right-side products
for i in range(len(nums)-1, -1, -1):
result[i] *= right
right *= nums[i]
return result
nums = [1,2,3,4]
s = Solution()
a = s.productExceptSelf(nums)
print(a) | [
"[email protected]"
] | |
6d48f45d6eb3ac4fe3fe69c45bf0ec4b44276d16 | 176f2533c07323f3eccb13d576092c32c46428fc | /game/game.py | 46f57da139764cbc2db7162a1541d4ddad3ab89f | [] | no_license | viciu/pyconpl-2014 | 3fbe382c5376cc54ca448efaca2777e6d242c607 | 4539ab8135c56cfbb2428c456ca182a86a2f46c9 | refs/heads/master | 2021-01-17T20:59:36.882412 | 2014-10-18T08:52:16 | 2014-10-18T08:52:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | import random
APPLES = ['X', 'O']
ORANGE = '.'
GRAPEFRUIT = ORANGE * 9
RAISINS = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[0, 3, 6],
[1, 4, 7],
[2, 5, 8],
[0, 4, 8],
[2, 4, 6]
]
def play():
banana = GRAPEFRUIT
melon = None
coconut(banana)
for plum in range(9):
prune = random.choice(walnut(banana))
nectarine = APPLES[plum % 2]
banana = peanut(banana, prune, nectarine)
coconut(banana)
if hazelnut(banana, nectarine):
melon = nectarine
break
if melon:
print 'Player {} wins'.format(melon)
else:
print 'It is a draw'
def coconut(lychee):
print '{} | {} | {}'.format(*lychee[:3])
print '--+---+--'
print '{} | {} | {}'.format(*lychee[3:6])
print '--+---+--'
print '{} | {} | {}'.format(*lychee[6:])
print
def peanut(pineapple, mango, papaya):
if not 0 <= mango < 9:
raise ValueError('Invalid position: {}'.format(mango))
if pineapple[mango] != ORANGE:
        raise ValueError('Position is full: {}'.format(mango))
return pineapple[:mango] + papaya + pineapple[mango+1:]
def walnut(lemon):
return [grape for grape in range(9) if lemon[grape] == ORANGE]
def hazelnut(lime, peach):
for p1, p2, p3 in RAISINS:
if lime[p1] == lime[p2] == lime[p3] == peach:
return True
return False
if __name__ == '__main__':
play()
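# --- Added note (not part of the original file) ---
# Key to the fruit-themed names used above: banana/lychee/pineapple/lemon/lime
# are the 9-character board string, APPLES are the two player marks, ORANGE is
# an empty cell and GRAPEFRUIT the empty board, RAISINS are the winning index
# triples, coconut() prints the board, peanut() places a mark, walnut() lists
# the free positions, hazelnut() checks for a win, and melon holds the winner.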
| [
"[email protected]"
] | |
eb3e549e3b4b155a018967890bed3f1d8b5ba1da | 7233716fbf9fff94240d14770b3fc3f3ada10d9b | /devel/.private/gazebo_msgs/lib/python2.7/dist-packages/gazebo_msgs/srv/_ApplyJointEffort.py | 6c9d00df1dcea2740ee6afbc680cd66255a1ae00 | [] | no_license | shashankseth01/E-yantra | 58d42dce90667ca37f31f2cf111ee98c39468617 | 23432e058fce7733bd1a8399fd6edc20967fa6a3 | refs/heads/main | 2023-02-04T00:36:57.230996 | 2020-12-21T09:55:23 | 2020-12-21T09:55:23 | 316,716,460 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,825 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/ApplyJointEffortRequest.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import genpy
class ApplyJointEffortRequest(genpy.Message):
_md5sum = "2c3396ab9af67a509ecd2167a8fe41a2"
_type = "gazebo_msgs/ApplyJointEffortRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# set urdf joint effort
string joint_name # joint to apply wrench (linear force and torque)
float64 effort # effort to apply
time start_time # optional wrench application start time (seconds)
# if start_time < current time, start as soon as possible
duration duration # optional duration of wrench application time (seconds)
# if duration < 0, apply wrench continuously without end
# if duration = 0, do nothing
# if duration < step size, assume step size and
# display warning in status_message
"""
__slots__ = ['joint_name','effort','start_time','duration']
_slot_types = ['string','float64','time','duration']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
joint_name,effort,start_time,duration
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ApplyJointEffortRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.joint_name is None:
self.joint_name = ''
if self.effort is None:
self.effort = 0.
if self.start_time is None:
self.start_time = genpy.Time()
if self.duration is None:
self.duration = genpy.Duration()
else:
self.joint_name = ''
self.effort = 0.
self.start_time = genpy.Time()
self.duration = genpy.Duration()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.joint_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_d2I2i().pack(_x.effort, _x.start_time.secs, _x.start_time.nsecs, _x.duration.secs, _x.duration.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.start_time is None:
self.start_time = genpy.Time()
if self.duration is None:
self.duration = genpy.Duration()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.joint_name = str[start:end]
_x = self
start = end
end += 24
(_x.effort, _x.start_time.secs, _x.start_time.nsecs, _x.duration.secs, _x.duration.nsecs,) = _get_struct_d2I2i().unpack(str[start:end])
self.start_time.canon()
self.duration.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.joint_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_d2I2i().pack(_x.effort, _x.start_time.secs, _x.start_time.nsecs, _x.duration.secs, _x.duration.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.start_time is None:
self.start_time = genpy.Time()
if self.duration is None:
self.duration = genpy.Duration()
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.joint_name = str[start:end].decode('utf-8', 'rosmsg')
else:
self.joint_name = str[start:end]
_x = self
start = end
end += 24
(_x.effort, _x.start_time.secs, _x.start_time.nsecs, _x.duration.secs, _x.duration.nsecs,) = _get_struct_d2I2i().unpack(str[start:end])
self.start_time.canon()
self.duration.canon()
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_d2I2i = None
def _get_struct_d2I2i():
global _struct_d2I2i
if _struct_d2I2i is None:
_struct_d2I2i = struct.Struct("<d2I2i")
return _struct_d2I2i
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from gazebo_msgs/ApplyJointEffortResponse.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class ApplyJointEffortResponse(genpy.Message):
_md5sum = "2ec6f3eff0161f4257b808b12bc830c2"
_type = "gazebo_msgs/ApplyJointEffortResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool success # return true if effort application is successful
string status_message # comments if available
"""
__slots__ = ['success','status_message']
_slot_types = ['bool','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,status_message
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(ApplyJointEffortResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.status_message is None:
self.status_message = ''
else:
self.success = False
self.status_message = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.success
buff.write(_get_struct_B().pack(_x))
_x = self.status_message
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
start = end
end += 1
(self.success,) = _get_struct_B().unpack(str[start:end])
self.success = bool(self.success)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.status_message = str[start:end].decode('utf-8', 'rosmsg')
else:
self.status_message = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
class ApplyJointEffort(object):
_type = 'gazebo_msgs/ApplyJointEffort'
_md5sum = 'c0039811b8cc919490b3cff748cdf46b'
_request_class = ApplyJointEffortRequest
_response_class = ApplyJointEffortResponse
| [
"[email protected]"
] | |
9c49e21e8fccfbd0b43c02b5ef63fdc32eccfdd4 | 9b7291d81a416bde2ec181229601eb2e33c7b8b2 | /monophoton/spikes/collect.py | d0cf880677d760340d04e54dfd7cde2976bd94cd | [] | no_license | MiT-HEP/MonoX | ab1528e72dad2590a0ae64f1a1d47195139e1749 | 224ee01107a94cedf8563c497edb2f326b99d9b1 | refs/heads/master | 2021-01-24T06:04:16.645559 | 2019-11-15T09:18:40 | 2019-11-15T09:18:40 | 41,823,403 | 1 | 9 | null | 2018-07-19T17:05:30 | 2015-09-02T19:33:33 | Python | UTF-8 | Python | false | false | 3,528 | py | import os
import sys
import re
import math
import array
thisdir = os.path.dirname(os.path.realpath(__file__))
basedir = os.path.dirname(thisdir)
sys.path.append(basedir)
import config
import utils
from datasets import allsamples
import ROOT
arun = array.array('I', [0])
alumi = array.array('I', [0])
aevent = array.array('I', [0])
aeta = array.array('f', [0.] * 10)
aphi = array.array('f', [0.] * 10)
positions = {}
#for sname in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m', 'sph-16e-m', 'sph-16f-m', 'sph-16g-m', 'sph-16h-m']:
for sname in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m']:
positions[sname] = {}
source = ROOT.TFile.Open(utils.getSkimPath(sname, 'monoph'))
tree = source.Get('events')
tree.Draw('>>elist', 'photons.scRawPt[0] > 175. && t1Met.pt > 170. && t1Met.photonDPhi > 0.5 && t1Met.minJetDPhi > 0.5', 'entrylist')
elist = ROOT.gDirectory.Get('elist')
tree.SetEntryList(elist)
tree.SetBranchAddress('runNumber', arun)
tree.SetBranchAddress('lumiNumber', alumi)
tree.SetBranchAddress('eventNumber', aevent)
tree.SetBranchAddress('photons.eta_', aeta)
tree.SetBranchAddress('photons.phi_', aphi)
ientry = 0
while True:
ilocal = tree.GetEntryNumber(ientry)
if ilocal < 0:
break
ientry += 1
tree.GetEntry(ilocal)
positions[sname][(arun[0], alumi[0], aevent[0])] = (aeta[0], aphi[0])
print sname, len(positions[sname]), 'photons'
source.Close()
outTrees = {}
outFiles = []
aieta = array.array('h', [0])
aiphi = array.array('h', [0])
sourcedir = '/mnt/hadoop/scratch/yiiyama/spike_event'
for fname in os.listdir(sourcedir):
if 'Run2016B' in fname:
sname = 'sph-16b-m'
elif 'Run2016C' in fname:
sname = 'sph-16c-m'
elif 'Run2016D' in fname:
sname = 'sph-16d-m'
elif 'Run2016E' in fname:
sname = 'sph-16e-m'
elif 'Run2016F' in fname:
sname = 'sph-16f-m'
elif 'Run2016G' in fname:
sname = 'sph-16g-m'
elif 'Run2016H' in fname:
sname = 'sph-16h-m'
if sname not in ['sph-16b-m', 'sph-16c-m', 'sph-16d-m']:
continue
matches = re.match('.+AOD_([0-9]+)_([0-9]+)_([0-9]+)[.]root', fname)
event = (int(matches.group(1)), int(matches.group(2)), int(matches.group(3)))
position = positions[sname][event]
# print event, position
source = ROOT.TFile.Open(sourcedir + '/' + fname)
tree = source.Get('outTree/hits')
if sname not in outTrees:
outFile = ROOT.TFile.Open(config.histDir + '/spikes/hits_' + sname + '.root', 'recreate')
outFiles.append(outFile)
outTree = tree.CloneTree(0)
outTrees[sname] = outTree
tree.SetBranchAddress('ieta', aieta)
tree.SetBranchAddress('iphi', aiphi)
ientry = 0
while tree.GetEntry(ientry) > 0:
ientry += 1
eta = aieta[0] * 0.0174
phi = (aiphi[0] - 10) / 180. * math.pi
deta = position[0] - eta
dphi = position[1] - phi
while dphi > math.pi:
dphi -= 2. * math.pi
while dphi < -math.pi:
dphi += 2. * math.pi
if deta * deta + dphi * dphi < 0.01:
tree.CopyAddresses(outTrees[sname])
outTrees[sname].Fill()
break
else:
print 'Matching photon not found for event', event
tree.CopyAddresses(outTrees[sname], True)
source.Close()
for tree in outTrees.itervalues():
outFile = tree.GetCurrentFile()
outFile.cd()
tree.Write()
outFile.Close()
| [
"[email protected]"
] | |
463dcc5b3f6bd9f93ff40fc5eea5cc0d69680a9e | 83a637ff77108f2582397c4ca4b2e7953ef4e137 | /categorical_embedder/processors/DiscriminativeWrapper.py | 09dac41aa0d8882e5556a2fbe6f7a17d8590bdee | [
"Apache-2.0"
] | permissive | erelcan/categorical-embedder | 066e0e279826f27aae0e927744d745bd724ba340 | 376b8779500af2aa459c879f8e525f2ef25d6b31 | refs/heads/master | 2023-02-03T01:44:01.896677 | 2020-12-19T10:51:11 | 2020-12-19T10:51:11 | 322,824,753 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,574 | py | from categorical_embedder.processors.ProcessorABC import ProcessorABC
class DiscriminativeWrapper(ProcessorABC):
def __init__(self, feature_processor, label_processor):
super().__init__()
self._feature_processor = feature_processor
self._label_processor = label_processor
def process(self, data, training=True):
if training:
# data: [features: numpy ndarray, labels: numpy ndarray]
# If features is a list or tuple, we will assume the last one is for target!
# Re-consider and better design this.~
if (isinstance(data, list) or isinstance(data, tuple)) and len(data) == 2:
processed1 = self._feature_processor.process(data[0])
processed2 = self._label_processor.process(data[1])
if isinstance(processed1, list) or isinstance(processed1, tuple):
return processed1[0:-1], {"main": processed1[-1], "discriminative": processed2}
else:
raise Exception("Data for DiscriminativeWrapper should have at least 2 target data: one for main embedding, and one for discriminative.")
else:
raise Exception("Data for DiscriminativeWrapper should be a list or tuple with length 2, for training.")
else:
# data: numpy ndarray
return self._feature_processor.process(data, training=False)
def get_feature_processor(self):
return self._feature_processor
def get_label_processor(self):
return self._label_processor
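# --- Added usage sketch (not part of the original class) ---
# During training the wrapper expects a [features, labels] pair and returns
# (inputs, {"main": ..., "discriminative": ...}), where the "main" target is the
# last item produced by the feature processor; at inference time only the
# features are processed:
#     wrapper = DiscriminativeWrapper(feature_processor, label_processor)  # processors are hypothetical
#     inputs, targets = wrapper.process([features, labels])                # training path
#     embedded = wrapper.process(features, training=False)                 # inference path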
| [
"[email protected]"
] | |
0ffcd09026f2a8b1327d97c4bfc0ae63dcfbf8bf | 711756b796d68035dc6a39060515200d1d37a274 | /output_exocyst_tags/optimized_8006.py | 92f04b4eb65847eae8c1440af4a4a0c165c44645 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Sec3_GFPN" not in marker_sets:
s=new_marker_set('Sec3_GFPN')
marker_sets["Sec3_GFPN"]=s
s= marker_sets["Sec3_GFPN"]
mark=s.place_marker((541.596, 512.333, 499.687), (0.15, 0.78, 0.66), 21.9005)
if "Sec3_GFPC" not in marker_sets:
s=new_marker_set('Sec3_GFPC')
marker_sets["Sec3_GFPC"]=s
s= marker_sets["Sec3_GFPC"]
mark=s.place_marker((568.86, 487.556, 533.851), (0.15, 0.78, 0.66), 31.586)
if "Sec3_Anch" not in marker_sets:
s=new_marker_set('Sec3_Anch')
marker_sets["Sec3_Anch"]=s
s= marker_sets["Sec3_Anch"]
mark=s.place_marker((443.376, 310.402, 599.636), (0.15, 0.58, 0.66), 26.9335)
if "Sec5_GFPN" not in marker_sets:
s=new_marker_set('Sec5_GFPN')
marker_sets["Sec5_GFPN"]=s
s= marker_sets["Sec5_GFPN"]
mark=s.place_marker((502.226, 474.639, 462.699), (0.38, 0.24, 0.37), 21.9005)
if "Sec5_GFPC" not in marker_sets:
s=new_marker_set('Sec5_GFPC')
marker_sets["Sec5_GFPC"]=s
s= marker_sets["Sec5_GFPC"]
mark=s.place_marker((560.207, 476.263, 566.453), (0.38, 0.24, 0.37), 31.586)
if "Sec6_GFPN" not in marker_sets:
s=new_marker_set('Sec6_GFPN')
marker_sets["Sec6_GFPN"]=s
s= marker_sets["Sec6_GFPN"]
mark=s.place_marker((552.054, 434.475, 481.425), (0.84, 0.98, 0.24), 21.9005)
if "Sec6_GFPC" not in marker_sets:
s=new_marker_set('Sec6_GFPC')
marker_sets["Sec6_GFPC"]=s
s= marker_sets["Sec6_GFPC"]
mark=s.place_marker((366.021, 454.286, 498.926), (0.84, 0.98, 0.24), 31.586)
if "Sec6_Anch" not in marker_sets:
s=new_marker_set('Sec6_Anch')
marker_sets["Sec6_Anch"]=s
s= marker_sets["Sec6_Anch"]
mark=s.place_marker((374.226, 646.956, 533.807), (0.84, 0.78, 0.24), 26.9335)
if "Sec8_GFPC" not in marker_sets:
s=new_marker_set('Sec8_GFPC')
marker_sets["Sec8_GFPC"]=s
s= marker_sets["Sec8_GFPC"]
mark=s.place_marker((348.131, 467.846, 564.892), (0.62, 0.67, 0.45), 31.586)
if "Sec8_Anch" not in marker_sets:
s=new_marker_set('Sec8_Anch')
marker_sets["Sec8_Anch"]=s
s= marker_sets["Sec8_Anch"]
mark=s.place_marker((502.808, 301.01, 608.5), (0.62, 0.47, 0.45), 26.9335)
if "Sec10_GFPN" not in marker_sets:
s=new_marker_set('Sec10_GFPN')
marker_sets["Sec10_GFPN"]=s
s= marker_sets["Sec10_GFPN"]
mark=s.place_marker((303.358, 468.232, 544.156), (0, 0.91, 0), 21.9005)
if "Sec10_GFPC" not in marker_sets:
s=new_marker_set('Sec10_GFPC')
marker_sets["Sec10_GFPC"]=s
s= marker_sets["Sec10_GFPC"]
mark=s.place_marker((556.988, 502.166, 655.685), (0, 0.91, 0), 31.586)
if "Sec10_Anch" not in marker_sets:
s=new_marker_set('Sec10_Anch')
marker_sets["Sec10_Anch"]=s
s= marker_sets["Sec10_Anch"]
mark=s.place_marker((401.96, 607.326, 428.684), (0, 0.71, 0), 26.9335)
if "Sec15_GFPN" not in marker_sets:
s=new_marker_set('Sec15_GFPN')
marker_sets["Sec15_GFPN"]=s
s= marker_sets["Sec15_GFPN"]
mark=s.place_marker((463.517, 451.821, 412.144), (0.11, 0.51, 0.86), 21.9005)
if "Sec15_GFPC" not in marker_sets:
s=new_marker_set('Sec15_GFPC')
marker_sets["Sec15_GFPC"]=s
s= marker_sets["Sec15_GFPC"]
mark=s.place_marker((313.133, 480.912, 604.313), (0.11, 0.51, 0.86), 31.586)
if "Sec15_Anch" not in marker_sets:
s=new_marker_set('Sec15_Anch')
marker_sets["Sec15_Anch"]=s
s= marker_sets["Sec15_Anch"]
mark=s.place_marker((421.945, 580.637, 681.719), (0.11, 0.31, 0.86), 26.9335)
if "Exo70_GFPN" not in marker_sets:
s=new_marker_set('Exo70_GFPN')
marker_sets["Exo70_GFPN"]=s
s= marker_sets["Exo70_GFPN"]
mark=s.place_marker((560.517, 511.605, 542.302), (0.89, 0.47, 0.4), 21.9005)
if "Exo70_GFPC" not in marker_sets:
s=new_marker_set('Exo70_GFPC')
marker_sets["Exo70_GFPC"]=s
s= marker_sets["Exo70_GFPC"]
mark=s.place_marker((327.274, 424.61, 563.459), (0.89, 0.47, 0.4), 31.586)
if "Exo70_Anch" not in marker_sets:
s=new_marker_set('Exo70_Anch')
marker_sets["Exo70_Anch"]=s
s= marker_sets["Exo70_Anch"]
mark=s.place_marker((480.52, 659.803, 387.92), (0.89, 0.27, 0.4), 26.9335)
if "Exo84_GFPN" not in marker_sets:
s=new_marker_set('Exo84_GFPN')
marker_sets["Exo84_GFPN"]=s
s= marker_sets["Exo84_GFPN"]
mark=s.place_marker((489.953, 490.909, 468.969), (0.5, 0.7, 0), 31.586)
if "Exo84_GFPC" not in marker_sets:
s=new_marker_set('Exo84_GFPC')
marker_sets["Exo84_GFPC"]=s
s= marker_sets["Exo84_GFPC"]
mark=s.place_marker((549.547, 470.354, 589.704), (0.5, 0.7, 0), 31.586)
if "Exo84_Anch" not in marker_sets:
s=new_marker_set('Exo84_Anch')
marker_sets["Exo84_Anch"]=s
s= marker_sets["Exo84_Anch"]
mark=s.place_marker((447.784, 637.856, 623.069), (0.5, 0.5, 0), 26.9335)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
b2dcbad518e02a363f8d5ba7c30bfe5a7c22ce1e | 2a54e8d6ed124c64abb9e075cc5524bb859ba0fa | /.history/1-Python-Basics/30-sets2_20200413211158.py | 368c37538e1350070bdbcc7b3a0b51dc8b9c681f | [] | no_license | CaptainStorm21/Python-Foundation | 01b5fbaf7a913506518cf22e0339dd948e65cea1 | a385adeda74f43dd7fb2d99d326b0be23db25024 | refs/heads/master | 2021-05-23T01:29:18.885239 | 2020-04-23T19:18:06 | 2020-04-23T19:18:06 | 253,171,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,001 | py | # methods
my_set = { 342, 23, 1, 2, 3, 9, 10, 9 }
your_set = [ 342, 23, 42, 46, 53, 34, 10 ]
print(my_set)
# output: {1, 2, 3, 9, 10, 23, 342} -- the duplicate 9 is dropped; set ordering is arbitrary
print(my_set.difference(your_set))
# output: {1, 2, 3, 9}
my_set.discard(10)
print(my_set)
#output {1, 2, 3, 9, 342, 23}
my_set.intersection(your_set)
print(my_set)
# note: intersection() returns a new set and does not change my_set, so this prints {1, 2, 3, 9, 23, 342}
my_set1 = { 3, 3, 4, 10, 14, 23, 1, 2, 3, 9, 10, 9 }
your_set1= [ 342, 23, 42, 46, 53, 34, 10 ]
my_set1.isdisjoint(your_set1)
print(my_set1)
# output: {1, 2, 3, 4, 9, 10, 14, 23} -- isdisjoint() only returns a bool and does not modify the set
my_set2 = { 342, 23, 1, 2, 3, 9, 10, 9 }
your_set2 = [ 342, 23, 42, 46, 53, 34, 10 ]
my_set2.union(your_set2)
print(my_set2)
# note: union() returns a new set, so my_set2 still prints as {1, 2, 3, 9, 10, 23, 342}
my_set3 = { 8888, 23, 1, 2, 3, 9, 10, 9 }
your_set3 = [ 342, 23, 42, 46, 53, 34, 10 ]
my_set3.issuperset(your_set3)
print(my_set3)
# output - {1, 2, 3, 9, 10, 23, 8888} (issuperset() just returns a bool here)
my_set3 = { 8888, 23, 1, 2, 3, 9, 10, 9 }
your_set3 = [ 342, 23, 42, 46, 53, 34, 10 ]
print(my_set3.isdisjoint(your_set3))
| [
"[email protected]"
] | |
26591cf0c6446d0ec56368b1f6e560280d10c616 | c01a58ecd6614128e3c29a70e3e768b220a2a4a2 | /common/xrd-ui-tests-python/helpers/mockrunner.py | 76c1f651e438d70bb1f13223c5d66af078e33553 | [
"MIT"
] | permissive | nordic-institute/X-Road-tests | 772a6d7485606c1f10b61a1260b8fb66111bf0be | e030661a0ad8ceab74dd8122b751e88025a3474a | refs/heads/develop | 2021-06-03T01:38:20.542859 | 2019-03-18T12:16:18 | 2019-03-18T12:16:18 | 125,643,677 | 2 | 3 | MIT | 2018-06-14T15:09:21 | 2018-03-17T15:36:32 | Python | UTF-8 | Python | false | false | 4,103 | py | from helpers import ssh_client
import re
import time
class MockRunner:
'''
Class that tries to control the mock service script (SoapUI MockRunner) over an SSH connection. Uses
ssh_helper.SSHClient component.
Connects to SSH server, sends a one-liner command and then waits until a specified regex matches output or a timeout
occurs. To stop the service, sends a single keycode (Ctrl-C by default).
'''
running = False # Internal variable - service running or not
error = None # Last error
command = None # Mock start command
debug = False
def __init__(self, host, username, password, command,
ready_regex='.*\[SoapUIMockServiceRunner\] Started.*', ready_timeout=60, stop_keycode=3):
'''
Initialize the class and open the SSH connection.
:param host: str - hostname of the server
:param username: str - username
:param password: str - password
:param command: str - mock service start command, one-liner (semicolons can be used for command sequence)
:param ready_regex: str - regex to wait for until concluding that the service is up and running
:param ready_timeout: int - service start timeout in seconds; if this passes, starting failed
:param stop_keycode: int - keycode to send to kill the service; can be Ctrl-C (3) or Enter (13) for SoapUI
'''
self.ssh = ssh_client.SSHClient(host=host, username=username, password=password)
self.command = command
self.ready_regex = re.compile(ready_regex)
self.ready_timeout = ready_timeout
self.stop_keycode = stop_keycode
def start(self):
'''
Tries to start the mock service.
:return: bool - if the service was started
'''
# No errors by default
self.error = None
# If the service is already running, set an error and fail start (return False)
if self.running:
self.error = 'Already running'
return False
# Set running to be true to block other start requests
self.running = True
# Execute command over SSH, line reading timeout is 1 second
self.ssh.exec_command(self.command, timeout=1)
# Get the current time to check for timeout
start_time = time.time()
while True:
# Read lines from SSH
try:
line = self.ssh.readline()
if line:
if self.debug:
# Print line for logging
print(line)
# If the line matches the specified regex, mock is running, break the loop.
if self.ready_regex.match(line):
break
else:
# Go to the exception
raise RuntimeError
except:
# If time limit passed, set an error and return False
if time.time() > start_time + self.ready_timeout:
self.error = 'Mock start timeout'
return False
return True
def restart(self):
'''
Restart mock service.
:return:
'''
# If already running, stop it.
if self.running:
self.stop()
# Start again.
self.start()
def stop(self):
'''
Stop the mock service.
:return:
'''
if self.running:
if self.debug:
print("Mock stopping")
# Send a stop character and flush it.
try:
self.ssh.write(chr(self.stop_keycode), flush=True)
except:
pass
# Not running and no error.
self.running = False
self.error = None
def get_error(self):
'''
Returns the last error.
:return: str|None - last error message
'''
return self.error
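# Hypothetical usage sketch (the host, credentials and SoapUI start command below are
# placeholders, not values taken from this repository):
#
#   runner = MockRunner(host='mock.example.org', username='user', password='secret',
#                       command='cd /opt/soapui && ./mockservicerunner.sh project.xml')
#   if runner.start():
#       ...  # run the tests against the mock service here
#       runner.stop()
#   else:
#       print('mock service did not start: {0}'.format(runner.get_error()))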
| [
"[email protected]"
] | |
2ca0fac55fa66f6d34206280619eb6e5a2cb335a | 7a30e60c1fedb9e03ef772e419a0b1258c605586 | /superlists/urls.py | feb9e81f54a586dcf30294be1af0a3c74053954c | [] | no_license | ndjman7/TDD | 1e6b5fb2210b48950c9ad1fc4d101ebf6e4fcce7 | 0950e2aab436ba00bf4a9b072930c8c2b225b9f0 | refs/heads/master | 2021-01-12T04:29:40.717412 | 2017-02-20T06:13:12 | 2017-02-20T06:13:12 | 77,623,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from lists import views
urlpatterns = [
url(r'^$', views.home_page, name='home'),
url(r'^lists/', include('lists.urls')),
url(r'^admin/', admin.site.urls),
]
| [
"[email protected]"
] | |
1b994a2be95d50152c5761e91816f512f0cd103d | 1676168244eed1c5610b2c1c38f692f89990b112 | /part4-ML/from_1021_Django/django_movie_second/movies/urls.py | a7004f938653e54f8ea8c49830a0f6a92dc15b22 | [] | no_license | gtpgg1013/AI_docs | 351e83f986d66224c82fff2de944753c98336d03 | 43f8eed8b2732314bd40ed65e1d7eb44dd28fc04 | refs/heads/master | 2022-12-09T17:32:02.992554 | 2019-11-20T09:03:56 | 2019-11-20T09:03:56 | 182,927,565 | 1 | 0 | null | 2022-12-08T06:50:23 | 2019-04-23T03:54:56 | Jupyter Notebook | UTF-8 | Python | false | false | 533 | py | from django.urls import path
from . import views
app_name = 'movies'
urlpatterns = [
path('', views.index, name="index"),
path('new/', views.new, name="new"),
path('<int:movie_pk>/', views.detail, name="detail"),
path('<int:movie_pk>/edit/', views.edit, name="edit"),
path('<int:movie_pk>/delete/', views.delete, name="delete"),
path('<int:movie_pk>/ratings/new/', views.new_rating, name="new_rating"),
path('<int:movie_pk>/ratings/<int:rating_pk>/delete/', views.delete_rating, name='delete_rating'),
] | [
"[email protected]"
] | |
6063ac49e1b928783643b11ac36053dbf051478d | 2c1429a1bd2d0477fd88119d4d778fc68c82adcf | /python/DeepSeaSceneLighting/SceneShadowManagerPrepare.py | f17117f268680fdab051407968ae346b70024422 | [
"Apache-2.0"
] | permissive | akb825/DeepSea | d7ac54f6d8243d43d6ea538159f3067ab7e79880 | 5a909b4f51717bc59682e51ad6aa598a25a9b965 | refs/heads/master | 2023-08-31T23:45:19.533393 | 2023-08-29T07:30:36 | 2023-08-29T07:30:43 | 142,716,767 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaSceneLighting
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SceneShadowManagerPrepare(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SceneShadowManagerPrepare()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSceneShadowManagerPrepare(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# SceneShadowManagerPrepare
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SceneShadowManagerPrepare
def ShadowManager(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def SceneShadowManagerPrepareStart(builder):
builder.StartObject(1)
def Start(builder):
SceneShadowManagerPrepareStart(builder)
def SceneShadowManagerPrepareAddShadowManager(builder, shadowManager):
builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(shadowManager), 0)
def AddShadowManager(builder, shadowManager):
SceneShadowManagerPrepareAddShadowManager(builder, shadowManager)
def SceneShadowManagerPrepareEnd(builder):
return builder.EndObject()
def End(builder):
return SceneShadowManagerPrepareEnd(builder)
| [
"[email protected]"
] | |
76de8db7db7b116df2a653e92efdb7a12c91cddb | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/actrl/rulehitparthist1w.py | 8a6a3bb187bab86c4cfee202fd4bda3c43cf816b | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 34,176 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RuleHitPartHist1w(Mo):
"""
A class that represents historical portion of the statistics for rule hits in a 1 week sampling interval. This class updates every day.
"""
meta = StatsClassMeta("cobra.model.actrl.RuleHitPartHist1w", "rule hits")
counter = CounterMeta("revPkts", CounterCategory.COUNTER, "packets", "reverse hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "revPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "revPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "revPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "revPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "revPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "revPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "revPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "revPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "revPktsRate"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
counter = CounterMeta("egrPkts", CounterCategory.COUNTER, "packets", "egress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "egrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "egrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "egrPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "egrPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "egrPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "egrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "egrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "egrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "egrPktsRate"
meta._counters.append(counter)
counter = CounterMeta("ingrPkts", CounterCategory.COUNTER, "packets", "ingress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "ingrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "ingrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "ingrPktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "ingrPktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "ingrPktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "ingrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "ingrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "ingrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "ingrPktsRate"
meta._counters.append(counter)
meta.moClassName = "actrlRuleHitPartHist1w"
meta.rnFormat = "HDactrlRuleHitPart1w-%(index)s-node-%(nodeId)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical portion of the rule hits stats in 1 week"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fv.RInfoHolder")
meta.superClasses.add("cobra.model.stats.HistAgPart")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.actrl.RuleHitPartHist")
meta.rnPrefixes = [
('HDactrlRuleHitPart1w-', True),
('-node-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "egrPktsAvg", "egrPktsAvg", 7471, PropCategory.IMPLICIT_AVG)
prop.label = "egress hit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsAvg", prop)
prop = PropMeta("str", "egrPktsCum", "egrPktsCum", 7467, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "egress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsCum", prop)
prop = PropMeta("str", "egrPktsMax", "egrPktsMax", 7470, PropCategory.IMPLICIT_MAX)
prop.label = "egress hit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsMax", prop)
prop = PropMeta("str", "egrPktsMin", "egrPktsMin", 7469, PropCategory.IMPLICIT_MIN)
prop.label = "egress hit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsMin", prop)
prop = PropMeta("str", "egrPktsPer", "egrPktsPer", 7468, PropCategory.IMPLICIT_PERIODIC)
prop.label = "egress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsPer", prop)
prop = PropMeta("str", "egrPktsRate", "egrPktsRate", 7475, PropCategory.IMPLICIT_RATE)
prop.label = "egress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsRate", prop)
prop = PropMeta("str", "egrPktsSpct", "egrPktsSpct", 7472, PropCategory.IMPLICIT_SUSPECT)
prop.label = "egress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsSpct", prop)
prop = PropMeta("str", "egrPktsThr", "egrPktsThr", 7473, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "egress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("egrPktsThr", prop)
prop = PropMeta("str", "egrPktsTr", "egrPktsTr", 7474, PropCategory.IMPLICIT_TREND)
prop.label = "egress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsTr", prop)
prop = PropMeta("str", "index", "index", 5849, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "ingrPktsAvg", "ingrPktsAvg", 7532, PropCategory.IMPLICIT_AVG)
prop.label = "ingress hit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsAvg", prop)
prop = PropMeta("str", "ingrPktsCum", "ingrPktsCum", 7528, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsCum", prop)
prop = PropMeta("str", "ingrPktsMax", "ingrPktsMax", 7531, PropCategory.IMPLICIT_MAX)
prop.label = "ingress hit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsMax", prop)
prop = PropMeta("str", "ingrPktsMin", "ingrPktsMin", 7530, PropCategory.IMPLICIT_MIN)
prop.label = "ingress hit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsMin", prop)
prop = PropMeta("str", "ingrPktsPer", "ingrPktsPer", 7529, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsPer", prop)
prop = PropMeta("str", "ingrPktsRate", "ingrPktsRate", 7536, PropCategory.IMPLICIT_RATE)
prop.label = "ingress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsRate", prop)
prop = PropMeta("str", "ingrPktsSpct", "ingrPktsSpct", 7533, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsSpct", prop)
prop = PropMeta("str", "ingrPktsThr", "ingrPktsThr", 7534, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("ingrPktsThr", prop)
prop = PropMeta("str", "ingrPktsTr", "ingrPktsTr", 7535, PropCategory.IMPLICIT_TREND)
prop.label = "ingress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsTr", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "nodeId", "nodeId", 5850, PropCategory.REGULAR)
prop.label = "Node Id"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("nodeId", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 24176, PropCategory.IMPLICIT_AVG)
prop.label = "hit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 24172, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 24175, PropCategory.IMPLICIT_MAX)
prop.label = "hit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 24174, PropCategory.IMPLICIT_MIN)
prop.label = "hit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 24173, PropCategory.IMPLICIT_PERIODIC)
prop.label = "hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 24180, PropCategory.IMPLICIT_RATE)
prop.label = "hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 24177, PropCategory.IMPLICIT_SUSPECT)
prop.label = "hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 24178, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 24179, PropCategory.IMPLICIT_TREND)
prop.label = "hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "revPktsAvg", "revPktsAvg", 24231, PropCategory.IMPLICIT_AVG)
prop.label = "reverse hit packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsAvg", prop)
prop = PropMeta("str", "revPktsCum", "revPktsCum", 24227, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "reverse hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsCum", prop)
prop = PropMeta("str", "revPktsMax", "revPktsMax", 24230, PropCategory.IMPLICIT_MAX)
prop.label = "reverse hit packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsMax", prop)
prop = PropMeta("str", "revPktsMin", "revPktsMin", 24229, PropCategory.IMPLICIT_MIN)
prop.label = "reverse hit packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsMin", prop)
prop = PropMeta("str", "revPktsPer", "revPktsPer", 24228, PropCategory.IMPLICIT_PERIODIC)
prop.label = "reverse hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsPer", prop)
prop = PropMeta("str", "revPktsRate", "revPktsRate", 24235, PropCategory.IMPLICIT_RATE)
prop.label = "reverse hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsRate", prop)
prop = PropMeta("str", "revPktsSpct", "revPktsSpct", 24232, PropCategory.IMPLICIT_SUSPECT)
prop.label = "reverse hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsSpct", prop)
prop = PropMeta("str", "revPktsThr", "revPktsThr", 24233, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "reverse hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("revPktsThr", prop)
prop = PropMeta("str", "revPktsTr", "revPktsTr", 24234, PropCategory.IMPLICIT_TREND)
prop.label = "reverse hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsTr", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
meta.namingProps.append(getattr(meta.props, "nodeId"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("ATgToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AEPgToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("MgmtInstPToNode", "External Management Network EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("OoBToNode", "Out-of-band Management EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("InBToNode", "Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("EPgToNwIf", "Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, index, nodeId, markDirty=True, **creationProps):
namingVals = [index, nodeId]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
abdafe161112ab293d182ea37b60e0b2175203e4 | 1eaa6c2500868d0c60b5b2cd552cd671b635de32 | /Algorithm/sword of offer/2.替换空格.py | d830f7aee41e3cd0884252602f7cb3b806b7c3bc | [] | no_license | jiangyuwei666/my-study-demo | f85f14a599c328addb5af09078d404f1139e0a82 | 9e2baef2f36f071f8903768adb8d5a5a8c1123f6 | refs/heads/master | 2022-04-30T16:47:24.715570 | 2022-03-24T09:08:43 | 2022-03-24T09:08:43 | 152,565,041 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | py | def solution_by_py(s):
s = s.replace(' ', '%20')
return s
def solution_by_py1(s):
s = s.split(' ')
s = '%20'.join(s)
return s
# a regex-based variant, completing the stub that was left here (sketch)
def solution_by_re(s):
    import re  # imported locally so this completion stays self-contained
    return re.sub(' ', '%20', s)
print(solution_by_py('a b c'))
print(solution_by_py1('a b c'))
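# Sketch of the classic in-place approach from the book (assumes a mutable list of
# characters with enough spare capacity, which is the setting of the original C++ problem):
# count the spaces first, then copy characters from the back so each one moves only once.
def solution_in_place(chars, length):
    spaces = sum(1 for i in range(length) if chars[i] == ' ')
    new_len = length + 2 * spaces
    i, j = length - 1, new_len - 1
    while i >= 0:
        if chars[i] == ' ':
            chars[j - 2:j + 1] = ['%', '2', '0']
            j -= 3
        else:
            chars[j] = chars[i]
            j -= 1
        i -= 1
    return new_len
# e.g. solution_in_place(list('a b') + [''] * 2, 3) -> 5, buffer becomes ['a', '%', '2', '0', 'b']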
| [
"[email protected]"
] | |
382ea429b816b3ca6a1ce86c23964238e697ed53 | 5796cdc0bf59ed09e1493804bd86e982daf73f7f | /python/interp.py | d5bb30cfd8ef1975b51b3ba3c6504a05101289a1 | [] | no_license | pgDora56/BrainfxckInterpreter | ec67113b4480a8b328fde126932ac1061f2999d0 | c3544922e4422633d869266b5a9035d87806c92c | refs/heads/master | 2020-09-20T06:36:23.508956 | 2019-12-09T08:13:05 | 2019-12-09T08:13:05 | 224,401,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,506 | py | import sys
ptr = 0
memory = [0]
readcnt = 0
whilemark = []
if len(sys.argv) == 2:
fname = sys.argv[1]
with open(fname) as f:
program = f.read()
while readcnt < len(program):
c = program[readcnt]
if c == ">":
if ptr < 1000000:
ptr += 1
while len(memory) <= ptr:
memory.append(0)
else:
raise Exception("ptr is too large")
elif c == "<":
if ptr > 0:
ptr -= 1
else:
raise Exception("ptr must be positive value")
elif c == "+":
memory[ptr] += 1
elif c == "-":
memory[ptr] -= 1
elif c == ".":
        sys.stdout.write(chr(memory[ptr]))  # "." emits one character; avoid the extra newline that print() adds
elif c == "[":
if memory[ptr] == 0:
wcnt = 1
readcnt += 1
while wcnt > 0:
if readcnt >= len(program):
raise Exception("] isn't found.")
if program[readcnt] == "[":
wcnt += 1
elif program[readcnt] == "]":
wcnt -= 1
                if wcnt > 0:
                    # advance only while still searching, so that the shared increment at the
                    # bottom of the main loop resumes execution just after the matching "]"
                    readcnt += 1
else:
whilemark.append(readcnt)
elif c == "]":
if memory[ptr] != 0:
readcnt = whilemark[-1]
else:
whilemark.pop(len(whilemark)-1)
readcnt += 1
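# Usage sketch (the file name below is only an example, not something shipped with this script):
#   python interp.py hello.bf
# where hello.bf could contain "++++++++[>++++++++<-]>+." - this leaves 8*8+1 = 65 in the
# second cell and prints "A".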
| [
"[email protected]"
] | |
f94f3789185b61d958643300f7e8cde8600bad3e | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_23037.py | 3bd2a710d8fe38850715610ce1eaa8fd6bce9240 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # create dictionary based on filter in sqlalchemy
by_name = {g.name: g.users for g in Group.query.options(db.joinedload(Group.users))}
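# Context sketch (assumed Flask-SQLAlchemy models, not part of the original answer):
#   class Group(db.Model):
#       name = db.Column(db.String, unique=True)
#       users = db.relationship('User', backref='group')
# The joinedload() option fetches each group's users eagerly in the same query, so building
# the {name: users} dictionary does not issue one extra query per group.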
| [
"[email protected]"
] | |
658baaf3e894c98deb60ec089ad7d2c063bd3ff8 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/humanfriendly/humanfriendly/tables.pyi | 0b324b1f32331119ff29d6ec1a91025a38402f4f | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 304 | pyi | from typing import Any
def format_smart_table(data, column_names): ...
def format_pretty_table(data, column_names: Any | None = ..., horizontal_bar: str = ..., vertical_bar: str = ...): ...
def format_robust_table(data, column_names): ...
def format_rst_table(data, column_names: Any | None = ...): ...
| [
"[email protected]"
] | |
068979971645c1203a8ee605549bd08a140f44c8 | 2d8a3a9b15e76bacd24e1627a124f0f60a83b253 | /sfepy/terms/terms_new.py | a3b8b1b2718a856c97abc55b196a36c84c25a6e7 | [
"BSD-3-Clause"
] | permissive | cheon7886/sfepy | 2722ae15bb52cdf20bac264771c32b1b051bb2ae | 2e9eb78341f9072ad07424221a64306c95c5ebd1 | refs/heads/master | 2021-01-19T19:33:11.938856 | 2015-03-29T08:56:35 | 2015-03-29T08:56:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,892 | py | """
todo:
- get row variable, col variable (if diff_var)
- determine out shape
- set current group to all variable arguments
- loop over row/col dofs:
- call term
? how to deal with components of (vector) variables?
(*) for given group, Variable has to be able to:
- evaluate value in quadrature points
- evaluate gradient in quadrature points
- ? evaluate divergence in quadrature points
- ? lazy evaluation, cache!
?? base function gradients in space elements stored now in terms - in
geometry, shared dict of geometries belongs to Equations
-> where to cache stuff? - in variables!
"""
import numpy as nm
from sfepy.base.base import output
from sfepy.terms.terms import Term, get_shape_kind
from sfepy.terms.utils import get_range_indices
from sfepy.mechanics.tensors import get_full_indices
from sfepy.linalg import dot_sequences as dot
class NewTerm(Term):
def get_geometry_key(self, variable):
is_trace = self.arg_traces[variable.name]
geometry_type = self.geometry_types[variable.name]
region_name, iorder, ig = self.get_current_group()
if is_trace:
region, ig_map, ig_map_i = self.region.get_mirror_region()
region_name = region.name
ig = ig_map_i[ig]
ap = variable.get_approximation(ig)
key = (region_name, iorder, geometry_type, ap.name)
return key, ig
def get_geometry(self, variable):
key, ig = self.get_geometry_key(variable)
geo = self.get_mapping(variable)[0]
return geo, key, ig
def set_current_group(self, ig):
"""
Set current group for the term and all variables in its
arguments.
"""
self.char_fun.set_current_group(ig)
shape_kind = get_shape_kind(self.integration)
for var in self.get_variables():
geo, geo_key, geo_ig = self.get_geometry(var)
var.setup_bases(geo_key, geo_ig, geo, self.integral, shape_kind)
var.set_current_group(geo_key, geo_ig)
def integrate(self, val_qp, variable):
shape_kind = get_shape_kind(self.integration)
geo, _, _ = self.get_geometry(variable)
sh = val_qp.shape
val = nm.zeros((sh[0], 1, sh[2], sh[3]), dtype=val_qp.dtype)
if shape_kind == 'volume':
geo.integrate(val, val_qp)
else:
geo.integrate(val, val_qp)
return val
def evaluate(self, mode='eval', diff_var=None, **kwargs):
shape_kind = get_shape_kind(self.integration)
if mode == 'eval':
var = self.get_variables()[0]
val = 0.0
for ig in self.iter_groups():
args = self.get_args(**kwargs)
val_qp = self(*args, **kwargs)
_val = self.integrate(val_qp, var)
val += self.sign * _val.sum()
elif mode in ('el_avg', 'qp'):
raise NotImplementedError()
elif mode == 'weak':
varr = self.get_virtual_variable()
vals = []
iels = []
if diff_var is None:
for ig in self.iter_groups():
args = self.get_args(**kwargs)
aux = varr.get_data_shape(ig, self.integral,
shape_kind, self.region.name)
n_elr, n_qpr, dim, n_enr, n_cr = aux
n_row = n_cr * n_enr
shape = (n_elr, 1, n_row, 1)
val = nm.zeros(shape, dtype=varr.dtype)
for ir in varr.iter_dofs():
irs = slice(ir, ir + 1)
try:
val_qp = self(*args, **kwargs)
except ValueError:
output('%s term evaluation failed!' % self.name)
raise
_val = self.integrate(val_qp, varr)
val[..., irs, :] = _val
vals.append(self.sign * val)
iels.append((ig, nm.arange(n_elr, dtype=nm.int32)))
else:
varc = self.get_variables(as_list=False)[diff_var]
for ig in self.iter_groups():
args = self.get_args(**kwargs)
aux = varr.get_data_shape(ig, self.integral,
shape_kind, self.region.name)
n_elr, n_qpr, dim, n_enr, n_cr = aux
n_row = n_cr * n_enr
aux = varc.get_data_shape(ig, self.integral,
shape_kind, self.region.name)
n_elc, n_qpc, dim, n_enc, n_cc = aux
n_col = n_cc * n_enc
shape = (n_elr, 1, n_row, n_col)
val = nm.zeros(shape, dtype=varr.dtype)
for ir in varr.iter_dofs():
irs = slice(ir, ir + 1)
for ic in varc.iter_dofs():
ics = slice(ic, ic + 1)
try:
val_qp = self(*args, **kwargs)
except ValueError:
output('%s term evaluation failed!' % self.name)
raise
_val = self.integrate(val_qp, varr)
val[..., irs, ics] = _val
vals.append(self.sign * val)
iels.append((ig, nm.arange(n_elr, dtype=nm.int32)))
# Setup return value.
if mode == 'eval':
out = (val,)
else:
out = (vals, iels)
# Hack: add zero status.
out = out + (0,)
if len(out) == 1:
out = out[0]
return out
class NewDiffusionTerm(NewTerm):
"""
"""
name = 'dw_new_diffusion'
arg_types = ('material', 'virtual', 'state')
def __call__(self, mat, virtual, state, **kwargs):
val = dot(virtual.grad(), dot(mat, state.grad()), 'ATB')
return val
class NewMassScalarTerm(NewTerm):
"""
"""
name = 'dw_new_mass_scalar'
arg_types = ('virtual', 'state')
def __call__(self, virtual, state, **kwargs):
val = virtual.val() * state.val()
return val
class NewMassTerm(NewTerm):
"""
Works for both scalar and vector variables.
"""
name = 'dw_new_mass'
arg_types = ('virtual', 'state')
def __call__(self, virtual, state, **kwargs):
rindx = virtual.get_component_indices()
cindx = state.get_component_indices()
val = virtual.get_element_zeros()
for ir, irs in rindx:
for ic, ics in cindx:
if ir == ic:
val += virtual.val(ir) * state.val(ic)
return val
class NewLinearElasticTerm(NewTerm):
"""
"""
name = 'dw_new_lin_elastic'
arg_types = ('material', 'virtual', 'state')
def __call__(self, mat, virtual, state, **kwargs):
"""
Doubled out-of-diagonal strain entries!
"""
rindx = virtual.get_component_indices()
cindx = state.get_component_indices()
kindx = lindx = get_range_indices(state.dim)
fi = nm.array(get_full_indices(state.dim))
val = virtual.get_element_zeros()
for ir, irs in rindx:
for ik, iks in kindx:
irk = fi[ir, ik]
irks = slice(irk, irk + 1)
erk = virtual.grad(ir, ik)
for ic, ics in cindx:
for il, ils in lindx:
icl = fi[ic, il]
icls = slice(icl, icl + 1)
ecl = state.grad(ic, il)
val += mat[..., irks, icls] * erk * ecl
return val
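# Hypothetical usage in a problem description (integral, region and material names are
# illustrative only, not taken from an sfepy example):
#     equations = {
#         'balance': """dw_new_lin_elastic.i.Omega(solid.D, v, u) = 0""",
#     }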
| [
"[email protected]"
] | |
1ae492172aa438a72336aba09bcd68fe23e03941 | b90190cd97f1aa2a3168d3f25ce6832a1c22d1b2 | /Code/models/arena/arena.py | 0cb906374edd997cd4c0ec9067f3c7282035faab | [] | no_license | stjordanis/Decentralized-and-multi-agent-control-of-Franka-Emika-Panda-robot-in-continuous-task-execution | 7c002cd4dea95b1a1256172c6d8d38c6226199f9 | c2b27e7f8059e3c29c876b60656f6a20d55e5da2 | refs/heads/main | 2023-07-31T20:47:24.476564 | 2021-10-01T08:44:19 | 2021-10-01T08:44:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import numpy as np
from models.base import MujocoXML
from utils.mjcf_utils import array_to_string, string_to_array
class Arena(MujocoXML):
"""Base arena class."""
def set_origin(self, offset):
"""Applies a constant offset to all objects."""
offset = np.array(offset)
for node in self.worldbody.findall("./*[@pos]"):
cur_pos = string_to_array(node.get("pos"))
new_pos = cur_pos + offset
node.set("pos", array_to_string(new_pos))
| [
"[email protected]"
] | |
699ff99fdcca12765962e862f59dd227a46dd7b7 | edf7fc01b731c1d17324a1acd095bac93c3537ef | /test/test_sampling.py | ea93e3e3d7755398f50c401a7758c01032c10eae | [
"BSD-3-Clause"
] | permissive | bernardotorres/profiling | f6d3d8a764c75ce6bb3478deb562c8160d9bad04 | 8763a5d11c4ebd06b5a90bdced0a01aaadf02687 | refs/heads/master | 2020-04-01T16:45:40.985548 | 2018-10-18T06:53:41 | 2018-10-18T06:53:41 | 153,396,990 | 0 | 0 | BSD-3-Clause | 2018-10-17T04:49:31 | 2018-10-17T04:49:31 | null | UTF-8 | Python | false | false | 2,752 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import signal
import sys
import pytest
from _utils import find_stats, spin
from profiling.sampling import SamplingProfiler
from profiling.sampling.samplers import ItimerSampler, TracingSampler
def spin_100ms():
spin(0.1)
def spin_500ms():
spin(0.5)
def _test_sampling_profiler(sampler):
profiler = SamplingProfiler(base_frame=sys._getframe(), sampler=sampler)
with profiler:
spin_100ms()
spin_500ms()
stat1 = find_stats(profiler.stats, 'spin_100ms')
stat2 = find_stats(profiler.stats, 'spin_500ms')
ratio = stat1.deep_hits / stat2.deep_hits
    # 1:5 expected, but tolerate (0.8~1.2):5
assert 0.8 <= ratio * 5 <= 1.2
@pytest.mark.flaky(reruns=10)
def test_itimer_sampler():
assert signal.getsignal(signal.SIGPROF) == signal.SIG_DFL
try:
_test_sampling_profiler(ItimerSampler(0.0001))
# no crash caused by SIGPROF.
assert signal.getsignal(signal.SIGPROF) == signal.SIG_IGN
for x in range(10):
os.kill(os.getpid(), signal.SIGPROF)
# respect custom handler.
handler = lambda *x: x
signal.signal(signal.SIGPROF, handler)
_test_sampling_profiler(ItimerSampler(0.0001))
assert signal.getsignal(signal.SIGPROF) == handler
finally:
signal.signal(signal.SIGPROF, signal.SIG_DFL)
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler():
pytest.importorskip('yappi')
_test_sampling_profiler(TracingSampler(0.0001))
@pytest.mark.flaky(reruns=10)
def test_tracing_sampler_does_not_sample_too_often():
pytest.importorskip('yappi')
# pytest-cov cannot detect a callback function registered by
# :func:`sys.setprofile`.
class fake_profiler(object):
samples = []
@classmethod
def sample(cls, frame):
cls.samples.append(frame)
@classmethod
def count_and_clear_samples(cls):
count = len(cls.samples)
del cls.samples[:]
return count
sampler = TracingSampler(0.1)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 0
spin(0.5)
sampler._profile(fake_profiler, None, None, None)
assert fake_profiler.count_and_clear_samples() == 1
def test_not_sampler():
with pytest.raises(TypeError):
SamplingProfiler(sampler=123)
def test_sample_1_depth():
frame = sys._getframe()
while frame.f_back is not None:
frame = frame.f_back
assert frame.f_back is None
profiler = SamplingProfiler()
profiler.sample(frame)
| [
"[email protected]"
] | |
94a491393853c4e7890846273a17e6a7fd0545c2 | 422ce4dad362cd9a1112965e6c5df17d13fe2287 | /econom_game/teams/migrations/0012_auto_20180830_1921.py | fa4b37689379a6dbbb2ad4f08fb70b61f95351cc | [] | no_license | zzaakiirr/econom_game | 22bfc7f8b009ab4e6366a912df731f5a234da506 | 56f0ca2e29e17b18cc7ec5248e66066bb061bc19 | refs/heads/master | 2020-03-16T15:05:21.872872 | 2018-09-06T11:10:08 | 2018-09-06T11:10:08 | 132,727,278 | 3 | 2 | null | 2018-08-30T22:32:43 | 2018-05-09T08:38:26 | Python | UTF-8 | Python | false | false | 501 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-30 19:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teams', '0011_auto_20180829_1251'),
]
operations = [
migrations.AlterField(
model_name='team',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"[email protected]"
] | |
de13a9ae414154f6bec5b36554ab9c6650cf7929 | d93159d0784fc489a5066d3ee592e6c9563b228b | /Validation/RecoParticleFlow/Benchmarks/Tools/submit.py | 87e493ec5ff2edf11475e492be5dbb1190793c81 | [] | permissive | simonecid/cmssw | 86396e31d41a003a179690f8c322e82e250e33b2 | 2559fdc9545b2c7e337f5113b231025106dd22ab | refs/heads/CAallInOne_81X | 2021-08-15T23:25:02.901905 | 2016-09-13T08:10:20 | 2016-09-13T08:53:42 | 176,462,898 | 0 | 1 | Apache-2.0 | 2019-03-19T08:30:28 | 2019-03-19T08:30:24 | null | UTF-8 | Python | false | false | 2,014 | py | #!/usr/bin/env python
# to submit a benchmark webpage to the validation website
# author: Colin
import shutil, sys, os, valtools
from optparse import OptionParser
parser = OptionParser()
parser.usage = "usage: %prog"
parser.add_option("-e", "--extension", dest="extension",
help="adds an extension to the name of this benchmark",
default=None)
parser.add_option("-f", "--force", dest="force",action="store_true",
help="force the submission. Be careful!",
default=False)
(options,args) = parser.parse_args()
if len(args)!=0:
parser.print_help()
sys.exit(1)
website = valtools.website()
bench = valtools.benchmark( options.extension )
localBench = valtools.benchmark()
print 'submitting from local: ', localBench
print ' to: ', bench
comparisons = website.listComparisons( bench )
if len(comparisons)>0:
print 'You are about to make the following list of comparison pages obsolete. These pages will thus be removed:'
print comparisons
answer = None
while answer != 'y' and answer != 'n':
answer = raw_input('do you agree? [y/n]')
if answer == 'n':
sys.exit(0)
# check that the user can write in the website
website.writeAccess()
bench.makeRelease( website )
if bench.exists( website ) == True:
if options.force == False:
print 'please use the -e option to choose another extension'
print ' e.g: submit.py -e Feb10'
print 'or force it.'
sys.exit(1)
else:
print 'overwriting...'
shutil.rmtree(bench.benchmarkOnWebSite(website))
# local benchmark. this one does not have an extension!
shutil.copytree(localBench.fullName(), bench.benchmarkOnWebSite(website) )
print 'done. Access your benchmark here:'
print bench.benchmarkUrl( website )
# removing comparisons
# COMPARISONS COULD ALSO BE REDONE.
for comparison in comparisons:
rm = 'rm -rf '+comparison
os.system(rm)
| [
"[email protected]"
] | |
4b0ad8b0dbc902231fb8f660ff4c04d461fd7e54 | bb6e80f7deff48a720d04850d9b4fd2bb379e14d | /ExpEYES17/UserManual/fr/rst/exp/prettyLaTeX.py | ae038f27591a7cd40285480463f4fb97f8fcf025 | [] | no_license | EduCodeBe/expeyes-programs | 6a04881c4c2c4a198999baf57802508985ad8a06 | 2464866382155ed4c951962be4313fdcfe73dcec | refs/heads/master | 2020-03-22T15:21:29.519546 | 2018-07-08T17:21:32 | 2018-07-08T17:21:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | #!/usr/bin/python3
import os, sys, re
def filterSVG(t,verbose=False):
"""
remove .svg in image names, replace by .pdf
"""
if verbose:
sys.stderr.write("replacing .svg extensions by .pdf for included graphics\n")
return re.sub(r"\.svg", ".pdf", t)
def filterPNG(t,verbose=False):
"""
remove .png in image names, replace by .pdf
"""
if verbose:
sys.stderr.write("replacing .png extensions by .pdf for included graphics\n")
return re.sub(r"\.png", ".pdf", t)
def filterJPG(t,verbose=False):
"""
remove .jpg in image names, replace by .pdf
"""
if verbose:
sys.stderr.write("replacing .jpg extensions by .pdf for included graphics\n")
return re.sub(r"\.jpg", ".pdf", t)
def filterNastyUnicode(t,verbose=False):
"""
remove problematic Unicode characters, like dots, thin spaces,
special minus sign
"""
if verbose:
sys.stderr.write("removing nasty unicode chars\n")
toReplace={
"\u2005": " ",
"\u2003": " ",
"\u200a": " ",
"\u22ef": "\\dots",
"\u2212": "-",
"↑": "",
"↓": "",
}
for code, repl in toReplace.items():
t=re.sub(code, repl, t)
return t
def filterSphinxIncludeGraphics(t, verbose=False):
if verbose:
sys.stderr.write("remove empty lines between SphinxIncludeGraphics")
pattern=re.compile(r"\\noindent\\sphinxincludegraphics.*")
lines=t.split("\n")
new=[lines[0], lines[1]] # always keep the two first lines
for i in range(2, len(lines)):
if pattern.match(new[-2]) and new[-1]=="" and pattern.match(lines[i]):
new[-1]=lines[i] # this drops the empty line
else:
new.append(lines[i])
return "\n".join(new)
filters=(
filterSVG,
filterPNG,
filterJPG,
filterNastyUnicode,
filterSphinxIncludeGraphics
)
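# Illustrative effect of the pipeline above (made-up file name, not from the manual):
#   \sphinxincludegraphics{exp/circuit.svg}  ->  \sphinxincludegraphics{exp/circuit.pdf}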
if __name__=="__main__":
buildDir=sys.argv[1]
texFile=sys.argv[2]
t=""
with open(buildDir+"/"+texFile) as infile:
t=infile.read()
for f in filters:
t=f(t, verbose=True)
with open(buildDir+"/"+texFile+".tmp","w") as outfile:
outfile.write(t)
os.rename(buildDir+"/"+texFile+".tmp", buildDir+"/"+texFile)
| [
"[email protected]"
] | |
65671d098435909935799ce253b7923906501a6e | ffa8a728f43b6de2b9a4dbfda18f3eb8518fbbbd | /snmp-mibs/DIFFSERV-MIB.py | a6c5c50bfcc5d7792053db572f5febdaddf3aee1 | [] | no_license | oriordan/pysnmp_mibs | 60e0d80e3f50490d9e6ab29d21627fec59ab0cfc | 92d39abf358a952e55a426e2a4658f4b0824182f | refs/heads/master | 2021-01-09T23:37:59.137750 | 2014-11-26T20:07:28 | 2014-11-26T20:07:28 | 20,253,987 | 11 | 15 | null | 2020-07-26T02:49:32 | 2014-05-28T10:43:18 | Python | UTF-8 | Python | false | false | 99,645 | py | # PySNMP SMI module. Autogenerated from smidump -f python DIFFSERV-MIB
# by libsmi2pysnmp-0.1.3 at Thu May 22 11:57:34 2014,
# Python version sys.version_info(major=2, minor=7, micro=2, releaselevel='final', serial=0)
# Imports
( Integer, ObjectIdentifier, OctetString, ) = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
( Dscp, DscpOrAny, ) = mibBuilder.importSymbols("DIFFSERV-DSCP-TC", "Dscp", "DscpOrAny")
( InterfaceIndexOrZero, ifCounterDiscontinuityGroup, ifCounterDiscontinuityGroup, ifIndex, ) = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero", "ifCounterDiscontinuityGroup", "ifCounterDiscontinuityGroup", "ifIndex")
( InetAddress, InetAddressPrefixLength, InetAddressType, InetPortNumber, ) = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressPrefixLength", "InetAddressType", "InetPortNumber")
( BurstSize, ) = mibBuilder.importSymbols("INTEGRATED-SERVICES-MIB", "BurstSize")
( ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup")
( Bits, Counter64, Integer32, ModuleIdentity, MibIdentifier, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Unsigned32, mib_2, zeroDotZero, ) = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter64", "Integer32", "ModuleIdentity", "MibIdentifier", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Unsigned32", "mib-2", "zeroDotZero")
( AutonomousType, RowPointer, RowStatus, StorageType, TextualConvention, ) = mibBuilder.importSymbols("SNMPv2-TC", "AutonomousType", "RowPointer", "RowStatus", "StorageType", "TextualConvention")
# Types
class IfDirection(Integer):
subtypeSpec = Integer.subtypeSpec+SingleValueConstraint(1,2,)
namedValues = NamedValues(("inbound", 1), ("outbound", 2), )
class IndexInteger(TextualConvention, Unsigned32):
displayHint = "d"
subtypeSpec = Unsigned32.subtypeSpec+ValueRangeConstraint(1,4294967295)
class IndexIntegerNextFree(TextualConvention, Unsigned32):
displayHint = "d"
subtypeSpec = Unsigned32.subtypeSpec+ValueRangeConstraint(0,4294967295)
# Objects
diffServMib = ModuleIdentity((1, 3, 6, 1, 2, 1, 97)).setRevisions(("2002-02-07 00:00",))
if mibBuilder.loadTexts: diffServMib.setOrganization("IETF Differentiated Services WG")
if mibBuilder.loadTexts: diffServMib.setContactInfo(" Fred Baker\nCisco Systems\n1121 Via Del Rey\nSanta Barbara, CA 93117, USA\nE-mail: [email protected]\n\nKwok Ho Chan\nNortel Networks\n600 Technology Park Drive\nBillerica, MA 01821, USA\nE-mail: [email protected]\n\nAndrew Smith\nHarbour Networks\nJiuling Building\n\n\n21 North Xisanhuan Ave.\nBeijing, 100089, PRC\nE-mail: [email protected]\n\nDifferentiated Services Working Group:\[email protected]")
if mibBuilder.loadTexts: diffServMib.setDescription("This MIB defines the objects necessary to manage a device that\nuses the Differentiated Services Architecture described in RFC\n2475. The Conceptual Model of a Differentiated Services Router\nprovides supporting information on how such a router is modeled.")
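# Usage sketch (an assumption, not part of the autogenerated output): with this
# file on the pysnmp MIB search path, the module can be loaded by name, e.g.
#   from pysnmp.smi import builder
#   mibBuilder = builder.MibBuilder()
#   mibBuilder.loadModules('DIFFSERV-MIB')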
diffServMIBObjects = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1))
diffServDataPath = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 1))
diffServDataPathTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 1, 1))
if mibBuilder.loadTexts: diffServDataPathTable.setDescription("The data path table contains RowPointers indicating the start of\nthe functional data path for each interface and traffic direction\nin this device. These may merge, or be separated into parallel\ndata paths.")
diffServDataPathEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 1, 1, 1)).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "DIFFSERV-MIB", "diffServDataPathIfDirection"))
if mibBuilder.loadTexts: diffServDataPathEntry.setDescription("An entry in the data path table indicates the start of a single\nDifferentiated Services Functional Data Path in this device.\n\nThese are associated with individual interfaces, logical or\nphysical, and therefore are instantiated by ifIndex. Therefore,\nthe interface index must have been assigned, according to the\nprocedures applicable to that, before it can be meaningfully\nused. Generally, this means that the interface must exist.\n\nWhen diffServDataPathStorage is of type nonVolatile, however,\nthis may reflect the configuration for an interface whose ifIndex\nhas been assigned but for which the supporting implementation is\nnot currently present.")
diffServDataPathIfDirection = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 1, 1, 1, 1), IfDirection()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServDataPathIfDirection.setDescription("IfDirection specifies whether the reception or transmission path\nfor this interface is in view.")
diffServDataPathStart = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 1, 1, 1, 2), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServDataPathStart.setDescription("This selects the first Differentiated Services Functional Data\nPath Element to handle traffic for this data path. This\nRowPointer should point to an instance of one of:\n diffServClfrEntry\n diffServMeterEntry\n diffServActionEntry\n diffServAlgDropEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates that no\nDifferentiated Services treatment is performed on traffic of this\ndata path. A pointer with the value zeroDotZero normally\nterminates a functional data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServDataPathStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 1, 1, 1, 3), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServDataPathStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServDataPathStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 1, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServDataPathStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time.")
diffServClassifier = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 2))
diffServClfrNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 2, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServClfrNextFree.setDescription("This object contains an unused value for diffServClfrId, or a\nzero to indicate that none exist.")
diffServClfrTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 2, 2))
if mibBuilder.loadTexts: diffServClfrTable.setDescription("This table enumerates all the diffserv classifier functional\ndata path elements of this device. The actual classification\ndefinitions are defined in diffServClfrElementTable entries\nbelonging to each classifier.\n\nAn entry in this table, pointed to by a RowPointer specifying an\ninstance of diffServClfrStatus, is frequently used as the name\nfor a set of classifier elements, which all use the index\ndiffServClfrId. Per the semantics of the classifier element\ntable, these entries constitute one or more unordered sets of\ntests which may be simultaneously applied to a message to\n\n\n\nclassify it.\n\nThe primary function of this table is to ensure that the value of\ndiffServClfrId is unique before attempting to use it in creating\na diffServClfrElementEntry. Therefore, the diffServClfrEntry must\nbe created on the same SET as the diffServClfrElementEntry, or\nbefore the diffServClfrElementEntry is created.")
diffServClfrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 2, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServClfrId"))
if mibBuilder.loadTexts: diffServClfrEntry.setDescription("An entry in the classifier table describes a single classifier.\nAll classifier elements belonging to the same classifier use the\nclassifier's diffServClfrId as part of their index.")
diffServClfrId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServClfrId.setDescription("An index that enumerates the classifier entries. Managers\nshould obtain new values for row creation in this table by\nreading diffServClfrNextFree.")
diffServClfrStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 2, 1, 2), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServClfrStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServClfrElementNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 2, 3), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServClfrElementNextFree.setDescription("This object contains an unused value for diffServClfrElementId,\nor a zero to indicate that none exist.")
diffServClfrElementTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 2, 4))
if mibBuilder.loadTexts: diffServClfrElementTable.setDescription("The classifier element table enumerates the relationship between\nclassification patterns and subsequent downstream Differentiated\nServices Functional Data Path elements.\ndiffServClfrElementSpecific points to a filter that specifies the\nclassification parameters. A classifier may use filter tables of\ndifferent types together.\n\nOne example of a filter table defined in this MIB is\ndiffServMultiFieldClfrTable, for IP Multi-Field Classifiers\n(MFCs). Such an entry might identify anything from a single\nmicro-flow (an identifiable sub-session packet stream directed\nfrom one sending transport to the receiving transport or\ntransports), or aggregates of those such as the traffic from a\nhost, traffic for an application, or traffic between two hosts\nusing an application and a given DSCP. The standard Behavior\nAggregate used in the Differentiated Services Architecture is\nencoded as a degenerate case of such an aggregate - the traffic\nusing a particular DSCP value.\n\nFilter tables for other filter types may be defined elsewhere.")
diffServClfrElementEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServClfrId"), (0, "DIFFSERV-MIB", "diffServClfrElementId"))
if mibBuilder.loadTexts: diffServClfrElementEntry.setDescription("An entry in the classifier element table describes a single\nelement of the classifier.")
diffServClfrElementId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServClfrElementId.setDescription("An index that enumerates the Classifier Element entries.\nManagers obtain new values for row creation in this table by\nreading diffServClfrElementNextFree.")
diffServClfrElementPrecedence = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrElementPrecedence.setDescription("The relative order in which classifier elements are applied:\nhigher numbers represent classifier element with higher\nprecedence. Classifier elements with the same order must be\nunambiguous i.e. they must define non-overlapping patterns, and\nare considered to be applied simultaneously to the traffic\nstream. Classifier elements with different order may overlap in\ntheir filters: the classifier element with the highest order\nthat matches is taken.\n\nOn a given interface, there must be a complete classifier in\nplace at all times in the ingress direction. This means one or\nmore filters must match any possible pattern. There is no such\n\n\n\nrequirement in the egress direction.")
diffServClfrElementNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 3), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrElementNext.setDescription("This attribute provides one branch of the fan-out functionality\nof a classifier described in the Informal Differentiated Services\nModel section 4.1.\n\nThis selects the next Differentiated Services Functional Data\nPath Element to handle traffic for this data path. This\nRowPointer should point to an instance of one of:\n diffServClfrEntry\n diffServMeterEntry\n diffServActionEntry\n diffServAlgDropEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates no further\nDifferentiated Services treatment is performed on traffic of this\ndata path. The use of zeroDotZero is the normal usage for the\nlast functional data path element of the current data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServClfrElementSpecific = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 4), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrElementSpecific.setDescription("A pointer to a valid entry in another table, filter table, that\ndescribes the applicable classification parameters, e.g. an entry\nin diffServMultiFieldClfrTable.\n\nThe value zeroDotZero is interpreted to match anything not\nmatched by another classifier element - only one such entry may\nexist for each classifier.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\n\n\n\nbecomes inactive by other means, the element is ignored.")
diffServClfrElementStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrElementStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServClfrElementStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 4, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServClfrElementStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServMultiFieldClfrNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 2, 5), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServMultiFieldClfrNextFree.setDescription("This object contains an unused value for\ndiffServMultiFieldClfrId, or a zero to indicate that none exist.")
diffServMultiFieldClfrTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 2, 6))
if mibBuilder.loadTexts: diffServMultiFieldClfrTable.setDescription("A table of IP Multi-field Classifier filter entries that a\n\n\n\nsystem may use to identify IP traffic.")
diffServMultiFieldClfrEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServMultiFieldClfrId"))
if mibBuilder.loadTexts: diffServMultiFieldClfrEntry.setDescription("An IP Multi-field Classifier entry describes a single filter.")
diffServMultiFieldClfrId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServMultiFieldClfrId.setDescription("An index that enumerates the MultiField Classifier filter\nentries. Managers obtain new values for row creation in this\ntable by reading diffServMultiFieldClfrNextFree.")
diffServMultiFieldClfrAddrType = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 2), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrAddrType.setDescription("The type of IP address used by this classifier entry. While\nother types of addresses are defined in the InetAddressType\n\n\n\ntextual convention, and DNS names, a classifier can only look at\npackets on the wire. Therefore, this object is limited to IPv4\nand IPv6 addresses.")
diffServMultiFieldClfrDstAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrDstAddr.setDescription("The IP address to match against the packet's destination IP\naddress. This may not be a DNS name, but may be an IPv4 or IPv6\nprefix. diffServMultiFieldClfrDstPrefixLength indicates the\nnumber of bits that are relevant.")
diffServMultiFieldClfrDstPrefixLength = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 4), InetAddressPrefixLength().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrDstPrefixLength.setDescription("The length of the CIDR Prefix carried in\ndiffServMultiFieldClfrDstAddr. In IPv4 addresses, a length of 0\nindicates a match of any address; a length of 32 indicates a\nmatch of a single host address, and a length between 0 and 32\nindicates the use of a CIDR Prefix. IPv6 is similar, except that\nprefix lengths range from 0..128.")
diffServMultiFieldClfrSrcAddr = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 5), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrSrcAddr.setDescription("The IP address to match against the packet's source IP address.\nThis may not be a DNS name, but may be an IPv4 or IPv6 prefix.\ndiffServMultiFieldClfrSrcPrefixLength indicates the number of\nbits that are relevant.")
diffServMultiFieldClfrSrcPrefixLength = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 6), InetAddressPrefixLength().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrSrcPrefixLength.setDescription("The length of the CIDR Prefix carried in\ndiffServMultiFieldClfrSrcAddr. In IPv4 addresses, a length of 0\nindicates a match of any address; a length of 32 indicates a\nmatch of a single host address, and a length between 0 and 32\nindicates the use of a CIDR Prefix. IPv6 is similar, except that\nprefix lengths range from 0..128.")
diffServMultiFieldClfrDscp = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 7), DscpOrAny().clone('-1')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrDscp.setDescription("The value that the DSCP in the packet must have to match this\nentry. A value of -1 indicates that a specific DSCP value has not\nbeen defined and thus all DSCP values are considered a match.")
diffServMultiFieldClfrFlowId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1048575))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrFlowId.setDescription("The flow identifier in an IPv6 header.")
diffServMultiFieldClfrProtocol = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255)).clone(255)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrProtocol.setDescription("The IP protocol to match against the IPv4 protocol number or the\nIPv6 Next- Header number in the packet. A value of 255 means\nmatch all. Note the protocol number of 255 is reserved by IANA,\nand Next-Header number of 0 is used in IPv6.")
diffServMultiFieldClfrDstL4PortMin = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 10), InetPortNumber().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrDstL4PortMin.setDescription("The minimum value that the layer-4 destination port number in\nthe packet must have in order to match this classifier entry.")
diffServMultiFieldClfrDstL4PortMax = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 11), InetPortNumber().clone('65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrDstL4PortMax.setDescription("The maximum value that the layer-4 destination port number in\nthe packet must have in order to match this classifier entry.\nThis value must be equal to or greater than the value specified\nfor this entry in diffServMultiFieldClfrDstL4PortMin.")
diffServMultiFieldClfrSrcL4PortMin = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 12), InetPortNumber().clone('0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrSrcL4PortMin.setDescription("The minimum value that the layer-4 source port number in the\npacket must have in order to match this classifier entry.")
diffServMultiFieldClfrSrcL4PortMax = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 13), InetPortNumber().clone('65535')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrSrcL4PortMax.setDescription("The maximum value that the layer-4 source port number in the\npacket must have in order to match this classifier entry. This\nvalue must be equal to or greater than the value specified for\nthis entry in diffServMultiFieldClfrSrcL4PortMin.")
diffServMultiFieldClfrStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 14), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServMultiFieldClfrStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 2, 6, 1, 15), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMultiFieldClfrStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServMeter = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 3))
diffServMeterNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 3, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServMeterNextFree.setDescription("This object contains an unused value for diffServMeterId, or a\nzero to indicate that none exist.")
diffServMeterTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 3, 2))
if mibBuilder.loadTexts: diffServMeterTable.setDescription("This table enumerates specific meters that a system may use to\npolice a stream of traffic. The traffic stream to be metered is\ndetermined by the Differentiated Services Functional Data Path\nElement(s) upstream of the meter i.e. by the object(s) that point\nto each entry in this table. This may include all traffic on an\ninterface.\n\nSpecific meter details are to be found in table entry referenced\nby diffServMeterSpecific.")
diffServMeterEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServMeterId"))
if mibBuilder.loadTexts: diffServMeterEntry.setDescription("An entry in the meter table describes a single conformance level\nof a meter.")
diffServMeterId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServMeterId.setDescription("An index that enumerates the Meter entries. Managers obtain new\nvalues for row creation in this table by reading\ndiffServMeterNextFree.")
diffServMeterSucceedNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 2), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMeterSucceedNext.setDescription("If the traffic does conform, this selects the next\nDifferentiated Services Functional Data Path element to handle\ntraffic for this data path. This RowPointer should point to an\ninstance of one of:\n diffServClfrEntry\n diffServMeterEntry\n diffServActionEntry\n diffServAlgDropEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates that no\nfurther Differentiated Services treatment is performed on traffic\nof this data path. The use of zeroDotZero is the normal usage for\nthe last functional data path element of the current data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServMeterFailNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 3), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMeterFailNext.setDescription("If the traffic does not conform, this selects the next\nDifferentiated Services Functional Data Path element to handle\ntraffic for this data path. This RowPointer should point to an\ninstance of one of:\n diffServClfrEntry\n diffServMeterEntry\n\n\n\n diffServActionEntry\n diffServAlgDropEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates no further\nDifferentiated Services treatment is performed on traffic of this\ndata path. The use of zeroDotZero is the normal usage for the\nlast functional data path element of the current data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServMeterSpecific = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 4), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMeterSpecific.setDescription("This indicates the behavior of the meter by pointing to an entry\ncontaining detailed parameters. Note that entries in that\nspecific table must be managed explicitly.\n\nFor example, diffServMeterSpecific may point to an entry in\ndiffServTBParamTable, which contains an instance of a single set\nof Token Bucket parameters.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the meter always succeeds.")
diffServMeterStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMeterStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServMeterStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 3, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMeterStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServTBParam = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 4))
diffServTBParamNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 4, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServTBParamNextFree.setDescription("This object contains an unused value for diffServTBParamId, or a\nzero to indicate that none exist.")
diffServTBParamTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 4, 2))
if mibBuilder.loadTexts: diffServTBParamTable.setDescription("This table enumerates a single set of token bucket meter\nparameters that a system may use to police a stream of traffic.\nSuch meters are modeled here as having a single rate and a single\nburst size. Multiple entries are used when multiple rates/burst\nsizes are needed.")
diffServTBParamEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServTBParamId"))
if mibBuilder.loadTexts: diffServTBParamEntry.setDescription("An entry that describes a single set of token bucket\nparameters.")
diffServTBParamId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServTBParamId.setDescription("An index that enumerates the Token Bucket Parameter entries.\nManagers obtain new values for row creation in this table by\nreading diffServTBParamNextFree.")
diffServTBParamType = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 2), AutonomousType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamType.setDescription("The Metering algorithm associated with the Token Bucket\nparameters. zeroDotZero indicates this is unknown.\n\nStandard values for generic algorithms:\ndiffServTBParamSimpleTokenBucket, diffServTBParamAvgRate,\ndiffServTBParamSrTCMBlind, diffServTBParamSrTCMAware,\ndiffServTBParamTrTCMBlind, diffServTBParamTrTCMAware, and\ndiffServTBParamTswTCM are specified in this MIB as OBJECT-\nIDENTITYs; additional values may be further specified in other\nMIBs.")
diffServTBParamRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamRate.setDescription("The token-bucket rate, in kilobits per second (kbps). This\nattribute is used for:\n1. CIR in RFC 2697 for srTCM\n2. CIR and PIR in RFC 2698 for trTCM\n3. CTR and PTR in RFC 2859 for TSWTCM\n4. AverageRate in RFC 3290.")
diffServTBParamBurstSize = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 4), BurstSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamBurstSize.setDescription("The maximum number of bytes in a single transmission burst. This\nattribute is used for:\n1. CBS and EBS in RFC 2697 for srTCM\n2. CBS and PBS in RFC 2698 for trTCM\n3. Burst Size in RFC 3290.")
diffServTBParamInterval = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamInterval.setDescription("The time interval used with the token bucket. For:\n1. Average Rate Meter, the Informal Differentiated Services Model\n section 5.2.1, - Delta.\n2. Simple Token Bucket Meter, the Informal Differentiated\n Services Model section 5.1, - time interval t.\n3. RFC 2859 TSWTCM, - AVG_INTERVAL.\n4. RFC 2697 srTCM, RFC 2698 trTCM, - token bucket update time\n interval.")
diffServTBParamStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 6), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServTBParamStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 4, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServTBParamStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServAction = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 5))
diffServActionNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 5, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServActionNextFree.setDescription("This object contains an unused value for diffServActionId, or a\nzero to indicate that none exist.")
diffServActionTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 5, 2))
if mibBuilder.loadTexts: diffServActionTable.setDescription("The Action Table enumerates actions that can be performed to a\nstream of traffic. Multiple actions can be concatenated. For\nexample, traffic exiting from a meter may be counted, marked, and\npotentially dropped before entering a queue.\n\nSpecific actions are indicated by diffServActionSpecific which\npoints to an entry of a specific action type parameterizing the\naction in detail.")
diffServActionEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServActionId"))
if mibBuilder.loadTexts: diffServActionEntry.setDescription("Each entry in the action table allows description of one\nspecific action to be applied to traffic.")
diffServActionId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServActionId.setDescription("An index that enumerates the Action entries. Managers obtain\nnew values for row creation in this table by reading\ndiffServActionNextFree.")
diffServActionInterface = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 2), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServActionInterface.setDescription("The interface index (value of ifIndex) that this action occurs\non. This may be derived from the diffServDataPathStartEntry's\nindex by extension through the various RowPointers. However, as\nthis may be difficult for a network management station, it is\nplaced here as well. If this is indeterminate, the value is\nzero.\n\nThis is of especial relevance when reporting the counters which\nmay apply to traffic crossing an interface:\n diffServCountActOctets,\n diffServCountActPkts,\n diffServAlgDropOctets,\n diffServAlgDropPkts,\n diffServAlgRandomDropOctets, and\n diffServAlgRandomDropPkts.\n\nIt is also especially relevant to the queue and scheduler which\nmay be subsequently applied.")
diffServActionNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 3), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServActionNext.setDescription("This selects the next Differentiated Services Functional Data\nPath Element to handle traffic for this data path. This\nRowPointer should point to an instance of one of:\n diffServClfrEntry\n diffServMeterEntry\n diffServActionEntry\n diffServAlgDropEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates no further\nDifferentiated Services treatment is performed on traffic of this\ndata path. The use of zeroDotZero is the normal usage for the\nlast functional data path element of the current data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServActionSpecific = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 4), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServActionSpecific.setDescription("A pointer to an object instance providing additional information\nfor the type of action indicated by this action table entry.\n\nFor the standard actions defined by this MIB module, this should\npoint to either a diffServDscpMarkActEntry or a\ndiffServCountActEntry. For other actions, it may point to an\nobject instance defined in some other MIB.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the Meter should be treated as\nif it were not present. This may lead to incorrect policy\nbehavior.")
diffServActionStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServActionStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServActionStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServActionStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServDscpMarkActTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 5, 3))
if mibBuilder.loadTexts: diffServDscpMarkActTable.setDescription("This table enumerates specific DSCPs used for marking or\nremarking the DSCP field of IP packets. The entries of this table\nmay be referenced by a diffServActionSpecific attribute.")
diffServDscpMarkActEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 5, 3, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServDscpMarkActDscp"))
if mibBuilder.loadTexts: diffServDscpMarkActEntry.setDescription("An entry in the DSCP mark action table that describes a single\nDSCP used for marking.")
diffServDscpMarkActDscp = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 3, 1, 1), Dscp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServDscpMarkActDscp.setDescription("The DSCP that this Action will store into the DSCP field of the\nsubject. It is quite possible that the only packets subject to\nthis Action are already marked with this DSCP. Note also that\nDifferentiated Services processing may result in packet being\nmarked on both ingress to a network and on egress from it, and\nthat ingress and egress can occur in the same router.")
diffServCountActNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 5, 4), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServCountActNextFree.setDescription("This object contains an unused value for\ndiffServCountActId, or a zero to indicate that none exist.")
diffServCountActTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 5, 5))
if mibBuilder.loadTexts: diffServCountActTable.setDescription("This table contains counters for all the traffic passing through\nan action element.")
diffServCountActEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServCountActId"))
if mibBuilder.loadTexts: diffServCountActEntry.setDescription("An entry in the count action table describes a single set of\ntraffic counters.")
diffServCountActId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServCountActId.setDescription("An index that enumerates the Count Action entries. Managers\nobtain new values for row creation in this table by reading\n\n\n\ndiffServCountActNextFree.")
diffServCountActOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServCountActOctets.setDescription("The number of octets at the Action data path element.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServCountActPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServCountActPkts.setDescription("The number of packets at the Action data path element.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServCountActStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1, 4), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServCountActStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServCountActStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 5, 5, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServCountActStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\n\n\n\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServAlgDrop = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 6))
diffServAlgDropNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 6, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServAlgDropNextFree.setDescription("This object contains an unused value for diffServAlgDropId, or a\nzero to indicate that none exist.")
diffServAlgDropTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 6, 2))
if mibBuilder.loadTexts: diffServAlgDropTable.setDescription("The algorithmic drop table contains entries describing an\nelement that drops packets according to some algorithm.")
diffServAlgDropEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServAlgDropId"))
if mibBuilder.loadTexts: diffServAlgDropEntry.setDescription("An entry describes a process that drops packets according to\nsome algorithm. Further details of the algorithm type are to be\nfound in diffServAlgDropType and with more detail parameter entry\npointed to by diffServAlgDropSpecific when necessary.")
diffServAlgDropId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServAlgDropId.setDescription("An index that enumerates the Algorithmic Dropper entries.\nManagers obtain new values for row creation in this table by\nreading diffServAlgDropNextFree.")
diffServAlgDropType = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 2), Integer().subtype(subtypeSpec=SingleValueConstraint(4,3,5,1,2,)).subtype(namedValues=NamedValues(("other", 1), ("tailDrop", 2), ("headDrop", 3), ("randomDrop", 4), ("alwaysDrop", 5), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropType.setDescription("The type of algorithm used by this dropper. The value other(1)\nrequires further specification in some other MIB module.\n\nIn the tailDrop(2) algorithm, diffServAlgDropQThreshold\nrepresents the maximum depth of the queue, pointed to by\ndiffServAlgDropQMeasure, beyond which all newly arriving packets\nwill be dropped.\n\nIn the headDrop(3) algorithm, if a packet arrives when the\ncurrent depth of the queue, pointed to by\ndiffServAlgDropQMeasure, is at diffServAlgDropQThreshold, packets\ncurrently at the head of the queue are dropped to make room for\nthe new packet to be enqueued at the tail of the queue.\n\nIn the randomDrop(4) algorithm, on packet arrival, an Active\nQueue Management algorithm is executed which may randomly drop a\npacket. This algorithm may be proprietary, and it may drop either\nthe arriving packet or another packet in the queue.\ndiffServAlgDropSpecific points to a diffServRandomDropEntry that\ndescribes the algorithm. For this algorithm,\n\n\n\ndiffServAlgDropQThreshold is understood to be the absolute\nmaximum size of the queue and additional parameters are described\nin diffServRandomDropTable.\n\nThe alwaysDrop(5) algorithm is as its name specifies; always\ndrop. In this case, the other configuration values in this Entry\nare not meaningful; There is no useful 'next' processing step,\nthere is no queue, and parameters describing the queue are not\nuseful. Therefore, diffServAlgDropNext, diffServAlgDropMeasure,\nand diffServAlgDropSpecific are all zeroDotZero.")
diffServAlgDropNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 3), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropNext.setDescription("This selects the next Differentiated Services Functional Data\nPath Element to handle traffic for this data path. This\nRowPointer should point to an instance of one of:\n diffServClfrEntry\n diffServMeterEntry\n diffServActionEntry\n diffServQEntry\n\nA value of zeroDotZero in this attribute indicates no further\nDifferentiated Services treatment is performed on traffic of this\ndata path. The use of zeroDotZero is the normal usage for the\nlast functional data path element of the current data path.\n\nWhen diffServAlgDropType is alwaysDrop(5), this object is\nignored.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServAlgDropQMeasure = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 4), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropQMeasure.setDescription("Points to an entry in the diffServQTable to indicate the queue\nthat a drop algorithm is to monitor when deciding whether to drop\na packet. If the row pointed to does not exist, the algorithmic\ndropper element is considered inactive.\n\n\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServAlgDropQThreshold = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropQThreshold.setDescription("A threshold on the depth in bytes of the queue being measured at\nwhich a trigger is generated to the dropping algorithm, unless\ndiffServAlgDropType is alwaysDrop(5) where this object is\nignored.\n\nFor the tailDrop(2) or headDrop(3) algorithms, this represents\nthe depth of the queue, pointed to by diffServAlgDropQMeasure, at\nwhich the drop action will take place. Other algorithms will need\nto define their own semantics for this threshold.")
diffServAlgDropSpecific = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 6), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropSpecific.setDescription("Points to a table entry that provides further detail regarding a\ndrop algorithm.\n\nEntries with diffServAlgDropType equal to other(1) may have this\npoint to a table defined in another MIB module.\n\nEntries with diffServAlgDropType equal to randomDrop(4) must have\nthis point to an entry in diffServRandomDropTable.\n\nFor all other algorithms specified in this MIB, this should take\nthe value zeroDotZero.\n\nThe diffServAlgDropType is authoritative for the type of the drop\nalgorithm and the specific parameters for the drop algorithm\nneeds to be evaluated based on the diffServAlgDropType.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServAlgDropOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServAlgDropOctets.setDescription("The number of octets that have been deterministically dropped by\nthis drop process.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServAlgDropPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServAlgDropPkts.setDescription("The number of packets that have been deterministically dropped\nby this drop process.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServAlgRandomDropOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServAlgRandomDropOctets.setDescription("The number of octets that have been randomly dropped by this\ndrop process. This counter applies, therefore, only to random\ndroppers.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServAlgRandomDropPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServAlgRandomDropPkts.setDescription("The number of packets that have been randomly dropped by this\ndrop process. This counter applies, therefore, only to random\ndroppers.\n\nDiscontinuities in the value of this counter can occur at re-\ninitialization of the management system and at other times as\nindicated by the value of ifCounterDiscontinuityTime on the\nrelevant interface.")
diffServAlgDropStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 11), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServAlgDropStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 2, 1, 12), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServAlgDropStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServRandomDropNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 6, 3), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServRandomDropNextFree.setDescription("This object contains an unused value for diffServRandomDropId,\nor a zero to indicate that none exist.")
diffServRandomDropTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 6, 4))
if mibBuilder.loadTexts: diffServRandomDropTable.setDescription("The random drop table contains entries describing a process that\ndrops packets randomly. Entries in this table are pointed to by\ndiffServAlgDropSpecific.")
diffServRandomDropEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServRandomDropId"))
if mibBuilder.loadTexts: diffServRandomDropEntry.setDescription("An entry describes a process that drops packets according to a\nrandom algorithm.")
diffServRandomDropId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServRandomDropId.setDescription("An index that enumerates the Random Drop entries. Managers\nobtain new values for row creation in this table by reading\ndiffServRandomDropNextFree.")
diffServRandomDropMinThreshBytes = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropMinThreshBytes.setDescription("The average queue depth in bytes, beyond which traffic has a\nnon-zero probability of being dropped. Changes in this variable\nmay or may not be reflected in the reported value of\ndiffServRandomDropMinThreshPkts.")
diffServRandomDropMinThreshPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropMinThreshPkts.setDescription("The average queue depth in packets, beyond which traffic has a\nnon-zero probability of being dropped. Changes in this variable\nmay or may not be reflected in the reported value of\ndiffServRandomDropMinThreshBytes.")
diffServRandomDropMaxThreshBytes = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropMaxThreshBytes.setDescription("The average queue depth beyond which traffic has a probability\nindicated by diffServRandomDropProbMax of being dropped or\nmarked. Note that this differs from the physical queue limit,\nwhich is stored in diffServAlgDropQThreshold. Changes in this\nvariable may or may not be reflected in the reported value of\ndiffServRandomDropMaxThreshPkts.")
diffServRandomDropMaxThreshPkts = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropMaxThreshPkts.setDescription("The average queue depth beyond which traffic has a probability\nindicated by diffServRandomDropProbMax of being dropped or\nmarked. Note that this differs from the physical queue limit,\nwhich is stored in diffServAlgDropQThreshold. Changes in this\nvariable may or may not be reflected in the reported value of\ndiffServRandomDropMaxThreshBytes.")
diffServRandomDropProbMax = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropProbMax.setDescription("The worst case random drop probability, expressed in drops per\nthousand packets.\n\nFor example, if in the worst case every arriving packet may be\ndropped (100%) for a period, this has the value 1000.\nAlternatively, if in the worst case only one percent (1%) of\ntraffic may be dropped, it has the value 10.")
diffServRandomDropWeight = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65536))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropWeight.setDescription("The weighting of past history in affecting the Exponentially\nWeighted Moving Average function that calculates the current\naverage queue depth. The equation uses\ndiffServRandomDropWeight/65536 as the coefficient for the new\nsample in the equation, and (65536 -\ndiffServRandomDropWeight)/65536 as the coefficient of the old\nvalue.\n\nImplementations may limit the values of diffServRandomDropWeight\nto a subset of the possible range of values, such as powers of\ntwo. Doing this would facilitate implementation of the\nExponentially Weighted Moving Average using shift instructions or\nregisters.")
diffServRandomDropSamplingRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000000))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropSamplingRate.setDescription("The number of times per second the queue is sampled for queue\naverage calculation. A value of zero is used to mean that the\nqueue is sampled approximately each time a packet is enqueued (or\ndequeued).")
diffServRandomDropStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 9), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServRandomDropStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 6, 4, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServRandomDropStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServQueue = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 7))
diffServQNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 7, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServQNextFree.setDescription("This object contains an unused value for diffServQId, or a zero\nto indicate that none exist.")
diffServQTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 7, 2))
if mibBuilder.loadTexts: diffServQTable.setDescription("The Queue Table enumerates the individual queues. Note that the\nMIB models queuing systems as composed of individual queues, one\nper class of traffic, even though they may in fact be structured\nas classes of traffic scheduled using a common calendar queue, or\nin other ways.")
diffServQEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServQId"))
if mibBuilder.loadTexts: diffServQEntry.setDescription("An entry in the Queue Table describes a single queue or class of\ntraffic.")
diffServQId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServQId.setDescription("An index that enumerates the Queue entries. Managers obtain new\nvalues for row creation in this table by reading\ndiffServQNextFree.")
diffServQNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 2), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServQNext.setDescription("This selects the next Differentiated Services Scheduler. The\nRowPointer must point to a diffServSchedulerEntry.\n\nA value of zeroDotZero in this attribute indicates an incomplete\ndiffServQEntry instance. In such a case, the entry has no\noperational effect, since it has no parameters to give it\nmeaning.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServQMinRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 3), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServQMinRate.setDescription("This RowPointer indicates the diffServMinRateEntry that the\nscheduler, pointed to by diffServQNext, should use to service\nthis queue.\n\nIf the row pointed to is zeroDotZero, the minimum rate and\npriority is unspecified.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServQMaxRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 4), RowPointer()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServQMaxRate.setDescription("This RowPointer indicates the diffServMaxRateEntry that the\nscheduler, pointed to by diffServQNext, should use to service\nthis queue.\n\nIf the row pointed to is zeroDotZero, the maximum rate is the\nline speed of the interface.\n\n\n\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServQStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServQStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServQStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 7, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServQStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServScheduler = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 1, 8))
diffServSchedulerNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 8, 1), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServSchedulerNextFree.setDescription("This object contains an unused value for diffServSchedulerId, or\na zero to indicate that none exist.")
diffServSchedulerTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 8, 2))
if mibBuilder.loadTexts: diffServSchedulerTable.setDescription("The Scheduler Table enumerates packet schedulers. Multiple\nscheduling algorithms can be used on a given data path, with each\nalgorithm described by one diffServSchedulerEntry.")
diffServSchedulerEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServSchedulerId"))
if mibBuilder.loadTexts: diffServSchedulerEntry.setDescription("An entry in the Scheduler Table describing a single instance of\na scheduling algorithm.")
diffServSchedulerId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServSchedulerId.setDescription("An index that enumerates the Scheduler entries. Managers obtain\nnew values for row creation in this table by reading\ndiffServSchedulerNextFree.")
diffServSchedulerNext = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 2), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerNext.setDescription("This selects the next Differentiated Services Functional Data\nPath Element to handle traffic for this data path. This normally\nis null (zeroDotZero), or points to a diffServSchedulerEntry or a\ndiffServQEntry.\n\nHowever, this RowPointer may also point to an instance of:\n diffServClfrEntry,\n diffServMeterEntry,\n diffServActionEntry,\n diffServAlgDropEntry.\n\nIt would point another diffServSchedulerEntry when implementing\nmultiple scheduler methods for the same data path, such as having\none set of queues scheduled by WRR and that group participating\nin a priority scheduling system in which other queues compete\nwith it in that way. It might also point to a second scheduler\nin a hierarchical scheduling system.\n\nIf the row pointed to is zeroDotZero, no further Differentiated\nServices treatment is performed on traffic of this data path.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
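# For example, a two-level hierarchy in which WRR-scheduled queues compete in a
# priority scheduler would be modelled by pointing each diffServQNext at the
# WRR diffServSchedulerEntry and that entry's diffServSchedulerNext at the
# priority diffServSchedulerEntry (row layout implied by the DESCRIPTION above).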
diffServSchedulerMethod = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 3), AutonomousType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerMethod.setDescription("The scheduling algorithm used by this Scheduler. zeroDotZero\nindicates that this is unknown. Standard values for generic\nalgorithms: diffServSchedulerPriority, diffServSchedulerWRR, and\ndiffServSchedulerWFQ are specified in this MIB; additional values\n\n\n\nmay be further specified in other MIBs.")
diffServSchedulerMinRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 4), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerMinRate.setDescription("This RowPointer indicates the entry in diffServMinRateTable\nwhich indicates the priority or minimum output rate from this\nscheduler. This attribute is used only when there is more than\none level of scheduler.\n\nWhen it has the value zeroDotZero, it indicates that no minimum\nrate or priority is imposed.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServSchedulerMaxRate = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 5), RowPointer().clone('0.0')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerMaxRate.setDescription("This RowPointer indicates the entry in diffServMaxRateTable\nwhich indicates the maximum output rate from this scheduler.\nWhen more than one maximum rate applies (eg, when a multi-rate\nshaper is in view), it points to the first of those rate entries.\nThis attribute is used only when there is more than one level of\nscheduler.\n\nWhen it has the value zeroDotZero, it indicates that no maximum\nrate is imposed.\n\nSetting this to point to a target that does not exist results in\nan inconsistentValue error. If the row pointed to is removed or\nbecomes inactive by other means, the treatment is as if this\nattribute contains a value of zeroDotZero.")
diffServSchedulerStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 6), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServSchedulerStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 2, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServSchedulerStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServMinRateNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 8, 3), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServMinRateNextFree.setDescription("This object contains an unused value for diffServMinRateId, or a\nzero to indicate that none exist.")
diffServMinRateTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 8, 4))
if mibBuilder.loadTexts: diffServMinRateTable.setDescription("The Minimum Rate Parameters Table enumerates individual sets of\nscheduling parameters that can be used/reused by Queues and\nSchedulers.")
diffServMinRateEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServMinRateId"))
if mibBuilder.loadTexts: diffServMinRateEntry.setDescription("An entry in the Minimum Rate Parameters Table describes a single\nset of scheduling parameters for use by one or more queues or\nschedulers.")
diffServMinRateId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServMinRateId.setDescription("An index that enumerates the Scheduler Parameter entries.\nManagers obtain new values for row creation in this table by\nreading diffServMinRateNextFree.")
diffServMinRatePriority = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMinRatePriority.setDescription("The priority of this input to the associated scheduler, relative\n\n\n\nto the scheduler's other inputs. A queue or scheduler with a\nlarger numeric value will be served before another with a smaller\nnumeric value.")
diffServMinRateAbsolute = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMinRateAbsolute.setDescription("The minimum absolute rate, in kilobits/sec, that a downstream\nscheduler element should allocate to this queue. If the value is\nzero, then there is effectively no minimum rate guarantee. If the\nvalue is non-zero, the scheduler will assure the servicing of\nthis queue to at least this rate.\n\nNote that this attribute value and that of\ndiffServMinRateRelative are coupled: changes to one will affect\nthe value of the other. They are linked by the following\nequation, in that setting one will change the other:\n\n diffServMinRateRelative =\n (diffServMinRateAbsolute*1000000)/ifSpeed\n\nor, if appropriate:\n\n diffServMinRateRelative = diffServMinRateAbsolute/ifHighSpeed")
diffServMinRateRelative = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMinRateRelative.setDescription("The minimum rate that a downstream scheduler element should\nallocate to this queue, relative to the maximum rate of the\ninterface as reported by ifSpeed or ifHighSpeed, in units of\n1/1000 of 1. If the value is zero, then there is effectively no\nminimum rate guarantee. If the value is non-zero, the scheduler\nwill assure the servicing of this queue to at least this rate.\n\nNote that this attribute value and that of\ndiffServMinRateAbsolute are coupled: changes to one will affect\nthe value of the other. They are linked by the following\nequation, in that setting one will change the other:\n\n\n\n diffServMinRateRelative =\n (diffServMinRateAbsolute*1000000)/ifSpeed\n\nor, if appropriate:\n\n diffServMinRateRelative = diffServMinRateAbsolute/ifHighSpeed")
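# Illustrative sketch only (not part of the generated MIB module): the coupling
# equation quoted in the DESCRIPTIONs above, with hypothetical helper names.
# The same relationship holds for diffServMaxRateAbsolute/Relative.
def _example_rate_relative_from_absolute(rate_kbps, if_speed_bps):
    # diffServMinRateRelative = (diffServMinRateAbsolute * 1000000) / ifSpeed
    return (rate_kbps * 1000000) // if_speed_bps
def _example_rate_relative_from_absolute_high(rate_kbps, if_high_speed_mbps):
    # or, if appropriate: diffServMinRateRelative = diffServMinRateAbsolute / ifHighSpeed
    return rate_kbps // if_high_speed_mbps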
diffServMinRateStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 5), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMinRateStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServMinRateStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 4, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMinRateStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServMaxRateNextFree = MibScalar((1, 3, 6, 1, 2, 1, 97, 1, 8, 5), IndexIntegerNextFree()).setMaxAccess("readonly")
if mibBuilder.loadTexts: diffServMaxRateNextFree.setDescription("This object contains an unused value for diffServMaxRateId, or a\nzero to indicate that none exist.")
diffServMaxRateTable = MibTable((1, 3, 6, 1, 2, 1, 97, 1, 8, 6))
if mibBuilder.loadTexts: diffServMaxRateTable.setDescription("The Maximum Rate Parameter Table enumerates individual sets of\nscheduling parameters that can be used/reused by Queues and\nSchedulers.")
diffServMaxRateEntry = MibTableRow((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1)).setIndexNames((0, "DIFFSERV-MIB", "diffServMaxRateId"), (0, "DIFFSERV-MIB", "diffServMaxRateLevel"))
if mibBuilder.loadTexts: diffServMaxRateEntry.setDescription("An entry in the Maximum Rate Parameter Table describes a single\nset of scheduling parameters for use by one or more queues or\nschedulers.")
diffServMaxRateId = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 1), IndexInteger()).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServMaxRateId.setDescription("An index that enumerates the Maximum Rate Parameter entries.\nManagers obtain new values for row creation in this table by\nreading diffServMaxRateNextFree.")
diffServMaxRateLevel = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 32))).setMaxAccess("noaccess")
if mibBuilder.loadTexts: diffServMaxRateLevel.setDescription("An index that indicates which level of a multi-rate shaper is\nbeing given its parameters. A multi-rate shaper has some number\nof rate levels. Frame Relay's dual rate specification refers to a\n'committed' and an 'excess' rate; ATM's dual rate specification\nrefers to a 'mean' and a 'peak' rate. This table is generalized\nto support an arbitrary number of rates. The committed or mean\nrate is level 1, the peak rate (if any) is the highest level rate\nconfigured, and if there are other rates they are distributed in\nmonotonically increasing order between them.")
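# For example, a dual-rate (committed/excess) shaper would be modelled as two
# conceptual rows sharing one diffServMaxRateId: level 1 carries the committed
# rate and level 2 the peak rate, with level 1's diffServMaxRateThreshold
# giving the queue depth at which the higher rate takes over.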
diffServMaxRateAbsolute = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMaxRateAbsolute.setDescription("The maximum rate in kilobits/sec that a downstream scheduler\nelement should allocate to this queue. If the value is zero, then\nthere is effectively no maximum rate limit and that the scheduler\nshould attempt to be work conserving for this queue. If the value\nis non-zero, the scheduler will limit the servicing of this queue\nto, at most, this rate in a non-work-conserving manner.\n\nNote that this attribute value and that of\ndiffServMaxRateRelative are coupled: changes to one will affect\nthe value of the other. They are linked by the following\n\n\n\nequation, in that setting one will change the other:\n\n diffServMaxRateRelative =\n (diffServMaxRateAbsolute*1000000)/ifSpeed\n\nor, if appropriate:\n\n diffServMaxRateRelative = diffServMaxRateAbsolute/ifHighSpeed")
diffServMaxRateRelative = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMaxRateRelative.setDescription("The maximum rate that a downstream scheduler element should\nallocate to this queue, relative to the maximum rate of the\ninterface as reported by ifSpeed or ifHighSpeed, in units of\n1/1000 of 1. If the value is zero, then there is effectively no\nmaximum rate limit and the scheduler should attempt to be work\nconserving for this queue. If the value is non-zero, the\nscheduler will limit the servicing of this queue to, at most,\nthis rate in a non-work-conserving manner.\n\nNote that this attribute value and that of\ndiffServMaxRateAbsolute are coupled: changes to one will affect\nthe value of the other. They are linked by the following\nequation, in that setting one will change the other:\n\n diffServMaxRateRelative =\n (diffServMaxRateAbsolute*1000000)/ifSpeed\n\nor, if appropriate:\n\n diffServMaxRateRelative = diffServMaxRateAbsolute/ifHighSpeed")
diffServMaxRateThreshold = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 5), BurstSize()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMaxRateThreshold.setDescription("The number of bytes of queue depth at which the rate of a\n\n\n\nmulti-rate scheduler will increase to the next output rate. In\nthe last conceptual row for such a shaper, this threshold is\nignored and by convention is zero.")
diffServMaxRateStorage = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 6), StorageType().clone('nonVolatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMaxRateStorage.setDescription("The storage type for this conceptual row. Conceptual rows\nhaving the value 'permanent' need not allow write-access to any\ncolumnar objects in the row.")
diffServMaxRateStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 97, 1, 8, 6, 1, 7), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: diffServMaxRateStatus.setDescription("The status of this conceptual row. All writable objects in this\nrow may be modified at any time. Setting this variable to\n'destroy' when the MIB contains one or more RowPointers pointing\nto it results in destruction being delayed until the row is no\nlonger used.")
diffServMIBConformance = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 2))
diffServMIBCompliances = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 2, 1))
diffServMIBGroups = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 2, 2))
diffServMIBAdmin = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 3))
diffServTBMeters = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 3, 1))
diffServTBParamSimpleTokenBucket = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 1))
if mibBuilder.loadTexts: diffServTBParamSimpleTokenBucket.setDescription("Two Parameter Token Bucket Meter as described in the Informal\nDifferentiated Services Model section 5.2.3.")
diffServTBParamAvgRate = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 2))
if mibBuilder.loadTexts: diffServTBParamAvgRate.setDescription("Average Rate Meter as described in the Informal Differentiated\nServices Model section 5.2.1.")
diffServTBParamSrTCMBlind = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 3))
if mibBuilder.loadTexts: diffServTBParamSrTCMBlind.setDescription("Single Rate Three Color Marker Metering as defined by RFC 2697,\nin the `Color Blind' mode as described by the RFC.")
diffServTBParamSrTCMAware = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 4))
if mibBuilder.loadTexts: diffServTBParamSrTCMAware.setDescription("Single Rate Three Color Marker Metering as defined by RFC 2697,\nin the `Color Aware' mode as described by the RFC.")
diffServTBParamTrTCMBlind = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 5))
if mibBuilder.loadTexts: diffServTBParamTrTCMBlind.setDescription("Two Rate Three Color Marker Metering as defined by RFC 2698, in\nthe `Color Blind' mode as described by the RFC.")
diffServTBParamTrTCMAware = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 6))
if mibBuilder.loadTexts: diffServTBParamTrTCMAware.setDescription("Two Rate Three Color Marker Metering as defined by RFC 2698, in\nthe `Color Aware' mode as described by the RFC.")
diffServTBParamTswTCM = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 1, 7))
if mibBuilder.loadTexts: diffServTBParamTswTCM.setDescription("Time Sliding Window Three Color Marker Metering as defined by\nRFC 2859.")
diffServSchedulers = MibIdentifier((1, 3, 6, 1, 2, 1, 97, 3, 2))
diffServSchedulerPriority = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 2, 1))
if mibBuilder.loadTexts: diffServSchedulerPriority.setDescription("For use with diffServSchedulerMethod to indicate the Priority\nscheduling method. This is defined as an algorithm in which the\npresence of data in a queue or set of queues absolutely precludes\ndequeue from another queue or set of queues of lower priority.\nNote that attributes from diffServMinRateEntry of the\nqueues/schedulers feeding this scheduler are used when\ndetermining the next packet to schedule.")
diffServSchedulerWRR = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 2, 2))
if mibBuilder.loadTexts: diffServSchedulerWRR.setDescription("For use with diffServSchedulerMethod to indicate the Weighted\nRound Robin scheduling method, defined as any algorithm in which\na set of queues are visited in a fixed order, and varying amounts\nof traffic are removed from each queue in turn to implement an\naverage output rate by class. Notice attributes from\ndiffServMinRateEntry of the queues/schedulers feeding this\nscheduler are used when determining the next packet to schedule.")
diffServSchedulerWFQ = ObjectIdentity((1, 3, 6, 1, 2, 1, 97, 3, 2, 3))
if mibBuilder.loadTexts: diffServSchedulerWFQ.setDescription("For use with diffServSchedulerMethod to indicate the Weighted\nFair Queuing scheduling method, defined as any algorithm in which\na set of queues are conceptually visited in some order, to\nimplement an average output rate by class. Notice attributes from\ndiffServMinRateEntry of the queues/schedulers feeding this\nscheduler are used when determining the next packet to schedule.")
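# A manager could walk the scheduler methods configured on an agent with the
# pysnmp high-level API, for example (hypothetical target and community,
# sketch only):
#
#     from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                               ContextData, ObjectType, ObjectIdentity, nextCmd)
#     for err_ind, err_status, err_idx, var_binds in nextCmd(
#             SnmpEngine(), CommunityData('public'),
#             UdpTransportTarget(('agent.example.org', 161)), ContextData(),
#             ObjectType(ObjectIdentity('DIFFSERV-MIB', 'diffServSchedulerMethod')),
#             lexicographicMode=False):
#         if err_ind or err_status:
#             break
#         for var_bind in var_binds:
#             print(var_bind.prettyPrint())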
# Augmentions
# Groups
diffServMIBDataPathGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 1)).setObjects(*(("DIFFSERV-MIB", "diffServDataPathStatus"), ("DIFFSERV-MIB", "diffServDataPathStart"), ("DIFFSERV-MIB", "diffServDataPathStorage"), ) )
if mibBuilder.loadTexts: diffServMIBDataPathGroup.setDescription("The Data Path Group defines the MIB Objects that describe a\nfunctional data path.")
diffServMIBClfrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 2)).setObjects(*(("DIFFSERV-MIB", "diffServClfrStorage"), ("DIFFSERV-MIB", "diffServClfrNextFree"), ("DIFFSERV-MIB", "diffServClfrStatus"), ) )
if mibBuilder.loadTexts: diffServMIBClfrGroup.setDescription("The Classifier Group defines the MIB Objects that describe the\n\n\n\nlist the starts of individual classifiers.")
diffServMIBClfrElementGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 3)).setObjects(*(("DIFFSERV-MIB", "diffServClfrElementNext"), ("DIFFSERV-MIB", "diffServClfrElementNextFree"), ("DIFFSERV-MIB", "diffServClfrElementStorage"), ("DIFFSERV-MIB", "diffServClfrElementStatus"), ("DIFFSERV-MIB", "diffServClfrElementPrecedence"), ("DIFFSERV-MIB", "diffServClfrElementSpecific"), ) )
if mibBuilder.loadTexts: diffServMIBClfrElementGroup.setDescription("The Classifier Element Group defines the MIB Objects that\ndescribe the classifier elements that make up a generic\nclassifier.")
diffServMIBMultiFieldClfrGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 4)).setObjects(*(("DIFFSERV-MIB", "diffServMultiFieldClfrDstAddr"), ("DIFFSERV-MIB", "diffServMultiFieldClfrStorage"), ("DIFFSERV-MIB", "diffServMultiFieldClfrSrcAddr"), ("DIFFSERV-MIB", "diffServMultiFieldClfrSrcL4PortMin"), ("DIFFSERV-MIB", "diffServMultiFieldClfrDstL4PortMax"), ("DIFFSERV-MIB", "diffServMultiFieldClfrAddrType"), ("DIFFSERV-MIB", "diffServMultiFieldClfrSrcL4PortMax"), ("DIFFSERV-MIB", "diffServMultiFieldClfrSrcPrefixLength"), ("DIFFSERV-MIB", "diffServMultiFieldClfrNextFree"), ("DIFFSERV-MIB", "diffServMultiFieldClfrFlowId"), ("DIFFSERV-MIB", "diffServMultiFieldClfrDstPrefixLength"), ("DIFFSERV-MIB", "diffServMultiFieldClfrDstL4PortMin"), ("DIFFSERV-MIB", "diffServMultiFieldClfrStatus"), ("DIFFSERV-MIB", "diffServMultiFieldClfrDscp"), ("DIFFSERV-MIB", "diffServMultiFieldClfrProtocol"), ) )
if mibBuilder.loadTexts: diffServMIBMultiFieldClfrGroup.setDescription("The Multi-field Classifier Group defines the MIB Objects that\ndescribe a classifier element for matching on various fields of\nan IP and upper-layer protocol header.")
diffServMIBMeterGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 5)).setObjects(*(("DIFFSERV-MIB", "diffServMeterSucceedNext"), ("DIFFSERV-MIB", "diffServMeterNextFree"), ("DIFFSERV-MIB", "diffServMeterStorage"), ("DIFFSERV-MIB", "diffServMeterSpecific"), ("DIFFSERV-MIB", "diffServMeterFailNext"), ("DIFFSERV-MIB", "diffServMeterStatus"), ) )
if mibBuilder.loadTexts: diffServMIBMeterGroup.setDescription("The Meter Group defines the objects used in describing a generic\nmeter element.")
diffServMIBTBParamGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 6)).setObjects(*(("DIFFSERV-MIB", "diffServTBParamType"), ("DIFFSERV-MIB", "diffServTBParamBurstSize"), ("DIFFSERV-MIB", "diffServTBParamNextFree"), ("DIFFSERV-MIB", "diffServTBParamStatus"), ("DIFFSERV-MIB", "diffServTBParamRate"), ("DIFFSERV-MIB", "diffServTBParamInterval"), ("DIFFSERV-MIB", "diffServTBParamStorage"), ) )
if mibBuilder.loadTexts: diffServMIBTBParamGroup.setDescription("The Token-Bucket Meter Group defines the objects used in\ndescribing a token bucket meter element.")
diffServMIBActionGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 7)).setObjects(*(("DIFFSERV-MIB", "diffServActionNext"), ("DIFFSERV-MIB", "diffServActionStatus"), ("DIFFSERV-MIB", "diffServActionNextFree"), ("DIFFSERV-MIB", "diffServActionSpecific"), ("DIFFSERV-MIB", "diffServActionStorage"), ("DIFFSERV-MIB", "diffServActionInterface"), ) )
if mibBuilder.loadTexts: diffServMIBActionGroup.setDescription("The Action Group defines the objects used in describing a\ngeneric action element.")
diffServMIBDscpMarkActGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 8)).setObjects(*(("DIFFSERV-MIB", "diffServDscpMarkActDscp"), ) )
if mibBuilder.loadTexts: diffServMIBDscpMarkActGroup.setDescription("The DSCP Mark Action Group defines the objects used in\ndescribing a DSCP Marking Action element.")
diffServMIBCounterGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 9)).setObjects(*(("DIFFSERV-MIB", "diffServAlgRandomDropPkts"), ("DIFFSERV-MIB", "diffServCountActOctets"), ("DIFFSERV-MIB", "diffServAlgDropPkts"), ("DIFFSERV-MIB", "diffServCountActPkts"), ("DIFFSERV-MIB", "diffServAlgRandomDropOctets"), ("DIFFSERV-MIB", "diffServCountActStatus"), ("DIFFSERV-MIB", "diffServAlgDropOctets"), ("DIFFSERV-MIB", "diffServCountActStorage"), ("DIFFSERV-MIB", "diffServCountActNextFree"), ) )
if mibBuilder.loadTexts: diffServMIBCounterGroup.setDescription("A collection of objects providing information specific to\npacket-oriented network interfaces.")
diffServMIBAlgDropGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 10)).setObjects(*(("DIFFSERV-MIB", "diffServAlgDropNext"), ("DIFFSERV-MIB", "diffServAlgDropStatus"), ("DIFFSERV-MIB", "diffServAlgDropNextFree"), ("DIFFSERV-MIB", "diffServAlgDropSpecific"), ("DIFFSERV-MIB", "diffServAlgDropQThreshold"), ("DIFFSERV-MIB", "diffServAlgDropType"), ("DIFFSERV-MIB", "diffServAlgDropQMeasure"), ("DIFFSERV-MIB", "diffServAlgDropStorage"), ) )
if mibBuilder.loadTexts: diffServMIBAlgDropGroup.setDescription("The Algorithmic Drop Group contains the objects that describe\nalgorithmic dropper operation and configuration.")
diffServMIBRandomDropGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 11)).setObjects(*(("DIFFSERV-MIB", "diffServRandomDropMinThreshBytes"), ("DIFFSERV-MIB", "diffServRandomDropMaxThreshPkts"), ("DIFFSERV-MIB", "diffServRandomDropStorage"), ("DIFFSERV-MIB", "diffServRandomDropStatus"), ("DIFFSERV-MIB", "diffServRandomDropNextFree"), ("DIFFSERV-MIB", "diffServRandomDropProbMax"), ("DIFFSERV-MIB", "diffServRandomDropMinThreshPkts"), ("DIFFSERV-MIB", "diffServRandomDropWeight"), ("DIFFSERV-MIB", "diffServRandomDropMaxThreshBytes"), ("DIFFSERV-MIB", "diffServRandomDropSamplingRate"), ) )
if mibBuilder.loadTexts: diffServMIBRandomDropGroup.setDescription("The Random Drop Group augments the Algorithmic Drop Group for\nrandom dropper operation and configuration.")
diffServMIBQGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 12)).setObjects(*(("DIFFSERV-MIB", "diffServQNext"), ("DIFFSERV-MIB", "diffServQNextFree"), ("DIFFSERV-MIB", "diffServQMaxRate"), ("DIFFSERV-MIB", "diffServQStatus"), ("DIFFSERV-MIB", "diffServQMinRate"), ("DIFFSERV-MIB", "diffServQStorage"), ) )
if mibBuilder.loadTexts: diffServMIBQGroup.setDescription("The Queue Group contains the objects that describe an\n\n\n\ninterface's queues.")
diffServMIBSchedulerGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 13)).setObjects(*(("DIFFSERV-MIB", "diffServSchedulerNextFree"), ("DIFFSERV-MIB", "diffServSchedulerMethod"), ("DIFFSERV-MIB", "diffServSchedulerNext"), ("DIFFSERV-MIB", "diffServSchedulerStatus"), ("DIFFSERV-MIB", "diffServSchedulerMinRate"), ("DIFFSERV-MIB", "diffServSchedulerMaxRate"), ("DIFFSERV-MIB", "diffServSchedulerStorage"), ) )
if mibBuilder.loadTexts: diffServMIBSchedulerGroup.setDescription("The Scheduler Group contains the objects that describe packet\nschedulers on interfaces.")
diffServMIBMinRateGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 14)).setObjects(*(("DIFFSERV-MIB", "diffServMinRateStorage"), ("DIFFSERV-MIB", "diffServMinRateAbsolute"), ("DIFFSERV-MIB", "diffServMinRatePriority"), ("DIFFSERV-MIB", "diffServMinRateStatus"), ("DIFFSERV-MIB", "diffServMinRateNextFree"), ("DIFFSERV-MIB", "diffServMinRateRelative"), ) )
if mibBuilder.loadTexts: diffServMIBMinRateGroup.setDescription("The Minimum Rate Parameter Group contains the objects that\ndescribe packet schedulers' minimum rate or priority guarantees.")
diffServMIBMaxRateGroup = ObjectGroup((1, 3, 6, 1, 2, 1, 97, 2, 2, 15)).setObjects(*(("DIFFSERV-MIB", "diffServMaxRateStatus"), ("DIFFSERV-MIB", "diffServMaxRateAbsolute"), ("DIFFSERV-MIB", "diffServMaxRateThreshold"), ("DIFFSERV-MIB", "diffServMaxRateStorage"), ("DIFFSERV-MIB", "diffServMaxRateNextFree"), ("DIFFSERV-MIB", "diffServMaxRateRelative"), ) )
if mibBuilder.loadTexts: diffServMIBMaxRateGroup.setDescription("The Maximum Rate Parameter Group contains the objects that\ndescribe packet schedulers' maximum rate guarantees.")
# Compliances
diffServMIBFullCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 97, 2, 1, 1)).setObjects(*(("DIFFSERV-MIB", "diffServMIBMaxRateGroup"), ("DIFFSERV-MIB", "diffServMIBMultiFieldClfrGroup"), ("DIFFSERV-MIB", "diffServMIBMinRateGroup"), ("DIFFSERV-MIB", "diffServMIBClfrElementGroup"), ("DIFFSERV-MIB", "diffServMIBSchedulerGroup"), ("DIFFSERV-MIB", "diffServMIBMeterGroup"), ("DIFFSERV-MIB", "diffServMIBDscpMarkActGroup"), ("DIFFSERV-MIB", "diffServMIBAlgDropGroup"), ("IF-MIB", "ifCounterDiscontinuityGroup"), ("DIFFSERV-MIB", "diffServMIBRandomDropGroup"), ("DIFFSERV-MIB", "diffServMIBClfrGroup"), ("DIFFSERV-MIB", "diffServMIBActionGroup"), ("DIFFSERV-MIB", "diffServMIBTBParamGroup"), ("DIFFSERV-MIB", "diffServMIBCounterGroup"), ("DIFFSERV-MIB", "diffServMIBQGroup"), ("DIFFSERV-MIB", "diffServMIBDataPathGroup"), ) )
if mibBuilder.loadTexts: diffServMIBFullCompliance.setDescription("When this MIB is implemented with support for read-create, then\nsuch an implementation can claim full compliance. Such devices\ncan then be both monitored and configured with this MIB.")
diffServMIBReadOnlyCompliance = ModuleCompliance((1, 3, 6, 1, 2, 1, 97, 2, 1, 2)).setObjects(*(("DIFFSERV-MIB", "diffServMIBMaxRateGroup"), ("DIFFSERV-MIB", "diffServMIBMultiFieldClfrGroup"), ("DIFFSERV-MIB", "diffServMIBMinRateGroup"), ("DIFFSERV-MIB", "diffServMIBClfrElementGroup"), ("DIFFSERV-MIB", "diffServMIBSchedulerGroup"), ("DIFFSERV-MIB", "diffServMIBMeterGroup"), ("DIFFSERV-MIB", "diffServMIBDscpMarkActGroup"), ("DIFFSERV-MIB", "diffServMIBAlgDropGroup"), ("IF-MIB", "ifCounterDiscontinuityGroup"), ("DIFFSERV-MIB", "diffServMIBRandomDropGroup"), ("DIFFSERV-MIB", "diffServMIBClfrGroup"), ("DIFFSERV-MIB", "diffServMIBActionGroup"), ("DIFFSERV-MIB", "diffServMIBTBParamGroup"), ("DIFFSERV-MIB", "diffServMIBCounterGroup"), ("DIFFSERV-MIB", "diffServMIBQGroup"), ("DIFFSERV-MIB", "diffServMIBDataPathGroup"), ) )
if mibBuilder.loadTexts: diffServMIBReadOnlyCompliance.setDescription("When this MIB is implemented without support for read-create\n(i.e. in read-only mode), then such an implementation can claim\nread-only compliance. Such a device can then be monitored but can\nnot be configured with this MIB.")
# Exports
# Module identity
mibBuilder.exportSymbols("DIFFSERV-MIB", PYSNMP_MODULE_ID=diffServMib)
# Types
mibBuilder.exportSymbols("DIFFSERV-MIB", IfDirection=IfDirection, IndexInteger=IndexInteger, IndexIntegerNextFree=IndexIntegerNextFree)
# Objects
mibBuilder.exportSymbols("DIFFSERV-MIB", diffServMib=diffServMib, diffServMIBObjects=diffServMIBObjects, diffServDataPath=diffServDataPath, diffServDataPathTable=diffServDataPathTable, diffServDataPathEntry=diffServDataPathEntry, diffServDataPathIfDirection=diffServDataPathIfDirection, diffServDataPathStart=diffServDataPathStart, diffServDataPathStorage=diffServDataPathStorage, diffServDataPathStatus=diffServDataPathStatus, diffServClassifier=diffServClassifier, diffServClfrNextFree=diffServClfrNextFree, diffServClfrTable=diffServClfrTable, diffServClfrEntry=diffServClfrEntry, diffServClfrId=diffServClfrId, diffServClfrStorage=diffServClfrStorage, diffServClfrStatus=diffServClfrStatus, diffServClfrElementNextFree=diffServClfrElementNextFree, diffServClfrElementTable=diffServClfrElementTable, diffServClfrElementEntry=diffServClfrElementEntry, diffServClfrElementId=diffServClfrElementId, diffServClfrElementPrecedence=diffServClfrElementPrecedence, diffServClfrElementNext=diffServClfrElementNext, diffServClfrElementSpecific=diffServClfrElementSpecific, diffServClfrElementStorage=diffServClfrElementStorage, diffServClfrElementStatus=diffServClfrElementStatus, diffServMultiFieldClfrNextFree=diffServMultiFieldClfrNextFree, diffServMultiFieldClfrTable=diffServMultiFieldClfrTable, diffServMultiFieldClfrEntry=diffServMultiFieldClfrEntry, diffServMultiFieldClfrId=diffServMultiFieldClfrId, diffServMultiFieldClfrAddrType=diffServMultiFieldClfrAddrType, diffServMultiFieldClfrDstAddr=diffServMultiFieldClfrDstAddr, diffServMultiFieldClfrDstPrefixLength=diffServMultiFieldClfrDstPrefixLength, diffServMultiFieldClfrSrcAddr=diffServMultiFieldClfrSrcAddr, diffServMultiFieldClfrSrcPrefixLength=diffServMultiFieldClfrSrcPrefixLength, diffServMultiFieldClfrDscp=diffServMultiFieldClfrDscp, diffServMultiFieldClfrFlowId=diffServMultiFieldClfrFlowId, diffServMultiFieldClfrProtocol=diffServMultiFieldClfrProtocol, diffServMultiFieldClfrDstL4PortMin=diffServMultiFieldClfrDstL4PortMin, diffServMultiFieldClfrDstL4PortMax=diffServMultiFieldClfrDstL4PortMax, diffServMultiFieldClfrSrcL4PortMin=diffServMultiFieldClfrSrcL4PortMin, diffServMultiFieldClfrSrcL4PortMax=diffServMultiFieldClfrSrcL4PortMax, diffServMultiFieldClfrStorage=diffServMultiFieldClfrStorage, diffServMultiFieldClfrStatus=diffServMultiFieldClfrStatus, diffServMeter=diffServMeter, diffServMeterNextFree=diffServMeterNextFree, diffServMeterTable=diffServMeterTable, diffServMeterEntry=diffServMeterEntry, diffServMeterId=diffServMeterId, diffServMeterSucceedNext=diffServMeterSucceedNext, diffServMeterFailNext=diffServMeterFailNext, diffServMeterSpecific=diffServMeterSpecific, diffServMeterStorage=diffServMeterStorage, diffServMeterStatus=diffServMeterStatus, diffServTBParam=diffServTBParam, diffServTBParamNextFree=diffServTBParamNextFree, diffServTBParamTable=diffServTBParamTable, diffServTBParamEntry=diffServTBParamEntry, diffServTBParamId=diffServTBParamId, diffServTBParamType=diffServTBParamType, diffServTBParamRate=diffServTBParamRate, diffServTBParamBurstSize=diffServTBParamBurstSize, diffServTBParamInterval=diffServTBParamInterval, diffServTBParamStorage=diffServTBParamStorage, diffServTBParamStatus=diffServTBParamStatus, diffServAction=diffServAction, diffServActionNextFree=diffServActionNextFree, diffServActionTable=diffServActionTable, diffServActionEntry=diffServActionEntry, diffServActionId=diffServActionId, diffServActionInterface=diffServActionInterface, diffServActionNext=diffServActionNext, diffServActionSpecific=diffServActionSpecific, 
diffServActionStorage=diffServActionStorage, diffServActionStatus=diffServActionStatus, diffServDscpMarkActTable=diffServDscpMarkActTable, diffServDscpMarkActEntry=diffServDscpMarkActEntry, diffServDscpMarkActDscp=diffServDscpMarkActDscp, diffServCountActNextFree=diffServCountActNextFree, diffServCountActTable=diffServCountActTable, diffServCountActEntry=diffServCountActEntry, diffServCountActId=diffServCountActId, diffServCountActOctets=diffServCountActOctets, diffServCountActPkts=diffServCountActPkts, diffServCountActStorage=diffServCountActStorage, diffServCountActStatus=diffServCountActStatus, diffServAlgDrop=diffServAlgDrop, diffServAlgDropNextFree=diffServAlgDropNextFree, diffServAlgDropTable=diffServAlgDropTable, diffServAlgDropEntry=diffServAlgDropEntry, diffServAlgDropId=diffServAlgDropId, diffServAlgDropType=diffServAlgDropType, diffServAlgDropNext=diffServAlgDropNext, diffServAlgDropQMeasure=diffServAlgDropQMeasure, diffServAlgDropQThreshold=diffServAlgDropQThreshold, diffServAlgDropSpecific=diffServAlgDropSpecific, diffServAlgDropOctets=diffServAlgDropOctets, diffServAlgDropPkts=diffServAlgDropPkts, diffServAlgRandomDropOctets=diffServAlgRandomDropOctets, diffServAlgRandomDropPkts=diffServAlgRandomDropPkts, diffServAlgDropStorage=diffServAlgDropStorage, diffServAlgDropStatus=diffServAlgDropStatus, diffServRandomDropNextFree=diffServRandomDropNextFree, diffServRandomDropTable=diffServRandomDropTable, diffServRandomDropEntry=diffServRandomDropEntry, diffServRandomDropId=diffServRandomDropId, diffServRandomDropMinThreshBytes=diffServRandomDropMinThreshBytes, diffServRandomDropMinThreshPkts=diffServRandomDropMinThreshPkts, diffServRandomDropMaxThreshBytes=diffServRandomDropMaxThreshBytes, diffServRandomDropMaxThreshPkts=diffServRandomDropMaxThreshPkts, diffServRandomDropProbMax=diffServRandomDropProbMax, diffServRandomDropWeight=diffServRandomDropWeight, diffServRandomDropSamplingRate=diffServRandomDropSamplingRate, diffServRandomDropStorage=diffServRandomDropStorage, diffServRandomDropStatus=diffServRandomDropStatus, diffServQueue=diffServQueue, diffServQNextFree=diffServQNextFree, diffServQTable=diffServQTable, diffServQEntry=diffServQEntry, diffServQId=diffServQId, diffServQNext=diffServQNext, diffServQMinRate=diffServQMinRate, diffServQMaxRate=diffServQMaxRate, diffServQStorage=diffServQStorage, diffServQStatus=diffServQStatus, diffServScheduler=diffServScheduler, diffServSchedulerNextFree=diffServSchedulerNextFree)
mibBuilder.exportSymbols("DIFFSERV-MIB", diffServSchedulerTable=diffServSchedulerTable, diffServSchedulerEntry=diffServSchedulerEntry, diffServSchedulerId=diffServSchedulerId, diffServSchedulerNext=diffServSchedulerNext, diffServSchedulerMethod=diffServSchedulerMethod, diffServSchedulerMinRate=diffServSchedulerMinRate, diffServSchedulerMaxRate=diffServSchedulerMaxRate, diffServSchedulerStorage=diffServSchedulerStorage, diffServSchedulerStatus=diffServSchedulerStatus, diffServMinRateNextFree=diffServMinRateNextFree, diffServMinRateTable=diffServMinRateTable, diffServMinRateEntry=diffServMinRateEntry, diffServMinRateId=diffServMinRateId, diffServMinRatePriority=diffServMinRatePriority, diffServMinRateAbsolute=diffServMinRateAbsolute, diffServMinRateRelative=diffServMinRateRelative, diffServMinRateStorage=diffServMinRateStorage, diffServMinRateStatus=diffServMinRateStatus, diffServMaxRateNextFree=diffServMaxRateNextFree, diffServMaxRateTable=diffServMaxRateTable, diffServMaxRateEntry=diffServMaxRateEntry, diffServMaxRateId=diffServMaxRateId, diffServMaxRateLevel=diffServMaxRateLevel, diffServMaxRateAbsolute=diffServMaxRateAbsolute, diffServMaxRateRelative=diffServMaxRateRelative, diffServMaxRateThreshold=diffServMaxRateThreshold, diffServMaxRateStorage=diffServMaxRateStorage, diffServMaxRateStatus=diffServMaxRateStatus, diffServMIBConformance=diffServMIBConformance, diffServMIBCompliances=diffServMIBCompliances, diffServMIBGroups=diffServMIBGroups, diffServMIBAdmin=diffServMIBAdmin, diffServTBMeters=diffServTBMeters, diffServTBParamSimpleTokenBucket=diffServTBParamSimpleTokenBucket, diffServTBParamAvgRate=diffServTBParamAvgRate, diffServTBParamSrTCMBlind=diffServTBParamSrTCMBlind, diffServTBParamSrTCMAware=diffServTBParamSrTCMAware, diffServTBParamTrTCMBlind=diffServTBParamTrTCMBlind, diffServTBParamTrTCMAware=diffServTBParamTrTCMAware, diffServTBParamTswTCM=diffServTBParamTswTCM, diffServSchedulers=diffServSchedulers, diffServSchedulerPriority=diffServSchedulerPriority, diffServSchedulerWRR=diffServSchedulerWRR, diffServSchedulerWFQ=diffServSchedulerWFQ)
# Groups
mibBuilder.exportSymbols("DIFFSERV-MIB", diffServMIBDataPathGroup=diffServMIBDataPathGroup, diffServMIBClfrGroup=diffServMIBClfrGroup, diffServMIBClfrElementGroup=diffServMIBClfrElementGroup, diffServMIBMultiFieldClfrGroup=diffServMIBMultiFieldClfrGroup, diffServMIBMeterGroup=diffServMIBMeterGroup, diffServMIBTBParamGroup=diffServMIBTBParamGroup, diffServMIBActionGroup=diffServMIBActionGroup, diffServMIBDscpMarkActGroup=diffServMIBDscpMarkActGroup, diffServMIBCounterGroup=diffServMIBCounterGroup, diffServMIBAlgDropGroup=diffServMIBAlgDropGroup, diffServMIBRandomDropGroup=diffServMIBRandomDropGroup, diffServMIBQGroup=diffServMIBQGroup, diffServMIBSchedulerGroup=diffServMIBSchedulerGroup, diffServMIBMinRateGroup=diffServMIBMinRateGroup, diffServMIBMaxRateGroup=diffServMIBMaxRateGroup)
# Compliances
mibBuilder.exportSymbols("DIFFSERV-MIB", diffServMIBFullCompliance=diffServMIBFullCompliance, diffServMIBReadOnlyCompliance=diffServMIBReadOnlyCompliance)
# Source file: /colegio/educa/migrations/0003_auto_20200620_1553.py (repo: JTorero/Colegios, no license)
# Generated by Django 2.2.7 on 2020-06-20 20:53
from django.db import migrations, models
import django.db.models.deletion
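# This migration creates the Periodo model, updates the Meta options and
# database table name of the existing Aula model, and adds the Aula_Periodo
# model that links Aula and Periodo through foreign keys.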
class Migration(migrations.Migration):
dependencies = [
('educa', '0002_alumno'),
]
operations = [
migrations.CreateModel(
name='Periodo',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('nombre_periodo', models.CharField(max_length=50)),
],
),
migrations.AlterModelOptions(
name='aula',
options={'ordering': ['id'], 'verbose_name': 'aula', 'verbose_name_plural': 'aulas'},
),
migrations.AlterModelTable(
name='aula',
table='educa_aula',
),
migrations.CreateModel(
name='Aula_Periodo',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('aula', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='educa.Aula')),
('periodo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='educa.Periodo')),
],
),
]
# Source file: /frappe/website/doctype/personal_data_download_request/test_personal_data_download_request.py (repo: ektai/frappe3, MIT license)
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
import json
from frappe.website.doctype.personal_data_download_request.personal_data_download_request import get_user_data
from frappe.contacts.doctype.contact.contact import get_contact_name
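# Tests for the Personal Data Download Request doctype: they check that
# get_user_data() returns the stored Contact records for a user, and that
# saving a download request attaches a data file and queues a notification
# email.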
class TestRequestPersonalData(unittest.TestCase):
def setUp(self):
create_user_if_not_exists(email='[email protected]')
def tearDown(self):
frappe.db.sql("""DELETE FROM `tabPersonal Data Download Request`""")
def test_user_data_creation(self):
user_data = json.loads(get_user_data('[email protected]'))
contact_name = get_contact_name('[email protected]')
expected_data = {'Contact': frappe.get_all('Contact', {"name": contact_name}, ["*"])}
expected_data = json.loads(json.dumps(expected_data, default=str))
self.assertEqual({'Contact': user_data['Contact']}, expected_data)
def test_file_and_email_creation(self):
frappe.set_user('[email protected]')
download_request = frappe.get_doc({
"doctype": 'Personal Data Download Request',
'user': '[email protected]'
})
download_request.save(ignore_permissions=True)
frappe.set_user('Administrator')
file_count = frappe.db.count('File', {
'attached_to_doctype':'Personal Data Download Request',
'attached_to_name': download_request.name
})
self.assertEqual(file_count, 1)
email_queue = frappe.get_all('Email Queue',
fields=['message'],
order_by="creation DESC",
limit=1)
self.assertTrue("Subject: Download Your Data" in email_queue[0].message)
frappe.db.sql("delete from `tabEmail Queue`")
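# Helper used by setUp(): recreates the website user that the tests above act on.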
def create_user_if_not_exists(email, first_name = None):
frappe.delete_doc_if_exists("User", email)
frappe.get_doc({
"doctype": "User",
"user_type": "Website User",
"email": email,
"send_welcome_email": 0,
"first_name": first_name or email.split("@")[0],
"birth_date": frappe.utils.now_datetime()
	}).insert(ignore_permissions=True)
# Source file: /tests/test_urls.py (repo: sbraz/sphinx-notfound-page, MIT license)
# -*- coding: utf-8 -*-
import os
import pytest
import sphinx
import shutil
import warnings
srcdir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'examples',
'default',
)
rstsrcdir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'examples',
'404rst',
)
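# Example Sphinx projects used as test sources: "default" exercises the
# extension defaults, while "404rst" ships its own 404.rst page (see the
# tests below).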
@pytest.fixture(autouse=True, scope='function')
def remove_sphinx_build_output():
"""Remove _build/ folder, if exist."""
for path in (srcdir, rstsrcdir):
build_path = os.path.join(path, '_build')
if os.path.exists(build_path):
shutil.rmtree(build_path)
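# The app/status/warning arguments used by the tests below come from Sphinx's
# pytest fixtures (sphinx.testing.fixtures); enabling them in conftest.py is
# assumed here.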
@pytest.mark.sphinx(srcdir=srcdir)
def test_404_page_created(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
@pytest.mark.sphinx(srcdir=srcdir)
def test_default_settings(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<h1>Page not found</h1>',
'Thanks for trying.',
'<title>Page not found — Python documentation</title>',
# sidebar URLs
'<h1 class="logo"><a href="/en/latest/index.html">Python</a></h1>',
'<form class="search" action="/en/latest/search.html" method="get">',
'<li><a href="/en/latest/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/en/latest/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_context': {'title': 'My custom title', 'body': '<h1>Boo!</h1>My bad.'},
},
)
def test_context_settings(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<h1>Boo!</h1>',
'My bad.',
'<title>My custom title — Python documentation</title>',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_pagename': '500',
},
)
def test_pagename_setting(app, status, warning):
app.build()
path = app.outdir / '500.html'
assert path.exists()
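# For reference, the confoverrides exercised above map onto user configuration
# in conf.py roughly like the following sketch (values are illustrative only):
#
#     extensions = ["notfound.extension"]
#     notfound_pagename = "500"   # build 500.html instead of the default 404.html
#     notfound_context = {
#         "title": "My custom title",
#         "body": "<h1>Boo!</h1>My bad.",
#     }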
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_language': 'ja',
},
)
def test_default_language_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/ja/latest/index.html">Python</a></h1>',
'<form class="search" action="/ja/latest/search.html" method="get">',
'<li><a href="/ja/latest/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/ja/latest/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/ja/latest/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/ja/latest/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_version': 'customversion',
},
)
def test_default_version_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/en/customversion/index.html">Python</a></h1>',
'<form class="search" action="/en/customversion/search.html" method="get">',
'<li><a href="/en/customversion/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/en/customversion/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/en/customversion/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/en/customversion/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_no_urls_prefix': True,
},
)
def test_no_urls_prefix_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/index.html">Python</a></h1>',
'<form class="search" action="/search.html" method="get">',
'<li><a href="/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_urls_prefix': '/language/version/',
},
)
def test_urls_prefix_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/language/version/index.html">Python</a></h1>',
'<form class="search" action="/language/version/search.html" method="get">',
'<li><a href="/language/version/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/language/version/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/language/version/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/language/version/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_urls_prefix': None,
},
)
def test_urls_prefix_setting_none(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/index.html">Python</a></h1>',
'<form class="search" action="/search.html" method="get">',
'<li><a href="/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_language': 'es',
'notfound_default_version': 'customversion',
'notfound_no_urls_prefix': True,
},
)
def test_no_urls_prefix_setting_preference(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/index.html">Python</a></h1>',
'<form class="search" action="/search.html" method="get">',
'<li><a href="/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_version': 'v2.0.5',
'notfound_default_language': 'pt',
},
)
def test_default_version_language_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<h1 class="logo"><a href="/pt/v2.0.5/index.html">Python</a></h1>',
'<form class="search" action="/pt/v2.0.5/search.html" method="get">',
'<li><a href="/pt/v2.0.5/index.html">Documentation overview</a><ul>',
# resource URLs
'<link rel="stylesheet" href="/pt/v2.0.5/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/pt/v2.0.5/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/pt/v2.0.5/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_template': 'template.html',
'notfound_context': {
'body': 'The body goes here',
'title': 'Custom title',
'special_setting': 'a special value',
},
},
)
def test_template_setting(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'Custom title',
'The body goes here',
'<p>This is rendered using a custom template</p>',
'<p>... which has a custom context as well: a special value</p>',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=rstsrcdir,
confoverrides={
'version': '2.5.1',
},
)
def test_custom_404_rst_source(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# custom 404.rst file content
'<title>Oh, oh - Page not found — Python documentation</title>',
'<p>This is a custom 404.rst file.</p>',
'<p>This file should be rendered instead of the default one.</p>',
"<p>Variables Sphinx substitution should be allowed here.\nExample, version: 2.5.1.</p>",
# sidebar URLs
'<h1 class="logo"><a href="/en/latest/index.html">Python</a></h1>',
'<form class="search" action="/en/latest/search.html" method="get">',
'<li><a href="/en/latest/index.html">Documentation overview</a><ul>',
# resources
'<link rel="stylesheet" href="/en/latest/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(srcdir=rstsrcdir)
def test_image_on_404_rst_source(app, status, warning):
app.build()
# Check the image was added to the builder/environment images
assert 'test.png' in app.builder.images
assert 'test.png' in app.env.images
# Check the image was copied into the output dir
path = app.outdir / '_images' / 'test.png'
assert path.exists()
path = app.outdir / '_images' / 'loudly-crying-face.png'
assert path.exists()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# .. image::
'<img alt="An image" src="/en/latest/_images/test.png" />',
'<img alt="Image from folder" src="/en/latest/_images/loudly-crying-face.png" />',
]
# .. figure::
if sphinx.version_info < (2, 0):
chunks.append(
'<div class="figure" id="id1">\n<img alt="/en/latest/_images/test.png" src="/en/latest/_images/test.png" />\n<p class="caption"><span class="caption-text">Description.</span></p>\n</div>'
)
elif sphinx.version_info < (2, 1):
chunks.append(
u'<div class="figure align-center" id="id1">\n<img alt="/en/latest/_images/test.png" src="/en/latest/_images/test.png" />\n<p class="caption"><span class="caption-text">Description.</span><a class="headerlink" href="#id1" title="Permalink to this image">¶</a></p>\n</div>',
)
else:
chunks.append(
u'<div class="figure align-default" id="id1">\n<img alt="/en/latest/_images/test.png" src="/en/latest/_images/test.png" />\n<p class="caption"><span class="caption-text">Description.</span><a class="headerlink" href="#id1" title="Permalink to this image">¶</a></p>\n</div>',
)
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(srcdir=rstsrcdir)
def test_image_looks_like_absolute_url(app, status, warning):
app.build()
path = app.outdir / '_images' / 'https.png'
assert path.exists()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<img alt="PATH looking as an URL" src="/en/latest/_images/https.png" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(srcdir=rstsrcdir)
def test_image_absolute_url(app, status, warning):
app.build()
path = app.outdir / '404.html'
    assert path.exists()
content = open(path).read()
chunks = [
'<img alt="Read the Docs Logo" src="https://read-the-docs-guidelines.readthedocs-hosted.com/_images/logo-dark.png" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
buildername='dirhtml',
)
def test_urls_for_dirhtml_builder(app, status, warning):
app.build()
path = app.outdir / '404' / 'index.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<form class="search" action="/en/latest/search/" method="get">',
'<li class="toctree-l1"><a class="reference internal" href="/en/latest/chapter/">Chapter</a></li>',
# resources
'<link rel="stylesheet" href="/en/latest/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/en/latest/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
buildername='dirhtml',
confoverrides={
'notfound_no_urls_prefix': True,
},
)
def test_no_prefix_urls_for_dirhtml_builder(app, status, warning):
app.build()
path = app.outdir / '404' / 'index.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<form class="search" action="/search/" method="get">',
'<li class="toctree-l1"><a class="reference internal" href="/chapter/">Chapter</a></li>',
# resources
'<link rel="stylesheet" href="/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(srcdir=srcdir)
def test_sphinx_resource_urls(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
if sphinx.version_info < (2, 4, 0):
chunks = [
# Sphinx's resources URLs
'<script type="text/javascript" src="/en/latest/_static/jquery.js"></script>',
'<script type="text/javascript" src="/en/latest/_static/underscore.js"></script>',
'<script type="text/javascript" src="/en/latest/_static/doctools.js"></script>',
]
else:
# #6925: html: Remove redundant type="text/javascript" from <script> elements
chunks = [
# Sphinx's resources URLs
'<script src="/en/latest/_static/jquery.js"></script>',
'<script src="/en/latest/_static/underscore.js"></script>',
'<script src="/en/latest/_static/doctools.js"></script>',
]
if sphinx.version_info >= (1, 8):
if sphinx.version_info < (2, 4, 0):
chunks.append(
'<script type="text/javascript" src="/en/latest/_static/language_data.js"></script>',
)
else:
chunks.append(
'<script src="/en/latest/_static/language_data.js"></script>',
)
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_version': 'default',
'notfound_default_language': 'ja',
},
)
def test_toctree_urls_notfound_default(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
# sidebar URLs
'<form class="search" action="/ja/default/search.html" method="get">',
'<li class="toctree-l1"><a class="reference internal" href="/ja/default/chapter.html">Chapter</a></li>',
# resources
'<link rel="stylesheet" href="/ja/default/_static/alabaster.css" type="text/css" />',
'<link rel="stylesheet" href="/ja/default/_static/pygments.css" type="text/css" />',
'<link rel="stylesheet" href="/ja/default/_static/custom.css" type="text/css" />',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
)
def test_toctree_links(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<h3>Navigation</h3>',
'<li class="toctree-l1"><a class="reference internal" href="/en/latest/chapter-i.html">Chapter I</a></li>',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_language': 'pt-br',
'notfound_default_version': 'stable',
},
)
def test_toctree_links_custom_settings(app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<h3>Navigation</h3>',
'<li class="toctree-l1"><a class="reference internal" href="/pt-br/stable/chapter-i.html">Chapter I</a></li>',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.environ(
READTHEDOCS_VERSION='v2.0.5',
)
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_language': 'pt-br',
},
)
def test_toctree_links_language_setting_version_environment(environ, app, status, warning):
app.build()
path = app.outdir / '404.html'
assert path.exists()
content = open(path).read()
chunks = [
'<h3>Navigation</h3>',
'<li class="toctree-l1"><a class="reference internal" href="/pt-br/v2.0.5/chapter-i.html">Chapter I</a></li>',
]
for chunk in chunks:
assert chunk in content
@pytest.mark.sphinx(
srcdir=rstsrcdir,
)
def test_automatic_orphan(app, status, warning):
app.build()
if sphinx.version_info >= (3, 0, 0):
assert app.env.metadata['404'] == {'orphan': True, 'nosearch': True}
else:
assert app.env.metadata['404'] == {'orphan': True}
@pytest.mark.sphinx(
srcdir=srcdir,
confoverrides={
'notfound_default_language': 'ja',
'notfound_default_version': 'stable',
'notfound_no_urls_prefix': True,
},
)
@pytest.mark.xfail(reason='Not sure how to capture warnings from events')
def test_deprecation_warnings(app, status, warning):
messages = [
'notfound_default_language is deprecated. Use "notfound_urls_prefix" instead.',
'notfound_default_version is deprecated. Use "notfound_urls_prefix" instead.',
'notfound_no_urls_prefix is deprecated. Use "notfound_urls_prefix" instead.',
]
with warnings.catch_warnings(record=True) as warn:
warnings.simplefilter('always')
app.build()
assert len(warn) == 3
assert issubclass(warn[-1].category, DeprecationWarning)
for w in warn:
            assert str(w.message) in messages
path = app.outdir / '404.html'
assert path.exists()
| [
"[email protected]"
] | |
c5a1c49864b92fb6c4dcd7ee138f89563b247dae | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/license/consumedtask.py | 755a8455d7a73d4dbda910708d94f6333059703c | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 5,767 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ConsumedTask(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.license.ConsumedTask")
meta.moClassName = "licenseConsumedTask"
meta.rnFormat = "consumedTask-%(licenseType)s"
meta.category = MoCategory.REGULAR
meta.label = "Entitlement Consumed"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.license.ConsumedInstDef")
meta.childClasses.add("cobra.model.license.ConsumedAppInstDef")
meta.childNamesAndRnPrefix.append(("cobra.model.license.ConsumedInstDef", "instDef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.license.ConsumedAppInstDef", "appDef-"))
meta.parentClasses.add("cobra.model.license.Holder")
meta.rnPrefixes = [
('consumedTask-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "licenseType", "licenseType", 36910, PropCategory.REGULAR)
prop.label = "License Type"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 4
prop.defaultValueStr = "leaf-base"
prop._addConstant("apic-plugin-app", "apic-plugin-app", 101)
prop._addConstant("ave-inst", "ave-inst", 13)
prop._addConstant("fex-16-10g", "fex-16-10g", 0)
prop._addConstant("fex-32-10g", "fex-32-10g", 1)
prop._addConstant("fex-48-10g", "fex-48-10g", 2)
prop._addConstant("fex-48-1g", "fex-48-1g", 3)
prop._addConstant("leaf-24-port", "leaf-24-port", 14)
prop._addConstant("leaf-48-port", "leaf-48-port", 15)
prop._addConstant("leaf-adv-fabric-insight-add-on", "leaf-adv-fabric-insight-add-on", 18)
prop._addConstant("leaf-adv-multi-pod", "leaf-adv-multi-pod", 9)
prop._addConstant("leaf-adv-multi-site", "leaf-adv-multi-site", 8)
prop._addConstant("leaf-base", "leaf-base", 4)
prop._addConstant("leaf-ess-fabric-insight-basic", "leaf-ess-fabric-insight-basic", 17)
prop._addConstant("leaf-ess-netflow", "leaf-ess-netflow", 5)
prop._addConstant("leaf-ess-ptp", "leaf-ess-ptp", 7)
prop._addConstant("leaf-ess-tetration", "leaf-ess-tetration", 6)
prop._addConstant("leaf-plugin-app", "leaf-plugin-app", 100)
prop._addConstant("leaf-sec", "leaf-sec", 10)
prop._addConstant("leaf-storage", "leaf-storage", 12)
prop._addConstant("spine-lc-sec", "spine-lc-sec", 11)
prop._addConstant("vpod-ave", "vpod-ave", 19)
prop._addConstant("vpod-vleaf", "vpod-vleaf", 20)
prop._addConstant("vpod-vspine", "vpod-vspine", 21)
meta.props.add("licenseType", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "licenseType"))
def __init__(self, parentMoOrDn, licenseType, markDirty=True, **creationProps):
namingVals = [licenseType]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
6ba1c6858b83940c3599f01f612860955bfeecd0 | 781e2692049e87a4256320c76e82a19be257a05d | /assignments/python/wc/src/311.py | ad8d0ddb3393ecd74fb6c04b5ba6a331d199648b | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 270 | py | from collections import Counter
def word_count(words):
wordCount = Counter()
for word in words.split():
word = ''.join(ch for ch in word if ch.isalnum()).lower()
if not word:
continue
wordCount[word] += 1
return wordCount
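

# Usage sketch added for illustration (not part of the original exercise
# solution); the sample sentence is made up:
if __name__ == '__main__':
    # "fish" is counted twice after lower-casing and stripping non-alphanumerics
    print(word_count("one fish two fish"))  # Counter({'fish': 2, 'one': 1, 'two': 1})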
| [
"[email protected]"
] | |
fa89c2b5471eefbf222f8142835081003be7ccbb | 610942849a2fac0b229af569c0c3db001d87eb94 | /utest/libdoc/test_libdoc_api.py | de2ea0957c9f96a0f21a95212f20f403dd24236e | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | jnhyperion/robotframework | a6befd1c2d50d08b7c625a73228b43c04769ca3d | 559eb744c26f6acf11eb2d3a11be8343532c9a90 | refs/heads/master | 2023-01-27T12:50:41.962755 | 2022-08-24T08:33:03 | 2022-08-24T08:33:03 | 273,444,398 | 1 | 0 | Apache-2.0 | 2023-01-13T08:09:17 | 2020-06-19T08:30:13 | Python | UTF-8 | Python | false | false | 1,388 | py | from io import StringIO
import sys
import tempfile
import unittest
from robot import libdoc
from robot.utils.asserts import assert_equal
class TestLibdoc(unittest.TestCase):
def setUp(self):
sys.stdout = StringIO()
def tearDown(self):
sys.stdout = sys.__stdout__
def test_html(self):
output = tempfile.mkstemp(suffix='.html')[1]
libdoc.libdoc('String', output)
assert_equal(sys.stdout.getvalue().strip(), output)
with open(output) as f:
assert '"name": "String"' in f.read()
def test_xml(self):
output = tempfile.mkstemp(suffix='.xml')[1]
libdoc.libdoc('String', output)
assert_equal(sys.stdout.getvalue().strip(), output)
with open(output) as f:
assert 'name="String"' in f.read()
def test_format(self):
output = tempfile.mkstemp()[1]
libdoc.libdoc('String', output, format='xml')
assert_equal(sys.stdout.getvalue().strip(), output)
with open(output) as f:
assert 'name="String"' in f.read()
def test_quiet(self):
output = tempfile.mkstemp(suffix='.html')[1]
libdoc.libdoc('String', output, quiet=True)
assert_equal(sys.stdout.getvalue().strip(), '')
with open(output) as f:
assert '"name": "String"' in f.read()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5f1a9693ae8b87d22ffb444dacd8ef3562f6ac77 | f43d3731a21ee5df09298f5541b52484f408e010 | /NewsModel/migrations/0008_auto_20170815_1747.py | 479138d6adfe7c26b885c29401ac5f0adb3f2221 | [] | no_license | cash2one/wechat_admin | 2ba8c35deffff37c263b7091229ba2d86f2aaeaf | af0712fdad867d76dcee2092abcf32cada49d075 | refs/heads/master | 2021-05-04T22:22:53.514787 | 2017-09-25T10:03:07 | 2017-09-25T10:03:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-15 09:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('NewsModel', '0007_auto_20170815_1741'),
]
operations = [
migrations.AlterField(
model_name='news',
name='comment_url',
field=models.URLField(blank=True, default=None, max_length=255, null=True, verbose_name='评论URL'),
),
migrations.AlterField(
model_name='news',
name='images',
field=models.TextField(blank=True, default=None, null=True, verbose_name='图片链接'),
),
migrations.AlterField(
model_name='news',
name='source',
field=models.CharField(blank=True, default=None, max_length=50, null=True, verbose_name='来源'),
),
migrations.AlterField(
model_name='news',
name='source_url',
field=models.URLField(blank=True, default=None, max_length=255, null=True, verbose_name='源URL'),
),
]
| [
"“[email protected]”"
] | |
8dd5c50b0655740204fdad26e5451ea16c624a82 | 506f5da3ab7675b8beb406e735f837927623d705 | /docs/conf.py | ce82727ae6c3b4977951b6b9797a5b52185ddcff | [
"MIT"
] | permissive | pombredanne/weavery | 367801fc9bdc5bede53bbbbd6d35e2144f6323ef | b5cfe5387f6d14aa3fa09487bd60748bec79fc63 | refs/heads/master | 2021-05-09T18:19:24.533464 | 2017-10-07T05:07:27 | 2017-10-07T05:07:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,366 | py | # -*- coding: utf-8 -*-
#
# weavery documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 6 18:15:49 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
DIR = os.path.dirname(os.path.abspath(__file__))
def read(fpath):
with open(fpath, 'rt') as f:
return f.read().strip()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'weavery'
copyright = u'2017, Matt Bodenhamer'
author = u'Matt Bodenhamer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = read(os.path.join(DIR, '../version.txt'))
# The short X.Y version.
version = release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'weaverydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'weavery.tex', u'weavery Documentation',
u'Matt Bodenhamer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'weavery', u'weavery Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'weavery', u'weavery Documentation',
author, 'weavery', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"[email protected]"
] | |
586ad0103c3a894f7a97fab1082d6b4ed5220fd3 | f09e98bf5de6f6c49df2dbeea93bd09f4b3b902f | /google-cloud-sdk/lib/surface/auth/__init__.py | f02fed108e951643a585b30690c3e329b4aeaf4e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Peterfeng100/notepal | 75bfaa806e24d85189bd2d09d3cb091944dc97e6 | d5ba3fb4a06516fec4a4ae3bd64a9db55f36cfcd | refs/heads/master | 2021-07-08T22:57:17.407571 | 2019-01-22T19:06:01 | 2019-01-22T19:06:01 | 166,490,067 | 4 | 1 | null | 2020-07-25T04:37:35 | 2019-01-19T00:37:04 | Python | UTF-8 | Python | false | false | 2,031 | py | # -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth for the Google Cloud SDK."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA,
base.ReleaseTrack.BETA,
base.ReleaseTrack.ALPHA)
class Auth(base.Group):
"""Manage oauth2 credentials for the Google Cloud SDK.
The gcloud auth command group lets you grant and revoke authorization to Cloud
SDK (gcloud) to access Google Cloud Platform. Typically, when scripting Cloud
SDK tools for use on multiple machines, using `gcloud auth
activate-service-account` is recommended.
For more information on authorization and credential types, see:
[](https://cloud.google.com/sdk/docs/authorizing).
While running `gcloud auth` commands, the `--account` flag can be specified
to any command to use that account without activation.
## EXAMPLES
To authenticate a user account with gcloud and minimal user output, run:
$ gcloud auth login --brief
To list all credentialed accounts and identify the current active account,
run:
$ gcloud auth list
To revoke credentials for a user account (like logging out), run:
$ gcloud auth revoke [email protected]
"""
category = 'Identity and Security'
def Filter(self, context, args):
del context, args
base.DisableUserProjectQuota()
| [
"[email protected]"
] | |
c1e3dce31ab3f2259b84c16695ec02b683b497d8 | 1752e7d1cd7bca76b3e8eaf1b2bb7eee175e1d46 | /gitwrapperlib/_version.py | 96d0effe0e9f44c11ba9837acb4c44ace0a752bf | [
"MIT"
] | permissive | costastf/gitwrapperlib | f231947aeecea86ca00c556785318032559a6b3c | 521948528c175e5d1cd5c9b794a5927c50fbb78f | refs/heads/main | 2023-08-16T23:43:05.317150 | 2023-06-13T11:30:44 | 2023-06-13T11:30:44 | 116,044,039 | 0 | 5 | MIT | 2023-09-06T18:29:46 | 2018-01-02T18:19:00 | Python | UTF-8 | Python | false | false | 2,194 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: _version.py
#
# Copyright 2018 Costas Tyfoxylos
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Manages the version of the package.
.. _Google Python Style Guide:
https://google.github.io/styleguide/pyguide.html
"""
import os
__author__ = '''Costas Tyfoxylos <[email protected]>'''
__docformat__ = '''google'''
__date__ = '''02-01-2018'''
__copyright__ = '''Copyright 2018, Costas Tyfoxylos'''
__license__ = '''MIT'''
__maintainer__ = '''Costas Tyfoxylos'''
__email__ = '''<[email protected]>'''
__status__ = '''Development''' # "Prototype", "Development", "Production".
VERSION_FILE_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'.VERSION'
)
)
LOCAL_VERSION_FILE_PATH = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'.VERSION'
)
)
try:
with open(VERSION_FILE_PATH, encoding='utf8') as f:
__version__ = f.read()
except IOError:
try:
with open(LOCAL_VERSION_FILE_PATH, encoding='utf8') as f:
__version__ = f.read()
except IOError:
__version__ = 'unknown'
| [
"[email protected]"
] | |
8d72a1f6de5f7cd5b4ddf699eba0de9e05b8231c | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/add_program_response.py | 8fba53ef1e1dfa09c08d0090bc1597b16b2eeefb | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,417 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AddProgramResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
}
attribute_map = {
}
def __init__(self):
"""AddProgramResponse - a model defined in huaweicloud sdk"""
super(AddProgramResponse, self).__init__()
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddProgramResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
9ea58e81f9278bd420e79295e83a82243615180c | 1c07f2090687a7789528ee5e674c39c019ecb57d | /src/server/venv/bin/pip | 5ed4d16b44aeaf8f622f6490fb9c51078387fe16 | [] | no_license | GitYR/EEG-Communication | c37a5e8f6f27f1d2cc9907b0580db0639a2498fd | 7064d8efcdba18f235b1b46c0137559f760abac0 | refs/heads/master | 2022-09-24T21:35:29.892217 | 2020-05-27T09:54:48 | 2020-05-27T09:54:48 | 266,724,514 | 0 | 0 | null | 2020-05-25T08:24:45 | 2020-05-25T08:24:45 | null | UTF-8 | Python | false | false | 265 | #!/home/hayoung/PycharmProjects/EEG_SERVER/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
db9b04a272126f991bb34f351bc484e806e522d2 | ea5cb47780499016ad4a09c300358df96ce6b22f | /examples/py_example.py | a5c08b76bf2962d8f1a569a4869ea37b811c7abc | [
"MIT"
] | permissive | liuguoyou/PyPatchMatch | 84e02d26534fcc62bc2f8368db38dfd9883a074f | 79e5a19296ec044c619484ff7a9e8cded43acd49 | refs/heads/master | 2020-12-08T12:21:43.706407 | 2020-01-10T03:37:10 | 2020-01-10T03:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : test.py
# Author : Jiayuan Mao
# Email : [email protected]
# Date : 01/09/2020
#
# Distributed under terms of the MIT license.
from PIL import Image
import sys
sys.path.insert(0, '../')
import patch_match
if __name__ == '__main__':
source = Image.open('./images/forest_pruned.bmp')
result = patch_match.inpaint(source, patch_size=3)
Image.fromarray(result).show()
| [
"[email protected]"
] | |
c908e3859988f35653deebb62457f73eeba5a12b | c09e2e3b3743b86a24eadd0d62717d4925a661b3 | /setup.py | 5638fab5ecbd18ab65729f9752fc0abd8a87cee2 | [
"MIT"
] | permissive | knowsuchagency/foobar | c0a4a2b067663a8d1b2a26c5ddc2994dc6f05ab9 | b02bea7e6a9af232175443e6b1512fc531b61f40 | refs/heads/master | 2020-12-02T06:36:31.700633 | 2017-07-11T07:21:14 | 2017-07-11T07:21:14 | 96,863,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from configparser import ConfigParser
from setuptools import setup
def get_requirements(section: str) -> list:
"""Read requirements from Pipfile."""
pip_config = ConfigParser()
pip_config.read('Pipfile')
def gen():
for item in pip_config.items(section):
lib, version = item
lib, version = lib.strip('"'), version.strip('"')
# ungracefully handle wildcard requirements
if version == '*': version = ''
yield lib + version
return list(gen())
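
# Illustrative note (the Pipfile snippet below is assumed, not taken from this
# repository): given a section such as
#   [packages]
#   requests = ">=2.0"
#   click = "*"
# get_requirements('packages') would return ['requests>=2.0', 'click'].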
packages = get_requirements('packages')
dev_packages = get_requirements('dev-packages')
setup(
install_requires=packages,
tests_require=dev_packages,
extras_require={
'dev': dev_packages,
},
entry_points={
'console_scripts': [
'foobar=foobar.cli:main'
]
},
)
| [
"[email protected]"
] | |
c08aed31ba2639b58a40e30e827a07c4a0f9456c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02836/s704418504.py | 3bddb063b93136d0f9c5b66900e0c03c589e9032 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | s = input()
n = int(len(s)/2)
x = 0
for i in range(n):
if s[i] != s[-1-i]:
x += 1
print(int(x)) | [
"[email protected]"
] | |
a7b7144aee7e78d723595ba5bc5e22bd66793753 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /paddle/fluid/eager/auto_code_generator/generator/eager_gen.py | 5ae57faa37bfeabc4df04467245f8a306a23ec43 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 94,326 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import re
import argparse
import os
import logging
from codegen_utils import core_ops_returns_info, core_ops_args_info, core_ops_args_type_info
from codegen_utils import yaml_types_mapping
from codegen_utils import ReadFwdFile, ReadBwdFile
from codegen_utils import FindGradName, FindForwardName, GetSavedName, GetGradNodeName
from codegen_utils import IsPlainTensorType, IsVectorTensorType
from codegen_utils import GetConstReference, RemoveConstAndReference
from codegen_utils import GetDygraphForwardFunctionName, GetIntermediateAPIFunctionName, GetDygraphLogName
from codegen_utils import GetAutoGradMetaName, GetAutoGradMetaVectorName
from codegen_utils import RemoveSpecialSymbolsInName, RecoverBaseNameOfInplaceFunction
from codegen_utils import GetInplacedFunctionName
from codegen_utils import ParseYamlArgs, ParseYamlReturns, ParseYamlForwardFromBackward
from codegen_utils import ParseYamlForward, ParseYamlBackward
from codegen_utils import ParseYamlInplaceInfo
from codegen_utils import FunctionGeneratorBase, GeneratorBase
from codegen_utils import ops_to_fill_zero_for_empty_grads
from codegen_utils import AssertMessage, GetIndent
# Note: assign is an inplace api when parameter(output) isn't none,
# so we should check parameter(output) against the inplace rules.
# But because there is no such check in the old dygraph mode, in order to
# keep the code compatible, we also skip the inplace check in the new dygraph temporarily;
# this will be fixed in the future.
inplace_check_blacklist = set(["assign_out_"])
# Blacklist of ops for which code generation is NOT applied
black_ops_list = [
"conv2d", "conv2d_grad", "conv2d_grad_grad", "add_n", "add_n_grad"
]
###########
## Utils ##
###########
def ParseArguments():
parser = argparse.ArgumentParser(
description='Eager Code Generator Args Parser')
parser.add_argument('--nodes_h_path', type=str)
parser.add_argument('--nodes_cc_path', type=str)
parser.add_argument('--forwards_h_path', type=str)
parser.add_argument('--forwards_cc_path', type=str)
parser.add_argument('--api_yaml_path', type=str)
parser.add_argument('--backward_yaml_path', type=str)
args = parser.parse_args()
return args
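

# Typical invocation sketch (file names below are placeholders; the real paths
# are passed in by the build system):
#   python eager_gen.py \
#     --api_yaml_path=ops.yaml --backward_yaml_path=backward.yaml \
#     --forwards_h_path=dygraph_functions.h --forwards_cc_path=dygraph_functions.cc \
#     --nodes_h_path=nodes.h --nodes_cc_path=nodes.cc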
########################
## Code Gen Templates ##
########################
SET_PLAIN_TENSOR_WRAPPER_TEMPLATE = \
""" void SetTensorWrapper{}(const paddle::experimental::Tensor& {}) {{
{} = egr::TensorWrapper({}, {});
}}
"""
SET_VECTOR_TENSOR_WRAPPER_TEMPLATE = \
""" void SetTensorWrapper{}(const std::vector<paddle::experimental::Tensor>& {}) {{
for(const auto& eager_tensor : {}) {{
{}.emplace_back(egr::TensorWrapper(eager_tensor, {}));
}};
}}
"""
PLAIN_TENSOR_MEMBER_TEMPLATE = \
""" egr::TensorWrapper {};
"""
VECTOR_TENSOR_MEMBER_TEMPLATE = \
""" std::vector<egr::TensorWrapper> {};
"""
CLEAR_TENSOR_WRAPPER_TEMPLATE = \
""" {}.clear();
"""
CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE = \
""" for (auto& tw : {}) {{
tw.clear();
}}
"""
SET_ATTR_METHOD_TEMPLATE = \
""" void SetAttribute{}({} {}) {{
{} = {};
}}
"""
ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE = \
""" {} {} = {};
"""
ATTRIBUTE_MEMBER_TEMPLATE = \
""" {} {};
"""
NODE_DECLARATION_TEMPLATE = \
"""
class {} : public egr::GradNodeBase {{
public:
{}() : egr::GradNodeBase() {{}}
{}(size_t bwd_in_slot_num, size_t bwd_out_slot_num) :
egr::GradNodeBase(bwd_in_slot_num, bwd_out_slot_num) {{}}
~{}() override = default;
virtual paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> operator()(
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph = false, bool is_new_grad = false) override;
std::string name() override {{ return \"{}\"; }}
void ClearTensorWrappers() override {{
{}
SetIsTensorWrappersCleared(true);
}}
std::shared_ptr<GradNodeBase> Copy() const override {{
auto copied_node = std::shared_ptr<{}>(new {}(*this));
return copied_node;
}}
// SetTensorWrapperX, SetTensorWrapperY, ...
{}
// SetAttributes
{}
private:
// TensorWrappers
{}
// Attributes
{}}};
"""
GRAD_FUNCTION_TEMPLATE = \
"""
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
VLOG(3) << \"Running AD API GRAD: \" << \"{}\";
// Fill Zero For GradIn Tensors
{}
// Apply Gradient Hooks
auto hooked_grads = ApplyGradientHooks(grads);
// Collect GradIn Tensors, Attrs and Recovered TensorWrappers
{}
// Prepare Grad function call
{}
// Runtime check if we need next grad
{}
// Inplace Check
{}
// Inplace Strategy
{}
VLOG(5) << \"Running C++ API: \" << \"{}\";
// Before log info
{}
// Call grad_api function
{}
  // Check NaN and Inf if needed
{}
// Get GradOut autograd_meta
{}
// Create Grad Node
{}
VLOG(4) << \"Finish AD API GRAD: {}";
// LOG IF DEBUG
{}
// Return
{}
}}
"""
FORWARD_FUNCTION_TEMPLATE = \
"""
{} {}({}) {{
VLOG(3) << \"Running AD API: \" << \"{}\";
// Dygraph Record Event
{}
// AMP Logic
{}
// Layout autotune
{}
// Get Input AutoGradMeta
{}
VLOG(5) << \"Running C++ API: \" << \"{}\";
// Before log info
{}
// Forward API Call
{}
// Check NaN and Inf if needed
{}
// Get Outputs
{}
// Get Output AutoGradMeta
{}
bool trace_backward = egr::Controller::Instance().HasGrad();
bool require_any_grad = egr::EagerUtils::ComputeRequireGrad({});
// Check Inplace if needed
{}{}
// Node Creation
{}
VLOG(4) << \"Finish AD API: {}";
// LOG IF DEBUG
{}
// Returns
return {};
}}
"""
AFTER_LOG_PRINT_TEMPLATE = \
"""
if(VLOG_IS_ON(4)){{
const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], Output: [%s] }} \";
{}
VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}}
"""
BEFORE_LOG_PRINT_TEMPLATE = \
"""
if(VLOG_IS_ON(3)){{
const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s]}} \";
{}
VLOG(3) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str);
}}
"""
FORWARD_ONLY_FUNCTION_TEMPLATE = \
"""
{} {}({}) {{
VLOG(3) << \"Running AD API: \" << \"{}\";
// Dygraph Record Event
{}
// AMP Logic
{}
// Layout autotune
{}
VLOG(5) << \"Running C++ API: \" << \"{}\";
// Before log info
{}
// Forward API Call
{}
// Get Outputs
{}
VLOG(4) << \"Finish AD API: {}";
// LOG IF DEBUG
{}
// Returns
return {};
}}
"""
FORWARD_BODY_TEMPLATE = \
""" if(require_any_grad) {{
{}
egr::EagerUtils::PassStopGradient({});
// Node Construction
{}
// SetAttributes if needed
{}
// Set TensorWrappers for Forward Inputs if needed
{}
// SetGradOutMeta & SetEdges
{}
// SetOutRank & SetHistory & SetGradInMeta & RetainGrad
{}
{}
{}
{}
// Set TensorWrappers for Forward Outputs if needed
{}
}}
"""
HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE = \
""" if(trace_backward) {{
{}
// Node Construction
{}
// SetAttributes if needed
{}
// Set TensorWrappers for Forward Inputs if needed
{}
// SetGradOutMeta & SetEdges
{}
// SetOutRank & SetHistory & SetGradInMeta & RetainGrad
{}
{}
{}
{}
// Set TensorWrappers for Forward Outputs if needed
{}
}}
"""
NAMESPACE_WRAPPER_TEMPLATE = \
"""
namespace {} {{
{}
}}
"""
NODE_CC_FILE_TEMPLATE = \
"""
#include "glog/logging.h"
#include "paddle/phi/api/all.h"
#include "paddle/phi/api/backward/backward_api.h"
#include "paddle/phi/api/backward/sparse_bw_api.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/eager/to_static/run_program_op_node.h"
#include "paddle/fluid/eager/nan_inf_utils.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h"
DECLARE_bool(check_nan_inf);
{}
"""
NODE_H_FILE_TEMPLATE = \
"""
#pragma once
#include "paddle/fluid/eager/tensor_wrapper.h"
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/fluid/eager/api/manual/eager_manual/nodes/nodes.h"
{}
"""
FORWARD_CC_FILE_TEMPLATE = \
"""
#include "paddle/phi/api/lib/dygraph_api.h"
#include "paddle/fluid/eager/api/generated/eager_generated/forwards/dygraph_functions.h"
#include "paddle/fluid/eager/api/generated/eager_generated/backwards/nodes.h"
#include "paddle/fluid/eager/eager_layout_auto_tune.h"
#include "paddle/phi/api/include/strings_api.h"
#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/eager/amp_utils.h"
#include "paddle/fluid/eager/eager_amp_auto_cast.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/fluid/eager/nan_inf_utils.h"
#include "paddle/fluid/eager/api/manual/eager_manual/dygraph_forward_api.h"
DECLARE_bool(check_nan_inf);
{}
{}
"""
FORWARD_H_FILE_TEMPLATE = \
"""
#pragma once
#include "glog/logging.h"
#include "paddle/fluid/eager/autograd_meta.h"
#include "paddle/phi/api/all.h"
#include "paddle/fluid/eager/utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/eager/to_static/run_program_op_func.h"
#include "paddle/fluid/eager/api/manual/eager_manual/dygraph_forward_api.h"
using CPUPlace = phi::CPUPlace;
{}
{}
"""
CORE_OPS_INFO_TEMPLATE = \
"""
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_type_info = {{
{}
}};
std::unordered_map<std::string, std::vector<std::string>> core_ops_returns_info = {{
{}
}};
"""
CORE_OPS_DECLARATION_TEMPLATE = \
"""
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_args_type_info;
extern std::unordered_map<std::string, std::vector<std::string>> core_ops_returns_info;
"""
CHECK_INPLACE_TEMPLATE = \
"""
egr::EagerUtils::CheckInplace({}, {}, require_any_grad);
"""
BUMP_INPLACE_VERSION_TEMPLATE = \
"""
// Bump Inplace Version
{}.bump_inplace_version();
VLOG(3) << \"Tensor(\" << {}.name() << \") uses Inplace Strategy.\";
"""
AMP_LOGIC_TEMPLATE = \
""" if (egr::Controller::Instance().GetAMPLevel() != paddle::imperative::AmpLevel::O0) {{
VLOG(5) << "Check and Prepare For AMP";
{}
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> amp_tensors_vector = {};
{}
{}
{}
{{
paddle::imperative::AutoCastGuard guard(egr::Controller::Instance().GetCurrentTracer(), paddle::imperative::AmpLevel::O0);
{}
}}
}}
"""
LAYOUT_LOGIC_TEMPLATE = \
"""
if (egr::Controller::Instance().UseLayoutAutoTune()) {{
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> tensors_vector = {};
{}
{}
VLOG(5) << "Check and Prepare For LAYOUT "<< op_name;
paddle::imperative::LayoutAutotuneGuard guard(egr::Controller::Instance().GetCurrentTracer(), false);
{}
{}
// Returns
return {};
}}
"""
CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE = \
"""
paddle::optional<paddle::experimental::Tensor> {}_optional;
if({}.initialized()) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
"""
CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE = \
"""
paddle::optional<paddle::experimental::Tensor> {}_optional;
if( {}.impl() ) {}_optional = paddle::make_optional<paddle::experimental::Tensor>({});
"""
CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE = \
"""
paddle::optional<std::vector<paddle::experimental::Tensor>> {}_optional;
if( !{}.empty() ) {}_optional = paddle::make_optional<std::vector<paddle::experimental::Tensor>>({});
"""
CHECK_BACKWARD_INPLACE_TEMPLATE = \
"""
bool can_be_inplaced = false;
if ({}.initialized()) {{
VLOG(10) << {}.name() << "({}) use_count: " << {}.impl().use_count();
if ({}.impl().use_count() == 1 || ({}.impl().use_count() == 2 && {}.impl().get() == {}.impl().get())) {{
can_be_inplaced = true;
}}
}}"""
CHECK_NAN_AND_INF_TEMPLATE = \
""" if (FLAGS_check_nan_inf) {{ egr::CheckTensorHasNanOrInf("{}", {}); }}
"""
inplace_optional_out_type_map = {
"Tensor":
"paddle::optional<paddle::experimental::Tensor>&",
"std::vector<Tensor>":
"paddle::optional<std::vector<paddle::experimental::Tensor>>&"
}
def ExtractForwardApiNameFormInvoke(invoke_config):
api_name = invoke_config.split('(')[0]
if api_name[-1] == '_':
api_name = api_name[:-1]
return re.search(
r"(?P<api_name>[a-zA-Z0-9_]+)(?P<intermediate>_intermediate)?",
api_name).group('api_name')
def IsInvokeForwardApi(api_contents, forward_api_name_list):
return 'invoke' in api_contents and ExtractForwardApiNameFormInvoke(
api_contents['invoke']) in forward_api_name_list
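

# Illustrative examples (hypothetical invoke strings, not taken from the yaml
# configs):
#   ExtractForwardApiNameFormInvoke("concat(x, axis)") -> "concat"
#   ExtractForwardApiNameFormInvoke("scale_(x, 2.0)")  -> "scale"  # trailing '_' of an inplace api is dropped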
#######################
## Generator Helpers ##
#######################
def GenerateCoreOpInfoDeclaration():
return CORE_OPS_DECLARATION_TEMPLATE
def GenerateCoreOpInfoDefinition():
op_args_info_list = []
for op_name, arg_list in core_ops_args_info.items():
arg_str = ",".join(["\"" + v + "\"" for v in arg_list])
op_args_info = f"{{ \"{op_name}\", {{ {arg_str} }} }},"
op_args_info_list.append(op_args_info)
op_types_info_list = []
for op_name, type_list in core_ops_args_type_info.items():
type_str = ",".join(["\"" + v + "\"" for v in type_list])
op_types_info = f"{{ \"{op_name}\", {{ {type_str} }} }},"
op_types_info_list.append(op_types_info)
op_returns_info_list = []
for op_name, return_list in core_ops_returns_info.items():
return_str = ",".join(["\"" + v + "\"" for v in return_list])
return_types_info = f"{{ \"{op_name}\", {{ {return_str} }} }},"
op_returns_info_list.append(return_types_info)
op_args_info_str = "\n".join(op_args_info_list)
op_types_info_str = "\n".join(op_types_info_list)
op_returns_info_str = "\n".join(op_returns_info_list)
core_ops_info_definition_str = CORE_OPS_INFO_TEMPLATE.format(
op_args_info_str, op_types_info_str, op_returns_info_str)
return core_ops_info_definition_str
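

# Illustrative note (hypothetical entry, not a real generated line): if
# core_ops_args_info contained {"matmul": ["X", "Y"]}, the C++ fragment emitted
# for that op would be:
#   { "matmul", { "X","Y" } },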
#####################
## Generator Class ##
#####################
class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
def __init__(self, forward_api_contents, grad_api_contents,
forward_apis_dict, namespace):
self.forward_api_contents = forward_api_contents
# Members from Parent:
#self.namespace
#self.forward_api_contents
#self.forward_api_name
#self.orig_forward_inputs_list
#self.orig_forward_attrs_list
#self.orig_forward_returns_list
#self.forward_inputs_position_map
#self.forward_outputs_position_map
#self.optional_inputs
#self.no_need_buffers
#self.intermediate_outputs
#self.forward_inplace_map
FunctionGeneratorBase.__init__(self, forward_api_contents, namespace)
self.forward_apis_dict = forward_apis_dict
self.grad_api_contents = grad_api_contents
# Raw Contents
self.backward_forward_str = ""
self.backward_api_name = ""
self.forward_attrs_list = [
] #[ [attr_name, attr_type, default_value, orig_position], ...]
self.forward_inputs_list = [
] #[ [arg_name, arg_type, orig_position], ...]
self.forward_returns_list = [
] #[ [ret_name, ret_type, orig_position], ...]
self.backward_attrs_list = [
] #[ [attr_name, attr_type, default_value, orig_position], ...]
self.backward_inputs_list = [
] #[ [arg_name, arg_type, orig_position], ...]
self.backward_returns_list = [
] #[ [ret_name, ret_type, orig_position], ...]
# SlotNameMatched Backward Data
self.backward_forward_inputs_map = {
} #{ "name" : [type, is_fwd_input, orig_position] ...}
self.backward_grad_inputs_map = {
} #{ "name" : [type, fwd_position, orig_position] ...}
self.backward_grad_outputs_map = {
} #{ "name" : [type, fwd_position, orig_position] ...}
self.backward_inplace_map = {} #{name : name, ...}
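
        # Rough illustration (hypothetical op, types shown schematically): for a
        # forward signature like "scale(Tensor x, float bias) -> Tensor(out)",
        # forward_inputs_list would hold an entry of the form ["x", <tensor type>, 0],
        # forward_attrs_list an entry like ["bias", "float", <default>, 1], and
        # forward_returns_list an entry like ["out", <tensor type>, 0].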
def ParseBackwardInplaceInfo(self):
grad_api_contents = self.grad_api_contents
if 'inplace' not in grad_api_contents.keys(): return
inplace_map_str = grad_api_contents['inplace']
self.backward_inplace_map = ParseYamlInplaceInfo(inplace_map_str)
def DygraphYamlValidationCheck(self):
forward_api_contents = self.forward_api_contents
grad_api_contents = self.grad_api_contents
assert 'op' in forward_api_contents.keys(
), "Unable to find \"op\" in ops.yaml"
assert 'args' in forward_api_contents.keys(
), "Unable to find \"args\" in ops.yaml"
assert 'output' in forward_api_contents.keys(
), "Unable to find \"output\" in ops.yaml"
if grad_api_contents is not None:
assert 'backward' in forward_api_contents.keys(
), "Unable to find \"backward\" in ops.yaml"
assert 'args' in grad_api_contents.keys(
), "Unable to find \"args\" in backward.yaml"
assert 'output' in grad_api_contents.keys(
), "Unable to find \"output\" in backward.yaml"
assert 'forward' in grad_api_contents.keys(
), "Unable to find \"forward\" in backward.yaml"
def ForwardsValidationCheck(self):
forward_inputs_list = self.forward_inputs_list
forward_attrs_list = self.forward_attrs_list
forward_returns_list = self.forward_returns_list
orig_forward_inputs_list = self.orig_forward_inputs_list
orig_forward_attrs_list = self.orig_forward_attrs_list
orig_forward_returns_list = self.orig_forward_returns_list
for i in range(len(forward_inputs_list)):
forward_input_type = forward_inputs_list[i][1]
forward_input_pos = forward_inputs_list[i][2]
orig_input_type = orig_forward_inputs_list[i][1]
orig_input_pos = orig_forward_inputs_list[i][2]
assert forward_input_type == orig_input_type, AssertMessage(
forward_input_type, orig_input_type)
assert forward_input_pos == orig_input_pos, AssertMessage(
forward_input_pos, orig_input_pos)
for i in range(len(forward_attrs_list)):
orig_attr_type = orig_forward_attrs_list[i][1]
orig_attr_pos = orig_forward_attrs_list[i][3]
forward_attr_type = forward_attrs_list[i][1]
forward_attr_pos = forward_attrs_list[i][3]
assert orig_attr_type == forward_attr_type, AssertMessage(
orig_attr_type, forward_attr_type)
assert orig_attr_pos == forward_attr_pos, AssertMessage(
orig_attr_pos, forward_attr_pos)
for i in range(len(forward_returns_list)):
orig_return_type = orig_forward_returns_list[i][1]
orig_return_pos = orig_forward_returns_list[i][2]
forward_return_type = forward_returns_list[i][1]
forward_return_pos = forward_returns_list[i][2]
assert orig_return_type == forward_return_type, AssertMessage(
orig_return_type, forward_return_type)
assert orig_return_pos == forward_return_pos, AssertMessage(
orig_return_pos, forward_return_pos)
# Check Order: Inputs, Attributes
max_input_position = -1
for _, _, pos in forward_inputs_list:
max_input_position = max(max_input_position, pos)
for _, _, _, pos in forward_attrs_list:
assert pos > max_input_position, AssertMessage(
pos, max_input_position)
def BackwardValidationCheck(self):
backward_forward_inputs_map = self.backward_forward_inputs_map
backward_grad_inputs_map = self.backward_grad_inputs_map
backward_attrs_list = self.backward_attrs_list
# Check Order: TensorWrappers, GradTensors, Attributes
max_fwd_input_position = -1
for _, (_, _, pos) in backward_forward_inputs_map.items():
max_fwd_input_position = max(max_fwd_input_position, pos)
max_grad_tensor_position = -1
for _, (_, _, pos) in backward_grad_inputs_map.items():
assert pos > max_fwd_input_position, AssertMessage(
pos, max_grad_tensor_position)
max_grad_tensor_position = max(max_grad_tensor_position, pos)
max_attr_position = -1
for _, _, _, pos in backward_attrs_list:
assert pos > max_grad_tensor_position, AssertMessage(
pos, max_grad_tensor_position)
max_attr_position = max(max_attr_position, pos)
    def IntermediateValidationCheck(self):
        """
        Check whether intermediate_outputs are positioned
        at the very end of forward_returns_list
        """
        intermediate_outputs = self.intermediate_outputs
        forward_returns_list = self.forward_returns_list
intermediate_positions = range(
len(forward_returns_list) - len(intermediate_outputs),
len(forward_returns_list))
for ret_name, _, pos in forward_returns_list:
if ret_name in intermediate_outputs:
assert pos in intermediate_positions, AssertMessage(
pos, intermediate_positions)
def CollectBackwardInfo(self):
forward_api_contents = self.forward_api_contents
grad_api_contents = self.grad_api_contents
self.backward_api_name = forward_api_contents['backward']
self.backward_forward_str = grad_api_contents['forward']
backward_args_str = grad_api_contents['args']
backward_returns_str = grad_api_contents['output']
self.backward_inputs_list, self.backward_attrs_list, self.backward_returns_list = ParseYamlBackward(
backward_args_str, backward_returns_str)
def CollectForwardInfoFromBackwardContents(self):
backward_forward_str = self.backward_forward_str
self.forward_inputs_list, self.forward_attrs_list, self.forward_returns_list = ParseYamlForwardFromBackward(
backward_forward_str)
def CollectForwardInfoFromYamlForward(self):
self.forward_inputs_list, self.forward_attrs_list, self.forward_returns_list = ParseYamlForwardFromBackward(
self.forward_api_contents['args'] + " -> " +
self.forward_api_contents['output'])
def SlotNameMatching(self):
backward_inputs_list = self.backward_inputs_list
backward_returns_list = self.backward_returns_list
forward_inputs_position_map = self.forward_inputs_position_map
forward_outputs_position_map = self.forward_outputs_position_map
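        # Naming convention assumed by FindForwardName: a backward slot called "out_grad"
        # refers to forward output "out", while a slot without the grad suffix is a forward
        # tensor that will be captured as a TensorWrapper.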
for backward_input in backward_inputs_list:
backward_input_name = backward_input[0]
backward_input_type = backward_input[1]
backward_input_pos = backward_input[2]
backward_fwd_name = FindForwardName(backward_input_name)
if backward_fwd_name:
# Grad Input
assert backward_fwd_name in forward_outputs_position_map.keys(
), AssertMessage(backward_fwd_name,
forward_outputs_position_map.keys())
matched_forward_output_type = forward_outputs_position_map[
backward_fwd_name][0]
matched_forward_output_pos = forward_outputs_position_map[
backward_fwd_name][1]
self.backward_grad_inputs_map[backward_input_name] = [
backward_input_type, matched_forward_output_pos,
backward_input_pos
]
else:
# TensorWrapper Input
if backward_input_name in forward_inputs_position_map.keys():
tensor_wrapper_type = forward_inputs_position_map[
backward_input_name][0]
self.backward_forward_inputs_map[backward_input_name] = [
backward_input_type, True, backward_input_pos
]
elif backward_input_name in forward_outputs_position_map.keys():
tensor_wrapper_type = forward_outputs_position_map[
backward_input_name][0]
self.backward_forward_inputs_map[backward_input_name] = [
backward_input_type, False, backward_input_pos
]
else:
assert False, f"Cannot find {backward_input_name} in forward position map"
for backward_output in backward_returns_list:
backward_output_name = backward_output[0]
backward_output_type = backward_output[1]
backward_output_pos = backward_output[2]
backward_fwd_name = FindForwardName(backward_output_name)
            assert backward_fwd_name is not None, f"Cannot find forward name for backward output {backward_output_name}"
assert backward_fwd_name in forward_inputs_position_map.keys(
), AssertMessage(backward_fwd_name,
forward_inputs_position_map.keys())
matched_forward_input_type = forward_inputs_position_map[
backward_fwd_name][0]
matched_forward_input_pos = forward_inputs_position_map[
backward_fwd_name][1]
self.backward_grad_outputs_map[backward_output_name] = [
backward_output_type, matched_forward_input_pos,
backward_output_pos
]
def GetPassStopGradientArgsList(self, forward_outputs_position_map):
pass_stop_gradient_args_list = ["false"]
for name, (_, _) in forward_outputs_position_map.items():
output_autograd_meta_name = GetAutoGradMetaName(name)
pass_stop_gradient_args_list.append(output_autograd_meta_name)
pass_stop_gradient_args_str = ",".join(pass_stop_gradient_args_list)
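        # Illustrative result (names come from GetAutoGradMetaName): "false,p_autograd_out" -
        # the literal "false" followed by one autograd-meta pointer per forward output.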
return pass_stop_gradient_args_str
def GenerateNodeCreationCodes(self, for_backward=False):
forward_api_name = self.forward_api_name
forward_inputs_position_map = self.forward_inputs_position_map
forward_outputs_position_map = self.forward_outputs_position_map
forward_attrs_list = self.forward_attrs_list
backward_forward_inputs_map = self.backward_forward_inputs_map
backward_grad_inputs_map = self.backward_grad_inputs_map
backward_grad_outputs_map = self.backward_grad_outputs_map
backward_attrs_list = self.backward_attrs_list
optional_inputs = self.optional_inputs
# Pass Stop Gradient Args
pass_stop_gradient_args_str = self.GetPassStopGradientArgsList(
forward_outputs_position_map)
# Node Construction
num_backward_inputs = len(forward_outputs_position_map.keys())
num_backward_outputs = len(forward_inputs_position_map.keys())
grad_node_name = GetGradNodeName(self.backward_api_name)
# Helper
indent = GetIndent(2)
        # NOTE(Aurelius74): DO NOT use make_shared here, because some Node contains experimental::Scalar,
        # which holds "complex128" data that is memory-aligned manually. make_shared may not honor
        # that alignment when it allocates (MEMALIGN).
# See https://stackoverflow.com/questions/31228656/how-can-shared-ptr-disrupt-alignment
# and https://github.com/MRtrix3/mrtrix3/issues/957
node_construction_str = f"{indent}auto grad_node = std::shared_ptr<{grad_node_name}>(new {grad_node_name}({num_backward_inputs}, {num_backward_outputs}));"
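        # The generated C++ looks roughly like (illustrative node name):
        #   auto grad_node = std::shared_ptr<MatmulGradNode>(new MatmulGradNode(1, 2));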
# SetAttributes
set_attributes_list = []
forward_attrs_name_set = set()
for name, _, _, _ in forward_attrs_list:
forward_attrs_name_set.add(name)
for name, _, default_val_attr, _ in backward_attrs_list:
if name in forward_attrs_name_set:
set_attributes = f"{indent}grad_node->SetAttribute{name}({name});"
else:
set_attributes = f"{indent}grad_node->SetAttribute{name}({default_val_attr});"
set_attributes_list.append(set_attributes)
set_attributes_str = "\n".join(set_attributes_list)
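        # Each entry expands to a line such as (illustrative attr name):
        #   grad_node->SetAttributetranspose_x(transpose_x);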
# SetTensorWrappers
set_input_tensor_wrappers_list = []
set_output_tensor_wrappers_list = []
num_fwd_outputs = len(forward_outputs_position_map.keys())
for name, (atype, is_fwd_input,
pos) in backward_forward_inputs_map.items():
is_optional = (name in optional_inputs)
if is_fwd_input:
if is_optional:
set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});"
else:
set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});"
set_input_tensor_wrappers_list.append(set_tensor_wrappers)
            else: # Forward's output as backward's input
if num_fwd_outputs > 1:
# Aligned with forward output position
assert name in forward_outputs_position_map.keys(
), AssertMessage(name, forward_outputs_position_map.keys())
if is_optional:
set_tensor_wrappers = f"{indent}if({name}) grad_node->SetTensorWrapper{name}(*{name});"
else:
set_tensor_wrappers = f"{indent}grad_node->SetTensorWrapper{name}({name});"
set_output_tensor_wrappers_list.append(set_tensor_wrappers)
set_input_tensor_wrappers_str = "\n".join(
set_input_tensor_wrappers_list)
set_output_tensor_wrappers_str = "\n".join(
set_output_tensor_wrappers_list)
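        # Input tensor wrappers can be recorded before the forward call; output wrappers are
        # kept in a separate list because those tensors only exist once the call has run.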
# SetGradOutMeta & SetEdges
grad_node_out_list = []
set_grad_out_meta_list = []
set_edges_list = []
for name, (_, pos) in forward_inputs_position_map.items():
# Has corresponding grad output
has_corresponding_grad_output = False
for _, (_, corresponding_pos,
_) in backward_grad_outputs_map.items():
if pos == corresponding_pos:
has_corresponding_grad_output = True
if not has_corresponding_grad_output:
continue
grad_node_out_list.append(name)
is_optional = (name in self.optional_inputs)
if is_optional:
set_grad_out_meta = f"{indent}if({name}.get_ptr() != nullptr) grad_node->SetGradOutMeta(*({name}.get_ptr()), {pos});"
else:
set_grad_out_meta = f"{indent}grad_node->SetGradOutMeta({name}, {pos});"
set_grad_out_meta_list.append(set_grad_out_meta)
set_grad_out_meta_str = "\n".join(set_grad_out_meta_list)
# SetOutRank & SetHistory & SetGradInMeta
set_out_rank_list = []
set_history_list = []
set_grad_in_meta_list = []
set_retain_grad_list = []
num_outputs = len(forward_outputs_position_map.keys())
for name, (_, pos) in forward_outputs_position_map.items():
output_autograd_meta_name = GetAutoGradMetaName(name)
set_out_rank = f"""{indent}if ({output_autograd_meta_name}) {{
{indent} egr::EagerUtils::SetOutRankWithSlot({output_autograd_meta_name}, {pos});
{indent}}}"""
set_history = f"""{indent}if ({output_autograd_meta_name}) {{
{indent} egr::EagerUtils::SetHistory({output_autograd_meta_name}, grad_node);
{indent}}}"""
set_grad_in_meta = f"{indent}grad_node->SetGradInMeta({name}, {pos});"
set_retain_grad = f"{indent}egr::EagerUtils::CheckAndRetainGrad({name});"
set_out_rank_list.append(set_out_rank)
set_history_list.append(set_history)
set_grad_in_meta_list.append(set_grad_in_meta)
set_retain_grad_list.append(set_retain_grad)
set_out_rank_str = "\n".join(set_out_rank_list)
set_history_str = "\n".join(set_history_list)
set_grad_in_meta_str = "\n".join(set_grad_in_meta_list)
set_retain_grad_str = "\n".join(set_retain_grad_list)
node_event_name = forward_api_name + " node_creation"
node_creation_event_str = f"{indent}paddle::platform::RecordEvent node_creation_record_event(\"{node_event_name}\", paddle::platform::TracerEventType::OperatorInner, 1);\n"
if not for_backward:
self.node_creation_str = FORWARD_BODY_TEMPLATE.format(
node_creation_event_str, pass_stop_gradient_args_str,
node_construction_str, set_attributes_str,
set_input_tensor_wrappers_str, set_grad_out_meta_str,
set_out_rank_str, set_history_str, set_grad_in_meta_str,
set_retain_grad_str, set_output_tensor_wrappers_str)
else:
self.node_creation_str = HIHGER_ORDER_DERIVATIVE_VALUE_TEMPLATE.format(
node_creation_event_str, node_construction_str,
set_attributes_str, set_input_tensor_wrappers_str,
set_grad_out_meta_str, set_out_rank_str, set_history_str,
set_grad_in_meta_str, set_retain_grad_str,
set_output_tensor_wrappers_str)
self.grad_node_out_list = grad_node_out_list
def run(self):
# Basic Validation Check
self.DygraphYamlValidationCheck()
##########################
## Parsing Raw Contents ##
##########################
# Parse forward and backward inplace_map
self.ParseForwardInplaceInfo()
if self.grad_api_contents is not None:
self.ParseBackwardInplaceInfo()
# Parse no_need_buffer
self.ParseNoNeedBuffer()
# Parse optional_inputs
self.ParseDispensable()
# Parse intermediate_outputs
self.ParseIntermediate()
self.IntermediateValidationCheck()
if self.grad_api_contents is not None:
# Initialize backward_forward_str, backward_inputs_list, backward_attrs_list, backward_returns_list
self.CollectBackwardInfo()
# Initialize forward_inputs_list, forward_attrs_list, forward_returns_list
self.CollectForwardInfoFromBackwardContents()
if self.is_forward_only:
self.CollectForwardInfoFromYamlForward()
# Initialize orig_forward_inputs_list, orig_forward_attrs_list, orig_forward_returns_list
self.CollectOriginalForwardInfo()
# Forwards Validation Check
self.ForwardsValidationCheck()
#############################
## Process Parsed Contents ##
#############################
# Initialize forward_inputs_position_map, forward_outputs_position_map
self.DetermineForwardPositionMap(self.forward_inputs_list,
self.forward_returns_list)
if self.grad_api_contents is not None:
# Initialize backward_forward_inputs_map, backward_grad_inputs_map, backward_grad_outputs_map
self.SlotNameMatching()
# Backward Validation Check
self.BackwardValidationCheck()
class DygraphForwardFunctionGenerator(DygraphFunctionGeneratorBase):
def __init__(self, forward_api_contents, grad_api_contents,
forward_apis_dict, namespace):
DygraphFunctionGeneratorBase.__init__(self, forward_api_contents,
grad_api_contents,
forward_apis_dict, namespace)
# Generated Results
self.forward_definition_str = ""
self.forward_declaration_str = ""
def GenerateForwardLayoutAutotune(self, forward_api_name,
amp_tensors_vector_list,
layout_tensors_vector_optional_list,
layout_autotune_list_str,
returns_type_str, returns_str,
amp_inputs_call_args_str):
intermediate_outputs = self.intermediate_outputs
forward_attrs_list = self.forward_attrs_list
forward_outputs_position_map = self.forward_outputs_position_map
num_outputs = len(
forward_outputs_position_map.keys()) - len(intermediate_outputs)
# for layout autotune attr
lightly_sensitive_attr = [
'axis', 'axes', 'dim', 'dims', 'start', 'end', 'stop'
]
heavily_sensitive_attr = ['data_format', 'data_layout']
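        # "Lightly" sensitive attrs (axis/dim-like) may need remapping when a layout transform
        # happens, while "heavily" sensitive ones (data_format/data_layout) describe the layout
        # itself; the number of matched attrs decides which EagerLayoutAutotune overload is used.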
layout_autotune_attr = []
layout_autotune_attr_code_list = []
layout_autotune_attr_type_list = []
layout_autotune_attr_code_list.append(
f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");\n"
)
lightly_flag = False
heavily_flag = False
for name, atype, default_val, pos in forward_attrs_list:
for attr_name in lightly_sensitive_attr:
if name.find(attr_name) != -1 and (name
not in layout_autotune_attr):
lightly_flag = True
layout_autotune_attr.append(name)
layout_autotune_attr_type_list.append(atype)
if lightly_flag is False:
for attr_name in heavily_sensitive_attr:
if name.find(attr_name) != -1 and (
name not in layout_autotune_attr):
layout_autotune_attr.append(name)
layout_autotune_attr_type_list.append(atype)
heavily_flag = True
if len(layout_autotune_attr) == 0:
layout_autotune_attr_code_list.append(
f"auto transformer = egr::EagerLayoutAutotune(op_name, tensors_vector);\n"
)
elif len(layout_autotune_attr) == 1:
layout_autotune_attr_code_list.append(
f"auto transformer = egr::EagerLayoutAutotune<{layout_autotune_attr_type_list[0]}>(op_name, tensors_vector, &{layout_autotune_attr[0]});\n"
)
elif len(layout_autotune_attr) == 2:
layout_autotune_attr_code_list.append(
f"auto transformer = egr::EagerLayoutAutotune<{layout_autotune_attr_type_list[0]}, {layout_autotune_attr_type_list[1]}>(op_name, tensors_vector, &{layout_autotune_attr[0]}, &{layout_autotune_attr[1]});\n"
)
else:
layout_autotune_attr_code_list.append(
f"auto transformer = egr::EagerLayoutAutotune<{layout_autotune_attr_type_list[0]}>(op_name, tensors_vector,&{layout_autotune_attr[0]});\n"
)
# Out tensor
layout_inputs_call_args_str = amp_inputs_call_args_str
forward_function_name = GetDygraphForwardFunctionName(forward_api_name)
layout_tmp_result_list = []
layout_autotune_outs_list = []
result_name = "api_result"
if num_outputs == 1:
result_name = returns_str
layout_autotune_outs_list.append(
f"transformer -> SetOutTensorLayout(&{returns_str});\n")
else:
for name, (rtype, pos) in forward_outputs_position_map.items():
if name in intermediate_outputs:
continue
layout_autotune_outs_list.append(
f" auto& {name} = std::get<{len(layout_tmp_result_list)}>(api_result);\n"
)
layout_autotune_outs_list.append(
f" transformer -> SetOutTensorLayout(&{name});\n")
layout_tmp_result_list.append(f"{name}")
tensors_vector_list_str = "{ " + ",".join(
amp_tensors_vector_list) + " }"
if len(amp_tensors_vector_list) == 0:
layout_logic_str = ""
else:
after_call_str = f"{returns_type_str} {result_name} = {forward_function_name}({layout_inputs_call_args_str});\n"
layout_logic_str = LAYOUT_LOGIC_TEMPLATE.format(
tensors_vector_list_str,
" ".join(layout_tensors_vector_optional_list),
" ".join(layout_autotune_attr_code_list) + " " +
layout_autotune_list_str, after_call_str,
" ".join(layout_autotune_outs_list), returns_str)
return layout_logic_str
def GenerateForwardDefinitionAndDeclaration(self, is_inplaced):
namespace = self.namespace
if self.forward_api_name[-1] == '_' and not is_inplaced:
return
forward_api_name = GetInplacedFunctionName(
self.forward_api_name) if is_inplaced else self.forward_api_name
forward_inputs_position_map = self.forward_inputs_position_map
forward_outputs_position_map = self.forward_outputs_position_map
forward_attrs_list = self.forward_attrs_list
if not self.is_forward_only:
backward_grad_outputs_map = self.backward_grad_outputs_map
optional_inputs = self.optional_inputs
intermediate_outputs = self.intermediate_outputs
forward_inplace_map = self.forward_inplace_map if is_inplaced else {}
indent = GetIndent(1)
# Get Function Args
num_inputs = len(forward_attrs_list) + len(
forward_inputs_position_map.keys())
inputs_args_definition_list = ["" for i in range(num_inputs)]
inputs_args_declaration_list = ["" for i in range(num_inputs)]
inputs_call_list = ["" for i in range(num_inputs)]
amp_inputs_call_list = ["" for i in range(num_inputs)]
amp_tensors_vector_list = []
amp_tensors_vector_optional_list = []
amp_autocast_list = []
amp_autocast_optional_list = []
layout_autotune_list = []
layout_autotune_optional_list = []
layout_tensors_vector_optional_list = []
for name, (ttype, pos) in forward_inputs_position_map.items():
inputs_call_list[pos] = f"{name}"
amp_inputs_call_list[pos] = f"new_{name}"
is_optional = (name in optional_inputs)
if IsPlainTensorType(ttype):
if is_optional:
if self.is_forward_only and is_inplaced and forward_inplace_map and name in forward_inplace_map.keys(
):
arg_str = f"paddle::optional<paddle::experimental::Tensor>& {name}"
else:
arg_str = f"const paddle::optional<paddle::experimental::Tensor>& {name}"
amp_tensors_vector_optional_list.append(
f"if ({name}) amp_tensors_vector.push_back({{ *{name} }});\n"
)
amp_autocast_optional_list.append(
f"auto new_{name} = egr::EagerAmpAutoCast(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
layout_tensors_vector_optional_list.append(
f"if ({name}) tensors_vector.push_back({{ *{name} }});\n"
)
layout_autotune_optional_list.append(
f"auto new_{name} = transformer->TransInTensor(\"{name}\", {name});\n"
)
else:
if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys(
):
arg_str = f"paddle::experimental::Tensor& {name}"
amp_tensors_vector_list.append(f"{{{name}}}")
amp_autocast_list.append(
f"auto new_{name} = egr::EagerAmpAutoCast(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
else:
arg_str = f"const paddle::experimental::Tensor& {name}"
amp_tensors_vector_list.append(f"{{{name}}}")
amp_autocast_list.append(
f"auto new_{name} = egr::EagerAmpAutoCast(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
layout_autotune_list.append(
f"auto new_{name} = transformer->TransInTensor(\"{name}\", {name});\n"
)
else:
assert IsVectorTensorType(ttype)
if is_optional:
if self.is_forward_only and is_inplaced and forward_inplace_map and name in forward_inplace_map.keys(
):
arg_str = f"paddle::optional<std::vector<paddle::experimental::Tensor>>& {name}"
else:
arg_str = f"const paddle::optional<std::vector<paddle::experimental::Tensor>>& {name}"
amp_tensors_vector_optional_list.append(
f"if ({name}) amp_tensors_vector.push_back( *{name} );\n"
)
amp_autocast_optional_list.append(
f"auto new_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
layout_autotune_optional_list.append(
f"auto new_{name} = transformer->TransInTensors(\"{name}\", {name});\n"
)
else:
if is_inplaced and forward_inplace_map and name in forward_inplace_map.keys(
):
arg_str = f"std::vector<paddle::experimental::Tensor>& {name}"
else:
arg_str = f"const std::vector<paddle::experimental::Tensor>& {name}"
amp_tensors_vector_list.append(f"{name}")
amp_autocast_list.append(
f"auto new_{name} = egr::EagerAmpAutoCasts(\"{name}\", {name}, amp_dst_dtype, op_name);\n"
)
layout_autotune_list.append(
f"auto new_{name} = transformer->TransInTensors(\"{name}\", {name});\n"
)
inputs_args_definition_list[pos] = arg_str
inputs_args_declaration_list[pos] = arg_str
# forward attrs
for name, atype, default_val, pos in forward_attrs_list:
inputs_call_list[pos] = name
amp_inputs_call_list[pos] = name
if default_val is not None:
inputs_args_declaration_list[
pos] = f"{atype} {name} = {default_val}"
else:
inputs_args_declaration_list[pos] = f"{atype} {name}"
inputs_args_definition_list[pos] = f"{atype} {name}"
inputs_args_declaration_str = ", ".join(inputs_args_declaration_list)
inputs_args_definition_str = ", ".join(inputs_args_definition_list)
inputs_call_args_str = ", ".join(inputs_call_list)
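        # The assembled pieces produce a signature roughly like (illustrative, names assumed):
        #   paddle::experimental::Tensor matmul_ad_func(const paddle::experimental::Tensor& x,
        #       const paddle::experimental::Tensor& y, bool transpose_x, bool transpose_y);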
# Forward Full Logic
function_name = forward_api_name
if len(intermediate_outputs) > 0:
if is_inplaced:
function_name = GetIntermediateAPIFunctionName(
forward_api_name[:-1]) + '_'
else:
function_name = GetIntermediateAPIFunctionName(function_name)
api_out_type = "auto"
if is_inplaced and len(forward_outputs_position_map) == 1:
api_out_type = "auto&"
forward_call_str = f"{indent}{api_out_type} api_result = paddle::experimental::{namespace}{function_name}({inputs_call_args_str});"
num_outputs = len(
forward_outputs_position_map.keys()) - len(intermediate_outputs)
# Check Nan and Inf
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format(
function_name, "api_result")
# Get Outputs
get_outputs_str = ""
for name, (rtype, pos) in forward_outputs_position_map.items():
if num_outputs == 1 and len(intermediate_outputs) == 0:
get_outputs_str += f"{indent}auto& {name} = api_result;\n"
else:
get_outputs_str += f"{indent}auto& {name} = std::get<{pos}>(api_result);\n"
# Get return type list & outputs
returns_type_list = ["" for i in range(num_outputs)]
returns_list = ["" for i in range(num_outputs)]
for name, (rtype, pos) in forward_outputs_position_map.items():
if name in intermediate_outputs:
continue
returns_list[pos] = f"{name}"
if IsPlainTensorType(rtype):
if is_inplaced and forward_inplace_map and name in forward_inplace_map.values(
):
ind = list(forward_inplace_map.values()).index(name)
if list(forward_inplace_map.keys()
)[ind] in self.optional_inputs:
returns_type_list[pos] = inplace_optional_out_type_map[
rtype]
else:
returns_type_list[pos] = "paddle::experimental::Tensor&"
else:
returns_type_list[pos] = "paddle::experimental::Tensor"
else:
assert IsVectorTensorType(rtype)
if is_inplaced and forward_inplace_map and name in forward_inplace_map.values(
):
ind = list(forward_inplace_map.values()).index(name)
if list(forward_inplace_map.keys()
)[ind] in self.optional_inputs:
returns_type_list[pos] = inplace_optional_out_type_map[
rtype]
else:
returns_type_list[
pos] = "std::vector<paddle::experimental::Tensor>&"
else:
returns_type_list[
pos] = "std::vector<paddle::experimental::Tensor>"
if num_outputs == 1:
returns_str = returns_list[0]
returns_type_str = returns_type_list[0]
else:
returns_type_str = ", ".join(returns_type_list)
returns_type_str = f"std::tuple<{returns_type_str}>"
returns_str = ", ".join(returns_list)
returns_str = f"{returns_type_str}{{{returns_str}}}"
# Node Creation Pre-Processing
inputs_names = []
if not self.is_forward_only:
# 1. Get Input AutoGradMeta
inputs_autograd_meta_list = []
compute_require_grad_args_list = ["trace_backward"]
for name, (ttype, pos) in forward_inputs_position_map.items():
# Has corresponding grad output
has_corresponding_grad_output = False
for _, (_, corresponding_pos,
_) in backward_grad_outputs_map.items():
if pos == corresponding_pos:
has_corresponding_grad_output = True
if has_corresponding_grad_output or (
name in forward_inplace_map and forward_api_name
not in inplace_check_blacklist) or self.is_forward_only:
input_autograd_meta_name = GetAutoGradMetaName(name)
if IsPlainTensorType(ttype):
input_autograd_meta = f"{indent}egr::AutogradMeta* {input_autograd_meta_name} = egr::EagerUtils::nullable_autograd_meta({name});"
else:
assert IsVectorTensorType(ttype)
input_autograd_meta_vec_name = GetAutoGradMetaVectorName(
name)
input_autograd_meta = f"{indent}std::vector<egr::AutogradMeta*> {input_autograd_meta_vec_name} = egr::EagerUtils::nullable_autograd_meta({name});\n"
input_autograd_meta += f"{indent}std::vector<egr::AutogradMeta*>* {input_autograd_meta_name} = &{input_autograd_meta_vec_name};"
inputs_autograd_meta_list.append(input_autograd_meta)
compute_require_grad_args_list.append(
input_autograd_meta_name)
inputs_autograd_meta_str = "\n".join(inputs_autograd_meta_list)
compute_require_grad_args_str = ",".join(
compute_require_grad_args_list)
# 2. Get Output AutoGradMeta
outputs_autograd_meta_list = []
num_fwd_outputs = len(forward_outputs_position_map.keys())
for name, (rtype, pos) in forward_outputs_position_map.items():
output_autograd_meta_name = GetAutoGradMetaName(name)
output_autograd_meta_vec_name = GetAutoGradMetaVectorName(name)
if num_fwd_outputs == 1:
if IsPlainTensorType(rtype):
output_autograd_meta = f"{indent}egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&{name});"
else:
assert IsVectorTensorType(rtype)
output_autograd_meta = f"{indent}std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&{name});\n"
output_autograd_meta += f"{indent}std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
else:
# Tuple api_result
if IsPlainTensorType(rtype):
output_autograd_meta = f"{indent}egr::AutogradMeta* {output_autograd_meta_name} = egr::EagerUtils::autograd_meta(&{name});"
else:
assert IsVectorTensorType(rtype)
output_autograd_meta = f"{indent}std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&{name});\n"
output_autograd_meta += f"{indent}std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};"
outputs_autograd_meta_list.append(output_autograd_meta)
outputs_autograd_meta_str = "\n".join(outputs_autograd_meta_list)
# 3. Check Inplace
check_inplace_str = ""
bump_inplace_version_str = ""
if is_inplaced:
for inplace_name in forward_inplace_map.keys():
if forward_api_name not in inplace_check_blacklist:
inplace_autograd_meta_name = GetAutoGradMetaName(
inplace_name)
check_inplace_str += CHECK_INPLACE_TEMPLATE.format(
inplace_name, inplace_autograd_meta_name)
bump_inplace_version_str += BUMP_INPLACE_VERSION_TEMPLATE.format(
inplace_name, inplace_name)
# Node Creation
self.GenerateNodeCreationCodes()
node_creation_str = self.node_creation_str
dygraph_event_str = f"{indent}paddle::platform::RecordEvent dygraph_entrance_record_event(\"{forward_api_name} dygraph\", paddle::platform::TracerEventType::Operator, 1);\n"
forward_ad_function_name = GetDygraphForwardFunctionName(
forward_api_name)
# Forward amp logic
kernel_trans2_op_name_str = f"auto op_name = phi::TransToFluidOpName(\"{forward_api_name}\");"
amp_tensors_vector_list_str = "{ " + ",".join(
amp_tensors_vector_list) + " }"
amp_tensors_vector_optional_list_str = " ".join(
amp_tensors_vector_optional_list)
amp_get_dst_dtype_str = f"auto amp_dst_dtype = egr::GetAmpDestDtype(op_name, amp_tensors_vector);\n"
amp_autocast_list_str = " ".join(
amp_autocast_list) + " " + " ".join(
amp_autocast_optional_list)
amp_inputs_call_args_str = ", ".join(amp_inputs_call_list)
amp_call_str = f"return {forward_ad_function_name}({amp_inputs_call_args_str});"
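        # The AMP branch (AMP_LOGIC_TEMPLATE, defined elsewhere) auto-casts the inputs and then
        # re-invokes this same ad_func with the casted tensors via amp_call_str.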
if is_inplaced or (forward_api_name == "cast"):
            amp_logic_str = "\n VLOG(5) << \" No AMP for {} because it is an inplace or cast api. \"; ".format(
forward_ad_function_name)
else:
amp_logic_str = AMP_LOGIC_TEMPLATE.format(
kernel_trans2_op_name_str, amp_tensors_vector_list_str,
amp_tensors_vector_optional_list_str, amp_get_dst_dtype_str,
amp_autocast_list_str, amp_call_str)
# Forward layout autotune
layout_autotune_list_str = " ".join(
layout_autotune_list) + " ".join(layout_autotune_optional_list)
layout_logic_str = self.GenerateForwardLayoutAutotune(
forward_api_name, amp_tensors_vector_list,
layout_tensors_vector_optional_list, layout_autotune_list_str,
returns_type_str, returns_str, amp_inputs_call_args_str)
        # Prepare input/output strings for logging
var_str = f"\n{indent} std::string input_str = \"\";"
var_str += f"\n{indent} std::string output_str = \"\";"
for name, (ttype, pos) in forward_inputs_position_map.items():
var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \" \\n( {name} , [%s]), \";"
var_str += f"\n{indent} std::string input_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
var_str += f"\n{indent} input_str += input_{name}_str; "
before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
for name, (ttype, pos) in forward_outputs_position_map.items():
var_str += f"\n{indent} const char* TENSOR_{name.upper()}_TEMPLATE = \" \\n( {name} , [%s]), \";"
var_str += f"\n{indent} std::string output_{name}_str = paddle::string::Sprintf(TENSOR_{name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({name}));"
var_str += f"\n{indent} output_str += output_{name}_str; "
log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
# Generate forward_definition_str and forward_declaration_str
if self.is_forward_only:
if len(amp_tensors_vector_list) == 0:
amp_logic_str = "\n VLOG(7) << \" No AMP for {} because it has no input. \"; ".format(
forward_ad_function_name)
self.forward_definition_str += FORWARD_ONLY_FUNCTION_TEMPLATE.format(
returns_type_str, forward_ad_function_name,
inputs_args_definition_str, forward_api_name, dygraph_event_str,
amp_logic_str, layout_logic_str, forward_api_name,
before_log_str, forward_call_str, get_outputs_str,
forward_api_name, log_str, returns_str)
else:
self.forward_definition_str += FORWARD_FUNCTION_TEMPLATE.format(
returns_type_str, forward_ad_function_name,
inputs_args_definition_str, forward_api_name, dygraph_event_str,
amp_logic_str, layout_logic_str, inputs_autograd_meta_str,
forward_api_name, before_log_str, forward_call_str,
check_nan_inf_str, get_outputs_str, outputs_autograd_meta_str,
compute_require_grad_args_str, check_inplace_str,
bump_inplace_version_str, node_creation_str, forward_api_name,
log_str, returns_str)
self.forward_declaration_str += f"{returns_type_str} {forward_ad_function_name}({inputs_args_declaration_str});\n"
def GenerateInplacedForwardDygraphFunctions(self):
# Inplaced Version Dygraph Function Generation
forward_api_name = self.forward_api_name
forward_api_contents = self.forward_api_contents
if forward_api_name != "sum" and "inplace" in forward_api_contents.keys(
):
# Function Definition and Declaration Generation
self.GenerateForwardDefinitionAndDeclaration(is_inplaced=True)
self.UpdateCoreOpsInformation(is_inplaced=True)
def UpdateCoreOpsInformation(self, is_inplaced):
forward_api_name = GetInplacedFunctionName(
self.forward_api_name) if is_inplaced else self.forward_api_name
forward_inputs_position_map = self.forward_inputs_position_map
forward_outputs_position_map = self.forward_outputs_position_map
forward_attrs_list = self.forward_attrs_list
num_args = len(
forward_inputs_position_map.keys()) + len(forward_attrs_list)
num_returns = len(forward_outputs_position_map.keys())
fwd_api_name = "" + forward_api_name
core_ops_returns_info[fwd_api_name] = ["" for i in range(num_returns)]
core_ops_args_info[fwd_api_name] = ["" for i in range(num_args)]
core_ops_args_type_info[fwd_api_name] = ["" for i in range(num_args)]
for name, (ttype, pos) in forward_inputs_position_map.items():
core_ops_args_info[fwd_api_name][pos] = name
if IsPlainTensorType(ttype):
core_ops_args_type_info[fwd_api_name][pos] = "tensor"
else:
assert IsVectorTensorType(ttype)
core_ops_args_type_info[fwd_api_name][pos] = "list"
for name, _, _, pos in forward_attrs_list:
core_ops_args_info[fwd_api_name][pos] = name
for name, (ttype, pos) in forward_outputs_position_map.items():
core_ops_returns_info[fwd_api_name][pos] = name
def run(self):
super().run()
#####################
## Code Generation ##
#####################
# Definition And Declaration
self.GenerateForwardDefinitionAndDeclaration(is_inplaced=False)
self.UpdateCoreOpsInformation(is_inplaced=False)
self.GenerateInplacedForwardDygraphFunctions()
class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
def __init__(self,
forward_api_contents,
grad_api_contents,
forward_apis_dict,
namespace,
next_grad_api_contents=None):
DygraphFunctionGeneratorBase.__init__(self, forward_api_contents,
grad_api_contents,
forward_apis_dict, namespace)
# Record name mapping from forward_var_name to grad_var_names
self.to_next_grad_name_mapping = {} # {name : name}
# Generated Results
self.node_declaration_str = ""
self.node_definition_str = ""
self.next_grad_api_contents = next_grad_api_contents
def TransformToNextGradName(self, string):
name_mapping = self.to_next_grad_name_mapping
if string in name_mapping.keys():
return name_mapping[string]
return string
def ResetOptionalInputs(self):
namespace = self.namespace
grad_api_contents = self.grad_api_contents
base_generator = FunctionGeneratorBase(grad_api_contents, namespace)
base_generator.ParseDispensable()
self.optional_inputs = base_generator.optional_inputs
def RecordGrad2NextGradNameMapping(self, next_node_generator):
next_orig_inputs_list = next_node_generator.orig_forward_inputs_list
next_orig_returns_list = next_node_generator.orig_forward_returns_list
next_forward_inputs_list = next_node_generator.forward_inputs_list
next_forward_returns_list = next_node_generator.forward_returns_list
for i in range(len(next_orig_inputs_list)):
grad_name = next_orig_inputs_list[i][0]
next_forward_name = next_forward_inputs_list[i][0]
self.to_next_grad_name_mapping[grad_name] = next_forward_name
for i in range(len(next_orig_returns_list)):
grad_ret_name = next_orig_returns_list[i][0]
next_ret_name = next_forward_returns_list[i][0]
self.to_next_grad_name_mapping[grad_ret_name] = next_ret_name
def GenerateHigherOrderNodeCreationCode(self):
namespace = self.namespace
grad_api_contents = self.grad_api_contents
forward_apis_dict = self.forward_apis_dict
next_grad_api_contents = self.next_grad_api_contents
next_grad_node_creation_str = ""
next_grad_node_out_list = []
next_node_generator = None
if next_grad_api_contents:
# Fake forward_api_contents and backward_api_contents
forward_api_contents = grad_api_contents
forward_api_contents['op'] = forward_api_contents['backward_op']
backward_api_contents = next_grad_api_contents
next_node_generator = DygraphFunctionGeneratorBase(
forward_api_contents, backward_api_contents, forward_apis_dict,
namespace)
next_node_generator.run()
next_node_generator.GenerateNodeCreationCodes(True)
next_grad_node_creation_str = next_node_generator.node_creation_str
next_grad_node_out_list = next_node_generator.grad_node_out_list
self.RecordGrad2NextGradNameMapping(next_node_generator)
if next_node_generator is not None:
return next_grad_node_creation_str, next_grad_node_out_list, next_node_generator.backward_forward_inputs_map
else:
return next_grad_node_creation_str, next_grad_node_out_list, None
def GenerateNodeDeclaration(self):
forward_op_name = self.forward_api_name
backward_forward_inputs_map = self.backward_forward_inputs_map
backward_attrs_list = self.backward_attrs_list
no_need_buffers = self.no_need_buffers
# SetTensorWrapper Methods & TensorWrapper Members & ClearTensorWrappers
set_tensor_wrapper_methods_str = ""
tensor_wrapper_members_str = ""
clear_tensor_wrapper_str = ""
for tname, (ttype, is_fwd_input,
_) in backward_forward_inputs_map.items():
no_need_buffer = "true" if tname in no_need_buffers else "false"
tensor_wrapper_name = GetSavedName(tname)
if IsPlainTensorType(ttype):
set_tensor_wrapper_methods_str += SET_PLAIN_TENSOR_WRAPPER_TEMPLATE.format(
tname, tname, tensor_wrapper_name, tname, no_need_buffer)
tensor_wrapper_members_str += PLAIN_TENSOR_MEMBER_TEMPLATE.format(
tensor_wrapper_name)
clear_tensor_wrapper_str += CLEAR_TENSOR_WRAPPER_TEMPLATE.format(
tensor_wrapper_name)
else:
assert IsVectorTensorType(ttype)
set_tensor_wrapper_methods_str += SET_VECTOR_TENSOR_WRAPPER_TEMPLATE.format(
tname, tname, tname, tensor_wrapper_name, no_need_buffer)
tensor_wrapper_members_str += VECTOR_TENSOR_MEMBER_TEMPLATE.format(
tensor_wrapper_name)
clear_tensor_wrapper_str += CLEAR_VECTOR_TENSOR_WRAPPERS_TEMPLATE.format(
tensor_wrapper_name)
# SetAttributes & Attribute Members
set_attribute_methods_str = ""
attribute_members_str = ""
for aname, atype, default_val, _ in backward_attrs_list:
saved_attr_name = GetSavedName(aname)
set_attribute_methods_str += SET_ATTR_METHOD_TEMPLATE.format(
aname, GetConstReference(atype), aname, saved_attr_name, aname)
if default_val:
attribute_members_str += ATTRIBUTE_MEMBER_WITH_DEFAULT_TEMPLATE.format(
RemoveConstAndReference(atype), saved_attr_name,
default_val)
else:
attribute_members_str += ATTRIBUTE_MEMBER_TEMPLATE.format(
RemoveConstAndReference(atype), saved_attr_name)
grad_node_name = GetGradNodeName(self.backward_api_name)
self.node_declaration_str = NODE_DECLARATION_TEMPLATE.format(
grad_node_name, grad_node_name, grad_node_name, grad_node_name,
grad_node_name, clear_tensor_wrapper_str, grad_node_name,
grad_node_name, set_tensor_wrapper_methods_str,
set_attribute_methods_str, tensor_wrapper_members_str,
attribute_members_str)
def GenerateNodeDefinition(self, next_grad_node_creation_str,
next_grad_node_out_list,
backward_forward_inputs_map_next):
namespace = self.namespace
forward_api_name = self.forward_api_name
backward_api_name = self.backward_api_name
backward_forward_inputs_map = self.backward_forward_inputs_map
backward_grad_inputs_map = self.backward_grad_inputs_map
backward_grad_outputs_map = self.backward_grad_outputs_map
backward_attrs_list = self.backward_attrs_list
backward_inplace_map = self.backward_inplace_map
indent = GetIndent(1)
is_invoke_forward_api = IsInvokeForwardApi(self.grad_api_contents,
self.forward_apis_dict)
# Construct grad_api function args
# Order: TensorWrappers, GradTensors, Attributes
grad_api_args_len = len(backward_forward_inputs_map.keys()) + len(
backward_grad_inputs_map.keys()) + len(backward_attrs_list)
grad_api_args = ["" for i in range(grad_api_args_len)]
get_grad_in_args_list = []
# Fill Grad Ins with Zero
fill_zero_str = ""
if backward_api_name in ops_to_fill_zero_for_empty_grads:
fill_zero_str = f"{indent}const auto& input_metas = this->InputMeta();\n"
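            # Grad tensors for forward outputs that were never used may arrive uninitialized;
            # they are filled with zeros (using InputMeta for shape info) so the backward
            # kernel always sees well-formed inputs.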
for name, (ttype, fwd_position,
grad_api_position) in backward_grad_inputs_map.items():
if name in self.optional_inputs:
if IsPlainTensorType(ttype):
fill_zero_str += f"{indent}egr::EagerUtils::FillZeroForEmptyOptionalGradInput(&grads[{fwd_position}][0], input_metas[{fwd_position}][0]);\n"
else:
if IsPlainTensorType(ttype):
fill_zero_str += f"{indent}egr::EagerUtils::FillZeroForEmptyGradInput(&grads[{fwd_position}][0], input_metas[{fwd_position}][0]);\n"
else:
fill_zero_str += f"{indent}egr::EagerUtils::FillZeroForEmptyGradInput(&grads[{fwd_position}], input_metas[{fwd_position}]);\n"
inplace_grad_input_str = ""
inplaced_tensor_wrapper = False
inplace_check_str = ""
optional_inplace_var_name = []
# Grad Ins from TensorWrappers
for name, (backward_input_type, is_fwd_input,
grad_api_position), in backward_forward_inputs_map.items():
tensor_wrapper_name = GetSavedName(name)
transformed_tensor_name = self.TransformToNextGradName(name)
is_optional = (name in self.optional_inputs)
tensor_wrapper_recover_str = f"{indent}auto {transformed_tensor_name} = egr::EagerUtils::RecoverTensorWrapper(&this->{tensor_wrapper_name});"
if backward_inplace_map and name in backward_inplace_map.keys():
if len(next_grad_node_creation_str) > 0:
if (transformed_tensor_name
in backward_forward_inputs_map_next) and (
backward_forward_inputs_map_next[
transformed_tensor_name][1]):
optional_inplace_var_name.append(
transformed_tensor_name)
tensor_wrapper_intermidiate_tensor_str = f"(&this->{tensor_wrapper_name})->get_intermidiate_tensor()"
inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name, name,
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name,
tensor_wrapper_intermidiate_tensor_str)
inplace_grad_input_str = transformed_tensor_name
if is_optional:
if backward_input_type == "std::vector<Tensor>":
tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_VECTOR_TENSOR_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name)
else:
tensor_wrapper_recover_str += "\n" + CREATE_RECOVER_OPTIONAL_TENSOR_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name)
grad_api_args[
grad_api_position] = transformed_tensor_name + "_optional"
else:
grad_api_args[grad_api_position] = transformed_tensor_name
get_grad_in_args_list.append(tensor_wrapper_recover_str)
# Grad Ins from grads
for name, (ttype, fwd_position,
grad_api_position) in backward_grad_inputs_map.items():
transformed_tensor_name = self.TransformToNextGradName(name)
is_optional = (name in self.optional_inputs)
if IsPlainTensorType(ttype):
get_tensor_str = f"{indent}auto& {transformed_tensor_name} = hooked_grads[{fwd_position}][0];"
# Inplace in backward op
if backward_inplace_map and name in backward_inplace_map.keys():
if len(next_grad_node_creation_str) > 0:
if (transformed_tensor_name
in backward_forward_inputs_map_next) and (
backward_forward_inputs_map_next[
transformed_tensor_name][1]):
optional_inplace_var_name.append(
transformed_tensor_name)
grads_tensor_str = f"grads[{fwd_position}][0]"
inplace_check_str += CHECK_BACKWARD_INPLACE_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name, name,
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name,
grads_tensor_str)
inplace_grad_input_str = transformed_tensor_name
if is_optional:
get_tensor_str += "\n" + CREATE_PLAIN_OPTIONAL_TENSOR_TEMPLATE.format(
transformed_tensor_name, transformed_tensor_name,
transformed_tensor_name, transformed_tensor_name)
grad_api_args[
grad_api_position] = f"{transformed_tensor_name}_optional"
else:
grad_api_args[grad_api_position] = transformed_tensor_name
else:
assert IsVectorTensorType(ttype)
get_tensor_str = f"{indent}auto& {transformed_tensor_name} = hooked_grads[{fwd_position}];"
grad_api_args[grad_api_position] = transformed_tensor_name
get_grad_in_args_list.append(get_tensor_str)
# Grad Attrs
for name, _, _, grad_api_position in backward_attrs_list:
saved_attribute_name = GetSavedName(name)
get_attr_str = f"{indent}auto& {name} = this->{saved_attribute_name};"
grad_api_args[grad_api_position] = name
get_grad_in_args_list.append(get_attr_str)
get_grad_in_args_str = "\n".join(get_grad_in_args_list)
# Grad Function Call String
slot_num_bwd_outputs = len(self.forward_inputs_position_map.keys())
grad_api_namespace = f"paddle::experimental::{namespace}"
grad_function_prepare_str = f"""
const auto& out_metas = OutputMeta();
paddle::small_vector<std::vector<paddle::experimental::Tensor>, egr::kSlotSmallVectorSize> returns({slot_num_bwd_outputs});
for (int i = 0; i < {slot_num_bwd_outputs}; ++i) {{
out_metas[i].size() == 0 ? returns[i].resize(1) : returns[i].resize(out_metas[i].size());
}}
"""
inplace_for_grad_outs_str = ""
optional_inplace_str = ""
# Grad Outputs
out_index = -1
out_assign_str = ""
for name, (ttype, fwd_position,
grad_api_position) in backward_grad_outputs_map.items():
transformed_tensor_name = self.TransformToNextGradName(name)
out_index = out_index + 1
if is_invoke_forward_api:
if len(backward_grad_outputs_map) == 1:
out_assign_str += f"{indent}*api_output_{out_index} = api_output;\n"
else:
out_assign_str += f"{indent}*api_output_{out_index} = std::get<{out_index}>(api_output);\n"
else:
grad_api_args.append(f"api_output_{out_index}")
if inplace_grad_input_str in optional_inplace_var_name:
                    optional_inplace_str = f"VLOG(6) << \"No Inplace should happen for wrapped input: {inplace_grad_input_str}\";"
else:
optional_inplace_str = f"""if (api_output_{out_index} != nullptr && can_be_inplaced) {{
egr::EagerUtils::HandleViewBetweenInputAndOutput({inplace_grad_input_str}, api_output_{out_index});
}}"""
if IsPlainTensorType(ttype):
if backward_inplace_map and name in backward_inplace_map.values(
):
inplace_str = f""" if (api_output_{out_index} != nullptr && can_be_inplaced) {{
egr::EagerUtils::HandleViewBetweenInputAndOutput({inplace_grad_input_str}, api_output_{out_index});
}}"""
if len(next_grad_node_creation_str) > 0:
inplace_for_grad_outs_str += f"""
if (trace_backward) {{
{optional_inplace_str}
}} else {{
{inplace_str}
}}"""
else:
inplace_for_grad_outs_str += inplace_str
grad_function_prepare_str += f"""
auto* api_output_{out_index} = (out_metas[{fwd_position}].empty() || out_metas[{fwd_position}][0].IsStopGradient()) ? nullptr : &returns[{fwd_position}][0];"""
else:
assert IsVectorTensorType(ttype)
grad_function_prepare_str += f"""
std::vector<paddle::experimental::Tensor*> api_output_{out_index};
api_output_{out_index}.reserve(returns[{fwd_position}].size());
for (size_t i = 0; i < returns[{fwd_position}].size(); ++i) {{
if (out_metas[{fwd_position}].empty() || out_metas[{fwd_position}][i].IsStopGradient()) {{
api_output_{out_index}.push_back(nullptr);
}} else {{
api_output_{out_index}.push_back(&returns[{fwd_position}][i]);
}}
}}"""
grad_api_args_str = ", ".join(grad_api_args)
if is_invoke_forward_api:
autograd_api_out = "auto"
if len(self.backward_inplace_map) > 0 and len(
backward_grad_outputs_map) == 1:
autograd_api_out = "auto&"
forward_api_name = self.grad_api_contents['invoke'].split(
'(')[0].strip()
autograd_api = self.grad_api_contents['invoke'].replace(
forward_api_name,
GetDygraphForwardFunctionName(forward_api_name), 1)
grad_function_call_str = f"""
if (trace_backward) {{
{indent}{autograd_api_out} api_output = {autograd_api};
{out_assign_str}}} else {{
{indent}{autograd_api_out} api_output = paddle::experimental::{self.namespace}{self.grad_api_contents['invoke']};
{out_assign_str}{indent}}}
"""
else:
grad_function_call_str = f"""
{indent}{grad_api_namespace}{backward_api_name}({grad_api_args_str});"""
# Check Nan and Inf
check_nan_inf_str = CHECK_NAN_AND_INF_TEMPLATE.format(
backward_api_name, "returns")
# Prepare for Node Creation if Necessary
outputs_autograd_meta_str = ""
compute_require_next_grad_str = ""
if len(next_grad_node_creation_str) > 0 or is_invoke_forward_api:
compute_require_next_grad_str = f"{indent}bool trace_backward = egr::Controller::Instance().HasGrad() && create_graph;\n"
# 3. Get Output AutoGradMeta
outputs_autograd_meta_list = []
# TODO(jiabin): Optimize this with SetStopGradient instead of Pass Stop gradient
num_fwd_outputs = len(backward_grad_outputs_map.keys())
for name, (rtype, pos,
grad_api_position) in backward_grad_outputs_map.items():
transformed_tensor_name = self.TransformToNextGradName(name)
output_autograd_meta_name = GetAutoGradMetaName(
transformed_tensor_name)
output_autograd_meta_vec_name = GetAutoGradMetaVectorName(
transformed_tensor_name)
if IsPlainTensorType(rtype):
output_autograd_meta = f"""
auto& {transformed_tensor_name} = returns[{pos}][0];
egr::AutogradMeta* {output_autograd_meta_name} = returns[{pos}][0].initialized() ? egr::EagerUtils::autograd_meta(&{transformed_tensor_name}) : nullptr;
if ({output_autograd_meta_name}) {output_autograd_meta_name}->SetStopGradient(false);
"""
else:
assert IsVectorTensorType(rtype)
if len(next_grad_node_creation_str) > 0:
output_autograd_meta = f"""
auto& {transformed_tensor_name} = returns[{pos}];
std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&{transformed_tensor_name});
std::vector<egr::AutogradMeta*>* {output_autograd_meta_name} = &{output_autograd_meta_vec_name};
for(auto* meta : {output_autograd_meta_vec_name}){{
meta->SetStopGradient(false);
}}
"""
else:
output_autograd_meta = f"""
auto& {transformed_tensor_name} = returns[{pos}];
std::vector<egr::AutogradMeta*> {output_autograd_meta_vec_name} = egr::EagerUtils::autograd_meta(&{transformed_tensor_name});
for(auto* meta : {output_autograd_meta_vec_name}){{
meta->SetStopGradient(false);
}}
"""
outputs_autograd_meta_list.append(output_autograd_meta)
outputs_autograd_meta_str = "\n".join(outputs_autograd_meta_list)
returns_str = f"{indent}if(NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);\n"
returns_str += f"{indent}return returns;\n"
grad_node_name = GetGradNodeName(self.backward_api_name)
        # Prepare input/output strings for logging
var_str = f"\n{indent} std::string input_str = \"\";"
var_str += f"\n{indent} std::string output_str = \"\";"
for name, (ttype, fwd_position,
grad_api_position) in backward_grad_inputs_map.items():
new_name = self.TransformToNextGradName(name)
var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";"
var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
var_str += f"\n{indent} input_str += input_{new_name}_str; "
for name, (backward_input_type, is_fwd_input,
grad_api_position), in backward_forward_inputs_map.items():
new_name = self.TransformToNextGradName(name)
var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n( {new_name} , [%s]), \";"
var_str += f"\n{indent} std::string input_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
var_str += f"\n{indent} input_str += input_{new_name}_str; "
before_log_str = BEFORE_LOG_PRINT_TEMPLATE.format(var_str)
for name, (ttype, fwd_position,
grad_api_position) in backward_grad_outputs_map.items():
new_name = self.TransformToNextGradName(name)
var_str += f"\n{indent} const char* TENSOR_{new_name.upper()}_TEMPLATE = \" \\n ( {new_name} , [%s]), \";"
var_str += f"\n{indent} std::string output_{new_name}_str = paddle::string::Sprintf(TENSOR_{new_name.upper()}_TEMPLATE, egr::EagerUtils::TensorStr({new_name}));"
var_str += f"\n{indent} output_str += output_{new_name}_str; "
log_str = AFTER_LOG_PRINT_TEMPLATE.format(var_str)
self.node_definition_str = GRAD_FUNCTION_TEMPLATE.format(
grad_node_name, self.backward_api_name, fill_zero_str,
get_grad_in_args_str, grad_function_prepare_str,
compute_require_next_grad_str, inplace_check_str,
inplace_for_grad_outs_str, self.backward_api_name, before_log_str,
grad_function_call_str, check_nan_inf_str,
outputs_autograd_meta_str, next_grad_node_creation_str,
self.backward_api_name, log_str, returns_str)
def run(self):
super().run()
self.ResetOptionalInputs()
#####################
## Code Generation ##
#####################
# Higher-order GradNode generation
next_grad_node_creation_str, next_grad_node_out_list, backward_forward_inputs_map = self.GenerateHigherOrderNodeCreationCode(
)
self.GenerateNodeDeclaration()
self.GenerateNodeDefinition(next_grad_node_creation_str,
next_grad_node_out_list,
backward_forward_inputs_map)
class DygraphForwardAndNodesGenerator(GeneratorBase):
def __init__(self, api_yaml_path, backward_yaml_path):
# Parent members:
# self.namespace
# self.api_yaml_path
# self.forward_api_list
GeneratorBase.__init__(self, api_yaml_path)
self.backward_yaml_path = backward_yaml_path
self.grad_api_dict = {}
self.forward_declaration_str = ""
self.forward_definition_str = ""
self.node_declaration_str = ""
self.node_definition_str = ""
def CollectIsForwardOnly(self, forward_api_contents):
        self.is_forward_only = 'backward' not in forward_api_contents.keys()
def ParseYamlContents(self):
self.ParseForwardYamlContents()
backward_yaml_path = self.backward_yaml_path
        # string api is forward only, so it has no corresponding backward yaml
if backward_yaml_path is not None:
self.grad_api_dict = ReadBwdFile(backward_yaml_path)
def GetBackwardAPIContents(self, forward_api_contents):
grad_api_dict = self.grad_api_dict
if 'backward' not in forward_api_contents.keys(): return None
backward_api_name = forward_api_contents['backward']
assert backward_api_name in grad_api_dict.keys(), AssertMessage(
backward_api_name, grad_api_dict.keys())
backward_api_contents = grad_api_dict[backward_api_name]
return backward_api_contents
def GenerateCode(self):
forward_api_list = self.forward_api_list
grad_api_dict = self.grad_api_dict
forward_apis_dict = {}
for api_item in forward_api_list:
forward_apis_dict[api_item['op']] = api_item
namespace = self.namespace
for forward_api_contents in forward_api_list:
if forward_api_contents['op'] in black_ops_list: continue
self.CollectIsForwardOnly(forward_api_contents)
if self.is_forward_only:
backward_api_contents = None
else:
backward_api_contents = self.GetBackwardAPIContents(
forward_api_contents)
# Generate Dygraph Forward Function
function_generator = DygraphForwardFunctionGenerator(
forward_api_contents, backward_api_contents, forward_apis_dict,
namespace)
function_generator.run()
self.forward_definition_str += function_generator.forward_definition_str + "\n"
self.forward_declaration_str += function_generator.forward_declaration_str + "\n"
# Generate Dygraph GradNode Function
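            # Walk down the backward chain (grad -> double_grad -> triple_grad ...): each
            # iteration reuses the current backward op as a pseudo-forward op so that the
            # next-order GradNode can be generated from it.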
while True:
if backward_api_contents is None:
break
next_grad_api_contents = self.GetBackwardAPIContents(
backward_api_contents)
node_generator = DygraphNodeGenerator(forward_api_contents,
backward_api_contents,
forward_apis_dict,
namespace,
next_grad_api_contents)
node_generator.run()
self.node_declaration_str += node_generator.node_declaration_str + "\n"
self.node_definition_str += node_generator.node_definition_str + "\n"
if next_grad_api_contents is None: break
# Detect if there exists higher-order GradNode
forward_api_contents = backward_api_contents
# Fake forward_api_content
forward_api_contents['op'] = forward_api_contents['backward_op']
backward_api_contents = next_grad_api_contents
if len(namespace) > 0:
if namespace.endswith("::"):
namespace = namespace[:-2]
self.forward_definition_str = NAMESPACE_WRAPPER_TEMPLATE.format(
namespace, self.forward_definition_str)
self.forward_declaration_str = NAMESPACE_WRAPPER_TEMPLATE.format(
namespace, self.forward_declaration_str)
self.node_declaration_str = NAMESPACE_WRAPPER_TEMPLATE.format(
namespace, self.node_declaration_str)
self.node_definition_str = NAMESPACE_WRAPPER_TEMPLATE.format(
namespace, self.node_definition_str)
def run(self):
self.ParseYamlContents()
self.InferNameSpace()
self.GenerateCode()
##################
## File Writers ##
##################
def GenerateNodeCCFile(filepath, node_definition_str):
if os.path.exists(filepath):
os.remove(filepath)
file_contents = NODE_CC_FILE_TEMPLATE.format(node_definition_str)
with open(filepath, 'a') as f:
f.write(file_contents)
def GenerateNodeHFile(filepath, node_declaration_str):
if os.path.exists(filepath):
os.remove(filepath)
file_contents = NODE_H_FILE_TEMPLATE.format(node_declaration_str)
with open(filepath, 'a') as f:
f.write(file_contents)
def GenerateForwardCCFile(filepath, forward_definition_str):
if os.path.exists(filepath):
os.remove(filepath)
core_ops_info_str = GenerateCoreOpInfoDefinition()
file_contents = FORWARD_CC_FILE_TEMPLATE.format(core_ops_info_str,
forward_definition_str)
with open(filepath, 'a') as f:
f.write(file_contents)
def GenerateForwardHFile(filepath, forward_function_declaration_str):
if os.path.exists(filepath):
os.remove(filepath)
core_ops_info_str = GenerateCoreOpInfoDeclaration()
file_contents = FORWARD_H_FILE_TEMPLATE.format(
core_ops_info_str, forward_function_declaration_str)
with open(filepath, 'a') as f:
f.write(file_contents)
if __name__ == "__main__":
args = ParseArguments()
api_yaml_paths = args.api_yaml_path.split(",")
backward_yaml_paths = args.backward_yaml_path.split(",")
# Generate per Dygraph API
node_declaration_str = ""
node_definition_str = ""
forward_declaration_str = ""
forward_definition_str = ""
for i in range(len(api_yaml_paths)):
api_yaml_path = api_yaml_paths[i]
        # string api is forward only
if not api_yaml_path.endswith('strings_ops.yaml'):
backward_yaml_path = backward_yaml_paths[i]
else:
backward_yaml_path = None
generator = DygraphForwardAndNodesGenerator(api_yaml_path,
backward_yaml_path)
generator.run()
node_declaration_str += generator.node_declaration_str + "\n"
node_definition_str += generator.node_definition_str + "\n"
forward_declaration_str += generator.forward_declaration_str + "\n"
forward_definition_str += generator.forward_definition_str + "\n"
# Generate Files
nodes_h_path = args.nodes_h_path
nodes_cc_path = args.nodes_cc_path
forwards_h_path = args.forwards_h_path
forwards_cc_path = args.forwards_cc_path
GenerateNodeCCFile(nodes_cc_path, node_definition_str)
GenerateNodeHFile(nodes_h_path, node_declaration_str)
GenerateForwardCCFile(forwards_cc_path, forward_definition_str)
GenerateForwardHFile(forwards_h_path, forward_declaration_str)
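    # Sketch of a typical invocation (the flag names are assumed to mirror the
    # attribute names read from ParseArguments() above, e.g. api_yaml_path; the
    # script and YAML file names here are illustrative, not taken from this repo):
    #   python eager_gen.py \
    #       --api_yaml_path=ops.yaml,strings_ops.yaml \
    #       --backward_yaml_path=backward_ops.yaml \
    #       --nodes_h_path=nodes.h --nodes_cc_path=nodes.cc \
    #       --forwards_h_path=dygraph_functions.h \
    #       --forwards_cc_path=dygraph_functions.cc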
| [
"[email protected]"
] | |
34767bd0c8b2f5feb057dcc43c1748422c9ddea3 | 368c66467b78adf62da04cb0b8cedd2ef37bb127 | /BOJ/Python/10828_스택.py | b380cf362f317d58d32f74c7a857ea8da85d9885 | [] | no_license | DJHyun/Algorithm | c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5 | fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a | refs/heads/master | 2020-07-30T16:32:49.344329 | 2020-02-25T07:59:34 | 2020-02-25T07:59:34 | 210,289,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # baekjoon source = "https://www.acmicpc.net/problem/10828"
import sys
T = int(sys.stdin.readline())
stack = [0]*T
tmp = -1
for test_case in range(T):
N = sys.stdin.readline().split()
if len(N) > 1:
cmd = N[0]
number = N[1]
else:
cmd = N[0]
if cmd == 'push':
tmp += 1
stack[tmp] = number
elif cmd == 'pop':
if tmp == -1:
print(-1)
else:
print(stack[tmp])
stack[tmp] = 0
tmp -= 1
elif cmd == 'size':
print(tmp+1)
elif cmd == 'empty':
if tmp == -1:
print(1)
else:
print(0)
elif cmd == 'top':
if tmp == -1:
print(-1)
else:
print(stack[tmp]) | [
"[email protected]"
] | |
ebfa4b5433fd5445fa52fa4128d08b66bb1c8acc | b64fcb9da80d12c52bd24a7a1b046ed9952b0026 | /client_sdk_python/providers/auto.py | e51b3ffbd7a372bb388c2dc63fe843458be132af | [
"MIT"
] | permissive | PlatONnetwork/client-sdk-python | e59f44a77690806c8763ed6db938ed8447d42417 | 94ad57bb34b5ee7bb314ac858071686382c55402 | refs/heads/master | 2022-07-09T08:49:07.312759 | 2021-12-24T08:15:46 | 2021-12-24T08:15:46 | 173,032,954 | 7 | 16 | MIT | 2022-08-31T02:19:42 | 2019-02-28T03:18:03 | Python | UTF-8 | Python | false | false | 2,788 | py | import os
from urllib.parse import (
urlparse,
)
from client_sdk_python.exceptions import (
CannotHandleRequest,
)
from client_sdk_python.providers import (
BaseProvider,
HTTPProvider,
IPCProvider,
WebsocketProvider,
)
HTTP_SCHEMES = {'http', 'https'}
WS_SCHEMES = {'ws', 'wss'}
def load_provider_from_environment():
uri_string = os.environ.get('WEB3_PROVIDER_URI', '')
if not uri_string:
return None
return load_provider_from_uri(uri_string)
def load_provider_from_uri(uri_string, headers=None):
uri = urlparse(uri_string)
if uri.scheme == 'file':
return IPCProvider(uri.path)
elif uri.scheme in HTTP_SCHEMES:
return HTTPProvider(uri_string, headers)
elif uri.scheme in WS_SCHEMES:
return WebsocketProvider(uri_string)
else:
raise NotImplementedError(
'Web3 does not know how to connect to scheme %r in %r' % (
uri.scheme,
uri_string,
)
)
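# Usage sketch for the helper above (the URIs are illustrative placeholders,
# not endpoints shipped with this package):
#   load_provider_from_uri('http://127.0.0.1:6789')   # -> HTTPProvider
#   load_provider_from_uri('ws://127.0.0.1:6790')     # -> WebsocketProvider
#   load_provider_from_uri('file:///tmp/node.ipc')    # -> IPCProvider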
class AutoProvider(BaseProvider):
default_providers = (
load_provider_from_environment,
IPCProvider,
HTTPProvider,
WebsocketProvider,
)
_active_provider = None
def __init__(self, potential_providers=None):
'''
:param iterable potential_providers: ordered series of provider classes to attempt with
AutoProvider will initialize each potential provider (without arguments),
in an attempt to find an active node. The list will default to
:attribute:`default_providers`.
'''
if potential_providers:
self._potential_providers = potential_providers
else:
self._potential_providers = self.default_providers
def make_request(self, method, params):
try:
return self._proxy_request(method, params)
except IOError as exc:
return self._proxy_request(method, params, use_cache=False)
def isConnected(self):
provider = self._get_active_provider(use_cache=True)
return provider is not None and provider.isConnected()
def _proxy_request(self, method, params, use_cache=True):
provider = self._get_active_provider(use_cache)
if provider is None:
raise CannotHandleRequest("Could not discover provider")
return provider.make_request(method, params)
def _get_active_provider(self, use_cache):
if use_cache and self._active_provider is not None:
return self._active_provider
for Provider in self._potential_providers:
provider = Provider()
if provider is not None and provider.isConnected():
self._active_provider = provider
return provider
return None
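# Usage sketch: AutoProvider tries, in order, the WEB3_PROVIDER_URI environment
# variable, then IPC, HTTP and websocket providers with default settings, and
# caches the first one whose isConnected() succeeds. For example (illustrative URI):
#   os.environ['WEB3_PROVIDER_URI'] = 'http://127.0.0.1:6789'
#   provider = AutoProvider()
#   provider.isConnected()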
| [
"[email protected]"
] | |
e91bc932fdd46fc551c4dde40b6c032d21b7ba8e | 9a9088713c917ac47c0b8713d6969b2cfcdbadac | /leetcode_python/549.Binary_Tree_Longest_Consecutive_Sequence_II.py | d73a5a900ef7632ab534ea974479d43f89361fdf | [] | no_license | zihuaweng/leetcode-solutions | 615fdcb9178b19764b4d30bcfe65a9f785e77270 | e431ff831ddd5f26891e6ee4506a20d7972b4f02 | refs/heads/master | 2023-02-06T03:58:26.413711 | 2020-12-26T05:23:03 | 2020-12-26T05:23:03 | 311,418,790 | 4 | 7 | null | null | null | null | UTF-8 | Python | false | false | 934 | py | #!/usr/bin/env python3
# coding: utf-8
# Time complexity: O(n), each tree node is visited once
# Space complexity: O(h) for the recursion stack, where h is the tree height
# https://leetcode.com/problems/binary-tree-longest-consecutive-sequence-ii
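# Minimal TreeNode stub so this snippet runs standalone; it mirrors LeetCode's
# standard binary-tree node definition and is not part of the original solution.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right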
def longestConsecutive(root: TreeNode) -> int:
def longest_path(root):
if not root:
return 0, 0
inc, dec = 1, 1
l_inc, l_dec = longest_path(root.left)
r_inc, r_dec = longest_path(root.right)
if root.left:
if root.left.val == root.val + 1:
inc = max(inc, 1 + l_inc)
if root.left.val == root.val - 1:
dec = max(dec, 1 + l_dec)
if root.right:
if root.right.val == root.val + 1:
inc = max(inc, 1 + r_inc)
if root.right.val == root.val - 1:
dec = max(dec, 1 + r_dec)
res[0] = max(res[0], inc + dec - 1)
return (inc, dec)
res = [0]
longest_path(root)
return res[0] | [
"[email protected]"
] | |
03d4dc30e025720b6e6240e0e43e9d93e51dbaf7 | e66fa131cff76fa3fe70e7b6649fa1332159c781 | /ch09/generatorExp.py | f604819768741ec3d1548141beb0ef6c31cfaead | [] | no_license | chc1129/python_tutorial | c6d97c6671a7952d8a7b838ccb8aa3c352fa6881 | 2f8b389731bafbda73c766c095d1eaadb0f99a1c | refs/heads/main | 2023-08-24T07:00:43.424652 | 2021-10-28T16:07:57 | 2021-10-28T16:07:57 | 341,532,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | print( sum(i*i for i in range(10)) )
xvec = [10, 20, 30]
yvec = [7, 5, 3]
print( sum(x*y for x,y in zip(xvec, yvec)) )
unique_words = set(word for line in page for word in line.split())
valedictorian = max((student.gpa, student.name) for student in graduates)
data = 'golf'
list(data[i] for i in range(len(data)-1, -1, -1))
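# Expected results for the runnable lines above (sketch): the first sum is 285,
# the dot product is 260, and the last expression reverses the string, giving
# ['f', 'l', 'o', 'g']. `page` and `graduates` are illustrative names assumed
# to be defined elsewhere.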
| [
"[email protected]"
] | |
b419828ae3ce92ddec47d5eb90005d575fb2bf97 | d65128e38be0243f279e0d72ef85e7d3c5e116ca | /base/site-packages/gdata/calendar/__init__.py | cea1a03f5836de81918a9ea01e2fcc841928e682 | [
"Apache-2.0"
] | permissive | ZxwZero/fastor | 19bfc568f9a68f1447c2e049428330ade02d451d | dd9e299e250362802032d1984801bed249e36d8d | refs/heads/master | 2021-06-26T06:40:38.555211 | 2021-06-09T02:05:38 | 2021-06-09T02:05:38 | 229,753,500 | 1 | 1 | Apache-2.0 | 2019-12-23T12:59:25 | 2019-12-23T12:59:24 | null | UTF-8 | Python | false | false | 34,390 | py | #!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Calendar."""
__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Calendar entities.
GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005'
GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s'
WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent')
GACL_NAMESPACE = gdata.GACL_NAMESPACE
GACL_TEMPLATE = gdata.GACL_TEMPLATE
class ValueAttributeContainer(atom.AtomBase):
"""A parent class for all Calendar classes which have a value attribute.
Children include Color, AccessLevel, Hidden
"""
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Color(ValueAttributeContainer):
"""The Google Calendar color element"""
_tag = 'color'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class AccessLevel(ValueAttributeContainer):
"""The Google Calendar accesslevel element"""
_tag = 'accesslevel'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Hidden(ValueAttributeContainer):
"""The Google Calendar hidden element"""
_tag = 'hidden'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Selected(ValueAttributeContainer):
"""The Google Calendar selected element"""
_tag = 'selected'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Timezone(ValueAttributeContainer):
"""The Google Calendar timezone element"""
_tag = 'timezone'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Where(atom.AtomBase):
"""The Google Calendar Where element"""
_tag = 'where'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['valueString'] = 'value_string'
def __init__(self, value_string=None, extension_elements=None,
extension_attributes=None, text=None):
self.value_string = value_string
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar meta Entry flavor of an Atom Entry """
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}color' % GCAL_NAMESPACE] = ('color', Color)
_children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level',
AccessLevel)
_children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden)
_children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected)
_children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)
_children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
color=None, access_level=None, hidden=None, timezone=None,
selected=None,
where=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title,
updated=updated, text=None)
self.color = color
self.access_level = access_level
self.hidden = hidden
self.selected = selected
self.timezone = timezone
self.where = where
class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar meta feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry])
class Scope(atom.AtomBase):
"""The Google ACL scope element"""
_tag = 'scope'
_namespace = GACL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
_attributes['type'] = 'type'
def __init__(self, extension_elements=None, value=None, scope_type=None,
extension_attributes=None, text=None):
self.value = value
self.type = scope_type
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Role(ValueAttributeContainer):
"""The Google Calendar timezone element"""
_tag = 'role'
_namespace = GACL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar ACL Entry flavor of an Atom Entry """
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope)
_children['{%s}role' % GACL_NAMESPACE] = ('role', Role)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
scope=None, role=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title,
updated=updated, text=None)
self.scope = scope
self.role = role
class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar ACL feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry])
class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar event comments entry flavor of an Atom Entry"""
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar event comments feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[CalendarEventCommentEntry])
class ExtendedProperty(gdata.ExtendedProperty):
"""A transparent subclass of gdata.ExtendedProperty added to this module
for backwards compatibility."""
class Reminder(atom.AtomBase):
"""The Google Calendar reminder element"""
_tag = 'reminder'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['absoluteTime'] = 'absolute_time'
_attributes['days'] = 'days'
_attributes['hours'] = 'hours'
_attributes['minutes'] = 'minutes'
def __init__(self, absolute_time=None,
days=None, hours=None, minutes=None,
extension_elements=None,
extension_attributes=None, text=None):
self.absolute_time = absolute_time
if days is not None:
self.days = str(days)
else:
self.days = None
if hours is not None:
self.hours = str(hours)
else:
self.hours = None
if minutes is not None:
self.minutes = str(minutes)
else:
self.minutes = None
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class When(atom.AtomBase):
"""The Google Calendar When element"""
_tag = 'when'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder])
_attributes['startTime'] = 'start_time'
_attributes['endTime'] = 'end_time'
def __init__(self, start_time=None, end_time=None, reminder=None,
extension_elements=None, extension_attributes=None, text=None):
self.start_time = start_time
self.end_time = end_time
self.reminder = reminder or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Recurrence(atom.AtomBase):
"""The Google Calendar Recurrence element"""
_tag = 'recurrence'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
class UriEnumElement(atom.AtomBase):
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, tag, enum_map, attrib_name='value',
extension_elements=None, extension_attributes=None, text=None):
self.tag=tag
self.enum_map=enum_map
self.attrib_name=attrib_name
self.value=None
self.text=text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def findKey(self, value):
res=[item[0] for item in self.enum_map.items() if item[1] == value]
if res is None or len(res) == 0:
return None
return res[0]
def _ConvertElementAttributeToMember(self, attribute, value):
# Special logic to use the enum_map to set the value of the object's value member.
if attribute == self.attrib_name and value != '':
self.value = self.enum_map[value]
return
# Find the attribute in this class's list of attributes.
if self.__class__._attributes.has_key(attribute):
# Find the member of this class which corresponds to the XML attribute
# (lookup in current_class._attributes) and set this member to the
# desired value (using self.__dict__).
setattr(self, self.__class__._attributes[attribute], value)
else:
# The current class doesn't map this attribute, so try to parent class.
atom.ExtensionContainer._ConvertElementAttributeToMember(self,
attribute,
value)
def _AddMembersToElementTree(self, tree):
# Convert the members of this class which are XML child nodes.
# This uses the class's _children dictionary to find the members which
# should become XML child nodes.
member_node_names = [values[0] for tag, values in
self.__class__._children.iteritems()]
for member_name in member_node_names:
member = getattr(self, member_name)
if member is None:
pass
elif isinstance(member, list):
for instance in member:
instance._BecomeChildElement(tree)
else:
member._BecomeChildElement(tree)
# Special logic to set the desired XML attribute.
key = self.findKey(self.value)
if key is not None:
tree.attrib[self.attrib_name]=key
# Convert the members of this class which are XML attributes.
for xml_attribute, member_name in self.__class__._attributes.iteritems():
member = getattr(self, member_name)
if member is not None:
tree.attrib[xml_attribute] = member
# Lastly, call the parent's _AddMembersToElementTree to get any
# extension elements.
atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class AttendeeStatus(UriEnumElement):
"""The Google Calendar attendeeStatus element"""
_tag = 'attendeeStatus'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
attendee_enum = {
'http://schemas.google.com/g/2005#event.accepted' : 'ACCEPTED',
'http://schemas.google.com/g/2005#event.declined' : 'DECLINED',
'http://schemas.google.com/g/2005#event.invited' : 'INVITED',
'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'}
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, 'attendeeStatus', AttendeeStatus.attendee_enum,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
class AttendeeType(UriEnumElement):
"""The Google Calendar attendeeType element"""
_tag = 'attendeeType'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
attendee_type_enum = {
'http://schemas.google.com/g/2005#event.optional' : 'OPTIONAL',
'http://schemas.google.com/g/2005#event.required' : 'REQUIRED' }
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, 'attendeeType',
AttendeeType.attendee_type_enum,
extension_elements=extension_elements,
extension_attributes=extension_attributes,text=text)
class Visibility(UriEnumElement):
"""The Google Calendar Visibility element"""
_tag = 'visibility'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
visibility_enum = {
'http://schemas.google.com/g/2005#event.confidential' : 'CONFIDENTIAL',
'http://schemas.google.com/g/2005#event.default' : 'DEFAULT',
'http://schemas.google.com/g/2005#event.private' : 'PRIVATE',
'http://schemas.google.com/g/2005#event.public' : 'PUBLIC' }
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, 'visibility', Visibility.visibility_enum,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
class Transparency(UriEnumElement):
"""The Google Calendar Transparency element"""
_tag = 'transparency'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
transparency_enum = {
'http://schemas.google.com/g/2005#event.opaque' : 'OPAQUE',
'http://schemas.google.com/g/2005#event.transparent' : 'TRANSPARENT' }
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, tag='transparency',
enum_map=Transparency.transparency_enum,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
class Comments(atom.AtomBase):
"""The Google Calendar comments element"""
_tag = 'comments'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
gdata.FeedLink)
_attributes['rel'] = 'rel'
def __init__(self, rel=None, feed_link=None, extension_elements=None,
extension_attributes=None, text=None):
self.rel = rel
self.feed_link = feed_link
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class EventStatus(UriEnumElement):
"""The Google Calendar eventStatus element"""
_tag = 'eventStatus'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
status_enum = { 'http://schemas.google.com/g/2005#event.canceled' : 'CANCELED',
'http://schemas.google.com/g/2005#event.confirmed' : 'CONFIRMED',
'http://schemas.google.com/g/2005#event.tentative' : 'TENTATIVE'}
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, tag='eventStatus',
enum_map=EventStatus.status_enum,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
class Who(UriEnumElement):
"""The Google Calendar Who element"""
_tag = 'who'
_namespace = gdata.GDATA_NAMESPACE
_children = UriEnumElement._children.copy()
_attributes = UriEnumElement._attributes.copy()
_children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = (
'attendee_status', AttendeeStatus)
_children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = ('attendee_type',
AttendeeType)
_attributes['valueString'] = 'name'
_attributes['email'] = 'email'
relEnum = { 'http://schemas.google.com/g/2005#event.attendee' : 'ATTENDEE',
'http://schemas.google.com/g/2005#event.organizer' : 'ORGANIZER',
'http://schemas.google.com/g/2005#event.performer' : 'PERFORMER',
'http://schemas.google.com/g/2005#event.speaker' : 'SPEAKER',
'http://schemas.google.com/g/2005#message.bcc' : 'BCC',
'http://schemas.google.com/g/2005#message.cc' : 'CC',
'http://schemas.google.com/g/2005#message.from' : 'FROM',
'http://schemas.google.com/g/2005#message.reply-to' : 'REPLY_TO',
'http://schemas.google.com/g/2005#message.to' : 'TO' }
def __init__(self, name=None, email=None, attendee_status=None,
attendee_type=None, rel=None, extension_elements=None,
extension_attributes=None, text=None):
UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel',
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.name = name
self.email = email
self.attendee_status = attendee_status
self.attendee_type = attendee_type
self.rel = rel
class OriginalEvent(atom.AtomBase):
"""The Google Calendar OriginalEvent element"""
_tag = 'originalEvent'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
# TODO: The when tag used to map to a EntryLink, make sure it should really be a When.
_children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When)
_attributes['id'] = 'id'
_attributes['href'] = 'href'
def __init__(self, id=None, href=None, when=None,
extension_elements=None, extension_attributes=None, text=None):
self.id = id
self.href = href
self.when = when
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def GetCalendarEventEntryClass():
return CalendarEventEntry
# This class is not completely defined here, because of a circular reference
# in which CalendarEventEntryLink and CalendarEventEntry refer to one another.
class CalendarEventEntryLink(gdata.EntryLink):
"""An entryLink which contains a calendar event entry
  Within an event's recurrenceExceptions, an entry link
points to a calendar event entry. This class exists
to capture the calendar specific extensions in the entry.
"""
_tag = 'entryLink'
_namespace = gdata.GDATA_NAMESPACE
_children = gdata.EntryLink._children.copy()
_attributes = gdata.EntryLink._attributes.copy()
# The CalendarEventEntryLink should like CalendarEventEntry as a child but
# that class hasn't been defined yet, so we will wait until after defining
# CalendarEventEntry to list it in _children.
class RecurrenceException(atom.AtomBase):
"""The Google Calendar RecurrenceException element"""
_tag = 'recurrenceException'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = ('entry_link',
CalendarEventEntryLink)
_children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event',
OriginalEvent)
_attributes['specialized'] = 'specialized'
def __init__(self, specialized=None, entry_link=None,
original_event=None, extension_elements=None,
extension_attributes=None, text=None):
self.specialized = specialized
self.entry_link = entry_link
self.original_event = original_event
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class SendEventNotifications(atom.AtomBase):
"""The Google Calendar sendEventNotifications element"""
_tag = 'sendEventNotifications'
_namespace = GCAL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, extension_elements=None,
value=None, extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class QuickAdd(atom.AtomBase):
"""The Google Calendar quickadd element"""
_tag = 'quickadd'
_namespace = GCAL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, extension_elements=None,
value=None, extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def _TransferToElementTree(self, element_tree):
if self.value:
element_tree.attrib['value'] = self.value
element_tree.tag = GCAL_TEMPLATE % 'quickadd'
atom.AtomBase._TransferToElementTree(self, element_tree)
return element_tree
def _TakeAttributeFromElementTree(self, attribute, element_tree):
if attribute == 'value':
self.value = element_tree.attrib[attribute]
del element_tree.attrib[attribute]
else:
atom.AtomBase._TakeAttributeFromElementTree(self, attribute,
element_tree)
class WebContentGadgetPref(atom.AtomBase):
_tag = 'webContentGadgetPref'
_namespace = GCAL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['name'] = 'name'
_attributes['value'] = 'value'
"""The Google Calendar Web Content Gadget Preferences element"""
def __init__(self, name=None, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.name = name
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class WebContent(atom.AtomBase):
_tag = 'webContent'
_namespace = GCAL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = ('gadget_pref',
[WebContentGadgetPref])
_attributes['url'] = 'url'
_attributes['width'] = 'width'
_attributes['height'] = 'height'
def __init__(self, url=None, width=None, height=None, text=None,
gadget_pref=None, extension_elements=None, extension_attributes=None):
self.url = url
self.width = width
self.height = height
self.text = text
self.gadget_pref = gadget_pref or []
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class WebContentLink(atom.Link):
_tag = 'link'
_namespace = atom.ATOM_NAMESPACE
_children = atom.Link._children.copy()
_attributes = atom.Link._attributes.copy()
_children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent)
def __init__(self, title=None, href=None, link_type=None,
web_content=None):
atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title, href=href,
link_type=link_type)
self.web_content = web_content
class CalendarEventEntry(gdata.BatchEntry):
"""A Google Calendar flavor of an Atom Entry """
_tag = gdata.BatchEntry._tag
_namespace = gdata.BatchEntry._namespace
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
# This class also contains WebContentLinks but converting those members
# is handled in a special version of _ConvertElementTreeToMember.
_children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where])
_children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When])
_children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who])
_children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
'extended_property', [ExtendedProperty])
_children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility',
Visibility)
_children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency',
Transparency)
_children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status',
EventStatus)
_children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence',
Recurrence)
_children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = (
'recurrence_exception', [RecurrenceException])
_children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = (
'send_event_notifications', SendEventNotifications)
_children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd)
_children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments)
_children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event',
OriginalEvent)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
transparency=None, comments=None, event_status=None,
send_event_notifications=None, visibility=None,
recurrence=None, recurrence_exception=None,
where=None, when=None, who=None, quick_add=None,
extended_property=None, original_event=None,
batch_operation=None, batch_id=None, batch_status=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchEntry.__init__(self, author=author, category=category,
content=content,
atom_id=atom_id, link=link, published=published,
batch_operation=batch_operation, batch_id=batch_id,
batch_status=batch_status,
title=title, updated=updated)
self.transparency = transparency
self.comments = comments
self.event_status = event_status
self.send_event_notifications = send_event_notifications
self.visibility = visibility
self.recurrence = recurrence
self.recurrence_exception = recurrence_exception or []
self.where = where or []
self.when = when or []
self.who = who or []
self.quick_add = quick_add
self.extended_property = extended_property or []
self.original_event = original_event
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# We needed to add special logic to _ConvertElementTreeToMember because we
# want to make links with a rel of WEB_CONTENT_LINK_REL into a
# WebContentLink
def _ConvertElementTreeToMember(self, child_tree):
# Special logic to handle Web Content links
if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and
child_tree.attrib['rel'] == WEB_CONTENT_LINK_REL):
if self.link is None:
self.link = []
self.link.append(atom._CreateClassFromElementTree(WebContentLink,
child_tree))
return
# Find the element's tag in this class's list of child members
if self.__class__._children.has_key(child_tree.tag):
member_name = self.__class__._children[child_tree.tag][0]
member_class = self.__class__._children[child_tree.tag][1]
# If the class member is supposed to contain a list, make sure the
# matching member is set to a list, then append the new member
# instance to the list.
if isinstance(member_class, list):
if getattr(self, member_name) is None:
setattr(self, member_name, [])
getattr(self, member_name).append(atom._CreateClassFromElementTree(
member_class[0], child_tree))
else:
setattr(self, member_name,
atom._CreateClassFromElementTree(member_class, child_tree))
else:
atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
def GetWebContentLink(self):
"""Finds the first link with rel set to WEB_CONTENT_REL
Returns:
A gdata.calendar.WebContentLink or none if none of the links had rel
equal to WEB_CONTENT_REL
"""
for a_link in self.link:
if a_link.rel == WEB_CONTENT_LINK_REL:
return a_link
return None
def CalendarEventEntryFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string)
def CalendarEventCommentEntryFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string)
CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE:
('entry', CalendarEventEntry)}
def CalendarEventEntryLinkFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string)
class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder):
"""A Google Calendar event feed flavor of an Atom Feed"""
_tag = gdata.BatchFeed._tag
_namespace = gdata.BatchFeed._namespace
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[CalendarEventEntry])
_children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None, entry=None,
total_results=None, start_index=None, items_per_page=None,
interrupted=None, timezone=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
interrupted=interrupted,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text)
self.timezone = timezone
def CalendarListEntryFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarListEntry, xml_string)
def CalendarAclEntryFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarAclEntry, xml_string)
def CalendarListFeedFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarListFeed, xml_string)
def CalendarAclFeedFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string)
def CalendarEventFeedFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string)
def CalendarEventCommentFeedFromString(xml_string):
return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string)
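# Usage sketch for the parsing helpers above (the XML payload is whatever the
# Calendar data API returns; attribute access below follows the class
# definitions in this module):
#   event = CalendarEventEntryFromString(xml_string)
#   event.title.text, [w.start_time for w in event.when]
#   xml_out = event.ToString()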
| [
"[email protected]"
] | |
f91d411468aba18dd844b4bd362c56aa8218192b | be01d0d54723d1e876c9a15618921dffe2b2255a | /Python/123.best_time_to_buy_sell_stackIII.py | 408394965df626a09202ee7117c0b0d7b2fb3021 | [] | no_license | jxlxt/leetcode | 17e7f25bf94dd334ac0d6254ffcffa003ed04c10 | a6e6e5be3dd5f9501d0aa4caa6744621ab887f51 | refs/heads/master | 2023-05-26T22:10:03.997428 | 2023-05-24T02:36:05 | 2023-05-24T02:36:05 | 118,216,055 | 0 | 0 | null | 2018-01-20T06:31:57 | 2018-01-20T06:30:06 | null | UTF-8 | Python | false | false | 638 | py | class Solution:
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
n = len(prices)
if n <= 1: return 0
p1, p2 = [0] * n, [0] * n
minV, maxV = prices[0], prices[-1]
for i in range(1, n):
minV = min(prices[i], minV)
p1[i] = max(p1[i-1], prices[i] - minV)
for i in range(n-2, -1, -1):
maxV = max(prices[i], maxV)
p2[i] = max(p2[i+1], maxV - prices[i])
res = 0
for i in range(n):
res = max(res, p1[i] + p2[i])
return res
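    # Quick check (sketch): for the classic two-transaction example
    #   Solution().maxProfit([3, 3, 5, 0, 0, 3, 1, 4])  ->  6
    # (buy at 0 sell at 3, then buy at 1 sell at 4).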
| [
"[email protected]"
] | |
3f24505ba0b0df5ce1e41a599054ca1234b21a5f | 303bac96502e5b1666c05afd6c2e85cf33f19d8c | /solutions/python3/245.py | 2a1a8626a9b652689053d04fde6cd9e553dc7683 | [
"MIT"
] | permissive | jxhangithub/leetcode | 5e82f4aeee1bf201e93e889e5c4ded2fcda90437 | 0de1af607557d95856f0e4c2a12a56c8c57d731d | refs/heads/master | 2022-05-22T12:57:54.251281 | 2022-03-09T22:36:20 | 2022-03-09T22:36:20 | 370,508,127 | 1 | 0 | MIT | 2022-03-09T22:36:20 | 2021-05-24T23:16:10 | null | UTF-8 | Python | false | false | 439 | py | class Solution:
def shortestWordDistance(self, words, word1, word2):
i1 = i2 = -1
res, same = float("inf"), word1 == word2
for i, w in enumerate(words):
if w == word1:
if same: i2 = i1
i1 = i
if i2 >= 0: res = min(res, i1 - i2)
elif w == word2:
i2 = i
if i1 >= 0: res = min(res, i2 - i1)
return res | [
"[email protected]"
] | |
b596e8250368af057a20ec19b85049a800aebf86 | b46e3f6472e2ea4605f4d088a211dbaff2493574 | /reviewboard/dependencies.py | ad815f50f2c2685d16d32fec614c8baabbda853d | [
"MIT"
] | permissive | fgallaire/reviewboard | 360501a9f39c5898c54a80801c790f53b0a74f39 | e6b1323aee5e361754b110e4604ea5fc098050fe | refs/heads/master | 2021-01-18T03:13:56.561458 | 2017-03-22T14:41:15 | 2017-03-22T14:41:15 | 85,837,942 | 0 | 0 | null | 2017-03-22T14:30:31 | 2017-03-22T14:30:31 | null | UTF-8 | Python | false | false | 2,138 | py | """Version information for Review Board dependencies.
This contains constants that other parts of Review Board (primarily packaging)
can use to look up information on major dependencies of Review Board.
The contents in this file might change substantially between releases. If
you're going to make use of data from this file, code defensively.
"""
from __future__ import unicode_literals
# NOTE: This file may not import other files! It's used for packaging and
# may be needed before any dependencies have been installed.
#: The major version of Django we're using for documentation.
django_doc_major_version = '1.6'
#: The major version of Djblets we're using for documentation.
djblets_doc_major_version = '0.9'
#: The version range required for Django.
django_version = '>=1.6.11,<1.6.999'
#: The version range required for Djblets.
djblets_version = '>=0.10a0.dev,<=0.10.999'
#: All dependencies required to install Review Board.
package_dependencies = {
'Django': django_version,
'django_evolution': '>=0.7.6,<=0.7.999',
'django-haystack': '>=2.3.1,<=2.4.999',
'django-multiselectfield': '',
'Djblets': djblets_version,
'docutils': '',
'markdown': '>=2.4.0,<2.4.999',
'mimeparse': '>=0.1.3',
'paramiko': '>=1.12',
'pycrypto': '>=2.6',
'Pygments': '>=2.1',
'python-dateutil': '==1.5',
'python-memcached': '',
'pytz': '>=2015.2',
'Whoosh': '>=2.6',
}
def build_dependency_list(deps, version_prefix=''):
"""Build a list of dependency specifiers from a dependency map.
This can be used along with :py:data:`package_dependencies`,
:py:data:`npm_dependencies`, or other dependency dictionaries to build a
list of dependency specifiers for use on the command line or in
:file:`setup.py`.
Args:
        deps (dict):
            A dictionary of dependencies.
        version_prefix (unicode, optional):
            A string inserted between each dependency name and its version
            specifier in the generated entries.
Returns:
list of unicode:
A list of dependency specifiers.
"""
return sorted(
[
'%s%s%s' % (dep_name, version_prefix, dep_version)
for dep_name, dep_version in deps.items()
],
key=lambda s: s.lower())
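# Example usage (sketch): produce pip-style specifiers for setup.py's
# install_requires, sorted case-insensitively by name:
#   build_dependency_list(package_dependencies)
#   # e.g. [..., 'Django>=1.6.11,<1.6.999', 'Djblets>=0.10a0.dev,<=0.10.999', ...]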
| [
"[email protected]"
] | |
d72826271b3bd2f99dc786efae7454c29142b060 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_22284.py | 920cb5cd1458f3bda5ea7fed8c9f8ece04a5a324 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | # Are greenlets really useful by themselves?
print()
| [
"[email protected]"
] | |
76e8eba313f8b3e1df3b59f6a7f19ef2cec47afc | f76e11d4da15768bf8683380b1b1312f04060f9a | /fix_uppsala_mw.py | 37b2e1f5aebb99a509df6f9d9861fadc86fee9cf | [] | no_license | rasoolims/scripts | 0804a2e5f7f405846cb659f9f8199f6bd93c4af6 | fd8110558fff1bb5a7527ff854eeea87b0b3c597 | refs/heads/master | 2021-07-07T03:53:20.507765 | 2021-04-13T14:53:00 | 2021-04-13T14:53:00 | 24,770,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | import os,sys,codecs
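# Reads a CoNLL-style file (argv[1]) and writes a copy (argv[2]) with
# multiword-token rows dropped, i.e. rows whose first column contains '-'
# (ranged IDs such as "1-2"), as suggested by the filter below.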
reader =codecs.open(os.path.abspath(sys.argv[1]),'r')
writer =codecs.open(os.path.abspath(sys.argv[2]),'w')
line =reader.readline()
while line:
spl = line.strip().split()
if len(spl)<7 or not '-' in spl[0]:
writer.write(line.strip()+'\n')
line =reader.readline()
writer.close() | [
"[email protected]"
] | |
2c8a22a8cc80312f3c5a73950fa5d5a693c26997 | cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b | /ecloud/code/src/main/python/manor/streamlet/delete_node.py | 34611b5a6beb3e0fffee7b01a20ed11aa2690dd6 | [] | no_license | 1026237416/Python | ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14 | ffa8f9ffb8bfec114b0ca46295db05c4213c4c30 | refs/heads/master | 2021-07-05T00:57:00.456886 | 2019-04-26T10:13:46 | 2019-04-26T10:13:46 | 114,510,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | import time
from tornado import gen
from manor.screwdriver import compute_util
from manor.streamlet import StreamletBase
def get_instance(params,node_id,serial):
return DeleteNode(params,serial,node_id)
class DeleteNode(StreamletBase):
def __init__(self,params,serial,node_id):
super(DeleteNode,self).__init__(node_id,params,serial)
self.serial=serial
self.server_id=self.params['server_id']
self.command_params=[]
self.stack_ids=[]
@gen.coroutine
def execute(self):
info=compute_util.get_info(self.server_id).to_dict()
self.log.debug(info)
if info['status']!='SHUTOFF':
compute_util.stop_server(self.server_id)
def check_finish(self):
info=compute_util.get_info(self.server_id).to_dict()
if info['status']=='SHUTOFF':
compute_util.delete_server(self.server_id)
for x in range(10):
self.log.debug('finish count down:%s'%x)
time.sleep(1)
self.log.debug('finished ...')
return True
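        # Note: when the server has not reached SHUTOFF yet, this method falls
        # through and implicitly returns None (falsy), so the caller is
        # presumably expected to poll check_finish() until it returns True.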
| [
"[email protected]"
] | |
96a321bea45c33c8c9cbe88fe3e61e609ad28006 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02862/s302553699.py | 7f9f0b8e8757ae71fce60543238888054cf73342 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 757 | py | MOD = 1000000007
def mod_inv(mod, a):
old_t, t = 0, 1
old_r, r = mod, a
while r != 0:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_t, t = t, old_t - quotient * t
return old_t % mod
def combine(n, k, mod):
if k > n // 2:
k = n - k
u = 1
for i in range(n - k + 1, n + 1):
u = u * i % mod
v = 1
for i in range(1, k + 1):
v = v * i % mod
    return u * mod_inv(mod, v) % mod
def main():
X, Y = map(int, input().split())
m1 = X + Y
if m1 % 3 == 0:
m = m1 // 3
if X < m or Y < m:
print(0)
else:
print(combine(m, X - m, MOD))
else:
print(0)
if __name__ == '__main__':
main()
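# Worked example (sketch): this appears to count move sequences reaching (X, Y)
# with steps (+1, +2) and (+2, +1); each step adds 3 to X + Y, so m = (X + Y) // 3
# steps are taken and the answer is C(m, X - m) mod 1e9+7.
#   X = 3, Y = 3  ->  m = 2, combine(2, 1, MOD) = 2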
| [
"[email protected]"
] | |
202ffdc331d2955587acdf296d2f6cc782d26fe0 | 0754e2e7aa1ffb90b54d563ce5a9317e41cfebf9 | /keras/keras95_save_pd.py | 0fcb88a0887f2d9415ea9ce5bffeaf323de5902a | [] | no_license | ChaeMyungSeock/Study | 62dcf4b13696b1f483c816af576ea8883c57e531 | 6f726a6ecb43387e4a3b9d068a9c491b115c74c0 | refs/heads/master | 2023-01-24T20:59:52.053394 | 2020-12-07T14:54:34 | 2020-12-07T14:54:34 | 263,255,793 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | import numpy as np
import pandas as pd
datasets = pd.read_csv("./data/csv/iris.csv", index_col=None, header=0, sep=',')    # sep => split the data on ','
# pd => loc // iloc (label- vs. position-based indexing)
print(datasets)
print(datasets.__class__)
print(datasets.head())  # first 5 rows
print(datasets.tail())  # last 5 rows
print("========================")
print(datasets.values)      # convert the pandas DataFrame to a NumPy array
print(datasets.values.__class__)
# save as a NumPy file
datasets = datasets.values
np.save('./data/iris_datasets.npy',arr=datasets)
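# Note (sketch): because the iris CSV mixes numeric columns with a string
# species column, the saved array has dtype=object, so loading it back later
# needs np.load('./data/iris_datasets.npy', allow_pickle=True).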
# np.save('./data/iris_y.npy',arr=y_data)
# np.save('') | [
"[email protected]"
] | |
e204a81b675d672491c334f77f7cc5c4f8328a00 | 146cd740649b87032cbbfb97cde6ae486f76230b | /venv/lib/python3.6/site-packages/matplotlib/tests/test_transforms.py | 06e95e1c8e9ba5befed1fa70548029b15d1f0917 | [] | no_license | shellyhuang18/plank-filter-master | 8b7024c46334062496f05d31eefc618ebae50b4e | 8993a5b00f45841c3385fe997857bfdd10b71a84 | refs/heads/master | 2020-03-30T18:14:45.017957 | 2018-12-27T20:51:25 | 2018-12-27T20:51:25 | 151,490,556 | 0 | 1 | null | 2018-12-19T22:42:26 | 2018-10-03T22:50:58 | Python | UTF-8 | Python | false | false | 25,344 | py | import unittest
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_equal, assert_array_almost_equal)
import pytest
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.transforms as mtransforms
from matplotlib.path import Path
from matplotlib.scale import LogScale
from matplotlib.testing.decorators import image_comparison
def test_non_affine_caching():
class AssertingNonAffineTransform(mtransforms.Transform):
"""
This transform raises an assertion error when called when it
shouldn't be and self.raise_on_transform is True.
"""
input_dims = output_dims = 2
is_affine = False
def __init__(self, *args, **kwargs):
mtransforms.Transform.__init__(self, *args, **kwargs)
self.raise_on_transform = False
self.underlying_transform = mtransforms.Affine2D().scale(10, 10)
def transform_path_non_affine(self, path):
assert not self.raise_on_transform, \
'Invalidated affine part of transform unnecessarily.'
return self.underlying_transform.transform_path(path)
transform_path = transform_path_non_affine
def transform_non_affine(self, path):
assert not self.raise_on_transform, \
'Invalidated affine part of transform unnecessarily.'
return self.underlying_transform.transform(path)
transform = transform_non_affine
my_trans = AssertingNonAffineTransform()
ax = plt.axes()
plt.plot(np.arange(10), transform=my_trans + ax.transData)
plt.draw()
    # enable the transform to raise an exception if its non-affine transform
# method is triggered again.
my_trans.raise_on_transform = True
ax.transAxes.invalidate()
plt.draw()
def test_external_transform_api():
class ScaledBy(object):
def __init__(self, scale_factor):
self._scale_factor = scale_factor
def _as_mpl_transform(self, axes):
return (mtransforms.Affine2D().scale(self._scale_factor)
+ axes.transData)
ax = plt.axes()
line, = plt.plot(np.arange(10), transform=ScaledBy(10))
ax.set_xlim(0, 100)
ax.set_ylim(0, 100)
# assert that the top transform of the line is the scale transform.
assert_allclose(line.get_transform()._a.get_matrix(),
mtransforms.Affine2D().scale(10).get_matrix())
@image_comparison(baseline_images=['pre_transform_data'],
tol=0.08, remove_text=True, style='mpl20')
def test_pre_transform_plotting():
# a catch-all for as many as possible plot layouts which handle
# pre-transforming the data NOTE: The axis range is important in this
# plot. It should be x10 what the data suggests it should be
ax = plt.axes()
times10 = mtransforms.Affine2D().scale(10)
ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData)
ax.pcolormesh(np.linspace(0, 4, 7),
np.linspace(5.5, 8, 9),
np.arange(48).reshape(8, 6),
transform=times10 + ax.transData)
ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
transform=times10 + ax.transData)
x = np.linspace(8, 10, 20)
y = np.linspace(1, 5, 20)
u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
v = np.sin(x) - np.cos(y[:, np.newaxis])
df = 25. / 30. # Compatibility factor for old test image
ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
density=(df, df), linewidth=u**2 + v**2)
# reduce the vector data down a bit for barb and quiver plotting
x, y = x[::3], y[::3]
u, v = u[::3, ::3], v[::3, ::3]
ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)
ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)
def test_contour_pre_transform_limits():
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.contourf(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolor_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolor(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_pcolormesh_pre_transform_limits():
# Based on test_contour_pre_transform_limits()
ax = plt.axes()
xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
ax.pcolormesh(xs, ys, np.log(xs * ys),
transform=mtransforms.Affine2D().scale(0.1) + ax.transData)
expected = np.array([[1.5, 1.24],
[2., 1.25]])
assert_almost_equal(expected, ax.dataLim.get_points())
def test_Affine2D_from_values():
points = np.array([[0, 0],
[10, 20],
[-1, 0],
])
t = mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [10, 0], [-1, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 2, 0, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [0, 20], [0, -2]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 3, 0, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [60, 0], [0, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 4, 0, 0)
actual = t.transform(points)
expected = np.array([[0, 0], [0, 80], [0, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 5, 0)
actual = t.transform(points)
expected = np.array([[5, 0], [5, 0], [5, 0]])
assert_almost_equal(actual, expected)
t = mtransforms.Affine2D.from_values(0, 0, 0, 0, 0, 6)
actual = t.transform(points)
expected = np.array([[0, 6], [0, 6], [0, 6]])
assert_almost_equal(actual, expected)
def test_clipping_of_log():
# issue 804
M, L, C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
points = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)]
codes = [M, L, L, L, C]
path = Path(points, codes)
# something like this happens in plotting logarithmic histograms
trans = mtransforms.BlendedGenericTransform(mtransforms.Affine2D(),
LogScale.Log10Transform('clip'))
tpath = trans.transform_path_non_affine(path)
result = tpath.iter_segments(trans.get_affine(),
clip=(0, 0, 100, 100),
simplify=False)
tpoints, tcodes = zip(*result)
assert_allclose(tcodes, [M, L, L, L, C])
class NonAffineForTest(mtransforms.Transform):
"""
A class which looks like a non affine transform, but does whatever
the given transform does (even if it is affine). This is very useful
for testing NonAffine behaviour with a simple Affine transform.
"""
is_affine = False
output_dims = 2
input_dims = 2
def __init__(self, real_trans, *args, **kwargs):
self.real_trans = real_trans
mtransforms.Transform.__init__(self, *args, **kwargs)
def transform_non_affine(self, values):
return self.real_trans.transform(values)
def transform_path_non_affine(self, path):
return self.real_trans.transform_path(path)
class BasicTransformTests(unittest.TestCase):
def setUp(self):
self.ta1 = mtransforms.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
self.ta2 = mtransforms.Affine2D(shorthand_name='ta2').translate(10, 0)
self.ta3 = mtransforms.Affine2D(shorthand_name='ta3').scale(1, 2)
self.tn1 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn1')
self.tn2 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn2')
self.tn3 = NonAffineForTest(mtransforms.Affine2D().translate(1, 2),
shorthand_name='tn3')
# creates a transform stack which looks like ((A, (N, A)), A)
self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
# creates a transform stack which looks like (((A, N), A), A)
self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
# creates a transform stack which is a subset of stack2
self.stack2_subset = self.tn1 + self.ta2 + self.ta3
# when in debug, the transform stacks can produce dot images:
# self.stack1.write_graphviz(file('stack1.dot', 'w'))
# self.stack2.write_graphviz(file('stack2.dot', 'w'))
# self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))
def test_transform_depth(self):
assert self.stack1.depth == 4
assert self.stack2.depth == 4
assert self.stack2_subset.depth == 3
def test_left_to_right_iteration(self):
stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
# stack3.write_graphviz(file('stack3.dot', 'w'))
target_transforms = [stack3,
(self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
(self.ta2 + self.tn2) + self.ta3,
self.tn2 + self.ta3,
self.ta3,
]
r = [rh for _, rh in stack3._iter_break_from_left_to_right()]
assert len(r) == len(target_transforms)
for target_stack, stack in zip(target_transforms, r):
assert target_stack == stack
def test_transform_shortcuts(self):
assert self.stack1 - self.stack2_subset == self.ta1
assert self.stack2 - self.stack2_subset == self.ta1
assert self.stack2_subset - self.stack2 == self.ta1.inverted()
assert (self.stack2_subset - self.stack2).depth == 1
with pytest.raises(ValueError):
self.stack1 - self.stack2
aff1 = self.ta1 + (self.ta2 + self.ta3)
aff2 = self.ta2 + self.ta3
assert aff1 - aff2 == self.ta1
assert aff1 - self.ta2 == aff1 + self.ta2.inverted()
assert self.stack1 - self.ta3 == self.ta1 + (self.tn1 + self.ta2)
assert self.stack2 - self.ta3 == self.ta1 + self.tn1 + self.ta2
assert ((self.ta2 + self.ta3) - self.ta3 + self.ta3 ==
self.ta2 + self.ta3)
def test_contains_branch(self):
r1 = (self.ta2 + self.ta1)
r2 = (self.ta2 + self.ta1)
assert r1 == r2
assert r1 != self.ta1
assert r1.contains_branch(r2)
assert r1.contains_branch(self.ta1)
assert not r1.contains_branch(self.ta2)
assert not r1.contains_branch((self.ta2 + self.ta2))
assert r1 == r2
assert self.stack1.contains_branch(self.ta3)
assert self.stack2.contains_branch(self.ta3)
assert self.stack1.contains_branch(self.stack2_subset)
assert self.stack2.contains_branch(self.stack2_subset)
assert not self.stack2_subset.contains_branch(self.stack1)
assert not self.stack2_subset.contains_branch(self.stack2)
assert self.stack1.contains_branch((self.ta2 + self.ta3))
assert self.stack2.contains_branch((self.ta2 + self.ta3))
assert not self.stack1.contains_branch((self.tn1 + self.ta2))
def test_affine_simplification(self):
        # tests that a transform stack only calls as much of the "non-affine"
        # part as is absolutely necessary, allowing the best possible
        # optimization with complex transformation stacks.
points = np.array([[0, 0], [10, 20], [np.nan, 1], [-1, 0]],
dtype=np.float64)
na_pts = self.stack1.transform_non_affine(points)
all_pts = self.stack1.transform(points)
na_expected = np.array([[1., 2.], [-19., 12.],
[np.nan, np.nan], [1., 1.]], dtype=np.float64)
all_expected = np.array([[11., 4.], [-9., 24.],
[np.nan, np.nan], [11., 2.]],
dtype=np.float64)
        # check we have the expected results from doing the non-affine part only
assert_array_almost_equal(na_pts, na_expected)
# check we have the expected results from a full transformation
assert_array_almost_equal(all_pts, all_expected)
# check we have the expected results from doing the transformation in
# two steps
assert_array_almost_equal(self.stack1.transform_affine(na_pts),
all_expected)
# check that getting the affine transformation first, then fully
# transforming using that yields the same result as before.
assert_array_almost_equal(self.stack1.get_affine().transform(na_pts),
all_expected)
# check that the affine part of stack1 & stack2 are equivalent
# (i.e. the optimization is working)
expected_result = (self.ta2 + self.ta3).get_matrix()
result = self.stack1.get_affine().get_matrix()
assert_array_equal(expected_result, result)
result = self.stack2.get_affine().get_matrix()
assert_array_equal(expected_result, result)
class TestTransformPlotInterface(unittest.TestCase):
def tearDown(self):
plt.close()
def test_line_extent_axes_coords(self):
# a simple line in axes coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transAxes)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, np.inf],
[-np.inf, -np.inf]]))
def test_line_extent_data_coords(self):
# a simple line in data coordinates
ax = plt.axes()
ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transData)
assert_array_equal(ax.dataLim.get_points(),
np.array([[0.1, 0.5], [1.2, 0.9]]))
def test_line_extent_compound_coords1(self):
# a simple line in data coordinates in the y component, and in axes
# coordinates in the x
ax = plt.axes()
trans = mtransforms.blended_transform_factory(ax.transAxes,
ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, -5.],
[-np.inf, 35.]]))
plt.close()
def test_line_extent_predata_transform_coords(self):
# a simple line in (offset + data) coordinates
ax = plt.axes()
trans = mtransforms.Affine2D().scale(10) + ax.transData
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[1., -50.], [12., 350.]]))
plt.close()
def test_line_extent_compound_coords2(self):
# a simple line in (offset + data) coordinates in the y component, and
# in axes coordinates in the x
ax = plt.axes()
trans = mtransforms.blended_transform_factory(ax.transAxes,
mtransforms.Affine2D().scale(10) + ax.transData)
ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
assert_array_equal(ax.dataLim.get_points(),
np.array([[np.inf, -50.], [-np.inf, 350.]]))
plt.close()
def test_line_extents_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
plt.plot(np.arange(10), transform=offset + ax.transData)
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 10
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_line_extents_non_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtransforms.Affine2D().translate(10, 10))
plt.plot(np.arange(10), transform=offset + na_offset + ax.transData)
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 20
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_pathc_extents_non_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
na_offset = NonAffineForTest(mtransforms.Affine2D().translate(10, 10))
pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth,
transform=offset + na_offset + ax.transData)
ax.add_patch(patch)
expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 20
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_pathc_extents_affine(self):
ax = plt.axes()
offset = mtransforms.Affine2D().translate(10, 10)
pth = Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
ax.add_patch(patch)
expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def test_line_extents_for_non_affine_transData(self):
ax = plt.axes(projection='polar')
# add 10 to the radius of the data
offset = mtransforms.Affine2D().translate(0, 10)
plt.plot(np.arange(10), transform=offset + ax.transData)
# the data lim of a polar plot is stored in coordinates
# before a transData transformation, hence the data limits
# are not what is being shown on the actual plot.
expected_data_lim = np.array([[0., 0.], [9., 9.]]) + [0, 10]
assert_array_almost_equal(ax.dataLim.get_points(), expected_data_lim)
def assert_bbox_eq(bbox1, bbox2):
assert_array_equal(bbox1.bounds, bbox2.bounds)
def test_bbox_intersection():
bbox_from_ext = mtransforms.Bbox.from_extents
inter = mtransforms.Bbox.intersection
r1 = bbox_from_ext(0, 0, 1, 1)
r2 = bbox_from_ext(0.5, 0.5, 1.5, 1.5)
r3 = bbox_from_ext(0.5, 0, 0.75, 0.75)
r4 = bbox_from_ext(0.5, 1.5, 1, 2.5)
r5 = bbox_from_ext(1, 1, 2, 2)
# self intersection -> no change
assert_bbox_eq(inter(r1, r1), r1)
# simple intersection
assert_bbox_eq(inter(r1, r2), bbox_from_ext(0.5, 0.5, 1, 1))
    # r1 fully contains r3, so the intersection is r3 itself
assert_bbox_eq(inter(r1, r3), r3)
# no intersection
assert inter(r1, r4) is None
# single point
assert_bbox_eq(inter(r1, r5), bbox_from_ext(1, 1, 1, 1))
def test_bbox_as_strings():
b = mtransforms.Bbox([[.5, 0], [.75, .75]])
assert_bbox_eq(b, eval(repr(b), {'Bbox': mtransforms.Bbox}))
asdict = eval(str(b), {'Bbox': dict})
for k, v in asdict.items():
assert getattr(b, k) == v
fmt = '.1f'
asdict = eval(format(b, fmt), {'Bbox': dict})
for k, v in asdict.items():
assert eval(format(getattr(b, k), fmt)) == v
def test_transform_single_point():
t = mtransforms.Affine2D()
r = t.transform_affine((1, 1))
assert r.shape == (2,)
def test_log_transform():
# Tests that the last line runs without exception (previously the
# transform would fail if one of the axes was logarithmic).
fig, ax = plt.subplots()
ax.set_yscale('log')
ax.transData.transform((1, 1))
def test_nan_overlap():
a = mtransforms.Bbox([[0, 0], [1, 1]])
b = mtransforms.Bbox([[0, 0], [1, np.nan]])
assert not a.overlaps(b)
def test_transform_angles():
t = mtransforms.Affine2D() # Identity transform
angles = np.array([20, 45, 60])
points = np.array([[0, 0], [1, 1], [2, 2]])
# Identity transform does not change angles
new_angles = t.transform_angles(angles, points)
assert_array_almost_equal(angles, new_angles)
# points missing a 2nd dimension
with pytest.raises(ValueError):
t.transform_angles(angles, points[0:2, 0:1])
# Number of angles != Number of points
with pytest.raises(ValueError):
t.transform_angles(angles, points[0:2, :])
def test_nonsingular():
# test for zero-expansion type cases; other cases may be added later
zero_expansion = np.array([-0.001, 0.001])
cases = [(0, np.nan), (0, 0), (0, 7.9e-317)]
for args in cases:
out = np.array(mtransforms.nonsingular(*args))
assert_array_equal(out, zero_expansion)
def test_invalid_arguments():
t = mtransforms.Affine2D()
# There are two different exceptions, since the wrong number of
# dimensions is caught when constructing an array_view, and that
# raises a ValueError, and a wrong shape with a possible number
# of dimensions is caught by our CALL_CPP macro, which always
# raises the less precise RuntimeError.
with pytest.raises(ValueError):
t.transform(1)
with pytest.raises(ValueError):
t.transform([[[1]]])
with pytest.raises(RuntimeError):
t.transform([])
with pytest.raises(RuntimeError):
t.transform([1])
with pytest.raises(RuntimeError):
t.transform([[1]])
with pytest.raises(RuntimeError):
t.transform([[1, 2, 3]])
def test_transformed_path():
points = [(0, 0), (1, 0), (1, 1), (0, 1)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY]
path = Path(points, codes)
trans = mtransforms.Affine2D()
trans_path = mtransforms.TransformedPath(path, trans)
assert_allclose(trans_path.get_fully_transformed_path().vertices, points)
# Changing the transform should change the result.
r2 = 1 / np.sqrt(2)
trans.rotate(np.pi / 4)
assert_allclose(trans_path.get_fully_transformed_path().vertices,
[(0, 0), (r2, r2), (0, 2 * r2), (-r2, r2)],
atol=1e-15)
# Changing the path does not change the result (it's cached).
path.points = [(0, 0)] * 4
assert_allclose(trans_path.get_fully_transformed_path().vertices,
[(0, 0), (r2, r2), (0, 2 * r2), (-r2, r2)],
atol=1e-15)
def test_transformed_patch_path():
trans = mtransforms.Affine2D()
patch = mpatches.Wedge((0, 0), 1, 45, 135, transform=trans)
tpatch = mtransforms.TransformedPatchPath(patch)
points = tpatch.get_fully_transformed_path().vertices
# Changing the transform should change the result.
trans.scale(2)
assert_allclose(tpatch.get_fully_transformed_path().vertices, points * 2)
# Changing the path should change the result (and cancel out the scaling
# from the transform).
patch.set_radius(0.5)
assert_allclose(tpatch.get_fully_transformed_path().vertices, points)
@pytest.mark.parametrize('locked_element', ['x0', 'y0', 'x1', 'y1'])
def test_lockable_bbox(locked_element):
other_elements = ['x0', 'y0', 'x1', 'y1']
other_elements.remove(locked_element)
orig = mtransforms.Bbox.unit()
locked = mtransforms.LockableBbox(orig, **{locked_element: 2})
# LockableBbox should keep its locked element as specified in __init__.
assert getattr(locked, locked_element) == 2
assert getattr(locked, 'locked_' + locked_element) == 2
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
# Changing underlying Bbox should update everything but locked element.
orig.set_points(orig.get_points() + 10)
assert getattr(locked, locked_element) == 2
assert getattr(locked, 'locked_' + locked_element) == 2
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
# Unlocking element should revert values back to the underlying Bbox.
setattr(locked, 'locked_' + locked_element, None)
assert getattr(locked, 'locked_' + locked_element) is None
assert np.all(orig.get_points() == locked.get_points())
# Relocking an element should change its value, but not others.
setattr(locked, 'locked_' + locked_element, 3)
assert getattr(locked, locked_element) == 3
assert getattr(locked, 'locked_' + locked_element) == 3
for elem in other_elements:
assert getattr(locked, elem) == getattr(orig, elem)
| [
"[email protected]"
] | |
c28e730ec640401a04d6082566e005633a87106c | ac01b09550ccedb68a05756a7455c60766b60857 | /src/mcqexam/urls.py | eb22ad6e4bc7093c18a2bedd2176952a1c0afce6 | [] | no_license | cseai/OpenEduQA | ea669cffa7d3f2f3ded2221c8cb85876ac1438df | 8a90a843720a175c5da0af4fc51cc8e6542deb33 | refs/heads/master | 2023-06-10T23:17:40.502619 | 2021-07-05T12:43:44 | 2021-07-05T12:43:44 | 281,315,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.urls import path
from .views import (
mcqexam_list,
mcqexam_create,
mcqexam_detail,
mcqexam_update,
mcqexam_delete,
)
app_name = 'mcqexam'
urlpatterns = [
path('', mcqexam_list, name='list'),
path('create/', mcqexam_create, name='create'),
path('<id>/', mcqexam_detail, name='detail'),
path('<id>/edit/', mcqexam_update, name='update'),
path('<id>/delete/', mcqexam_delete),
#
]
| [
"[email protected]"
] | |
7ce7641da5e791639d86d1f99141db013420684f | e3f5f41b242650b4bef68aa191a5779aedd3e02e | /Chapter08/webapp/blog/__init__.py | 2466cfc37bebc0f03e32230a209719fa73d38481 | [
"MIT"
] | permissive | PacktPublishing/Mastering-Flask-Web-Development-Second-Edition | d4675c047bb51b0154958205f53c962ab4d32e4c | c3174127b40f8af1e2ab5e614994ffed7acbc11b | refs/heads/master | 2023-05-11T00:23:30.213655 | 2023-01-18T09:14:14 | 2023-01-18T09:14:14 | 154,667,293 | 168 | 131 | MIT | 2023-05-01T20:52:13 | 2018-10-25T12:30:58 | Python | UTF-8 | Python | false | false | 124 | py |
def create_module(app, **kwargs):
from .controllers import blog_blueprint
app.register_blueprint(blog_blueprint)
| [
"[email protected]"
] | |
850e09600cdd38e57abc33b9302c7c5f830a5f8c | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_23737.py | f4d160c86ea326b6ca9a18cbdf1e3aa86cd0a403 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | # '(match_start1)...(match_start2)...(match_end)' find the shortest string match
import re

re.findall(r'(?=(\D\d{2,5}?.+?CA.?[ -._]*(?:\d{5})?))', '6785 56767at435 hjfioej st. CA. 94827ifojwnf 93842')
| [
"[email protected]"
] | |
eadece8617cdc25ac73efb28c19f2ad0379e8584 | bd3528cc321dc37f8c47ac63e57561fd6432c7cc | /transformer/tensor2tensor/models/xception_test.py | b57a757b9cbc4ec041840652bf6955e95e961d8c | [
"MIT",
"Apache-2.0"
] | permissive | oskopek/cil | 92bbf52f130a1ed89bbe93b74eef74027bb2b37e | 4c1fd464b5af52aff7a0509f56e21a2671fb8ce8 | refs/heads/master | 2023-04-15T10:23:57.056162 | 2021-01-31T14:51:51 | 2021-01-31T14:51:51 | 139,629,560 | 2 | 5 | MIT | 2023-03-24T22:34:39 | 2018-07-03T19:35:24 | Python | UTF-8 | Python | false | false | 2,136 | py | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Xception tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.data_generators import problem_hparams
from tensor2tensor.models import xception
from tensor2tensor.utils import registry
import tensorflow as tf
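# Shape-only smoke test: random integer image/label batches are run through the
# tiny Xception configuration and only the shape of the returned logits is
# checked, not the values.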
class XceptionTest(tf.test.TestCase):
def _test_xception(self, img_size):
vocab_size = 9
batch_size = 3
x = np.random.random_integers(
0, high=255, size=(batch_size, img_size, img_size, 3))
y = np.random.random_integers(
1, high=vocab_size - 1, size=(batch_size, 1, 1, 1))
hparams = xception.xception_tiny()
p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size)
p_hparams.input_modality["inputs"] = (registry.Modalities.IMAGE, None)
p_hparams.target_modality = (registry.Modalities.CLASS_LABEL, vocab_size)
with self.test_session() as session:
features = {
"inputs": tf.constant(x, dtype=tf.int32),
"targets": tf.constant(y, dtype=tf.int32),
}
model = xception.Xception(hparams, tf.estimator.ModeKeys.TRAIN, p_hparams)
logits, _ = model(features)
session.run(tf.global_variables_initializer())
res = session.run(logits)
self.assertEqual(res.shape, (batch_size, 1, 1, 1, vocab_size))
def testXceptionSmallImage(self):
self._test_xception(img_size=9)
def testXceptionLargeImage(self):
self._test_xception(img_size=256)
if __name__ == "__main__":
tf.test.main()
| [
"[email protected]"
] | |
a8ec94a76dd7aab2438f330dc057fb4b5da94e5f | f35bb12066639698a94847cba4b4628aede1da70 | /contests/python/abc212/e.py | 0ce3c5a5eda3746793766fd7066c0063fb7036f3 | [] | no_license | fly1tkg/atcoder-python-note | 7e74382a8867b07bb7a926988ac854a3b84e020b | 6051b771c0a0399ce8caf1e24256a9909101b0e7 | refs/heads/main | 2023-08-26T23:52:14.766576 | 2021-10-30T11:58:38 | 2021-10-30T11:58:38 | 363,686,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | N, M, K = map(int, input().split())
| [
"[email protected]"
] | |
dbf4440fe65197fcde1ca3b5fa97b257966e36f2 | a10377a6d0c7576b9e47209f49dea398181f73fe | /test/node/milticasttest.py | c0ccd0fe7d2886bebf8866e8cd1ec423bc99e87f | [
"BSD-3-Clause"
] | permissive | zymITsky/ants | 14077dab214aff543bbc75a059240dd55f656916 | 52918d18c94a9a69c3b2495286e3384ba57ad6f8 | refs/heads/master | 2020-06-01T11:04:53.520288 | 2015-02-03T08:09:59 | 2015-02-03T08:09:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | '''
test multicast
'''
__author__ = 'wcong'
import unittest
from ants.cluster import cluster
from ants.node import multicast
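# Smoke test for multicast node discovery: cast() and find_node() are exercised
# with print_result as the callback, which simply prints each reported
# (host, port) address.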
class MulticastTest(unittest.TestCase):
    def test(self):
        # Reusing the name `cluster` here would make it a local variable that
        # shadows the imported module and raise UnboundLocalError, so a
        # distinct local name is used instead.
        cluster_info = cluster.ClusterInfo(name='test_cluster')
        multicast_node = multicast.MulticastManager(cluster_info, self.print_result)
        multicast_node.cast()
        multicast_node.find_node()
def print_result(self, addr):
        print('addr:' + addr[0] + ':' + str(addr[1]))
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
c79b2076bfa7ce63eafa92d9ff0a8b9ecb045895 | cc0c7b6af25ce5a1a5fe310628d8a43475f0c41f | /det3d/datasets/__init__.py | 51e3d8f492a19c33b85427acab6d0244fa166b19 | [
"Apache-2.0"
] | permissive | chisyliu/Det3D | 183bb6c8d23277cecf9903184553b4c5cee88612 | e437ca6eb2e9becf478ae0e5f6400f7c21bb7495 | refs/heads/master | 2023-03-03T09:00:29.790693 | 2021-01-21T10:44:34 | 2021-01-21T10:44:34 | 267,220,075 | 1 | 0 | Apache-2.0 | 2021-01-21T10:44:35 | 2020-05-27T04:25:22 | null | UTF-8 | Python | false | false | 748 | py | from .builder import build_dataset
# from .cityscapes import CityscapesDataset
from .kitti import KittiDataset
from .lyft import LyftDataset
from .nuscenes import NuScenesDataset
# from .custom import CustomDataset
from .dataset_wrappers import ConcatDataset, RepeatDataset
# from .extra_aug import ExtraAugmentation
from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
from .registry import DATASETS
# from .voc import VOCDataset
# from .wider_face import WIDERFaceDataset
# from .xml_style import XMLDataset
#
__all__ = [
"CustomDataset",
"KittiDataset",
"GroupSampler",
"DistributedGroupSampler",
"build_dataloader",
"ConcatDataset",
"RepeatDataset",
"DATASETS",
"build_dataset",
]
| [
"[email protected]"
] | |
bb5cf2bd9afbc637c54860db1dcfb80b4b6cdfcc | 6eb56f2e3f14f2373be07fe95b1c6fedf1e2d49f | /month04/Project/day03-demo/ddblog/ddblog/settings.py | 893e9f69c63fb46dbbd843cdeb0f5c78511d456d | [
"Apache-2.0"
] | permissive | chaofan-zheng/python_leanring_code | fe22b0370cadebf7456477269aff4a35cef0eb41 | 0af44ff39b9ded2c1d2cc96c6d356d21170ac04d | refs/heads/main | 2023-02-28T07:56:46.457552 | 2021-02-10T15:08:33 | 2021-02-10T15:08:33 | 323,584,115 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,843 | py | """
Django settings for ddblog project.
Generated by 'django-admin startproject' using Django 2.2.12.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mn16=mhqp=3d=ub@vo2l1ckxwnlns3fh%_auj4%vf9p2b-#c^v'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'corsheaders',
'user',
'btoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ddblog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ddblog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'ddblog',
'USER': 'root',
'PASSWORD': '417355570',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
# Static files configuration
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,'static'),)
# User-uploaded (media) files configuration
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
CORS_ALLOW_HEADERS = (
'accept-encoding',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
)
# Secret key used to sign generated JWT tokens
JWT_TOKEN_KEY = '123456' | [
"[email protected]"
] | |
a07eec758b37f1fe500b44584d7fec680ba7cad5 | f33b30743110532ddae286ba1b34993e61669ab7 | /Minimum Time Difference.py | d462b0bb0897d150390facf9b6166c46b86a8bdc | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | class Solution(object):
def findMinDifference(self, timePoints):
"""
        Given a list of times in 24-hour "hour:minute" format, find the
        minimum difference in minutes between any two times in the list.
        ---
        Input: ["23:59","00:00"]
        Output: 1
:type timePoints: List[str]
:rtype: int
"""
        n = len(timePoints)
        def helper(x):
            h, m = x.split(":")
            return int(h) * 60 + int(m)
        timePoints = sorted(map(helper, timePoints))
min_time = 1500
for i in range(0,n-1):
temp = timePoints[i+1] - timePoints[i]
if temp < min_time:
min_time = temp
if abs(timePoints[0]+1440-timePoints[-1]) < min_time:
min_time = abs(timePoints[0]+1440-timePoints[-1])
return min_time
a = Solution()
print(a.findMinDifference(["23:59","00:00"]))
| [
"[email protected]"
] | |
6ebe8ee7411a32d10a802ee01d53684cd0fe6e3a | 950a87f8e64636d2e1f6dd51f04ed51a41085429 | /tests/test_models_zoo.py | 08a62611dd76f8524770c811f13c96865b23aaf1 | [
"MIT"
] | permissive | Pandinosaurus/pytorch-toolbelt | 325e503a02495a9d7e203bd58e7ad444648688bf | 94d16a339cf9cb4b95bcaa539a462d81f4b82725 | refs/heads/develop | 2023-08-31T03:41:19.373645 | 2021-08-11T14:11:12 | 2021-08-11T14:11:12 | 207,519,450 | 0 | 0 | MIT | 2021-08-12T03:04:37 | 2019-09-10T09:35:17 | Python | UTF-8 | Python | false | false | 758 | py | import pytest
import torch
from pytorch_toolbelt.zoo import resnet34_unet32_s2, resnet34_unet64_s4, hrnet34_unet64
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="CUDA is not available")
@skip_if_no_cuda
@torch.no_grad()
@pytest.mark.parametrize("model_cls", [resnet34_unet32_s2, resnet34_unet64_s4, hrnet34_unet64])
def test_segmentation_models(model_cls):
num_classes = 7
net = model_cls(num_classes=num_classes).cuda().eval()
input = torch.randn((4, 3, 512, 512)).cuda()
with torch.cuda.amp.autocast(True):
output = net(input)
assert output.size(0) == input.size(0)
assert output.size(1) == num_classes
assert output.size(2) == input.size(2)
assert output.size(3) == input.size(3)
| [
"[email protected]"
] | |
9ec6d198bb369bd0b2bed230d840f27a3b4cfc2f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03673/s125858327.py | 4c76558da4394c983252f90470a140ba3b1ccb40 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from collections import deque
n = int(input())
A = deque(map(int, input().split()))
b = deque([])
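# Alternating appends to the two ends of a deque is the usual O(n) way to
# simulate "append a value, then reverse the whole sequence" for every element:
# the parity of n decides which physical end of the deque corresponds to the
# front of the final answer.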
if n%2==0:
for i in range(n):
if i%2 ==0:
b.append(A[i])
else:
b.appendleft(A[i])
else:
for i in range(n):
if i%2 ==0:
b.appendleft(A[i])
else:
b.append(A[i])
print(*b) | [
"[email protected]"
] | |
2055b681464a0784c8493cf43a32d3bad5df5931 | 21fc3622bb7a3a89a8ed9dec932919936fb1ce36 | /buildout-cache/eggs/plone.app.contenttypes-1.2.11-py2.7.egg/plone/app/contenttypes/tests/test_migration_topic.py | c5bbcf0e7ba23c11c237e4c573da42310890c870 | [] | no_license | erlantostes/plone | 4bc1ccba9e0ab77ce5370489f6b47b806c889c29 | 3a5fb7574cee269a99b148eef695256805ce1a45 | refs/heads/master | 2020-04-01T18:04:32.927641 | 2018-10-17T11:22:59 | 2018-10-17T11:22:59 | 153,469,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,750 | py | # -*- coding: utf-8 -*-
from DateTime import DateTime
from Products.CMFCore.utils import getToolByName
from plone.app.contenttypes.behaviors.collection import ICollection
from plone.app.contenttypes.migration.topics import migrate_topics
from plone.app.contenttypes.testing import \
PLONE_APP_CONTENTTYPES_MIGRATION_TESTING
from plone.app.querystring.queryparser import parseFormquery
from plone.app.testing import applyProfile
from plone.app.testing import login
from plone.dexterity.content import Container
from plone.dexterity.interfaces import IDexterityFTI
from zope.component import queryUtility
from zope.interface import implementer
import unittest
@implementer(ICollection)
class FolderishCollection(Container):
"""Test subclass for folderish ``Collections``.
"""
class MigrateTopicsIntegrationTest(unittest.TestCase):
layer = PLONE_APP_CONTENTTYPES_MIGRATION_TESTING
def setUp(self):
self.portal = self.layer['portal']
self.request = self.layer['request']
self.request['ACTUAL_URL'] = self.portal.absolute_url()
self.request['URL'] = self.portal.absolute_url()
self.catalog = getToolByName(self.portal, "portal_catalog")
self.portal.acl_users.userFolderAddUser('admin',
'secret',
['Manager'],
[])
login(self.portal, 'admin')
self.portal.portal_workflow.setDefaultChain(
"simple_publication_workflow")
self.portal.invokeFactory("Topic", "topic", title="Topic")
self.portal.invokeFactory("Folder", "folder", title="Folder")
def run_migration(self):
migrate_topics(self.portal)
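    # ATTopic stores each criterion as a sub-object whose id is
    # "<index>_<criterion type>"; this helper creates it, optionally sets its
    # value, and returns it so tests can tweak operators or ranges afterwards.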
def add_criterion(self, index, criterion, value=None):
name = '%s_%s' % (index, criterion)
self.portal.topic.addCriterion(index, criterion)
crit = self.portal.topic.getCriterion(name)
if value is not None:
crit.setValue(value)
return crit
def test_migrate_simple_topic(self):
self.assertEqual(self.portal.topic.portal_type, 'Topic')
self.assertEqual(self.portal.topic.getLayout(), 'atct_topic_view')
self.assertEqual(self.portal.topic.getLimitNumber(), False)
self.assertEqual(self.portal.topic.getItemCount(), 0)
self.assertEqual(self.portal.topic.getCustomViewFields(), ('Title',))
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
new = ICollection(self.portal.topic)
self.assertEqual(self.portal.topic.portal_type, 'Collection')
self.assertEqual(self.portal.topic.getLayout(), 'listing_view')
self.assertEqual(new.sort_on, None)
self.assertEqual(new.sort_reversed, None)
self.assertEqual(new.limit, 1000)
self.assertEqual(new.customViewFields, ('Title',))
def test_migrate_topic_fields(self):
self.portal.topic.setText('<p>Hello</p>')
self.portal.topic.setLimitNumber(True)
self.portal.topic.setItemCount(42)
self.portal.topic.setCustomViewFields(('Title', 'Type'))
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
new = ICollection(self.portal.topic)
self.assertEqual(self.portal.topic.portal_type, 'Collection')
self.assertEqual(new.limit, 42)
self.assertEqual(new.customViewFields, ('Title', 'Type'))
def test_migrate_layout(self):
self.portal.topic.setLayout('folder_summary_view')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.getLayout(), 'summary_view')
def test_migrate_customView(self):
self.portal.topic.setCustomView(True)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.getLayout(), 'tabular_view')
def test_migrate_nested_topic(self):
self.portal.portal_types['Topic'].filter_content_types = False
self.portal.topic.invokeFactory("Topic", "subtopic", title="Sub Topic")
applyProfile(self.portal, 'plone.app.contenttypes:default')
fti = queryUtility(IDexterityFTI, name='Collection')
# switch our a custom folderish base-class for collections
# we need to use _updateProperty because this also refreshes
# the content_meta_type attribute when klass has changed
fti._updateProperty(
'klass',
'plone.app.contenttypes.tests.test_migration_topic.'
'FolderishCollection')
fti._updateProperty('allowed_content_types', ['Document', 'Folder'])
fti._updateProperty('filter_content_types', False)
self.run_migration()
self.assertEqual(self.portal.topic.portal_type, 'Collection')
self.assertEqual(self.portal.topic.subtopic.portal_type, 'Collection')
def test_ATSimpleStringCriterion(self):
self.add_criterion('SearchableText', 'ATSimpleStringCriterion', 'bar')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'SearchableText',
'o': 'plone.app.querystring.operation.string.contains',
'v': 'bar'}]
)
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATSimpleStringCriterionToSelection(self):
# Some string criterions really should be selection criterions.
self.add_criterion(
'review_state',
'ATSimpleStringCriterion', 'published'
)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'review_state',
'o': 'plone.app.querystring.operation.selection.any',
'v': 'published'}]
)
def test_ATDateCriteriaPast(self):
# More than 5 days in the past:
crit = self.add_criterion('created', 'ATFriendlyDateCriteria', 5)
crit.setOperation('more')
crit.setDateRange('-')
# Less than 5 days in the past:
crit = self.add_criterion('effective', 'ATFriendlyDateCriteria', 5)
crit.setOperation('less')
crit.setDateRange('-')
# The next two are logically a bit weird.
# More than 0 days in the past is historically interpreted as: after
# today.
crit = self.add_criterion('expires', 'ATFriendlyDateCriteria', 0)
crit.setOperation('more')
crit.setDateRange('-')
# Less than 0 days in the past is historically interpreted as: before
# today.
crit = self.add_criterion('modified', 'ATFriendlyDateCriteria', 0)
crit.setOperation('less')
crit.setDateRange('-')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 4)
self.assertEqual(query[0]['i'], 'created')
self.assertEqual(
query[0]['o'],
'plone.app.querystring.operation.date.largerThanRelativeDate'
)
self.assertEqual(query[0]['v'], -5)
self.assertEqual(query[1]['i'], 'effective')
self.assertEqual(
query[1]['o'],
'plone.app.querystring.operation.date.lessThanRelativeDate'
)
self.assertEqual(query[1]['v'], -5)
self.assertEqual(query[2]['i'], 'expires')
self.assertEqual(
query[2]['o'],
'plone.app.querystring.operation.date.afterToday'
)
self.assertTrue('v' not in query[2].keys())
self.assertEqual(query[3]['i'], 'modified')
self.assertEqual(
query[3]['o'],
'plone.app.querystring.operation.date.beforeToday'
)
self.assertTrue('v' not in query[3].keys())
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATDateCriteriaFuture(self):
# More than 5 days in the future:
crit = self.add_criterion('created', 'ATFriendlyDateCriteria', 5)
crit.setOperation('more')
crit.setDateRange('+')
# Less than 5 days in the future:
crit = self.add_criterion('effective', 'ATFriendlyDateCriteria', 5)
crit.setOperation('less')
crit.setDateRange('+')
# More than 0 days in the future: after today.
crit = self.add_criterion('expires', 'ATFriendlyDateCriteria', 0)
crit.setOperation('more')
crit.setDateRange('+')
# Less than 0 days in the future: before today.
crit = self.add_criterion('modified', 'ATFriendlyDateCriteria', 0)
crit.setOperation('less')
crit.setDateRange('+')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 4)
self.assertEqual(query[0]['i'], 'created')
self.assertEqual(
query[0]['o'],
'plone.app.querystring.operation.date.largerThanRelativeDate'
)
self.assertEqual(query[0]['v'], 5)
self.assertEqual(query[1]['i'], 'effective')
self.assertEqual(
query[1]['o'],
'plone.app.querystring.operation.date.lessThanRelativeDate'
)
self.assertTrue(query[1]['v'], 5)
self.assertEqual(query[2]['i'], 'expires')
self.assertEqual(
query[2]['o'],
'plone.app.querystring.operation.date.afterToday'
)
self.assertTrue('v' not in query[2].keys())
self.assertEqual(query[3]['i'], 'modified')
self.assertEqual(
query[3]['o'],
'plone.app.querystring.operation.date.beforeToday'
)
self.assertTrue('v' not in query[3].keys())
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATDateCriteriaExactDay(self):
# 5 days ago:
crit = self.add_criterion('created', 'ATFriendlyDateCriteria', 5)
crit.setOperation('within_day')
crit.setDateRange('-')
# 5 days from now:
crit = self.add_criterion('effective', 'ATFriendlyDateCriteria', 5)
crit.setOperation('within_day')
crit.setDateRange('+')
# past or future does not matter if the day is today.
# today minus
crit = self.add_criterion('expires', 'ATFriendlyDateCriteria', 0)
crit.setOperation('within_day')
crit.setDateRange('-')
# today plus
crit = self.add_criterion('modified', 'ATFriendlyDateCriteria', 0)
crit.setOperation('within_day')
crit.setDateRange('+')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
time2 = DateTime()
self.assertEqual(len(query), 4)
self.assertEqual(query[0]['i'], 'created')
self.assertEqual(
query[0]['o'],
'plone.app.querystring.operation.date.between'
)
self.assertEqual(
query[0]['v'],
((time2 - 5).earliestTime(), (time2 - 5).latestTime())
)
self.assertEqual(query[1]['i'], 'effective')
self.assertEqual(
query[1]['o'],
'plone.app.querystring.operation.date.between'
)
self.assertEqual(
query[1]['v'],
((time2 + 5).earliestTime(), (time2 + 5).latestTime())
)
self.assertEqual(query[2]['i'], 'expires')
self.assertEqual(
query[2]['o'],
'plone.app.querystring.operation.date.today'
)
self.assertFalse('v' in query[2].keys())
self.assertEqual(query[3]['i'], 'modified')
self.assertEqual(
query[3]['o'],
'plone.app.querystring.operation.date.today'
)
self.assertFalse('v' in query[3].keys())
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATCurrentAuthorCriterion(self):
self.add_criterion('Creator', 'ATCurrentAuthorCriterion')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'Creator',
'o': 'plone.app.querystring.operation.string.currentUser',
'v': 'admin'}]
)
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATListCriterion(self):
# The new-style queries do not currently offer the possibility
# to choose if the given values should be joined with 'or' or
# 'and'. Default is 'or'.
crit = self.add_criterion('Subject', 'ATListCriterion', ('foo', 'bar'))
crit.setOperator('or')
# Note: this could have been an ATPortalTypeCriterion too:
crit = self.add_criterion(
'portal_type',
'ATListCriterion', ('Document', 'Folder')
)
crit.setOperator('and')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 2)
self.assertEqual(query[0],
{'i': 'Subject',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('foo', 'bar')})
self.assertEqual(query[1],
{'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('Document', 'Folder')})
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPathCriterion(self):
crit = self.add_criterion(
'path',
'ATPathCriterion', self.portal.folder.UID())
crit.setRecurse(True)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.query,
[{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder.UID()}])
# check is the query is correct
self.assertEqual(
parseFormquery(self.portal, self.portal.topic.query),
{'path': {'query': ['/plone/folder']}})
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPathCriterionNonRecursive(self):
# Topics supported non recursive search, so search at a
# specific depth of 1. At first, new Collections did not
# support it. But since plone.app.querystring 1.1.0 it works.
crit = self.add_criterion(
'path',
'ATPathCriterion', self.portal.folder.UID()
)
crit.setRecurse(False)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 1)
self.assertEqual(query,
[{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder.UID() + '::1'}])
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPathCriterionMultiRecursive(self):
# Collections support multiple paths since
# plone.app.querystring 1.2.0.
login(self.portal, 'admin')
self.portal.invokeFactory("Folder", "folder2", title="Folder 2")
crit = self.add_criterion(
'path',
'ATPathCriterion',
[self.portal.folder.UID(), self.portal.folder2.UID()]
)
crit.setRecurse(True)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 2)
self.assertEqual(query[0],
{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder.UID()})
self.assertEqual(query[1],
{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder2.UID()})
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPathCriterionMultiNonRecursive(self):
# Collections support multiple paths since
# plone.app.querystring 1.2.0.
login(self.portal, 'admin')
self.portal.invokeFactory("Folder", "folder2", title="Folder 2")
crit = self.add_criterion(
'path',
'ATPathCriterion',
[self.portal.folder.UID(), self.portal.folder2.UID()]
)
crit.setRecurse(False)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 2)
self.assertEqual(query[0],
{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder.UID() + '::1'})
self.assertEqual(query[1],
{'i': 'path',
'o': 'plone.app.querystring.operation.string.path',
'v': self.portal.folder2.UID() + '::1'})
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATBooleanCriterion(self):
# Note that in standard Plone the boolean criterion is only
# defined for is_folderish and is_default_page.
crit = self.add_criterion('is_folderish', 'ATBooleanCriterion')
crit.setBool(True)
crit = self.add_criterion('is_default_page', 'ATBooleanCriterion')
crit.setBool(False)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 2)
self.assertEqual(
query[0],
{'i': 'is_folderish',
'o': 'plone.app.querystring.operation.boolean.isTrue'}
)
self.assertEqual(
query[1],
{'i': 'is_default_page',
'o': 'plone.app.querystring.operation.boolean.isFalse'}
)
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATDateRangeCriteria(self):
time1 = DateTime()
# Days in the past:
crit = self.add_criterion('created', 'ATDateRangeCriterion')
crit.setStart(time1 - 5)
crit.setEnd(time1 - 3)
# Past and future:
crit = self.add_criterion('effective', 'ATDateRangeCriterion')
crit.setStart(time1 - 2)
crit.setEnd(time1 + 2)
# Days in the future:
crit = self.add_criterion('expires', 'ATDateRangeCriterion')
crit.setStart(time1 + 3)
crit.setEnd(time1 + 5)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 3)
self.assertEqual(query[0]['i'], 'created')
self.assertEqual(
query[0]['o'],
'plone.app.querystring.operation.date.between'
)
self.assertEqual(query[0]['v'], (time1 - 5, time1 - 3))
self.assertEqual(query[1]['i'], 'effective')
self.assertEqual(
query[1]['o'],
'plone.app.querystring.operation.date.between'
)
self.assertEqual(query[1]['v'], (time1 - 2, time1 + 2))
self.assertEqual(query[2]['i'], 'expires')
self.assertEqual(
query[2]['o'],
'plone.app.querystring.operation.date.between'
)
self.assertEqual(query[2]['v'], (time1 + 3, time1 + 5))
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPortalTypeCriterion(self):
self.add_criterion(
'portal_type',
'ATPortalTypeCriterion', ('Document', 'Folder')
)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(
query,
[{'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('Document', 'Folder')}]
)
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATPortalTypeCriterionOfTopic(self):
# We migrate Topics to Collections, so we should update
# criterions that search for Topics.
self.add_criterion(
'portal_type',
'ATPortalTypeCriterion', ('Topic',)
)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(
query,
[{'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('Collection',)}])
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATSelectionCriterion(self):
# The new-style queries do not currently offer the possibility
# to choose if the given values should be joined with 'or' or
# 'and'. Default is 'or'.
crit = self.add_criterion(
'Subject',
'ATSelectionCriterion',
('foo', 'bar')
)
crit.setOperator('or')
# Note: this could have been an ATPortalTypeCriterion too:
# Note that we check that Topic is turned into Collection too.
crit = self.add_criterion(
'portal_type',
'ATSelectionCriterion',
('Document', 'Topic')
)
crit.setOperator('and')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(len(query), 2)
self.assertEqual(query[0],
{'i': 'Subject',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('foo', 'bar')})
self.assertEqual(query[1],
{'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.any',
'v': ('Document', 'Collection')})
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATSelectionCriterionForTypeTitle(self):
# 'portal_type' is the object id of the FTI in portal_types.
# 'Type' is the title of that object.
# For example:
# - portal_type 'Document' has Type 'Page'.
# - portal_type 'Topic' has Type 'Collection (old)'.
# Type is not enabled as criterion index by default, so we
# want to migrate to a portal_type criterion instead.
self.add_criterion('Type', 'ATSelectionCriterion', ('Page', 'Folder'))
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
query = self.portal.topic.query
self.assertEqual(
query,
[{'i': 'portal_type',
'o': 'plone.app.querystring.operation.selection.any',
'v': ['Document', 'Folder']}])
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATReferenceCriterion(self):
# Note: the new criterion is disabled by default. Also, it
# needs the _referenceIs function in the plone.app.querystring
# queryparser and that function is not defined.
self.add_criterion(
'getRawRelatedItems',
'ATReferenceCriterion',
self.portal.folder.UID()
)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
# TODO re-enable this check when the queryparser works.
# self.assertEqual(
# self.portal.topic.query,
# [{'i': 'getRawRelatedItems',
# 'o': 'plone.app.querystring.operation.reference.is',
# 'v': (portal.folder.UID(),)}]
# )
# Check that the resulting query does not give an error.
# self.portal.topic.results
def test_ATRelativePathCriterion(self):
crit = self.add_criterion(
'path',
'ATRelativePathCriterion'
)
crit.setRelativePath('../folder')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'path',
'o': 'plone.app.querystring.operation.string.relativePath',
'v': '../folder'}]
)
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATRelativePathCriterionNonRecursive(self):
# Topics supported non recursive search, so search at a specific
# depth. New Collections do not support it.
crit = self.add_criterion('path', 'ATRelativePathCriterion')
crit.setRelativePath('../folder')
crit.setRecurse(True)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'path',
'o': 'plone.app.querystring.operation.string.relativePath',
'v': '../folder'}])
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATSimpleIntCriterion(self):
self.add_criterion('getObjPositionInParent', 'ATSimpleIntCriterion', 7)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.query,
[{'i': 'getObjPositionInParent',
'o': 'plone.app.querystring.operation.int.is',
'v': 7}])
# Check that the resulting query does not give an error.
self.portal.topic.results
def test_ATSimpleIntCriterionMinimum(self):
crit = self.add_criterion(
'getObjPositionInParent',
'ATSimpleIntCriterion', 6
)
crit.setDirection('min')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'getObjPositionInParent',
'o': 'plone.app.querystring.operation.int.largerThan',
'v': 6}]
)
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATSimpleIntCriterionMaximum(self):
crit = self.add_criterion(
'getObjPositionInParent',
'ATSimpleIntCriterion',
5
)
crit.setDirection('max')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(
self.portal.topic.query,
[{'i': 'getObjPositionInParent',
'o': 'plone.app.querystring.operation.int.lessThan',
'v': 5}]
)
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATSimpleIntCriterionBetween(self):
# This is not supported.
crit = self.add_criterion(
'getObjPositionInParent',
'ATSimpleIntCriterion',
4
)
crit.setDirection('min:max')
crit.setValue2(8)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.query, [])
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATSortCriterion(self):
self.add_criterion('modified', 'ATSortCriterion')
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.sort_on, 'modified')
self.assertEqual(self.portal.topic.sort_reversed, False)
self.assertEqual(self.portal.topic.query, [])
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
def test_ATSortCriterionReversed(self):
crit = self.add_criterion('created', 'ATSortCriterion')
crit.setReversed(True)
applyProfile(self.portal, 'plone.app.contenttypes:default')
self.run_migration()
self.assertEqual(self.portal.topic.sort_on, 'created')
self.assertEqual(self.portal.topic.sort_reversed, True)
self.assertEqual(self.portal.topic.query, [])
# Check that the resulting query does not give an error.
self.portal.topic.getQuery()
| [
"[email protected]"
] | |
8cca21dc1dfb3c3e8dabd4d6e00022561017415a | c83acc6433aa8ef7703192e9033fe7cd92b2cccf | /traits/observation/exceptions.py | 5e57b713bc5ff8d4c1000491d5a4eb9aad8a3be8 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"CC-BY-3.0"
] | permissive | oscarpicas/traits | 857f5c06f3caf06003aed8b21b502b66ca8ba6cc | e72691a2f8aa34529af431d6b6b8c1a476ef4107 | refs/heads/master | 2022-03-17T10:30:08.330129 | 2022-02-18T21:01:50 | 2022-02-18T21:01:50 | 26,197,506 | 0 | 0 | null | 2015-01-10T04:01:48 | 2014-11-05T01:39:56 | null | UTF-8 | Python | false | false | 504 | py | # (C) Copyright 2005-2022 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
class NotifierNotFound(Exception):
""" Raised when a notifier cannot be found."""
pass
| [
"[email protected]"
] | |
689adeb931f6bef31959fcee01b791be15cabf44 | 05352c29e844705f02d65526343eea9b486f8bd7 | /src/python/pants/backend/awslambda/python/register.py | b7408cad1f7104ac4ba3e71537a413e10471f00b | [
"Apache-2.0"
] | permissive | DoN-SultaN/pants | af2557de1178faaf73eed0a5a32e8f6fd34d2169 | 5cb5379003a0674c51f9a53f582cf690eddfaf45 | refs/heads/master | 2022-10-15T04:18:54.759839 | 2020-06-13T10:04:21 | 2020-06-13T10:04:21 | 272,089,524 | 1 | 0 | Apache-2.0 | 2020-06-13T21:36:50 | 2020-06-13T21:36:49 | null | UTF-8 | Python | false | false | 1,026 | py | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Create AWS Lambdas from Python code.
See https://pants.readme.io/docs/awslambda-python.
"""
from pants.backend.awslambda.common import awslambda_common_rules
from pants.backend.awslambda.python import awslambda_python_rules
from pants.backend.awslambda.python.target_types import PythonAWSLambda
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.target import Target
def rules():
return [*awslambda_common_rules.rules(), *awslambda_python_rules.rules()]
def target_types():
return [PythonAWSLambda]
# Dummy v1 target to ensure that v1 tasks can still parse v2 BUILD files.
class LegacyPythonAWSLambda(Target):
def __init__(self, handler=None, runtime=None, sources=tuple(), **kwargs):
super().__init__(**kwargs)
def build_file_aliases():
return BuildFileAliases(targets={PythonAWSLambda.alias: LegacyPythonAWSLambda})
| [
"[email protected]"
] | |
19f030eeaf16a07224b934871ffad46de4011858 | 934235f70a390a3ba0d7b464cddd10872f31cda3 | /auction/auction/migrations/0012_auto_20210329_1515.py | e0a2ab42ef0fd92803ce83ab6d4e895d55838d22 | [] | no_license | deji100/Projects | 6919041ba23e77a5c74e5ab7692bfcee38ececcb | 17e64d954d1d7805be57ec5d8d4344e4944889e6 | refs/heads/master | 2023-04-30T05:25:03.143303 | 2021-05-20T15:00:43 | 2021-05-20T15:00:43 | 338,844,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 3.1.3 on 2021-03-29 14:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('auction', '0011_auto_20210326_2149'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='sold_out',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
c993ab5b21d8f118c3a3d92b12bbc00a0e289025 | 5c9c9072adafff9de79552d927d225539874a1e5 | /fallas/panels.py | 3e39327b21d1c1fe85ed7cd64558561a50ba6772 | [] | no_license | NOKIA-GAP/trouble-shooting-api | e38e221aa01b16b28cd90b1c93f0b5141d67b26a | a3f6e2c3c22727c888b1f3f4e570fd729920e267 | refs/heads/master | 2021-08-10T01:07:46.333294 | 2018-10-01T17:21:14 | 2018-10-01T17:21:14 | 114,031,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | from .models import (
Falla
)
INSTALACION = 'Instalacion'
INTEGRACION = 'Integracion'
SOFTWARE = 'Software'
HARDWARE = 'Hardware'
DATAFILL = 'Datafill'
AJUSTEPOTENCIA = 'Ajuste Potencia'
INTERFERENCIAEXTREMA = 'Interferencia externa'
CAMBIODISENO = 'Cambio diseno'
MALRECHAZO = 'Mal rechazo'
TX = 'TX'
COMPORTAMIENTOESPERADO = 'Comportamiento esperado'
COMPORTAMIENTOPREVIO = 'Comportamiento previo'
AJUSTEADYACENCIAS = 'Ajuste Adyacencias'
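# Module-level querysets, one per failure type. Django querysets are lazy, so
# each of these is only evaluated when a panel actually iterates over it.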
fallas = Falla.objects.all()
fallas_instalacion = Falla.objects.filter(tipo_falla=INSTALACION)
fallas_integracion = Falla.objects.filter(tipo_falla=INTEGRACION)
fallas_software = Falla.objects.filter(tipo_falla=SOFTWARE)
fallas_hardware = Falla.objects.filter(tipo_falla=HARDWARE)
fallas_datafill = Falla.objects.filter(tipo_falla=DATAFILL)
fallas_ajuste_potencia = Falla.objects.filter(tipo_falla=AJUSTEPOTENCIA)
fallas_interferencia_externa = Falla.objects.filter(tipo_falla=INTERFERENCIAEXTREMA)
fallas_cambio_diseno = Falla.objects.filter(tipo_falla=CAMBIODISENO)
fallas_mal_rechazo = Falla.objects.filter(tipo_falla=MALRECHAZO)
fallas_tx = Falla.objects.filter(tipo_falla=TX)
fallas_comportamiento_esperado = Falla.objects.filter(tipo_falla=COMPORTAMIENTOESPERADO)
fallas_comportamiento_previo = Falla.objects.filter(tipo_falla=COMPORTAMIENTOPREVIO)
fallas_ajuste_adyasencias = Falla.objects.filter(tipo_falla=AJUSTEADYACENCIAS)
| [
"[email protected]"
] | |
ca25dc5334e07d1808f358786bcaf82904395a1f | 77c8c500d4077ad733fbfe2c6a85a1dd47bd3cb5 | /soohyun/python/baekjoon/0714/14890/1.py | edba3f8a5a55290bcb7c3549d30fcc7e6037b623 | [] | no_license | chelseashin/AlgorithmStudy2021 | 786f03c4c17bc057518d428481e7d710d24ec98e | 1a4744a621ed25715fc9060c5224f0b1092d9c00 | refs/heads/master | 2023-06-22T22:27:47.289806 | 2021-07-28T02:54:22 | 2021-07-28T02:54:22 | 326,441,667 | 1 | 5 | null | 2021-06-29T01:27:40 | 2021-01-03T15:44:16 | Python | UTF-8 | Python | false | false | 3,777 | py |
# Time taken: 1 hour 20 minutes
import sys
input = sys.stdin.readline
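# BOJ 14890: every row and every column is a candidate path. Adjacent heights
# may differ by at most 1, and each 1-step change must be covered by a ramp of
# length L resting on flat cells. In the scans below, `count` is the length of
# the current flat run (needed before an upward step) and `left` is the number
# of flat cells still owed to a downward ramp (101 acts as a "no pending ramp"
# sentinel). Despite the names, count_slope_col() walks the horizontal paths
# (one per row) and count_slope_row() walks the vertical ones.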
def count_slope_col(N, L, stairs):
answer = 0
for row in range(N):
count = 1
left = 101
previous = -1
is_slope = True
for col in range(N):
current = stairs[row][col]
#print(f"|{stairs[row][col]} previous:{previous}, current: {current}|", end=" ")
#print(left, count)
if previous != -1:
if abs(previous - current) == 1:
if left != 101 and left > 0:
#print("Not enough min number", f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
if previous > current:
left = L
left -= 1
count = 0
else:
if count < L:
#print("count < L", count, f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
count = 1
elif abs(previous - current) == 0:
if left != 101 and left > 0:
left -= 1
else:
count += 1
else:
#print("abs(previous - current) > 0", f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
previous = current
if left != 101 and left > 0:
is_slope = False
if is_slope:
#print(col, answer)
answer += 1
return answer
def count_slope_row(N, L, stairs):
answer = 0
for col in range(N):
count = 1
left = 101
previous = -1
is_slope = True
for row in range(N):
current = stairs[row][col]
#print(f"|{stairs[row][col]} previous:{previous}, current: {current}|", end=" ")
#print(left, count)
if previous != -1:
if abs(previous - current) == 1:
if left != 101 and left > 0:
#print("Not enough min number", f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
if previous > current:
left = L
left -= 1
count = 0
else:
if count < L:
#print("count < L", count, f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
count = 1
elif abs(previous - current) == 0:
if left != 101 and left > 0:
left -= 1
else:
count += 1
else:
#print("abs(previous - current) > 0", f"|{stairs[row][col]} previous:{previous}, current: {current}|")
is_slope = False
break
previous = current
if left != 101 and left > 0:
is_slope = False
if is_slope:
#print(col, answer)
answer += 1
return answer
def main():
N, L = map(int, input().rstrip().split(" "))
stairs = [list(map(int, input().rstrip().split(" "))) for _ in range(N)]
print(count_slope_row(N, L, stairs) + count_slope_col(N, L, stairs))
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
fdc80a64579ae8d080ea7732c7de51bfdd52b18f | a4e2b2fa5c54c7d43e1dbe4eef5006a560cd598e | /silk/templatetags/filters.py | 19422dffbef2ed8bba3077129da8477927e3f616 | [
"MIT"
] | permissive | joaofrancese/silk | baa9fc6468351ec34bc103abdbd1decce0ae2f5d | d8de1367eb70f4405f4ae55d9286f0653c5b3189 | refs/heads/master | 2023-04-01T07:30:42.707427 | 2017-02-22T14:06:05 | 2017-02-22T14:06:05 | 23,427,190 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py | import re
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.utils import timezone
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
register = Library()
def _esc_func(autoescape):
if autoescape:
esc = conditional_escape
else:
esc = lambda x: x
return esc
@stringfilter
def spacify(value, autoescape=None):
esc = _esc_func(autoescape)
    val = esc(value).replace(' ', ' ')
    val = val.replace('\t', '    ')
return mark_safe(val)
def _urlify(str):
r = re.compile("(?P<src>/.*\.py)\", line (?P<num>[0-9]+).*")
m = r.search(str)
while m:
group = m.groupdict()
src = group['src']
num = group['num']
start = m.start('src')
end = m.end('src')
rep = '<a href="/silk/src/?file_path={src}&line_num={num}">{src}</a>'.format(src=src, num=num)
str = str[:start] + rep + str[end:]
m = r.search(str)
return str
@register.filter
def hash(h, key):
return h[key]
def _process_microseconds(dt_strftime):
splt = dt_strftime.split('.')
micro = splt[-1]
time = '.'.join(splt[0:-1])
micro = '%.3f' % float('0.' + micro)
return time + micro[1:]
def _silk_date_time(dt):
today = timezone.now().date()
if dt.date() == today:
dt_strftime = dt.strftime('%H:%M:%S.%f')
return _process_microseconds(dt_strftime)
else:
return _process_microseconds(dt.strftime('%Y.%m.%d %H:%M.%f'))
@register.filter
def silk_date_time(dt):
return _silk_date_time(dt)
@register.filter(name='sorted')
def sorted_filter(l):
    # Renamed so the call below reaches the builtin; a filter function named
    # `sorted` that calls sorted() would recurse into itself.
    return sorted(l)
@stringfilter
def filepath_urlify(value, autoescape=None):
value = _urlify(value)
return mark_safe(value)
@stringfilter
def body_filter(value):
print(value)
if len(value) > 20:
return 'Too big!'
else:
return value
spacify.needs_autoescape = True
filepath_urlify.needs_autoescape = True
register.filter(spacify)
register.filter(filepath_urlify)
register.filter(body_filter) | [
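# Illustrative template usage of the filters registered above (the variable names are assumptions):
#   {% load filters %}
#   {{ some_timestamp|silk_date_time }}
#   {{ traceback_text|spacify|filepath_urlify }}
#   {{ request_body|body_filter }}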
"[email protected]"
] | |
6a09ae6348786ae00b9bb35b1a7611573b23169e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /s45bQoPtMoZcj7rnR_16.py | 41a063878034de7567ea745ec857b1619c25c1ce | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py |
def closest_palindrome(n):
if n == 100:
return 99
n = list(str(n))
n = [int(i) for i in n]
for i in range(len(n) // 2):
    # mirror the left half onto the right half
    n[-i - 1] = n[i]
return int(''.join(str(i) for i in n))
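# Example: closest_palindrome(1234) mirrors the left half onto the right half and returns 1221.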
| [
"[email protected]"
] | |
6bd3aeb20fd5e5259fbb55aac32c164b66cb5769 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/build/android/pylib/utils/base_error.py | 263479a3c3279aca00c226b8b97cd52bc9ec3175 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 270 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-wildcard-import
# pylint: disable=wildcard-import
from devil.base_error import *
| [
"[email protected]"
] | |
4e821bf5ff638f0d5f8bf8cd7e66dbe3fd01bec1 | 08d316151302f7ba4ae841c15b7adfe4e348ddf1 | /reviewboard/integrations/tests/test_configs.py | 29008ec52f56c465b2b27c813306f2c40fc28b9c | [
"MIT"
] | permissive | LloydFinch/reviewboard | aa8cd21fac359d49b3dfc5a68c42b857c0c04bd8 | 563c1e8d4dfd860f372281dc0f380a0809f6ae15 | refs/heads/master | 2020-08-10T20:02:32.204351 | 2019-10-02T20:46:08 | 2019-10-02T20:46:08 | 214,411,166 | 2 | 0 | MIT | 2019-10-11T10:44:55 | 2019-10-11T10:44:54 | null | UTF-8 | Python | false | false | 4,954 | py | from __future__ import unicode_literals
import logging
from djblets.conditions import ConditionSet
from djblets.forms.fields import ConditionsField
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.integrations.forms import IntegrationConfigForm
from reviewboard.integrations.models import IntegrationConfig
from reviewboard.reviews.conditions import ReviewRequestConditionChoices
from reviewboard.testing.testcase import TestCase
class MyConfigForm(IntegrationConfigForm):
my_conditions = ConditionsField(
choices=ReviewRequestConditionChoices)
class IntegrationConfigTests(SpyAgency, TestCase):
"""Unit tests for reviewboard.integrations.models.IntegrationConfig."""
def test_load_conditions(self):
"""Testing IntegrationConfig.load_conditions"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
condition_set = config.load_conditions(MyConfigForm,
conditions_key='my_conditions')
self.assertEqual(condition_set.mode, ConditionSet.MODE_ALL)
conditions = condition_set.conditions
self.assertEqual(len(conditions), 2)
condition = conditions[0]
self.assertEqual(condition.choice.choice_id, 'branch')
self.assertEqual(condition.operator.operator_id, 'is')
self.assertEqual(condition.value, 'master')
condition = conditions[1]
self.assertEqual(condition.choice.choice_id, 'summary')
self.assertEqual(condition.operator.operator_id, 'contains')
self.assertEqual(condition.value, '[WIP]')
def test_load_conditions_with_empty(self):
"""Testing IntegrationConfig.load_conditions with empty or missing
data
"""
config = IntegrationConfig()
config.settings['conditions'] = None
self.assertIsNone(config.load_conditions(MyConfigForm))
def test_load_conditions_with_bad_data(self):
"""Testing IntegrationConfig.load_conditions with bad data"""
config = IntegrationConfig()
config.settings['conditions'] = 'dfsafas'
self.spy_on(logging.debug)
self.spy_on(logging.exception)
self.assertIsNone(config.load_conditions(MyConfigForm))
self.assertTrue(logging.debug.spy.called)
self.assertTrue(logging.exception.spy.called)
@add_fixtures(['test_users'])
def test_match_conditions(self):
"""Testing IntegrationConfig.match_conditions"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
review_request = self.create_review_request(
branch='master',
summary='[WIP] This is a test.')
self.assertTrue(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request=review_request))
review_request = self.create_review_request(
branch='master',
summary='This is a test.')
self.assertFalse(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request=review_request))
@add_fixtures(['test_users'])
def test_match_conditions_sandbox(self):
"""Testing IntegrationConfig.match_conditions with exceptions
sandboxed
"""
config = IntegrationConfig()
config.settings['my_conditions'] = {
'mode': 'all',
'conditions': [
{
'choice': 'branch',
'op': 'is',
'value': 'master',
},
{
'choice': 'summary',
'op': 'contains',
'value': '[WIP]',
},
],
}
self.create_review_request(
branch='master',
summary='[WIP] This is a test.')
self.spy_on(logging.exception)
self.assertFalse(config.match_conditions(
MyConfigForm,
conditions_key='my_conditions',
review_request='test'))
self.assertTrue(logging.exception.spy.called)
| [
"[email protected]"
] | |
c1b8ece492125158774e3af66f2bfa7f7de642cd | d043a51ff0ca2f9fb3943c3f0ea21c61055358e9 | /python3网络爬虫开发实战/数据存储/文件存储/file6.py | 38d8bd4763b4c20db1de58e5df8e49db8c1879e2 | [] | no_license | lj1064201288/dell_python | 2f7fd9dbcd91174d66a2107c7b7f7a47dff4a4d5 | 529985e0e04b9bde2c9e0873ea7593e338b0a295 | refs/heads/master | 2020-03-30T03:51:51.263975 | 2018-12-11T13:21:13 | 2018-12-11T13:21:13 | 150,707,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | import csv
with open('data.csv', 'w') as csvfile:
    # Initialize a writer object by calling the csv library's writer() method
writer = csv.writer(csvfile)
    # Pass each row's data to writerow() to write it to the file
writer.writerow(['id', 'name', 'age'])
writer.writerow(['10001', 'Mike', 20])
writer.writerow(['10002', 'Bob', 22])
writer.writerow(['10003', 'Jordan', 21]) | [
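# Note: on Python 3 it is usually better to open the file with newline=''
# (open('data.csv', 'w', newline='')) so csv.writer does not emit blank lines between rows on Windows.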
"[email protected]"
] | |
030656461f5d5ace42ba347134ba1fef6d164dd2 | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /arcseventdata/applications/obsolete/ipdpE.py | f4a03c9939aa822c22a8334013aca9fa028edc74 | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 4,378 | py | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
## This script reads events from an event data file
## and creates a histogram hdf5 file of I(pack, detector, pixel, E)
import os
def run( eventdatafilename, nevents, ARCSxml, h5filename,
E_params, Ei, emission_time = 0 ):
from arcseventdata.getinstrumentinfo import getinstrumentinfo
infos = getinstrumentinfo(ARCSxml)
npacks, ndetsperpack, npixelsperdet = infos['detector-system-dimensions']
mod2sample = infos['moderator-sample distance']
pixelPositionsFilename = infos['pixelID-position mapping binary file']
print "eventdatafilename = %s" % eventdatafilename
print "nevents = %s" % nevents
print "pixel-positions-filename=%s" % pixelPositionsFilename
print "output h5filename = %s" % h5filename
    print 'E_params (unit: meV) = %s' % (E_params, )
print 'mod2sample distance = %s' % mod2sample
print 'Incident energy (unit: meV) = %s' % (Ei, )
print 'emission_time (unit: microsecond) = %s' % (emission_time, )
if os.path.exists(h5filename):
raise IOError, "%s already exists" % h5filename
    E_begin, E_end, E_step = E_params # meV
import arcseventdata, histogram
E_axis = histogram.axis('energy', boundaries = histogram.arange(
E_begin, E_end, E_step) )
h = histogram.histogram(
'I(pdpE)',
[
('detectorpackID', range(npacks+1)),
('detectorID', range(ndetsperpack)),
('pixelID', range(npixelsperdet) ),
E_axis,
],
data_type = 'int',
)
events = arcseventdata.readevents( eventdatafilename, nevents )
pixelPositions = arcseventdata.readpixelpositions( pixelPositionsFilename )
arcseventdata.events2IpdpE(
events, nevents, h, Ei, pixelPositions,
npacks = npacks, ndetsperpack = ndetsperpack, npixelsperdet = npixelsperdet,
mod2sample = mod2sample,
emission_time = emission_time,
)
# set error bar squares to be equal to counts
h.errors().storage().asNumarray()[:] = h.data().storage().asNumarray()
from histogram.hdf import dump
dump(h, h5filename, '/', 'c' )
return
def main():
from optparse import OptionParser
usage = "usage: %prog [options] event-data-file"
parser = OptionParser(usage)
#parser.add_option("-e", "--eventdatafile", dest="eventdatafile",
# help="ARCS event data file")
parser.add_option("-o", "--out", dest="h5filename", default = "Idspacing.h5",
help="hdf5 file of I(dspacing) histogram")
parser.add_option("-n", "--nevents", dest="nevents", default = '1000',
type = 'int', help="number of events")
parser.add_option("-E", "--EnergyTransfer", dest="E_params", default = '-50,50,1.',
help="energy transfer bin parameters (begin, end, step). units: meV")
parser.add_option("-x", "--ARCS-xml", dest = "ARCSxml",
default = "ARCS.xml",
help="ARCS instrument xml file" )
parser.add_option('-I', '--IncidentEnergy', dest='Ei', default = 60, type = 'float',
help='incident energy. unit: meV')
parser.add_option('-t', '--emission_time', dest='emission_time', default = 0.0, type = 'float',
help='emission time. tof reading - real tof. unit: microsecond')
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("incorrect number of arguments")
raise "should not reach here"
eventdatafile = args[0]
h5filename = options.h5filename
nevents = options.nevents
E_params = eval( options.E_params )
Ei = options.Ei
emission_time = options.emission_time
ARCSxml = options.ARCSxml
run( eventdatafile, nevents, ARCSxml, h5filename, E_params, Ei, emission_time )
return
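# Example invocation (file names are illustrative; the flags are defined above):
#   ipdpE.py -x ARCS.xml -E -50,50,1. -I 60 -n 1000000 -o IpdpE.h5 run1_events.dat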
if __name__ == '__main__':
import journal
journal.warning( 'arcseventdata.Histogrammer2' ).deactivate()
main()
# version
__id__ = "$Id$"
# End of file
| [
"[email protected]"
] | |
c9f002f2e16d65a1878541197c7e17dfa4b052e7 | fb3c1e036f18193d6ffe59f443dad8323cb6e371 | /src/flash/build/buildbot/slaves/windows64/buildbot.tac | b10193418512735dd402c802a5bdee88e3ccd140 | [] | no_license | playbar/nstest | a61aed443af816fdc6e7beab65e935824dcd07b2 | d56141912bc2b0e22d1652aa7aff182e05142005 | refs/heads/master | 2021-06-03T21:56:17.779018 | 2016-08-01T03:17:39 | 2016-08-01T03:17:39 | 64,627,195 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | tac | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is [Open Source Virtual Machine.].
#
# The Initial Developer of the Original Code is
# Adobe System Incorporated.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Adobe AS3 Team
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ****
from twisted.application import service
from buildbot.slave.bot import BuildSlave
basedir = r'/c/buildbot/tamarin-redux/windows64'
buildmaster_host = '10.171.22.12'
port = 9750
slavename = 'asteamwin3'
passwd = 'asteam'
keepalive = 600
usepty = 1
umask = None
application = service.Application('buildslave')
s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir, keepalive, usepty,
umask=umask)
s.setServiceParent(application)
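# Note: this .tac file is loaded by twistd when the build slave starts,
# e.g. via `buildslave start <basedir>` or `twistd -ny buildbot.tac`.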
| [
"[email protected]"
] | |
83534c791f8063eb1d5ac8569916a49b0f8b0f09 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /uhd_restpy/testplatform/sessions/ixnetwork/topology/dhcpv4client_cfcdda8db5004b679a441f92193405ea.py | 2fd232e15d33bc4c0cf4ba5e72ca7f72b489eb1f | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,318 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class Dhcpv4client(Base):
"""DHCPv4 Client protocol.
The Dhcpv4client class encapsulates a list of dhcpv4client resources that are managed by the user.
A list of resources can be retrieved from the server using the Dhcpv4client.find() method.
The list can be managed by using the Dhcpv4client.add() and Dhcpv4client.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'dhcpv4client'
_SDM_ATT_MAP = {
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Dhcp4Broadcast': 'dhcp4Broadcast',
'Dhcp4GatewayAddress': 'dhcp4GatewayAddress',
'Dhcp4GatewayMac': 'dhcp4GatewayMac',
'Dhcp4ServerAddress': 'dhcp4ServerAddress',
'Dhcp4UseFirstServer': 'dhcp4UseFirstServer',
'DiscoveredAddresses': 'discoveredAddresses',
'DiscoveredGateways': 'discoveredGateways',
'DiscoveredPrefix': 'discoveredPrefix',
'Errors': 'errors',
'Multiplier': 'multiplier',
'Name': 'name',
'RenewTimer': 'renewTimer',
'SessionInfo': 'sessionInfo',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'UseRapidCommit': 'useRapidCommit',
}
def __init__(self, parent):
super(Dhcpv4client, self).__init__(parent)
@property
def Bfdv4Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface_91b557a3f744baf442dbe21ac75e8f2e.Bfdv4Interface): An instance of the Bfdv4Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bfdv4interface_91b557a3f744baf442dbe21ac75e8f2e import Bfdv4Interface
return Bfdv4Interface(self)
@property
def BgpIpv4Peer(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer_9dd9eddcf2bd784d82d8a016e392f035.BgpIpv4Peer): An instance of the BgpIpv4Peer class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.bgpipv4peer_9dd9eddcf2bd784d82d8a016e392f035 import BgpIpv4Peer
return BgpIpv4Peer(self)
@property
def ECpriRe(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprire_51f1030cbafd2e567d3b517032a1b011.ECpriRe): An instance of the ECpriRe class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprire_51f1030cbafd2e567d3b517032a1b011 import ECpriRe
return ECpriRe(self)
@property
def ECpriRec(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprirec_129f1d43f285a4f806ade4e0df814255.ECpriRec): An instance of the ECpriRec class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ecprirec_129f1d43f285a4f806ade4e0df814255 import ECpriRec
return ECpriRec(self)
@property
def Geneve(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.geneve_14ab6f140956b4fc77d1d0f03c5e7514.Geneve): An instance of the Geneve class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.geneve_14ab6f140956b4fc77d1d0f03c5e7514 import Geneve
return Geneve(self)
@property
def IgmpHost(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.igmphost_8940887674c0387469423e8df3a33854.IgmpHost): An instance of the IgmpHost class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmphost_8940887674c0387469423e8df3a33854 import IgmpHost
return IgmpHost(self)
@property
def IgmpQuerier(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier_38c883b0cec7ffb5405af90bf1b8cda5.IgmpQuerier): An instance of the IgmpQuerier class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.igmpquerier_38c883b0cec7ffb5405af90bf1b8cda5 import IgmpQuerier
return IgmpQuerier(self)
@property
def MplsOam(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.mplsoam_e01bb6affe899a4731aa60619f4aeadc.MplsOam): An instance of the MplsOam class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.mplsoam_e01bb6affe899a4731aa60619f4aeadc import MplsOam
return MplsOam(self)
@property
def NetconfClient(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfclient_1eaa2ab0efacd988796bdc1f5fe4291c.NetconfClient): An instance of the NetconfClient class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfclient_1eaa2ab0efacd988796bdc1f5fe4291c import NetconfClient
return NetconfClient(self)
@property
def NetconfServer(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfserver_ad256f8ca38068f1eaff839ed40b1e30.NetconfServer): An instance of the NetconfServer class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.netconfserver_ad256f8ca38068f1eaff839ed40b1e30 import NetconfServer
return NetconfServer(self)
@property
def Ospfv2(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv2_27b7a27a991a50e01e629b9de482a2f0.Ospfv2): An instance of the Ospfv2 class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.ospfv2_27b7a27a991a50e01e629b9de482a2f0 import Ospfv2
return Ospfv2(self)
@property
def Pcc(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pcc_9346785b55d17399fecd6fe36c418219.Pcc): An instance of the Pcc class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pcc_9346785b55d17399fecd6fe36c418219 import Pcc
return Pcc(self)
@property
def Pce(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pce_bd5f6a11078a4f0deb5d56bef8e9674f.Pce): An instance of the Pce class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pce_bd5f6a11078a4f0deb5d56bef8e9674f import Pce
return Pce(self)
@property
def PimV4Interface(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface_92603cbceaf153039f7575ed9bc4aa67.PimV4Interface): An instance of the PimV4Interface class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.pimv4interface_92603cbceaf153039f7575ed9bc4aa67 import PimV4Interface
return PimV4Interface(self)
@property
def Tag(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
return Tag(self)
@property
def TlvProfile(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c.TlvProfile): An instance of the TlvProfile class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.tlvprofile_69db000d3ef3b060f5edc387b878736c import TlvProfile
return TlvProfile(self)
@property
def Vxlan(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlan_ed3df6fe7146492fc5fe0f77f53f9473.Vxlan): An instance of the Vxlan class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.vxlan_ed3df6fe7146492fc5fe0f77f53f9473 import Vxlan
return Vxlan(self)
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Dhcp4Broadcast(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, ask the server or relay agent to use the broadcast IP address in the replies.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp4Broadcast']))
@property
def Dhcp4GatewayAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Configures the Manual Gateway IP Address for the DHCPv4 Client.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp4GatewayAddress']))
@property
def Dhcp4GatewayMac(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Configures the Manual Gateway MAC corresponding to the configured Manual Gateway IP of the DHCPv4 Client session.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp4GatewayMac']))
@property
def Dhcp4ServerAddress(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The address of the DHCP server from which the subnet will accept IP addresses.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp4ServerAddress']))
@property
def Dhcp4UseFirstServer(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): If enabled, the subnet accepts the IP addresses offered by the first server to respond with an offer of IP addresses.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Dhcp4UseFirstServer']))
@property
def DiscoveredAddresses(self):
"""
Returns
-------
- list(str): The discovered IPv4 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredAddresses'])
@property
def DiscoveredGateways(self):
"""
Returns
-------
- list(str): The discovered gateway IPv4 addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredGateways'])
@property
def DiscoveredPrefix(self):
"""
Returns
-------
- list(number): The discovered IPv4 prefix length.
"""
return self._get_attribute(self._SDM_ATT_MAP['DiscoveredPrefix'])
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def RenewTimer(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The used-defined lease renewal timer. The value is estimated in seconds and will override the lease renewal timer if it is not zero and is smaller than server-defined value.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RenewTimer']))
@property
def SessionInfo(self):
"""
Returns
-------
- list(str[arpFailed | discoverTimeout | excessiveTlvs | none | rebindTimeout | relayDown | renewTimeout | requestTimeout]): Logs additional information about the session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInfo'])
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def UseRapidCommit(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enables DHCP clients to negotiate leases with rapid commit.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UseRapidCommit']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Updates dhcpv4client resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new dhcpv4client resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved dhcpv4client resources using find and the newly added dhcpv4client resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained dhcpv4client resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DiscoveredAddresses=None, DiscoveredGateways=None, DiscoveredPrefix=None, Errors=None, Multiplier=None, Name=None, SessionInfo=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves dhcpv4client resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve dhcpv4client resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all dhcpv4client resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- DiscoveredAddresses (list(str)): The discovered IPv4 addresses.
- DiscoveredGateways (list(str)): The discovered gateway IPv4 addresses.
- DiscoveredPrefix (list(number)): The discovered IPv4 prefix length.
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionInfo (list(str[arpFailed | discoverTimeout | excessiveTlvs | none | rebindTimeout | relayDown | renewTimeout | requestTimeout])): Logs additional information about the session state
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching dhcpv4client resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of dhcpv4client data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the dhcpv4client resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Dhcp4Broadcast=None, Dhcp4GatewayAddress=None, Dhcp4GatewayMac=None, Dhcp4ServerAddress=None, Dhcp4UseFirstServer=None, RenewTimer=None, UseRapidCommit=None):
"""Base class infrastructure that gets a list of dhcpv4client device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Dhcp4Broadcast (str): optional regex of dhcp4Broadcast
- Dhcp4GatewayAddress (str): optional regex of dhcp4GatewayAddress
- Dhcp4GatewayMac (str): optional regex of dhcp4GatewayMac
- Dhcp4ServerAddress (str): optional regex of dhcp4ServerAddress
- Dhcp4UseFirstServer (str): optional regex of dhcp4UseFirstServer
- RenewTimer (str): optional regex of renewTimer
- UseRapidCommit (str): optional regex of useRapidCommit
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def Rebind(self, *args, **kwargs):
"""Executes the rebind operation on the server.
Rebind selected DHCP items.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
rebind(SessionIndices=list)
---------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
rebind(SessionIndices=string)
-----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('rebind', payload=payload, response_object=None)
def Renew(self, *args, **kwargs):
"""Executes the renew operation on the server.
Renew selected DHCP items.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
renew(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
renew(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('renew', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def SendArp(self, *args, **kwargs):
"""Executes the sendArp operation on the server.
Sends Arp for its corresponding gateway
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
sendArp(SessionIndices=list)
----------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
sendArp(SessionIndices=string)
------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendArp', payload=payload, response_object=None)
def SendPing(self, *args, **kwargs):
"""Executes the sendPing operation on the server.
Send ping for selected DHCP items.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
sendPing(DestIP=string)list
---------------------------
- DestIP (str): This parameter requires a destIP of type kString
- Returns list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
sendPing(DestIP=string, SessionIndices=list)list
------------------------------------------------
- DestIP (str): This parameter requires a destIP of type kString
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- Returns list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
sendPing(SessionIndices=string, DestIP=string)list
--------------------------------------------------
- SessionIndices (str): This parameter requires a destIP of type kString
- DestIP (str): This parameter requires a string of session numbers 1-4;6;7-12
- Returns list(dict(port:str[None | /api/v1/sessions/1/ixnetwork/vport],isSuccess:bool,data:str)): The return value is an array of structures where each structure consists of a /vport object reference, the success of the operation and the returned data of the operation for that /vport. This exec is not asynchronous.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendPing', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
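# Illustrative usage sketch (the parent `ethernet` object and the attribute values are
# assumptions; the accessor chain depends on the NGPF topology built elsewhere):
#   dhcp = ethernet.Dhcpv4client.add(Name='DHCPv4 clients', Multiplier=10)
#   dhcp.UseRapidCommit.Single(True)
#   dhcp.Start()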
| [
"[email protected]"
] |