blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0283e43ce1b3b31585f53812085b759b79811cc6 | 5182897b2f107f4fd919af59c6762d66c9be5f1d | /.history/src/Individuo_20200710164705.py | ffc91aee5f741018ab08a59df5c8052c8ae54c56 | [
"MIT"
]
| permissive | eduardodut/Trabalho_final_estatistica_cd | 422b7e702f96291f522bcc68d2e961d80d328c14 | fbedbbea6bdd7a79e1d62030cde0fab4e93fc338 | refs/heads/master | 2022-11-23T03:14:05.493054 | 2020-07-16T23:49:26 | 2020-07-16T23:49:26 | 277,867,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,332 | py | import random
class Individuo():
    """A single individual (grid cell) in the epidemic simulation.

    Tracks the health status, the number of updates remaining until cure,
    and the position on the simulation grid.
    """

    # Health states.
    SADIO = 0              # healthy
    INFECTADO_TIPO_1 = 1   # asymptomatic carriers and the initial infected
    INFECTADO_TIPO_2 = 2   # symptomatic
    CURADO = 3             # cured
    MORTO = 4              # dead

    def __init__(
        self,
        status,
        atualizacoes_cura,
        posicao):
        self.status = status
        self.atualizacoes_cura = atualizacoes_cura  # updates left until cure
        self.posicao = posicao                      # grid position
    def __repr__(self):
        # BUG FIX: the original called the undefined name ``string``;
        # ``str`` is the correct built-in.
        return str(self.status)
    def infectar(self, chance_infeccao, chance_infeccao_tipo2):
        """Attempt to infect a neighbour.

        Returns the status the neighbour would acquire: SADIO when no
        infection happens, otherwise INFECTADO_TIPO_1 or INFECTADO_TIPO_2.
        Only an infected individual can infect others.
        """
        saida = Individuo.SADIO
        if (self.status == Individuo.INFECTADO_TIPO_2 or self.status == Individuo.INFECTADO_TIPO_1):
            # random draw for the chance of infecting the neighbour
            rng_infeccao = random.random()
            if rng_infeccao <= chance_infeccao:
                # random draw deciding between type 1 and type 2 infection
                rng_infeccao_tipo2 = random.random()
                if rng_infeccao_tipo2 <= chance_infeccao_tipo2:
                    saida = Individuo.INFECTADO_TIPO_2
                else:
                    saida = Individuo.INFECTADO_TIPO_1
        return saida
    def checagem_morte(self, chance_morte):
        """Roll for death (symptomatic individuals only); otherwise fall
        through to the cure check. Returns the (possibly updated) status."""
        if self.status == Individuo.INFECTADO_TIPO_2:
            rng_morte = random.random()
            if rng_morte <= chance_morte:
                self.status = Individuo.MORTO
                return self.status
        return self.checagem_cura()
    def checagem_cura(self):
        """Decrement the cure counter of an infected individual; the
        individual becomes CURADO when the counter reaches zero."""
        if self.status == Individuo.INFECTADO_TIPO_2 or self.status == Individuo.INFECTADO_TIPO_1:
            self.atualizacoes_cura = self.atualizacoes_cura - 1
            if self.atualizacoes_cura == 0:
                self.status = Individuo.CURADO
        return self.status
class Fabrica_individuo():
    """Factory producing Individuo instances that share one cure counter."""

    def __init__(
        self,
        atualizacoes_cura):
        # number of updates required to cure a type 1 or type 2 individual
        self.atualizacoes_cura = atualizacoes_cura

    def criar_individuo(self, status_inicial, posicao):
        """Build a new Individuo at *posicao* with the given initial status."""
        return Individuo(status_inicial, self.atualizacoes_cura, posicao)
| [
"[email protected]"
]
| |
d29f521a654b15c312751d8f72d1ec6c1fa0ff3d | d0081f81996635e913b1f267a4586eb0bfd3dcd5 | /tests/unit/dataactvalidator/test_fabsreq4.py | 489d9acbfbffa7cd67a5f92be51975de4852decc | [
"CC0-1.0"
]
| permissive | fedspendingtransparency/data-act-broker-backend | 71c10a6c7c284c8fa6556ccc0efce798870b059b | b12c73976fd7eb5728eda90e56e053759c733c35 | refs/heads/master | 2023-09-01T07:41:35.449877 | 2023-08-29T20:14:45 | 2023-08-29T20:14:45 | 57,313,310 | 55 | 36 | CC0-1.0 | 2023-09-13T16:40:58 | 2016-04-28T15:39:36 | Python | UTF-8 | Python | false | false | 1,561 | py | from tests.unit.dataactcore.factories.staging import FABSFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq4'
def test_column_headers(database):
    """The rule query exposes exactly the expected output columns."""
    expected = {
        'row_number',
        'business_funds_indicator',
        'correction_delete_indicatr',
        'uniqueid_AssistanceTransactionUniqueKey',
    }
    assert expected == set(query_columns(_FILE, database))
def test_success(database):
    """ Test BusinessFundsIndicator is required for all submissions except delete records. """
    records = [
        FABSFactory(correction_delete_indicatr='C', business_funds_indicator='REC'),
        FABSFactory(correction_delete_indicatr='', business_funds_indicator='NON'),
        # Delete (D) records are exempt from the requirement.
        FABSFactory(correction_delete_indicatr='d', business_funds_indicator=None),
        FABSFactory(correction_delete_indicatr='D', business_funds_indicator=''),
        FABSFactory(correction_delete_indicatr='D', business_funds_indicator='RE'),
    ]
    assert number_of_errors(_FILE, database, models=records) == 0
def test_failure(database):
    """ Test fail BusinessFundsIndicator is required for all submissions except delete records. """
    records = [
        FABSFactory(correction_delete_indicatr='c', business_funds_indicator=None),
        FABSFactory(correction_delete_indicatr=None, business_funds_indicator=''),
    ]
    assert number_of_errors(_FILE, database, models=records) == 2
| [
"[email protected]"
]
| |
c3b8845978fac8cfa735af881c0a55ce00ccf926 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_326/ch6_2020_03_09_19_29_35_692924.py | 62c78804fbf0b43e0866b1d8788774568a3b42ba | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | def celsius_para_fahrenheit(celsius):
temperatura_equivalente_em_F = (celsius * 9/5) + 32
return temperatura_equivalente_em_F | [
"[email protected]"
]
| |
167189898c959abc7ed28e564880ee1069d227f1 | 2d4380518d9c591b6b6c09ea51e28a34381fc80c | /CIM16/CDPSM/Balanced/IEC61970/LoadModel/__init__.py | 9ee7be64a7b50687c12f23c691687acf992d4b74 | [
"MIT"
]
| permissive | fran-jo/PyCIM | 355e36ae14d1b64b01e752c5acd5395bf88cd949 | de942633d966bdf2bd76d680ecb20517fc873281 | refs/heads/master | 2021-01-20T03:00:41.186556 | 2017-09-19T14:15:33 | 2017-09-19T14:15:33 | 89,480,767 | 0 | 1 | null | 2017-04-26T12:57:44 | 2017-04-26T12:57:44 | null | UTF-8 | Python | false | false | 1,650 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""This package is responsible for modeling the energy consumers and the system load as curves and associated curve data. Special circumstances that may affect the load, such as seasons and daytypes, are also included here. This information is used by Load Forecasting and Load Management.
"""
from CIM16.CDPSM.Balanced.IEC61970.LoadModel.LoadResponseCharacteristic import LoadResponseCharacteristic
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Balanced#LoadModel"
nsPrefix = "cimLoadModel"
| [
"[email protected]"
]
| |
8461e2f548998a35f94100eb6fdd0f429b1d5ab8 | c68268657c1a94c09271a124b200b0aeb85bb05e | /angulardjangorest/angular/views.py | 2fc517fe4100318c5e1a5d13e2a29905c476cc33 | []
| no_license | photonkhan/angulardjangorest | 146960801c8fdab924c4012271075a04c1379d91 | 3357066ab094ae152b138a506f3e2d41588ecf68 | refs/heads/master | 2022-12-12T02:09:56.248353 | 2018-07-25T13:12:39 | 2018-07-25T13:12:39 | 142,123,874 | 0 | 0 | null | 2022-11-17T05:58:28 | 2018-07-24T07:49:54 | HTML | UTF-8 | Python | false | false | 278 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def index(request):
    """Render the Angular demo landing page with its header text."""
    return render(
        request,
        'angular/index.html',
        {'header': 'Angular with Djano Rest API'},
    )
| [
"[email protected]"
]
| |
ff6750998ace4ef5d00078ea55ba213c8bdec0e3 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-bcs/huaweicloudsdkbcs/v2/model/dimension.py | cd981259ff98407818c27a6f0bc3680ff3fc3da4 | [
"Apache-2.0"
]
| permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Dimension:
    """Model for a single monitoring dimension (a name/value pair).

    ``openapi_types`` maps attribute names to their declared types and
    ``attribute_map`` maps attribute names to their JSON keys.
    """

    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'value': 'str'
    }

    attribute_map = {
        'name': 'name',
        'value': 'value'
    }

    def __init__(self, name=None, value=None):
        """Dimension - a model defined in huaweicloud sdk"""
        self._name = None
        self._value = None
        self.discriminator = None
        if name is not None:
            self.name = name
        if value is not None:
            self.value = value

    @property
    def name(self):
        """Dimension name."""
        return self._name

    @name.setter
    def name(self, name):
        """Set the dimension name."""
        self._name = name

    @property
    def value(self):
        """Dimension value."""
        return self._value

    @value.setter
    def value(self, value):
        """Set the dimension value."""
        self._value = value

    def _serialize(self, attr, value):
        # Mirror the generated-SDK serialisation rules for one attribute:
        # recurse into lists/dicts/models, mask sensitive plain values.
        if isinstance(value, list):
            return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()}
        return "****" if attr in self.sensitive_list else value

    def to_dict(self):
        """Returns the model properties as a dict"""
        return {attr: self._serialize(attr, getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, Dimension) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
]
| |
c728ae87bedf61ae87a3b9715df359479018fde4 | a08cbd5e9b4e4a037deaaae1749ed4dc55c79661 | /test/IECoreMaya/ObjectDataTest.py | b61ac51f2f90642c99a252ee1471e27d8838e253 | []
| no_license | victorvfx/cortex | 46385788b12dae375c1a5ade26d8f403d2dbccff | deb23599c8c69eac5671e59fe1a8ca0d5e943a36 | refs/heads/master | 2021-01-16T23:11:39.139147 | 2017-06-23T12:39:41 | 2017-06-23T12:39:41 | 95,709,763 | 1 | 0 | null | 2017-06-28T20:40:12 | 2017-06-28T20:40:12 | null | UTF-8 | Python | false | false | 3,927 | py | ##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import maya.cmds
import maya.OpenMaya
import IECore
import IECoreMaya
class ObjectDataTest( IECoreMaya.TestCase ) :
	"""Checks that CompoundData stored on an ObjectData plug survives plug
	conversion and scene save/load in both mayaAscii and mayaBinary formats."""
	def setUp( self ) :
		IECoreMaya.TestCase.setUp( self )
		# the test node lives in a python plugin; load it on demand
		if not maya.cmds.pluginInfo( "ObjectDataTestNode.py", query=True, loaded=True ) :
			maya.cmds.loadPlugin( "ObjectDataTestNode.py" )
	def testReadWrite( self ) :
		node = maya.cmds.createNode( "ieObjectDataTestNode" )
		# nested CompoundData exercises recursive (de)serialisation
		compoundData = IECore.CompoundData( {
			"val1" : IECore.FloatData( 1 ),
			"val2" : IECore.StringData( "val2Data" ),
			"val3" : {
				"val3.val1" : IECore.IntData( 100 ),
			},
		} )
		IECoreMaya.ToMayaPlugConverter.create( compoundData ).convert( node + ".objectData" )
		# round-trip through the plug without any file involved
		plugValue = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
		self.assertEqual( plugValue, compoundData )
		# try saving and loading an ascii file
		maya.cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectDataTest.ma" )
		sceneFileName = maya.cmds.file( force = True, type = "mayaAscii", save = True )
		maya.cmds.file( new=True, force=True )
		maya.cmds.file( sceneFileName, force=True, open=True )
		loadedCompoundData = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
		self.assertEqual( loadedCompoundData, compoundData )
		# try saving and loading a binary file
		maya.cmds.file( rename = os.getcwd() + "/test/IECoreMaya/objectDataTest.mb" )
		sceneFileName = maya.cmds.file( force = True, type = "mayaBinary", save = True )
		maya.cmds.file( new=True, force=True )
		maya.cmds.file( sceneFileName, force=True, open=True )
		loadedCompoundData = IECoreMaya.FromMayaPlugConverter.create( node + ".objectData" ).convert()
		self.assertEqual( loadedCompoundData, compoundData )
	def tearDown( self ) :
		# restore a clean scene and remove the temporary files created above
		maya.cmds.file( new = True, force = True )
		maya.cmds.flushUndo()
		maya.cmds.unloadPlugin( "ObjectDataTestNode.py" )
		for f in [
			"./test/IECoreMaya/objectDataTest.ma",
			"./test/IECoreMaya/objectDataTest.mb",
		] :
			if os.path.exists( f ) :
				os.remove( f )
# Run the Maya-based test suite, ensuring the ieCore plugin is loaded first.
if __name__ == "__main__":
	IECoreMaya.TestProgram( plugins = [ "ieCore" ] )
| [
"[email protected]"
]
| |
2175f0c6c75b4f6669609f7a09d8ddb6bacc229e | 93bf4bbafe0524335ea1216f7f2941348c2cd1bd | /tensorflow/python/kernel_tests/pad_op_test.py | 1597a8c947eb82da5cb47f2e9aac7d6a2967bbbf | [
"Apache-2.0"
]
| permissive | sachinpro/sachinpro.github.io | c4951734b09588cad58711a76fe657f110163c11 | c3bbd8d89818f5d8bb7296c851ed5e52c19728e3 | refs/heads/master | 2022-12-23T10:00:13.902459 | 2016-06-27T13:18:27 | 2016-06-27T13:25:58 | 25,289,839 | 1 | 1 | Apache-2.0 | 2022-12-15T00:45:03 | 2014-10-16T06:44:30 | C++ | UTF-8 | Python | false | false | 6,742 | py | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.nn_ops.Pad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class PadOpTest(tf.test.TestCase):
  """Tests tf.pad against a NumPy reference in all three padding modes."""

  def _npPad(self, inp, paddings, mode):
    # np.pad expects lower-case mode names ("constant", "reflect", ...).
    return np.pad(inp, paddings, mode=mode.lower())

  def testNpPad(self):
    # Sanity checks for the NumPy reference implementation itself.
    self.assertAllEqual(
        np.array([[0, 0, 0, 0, 0, 0],
                  [0, 3, 3, 0, 0, 0],
                  [0, 4, 4, 0, 0, 0],
                  [0, 5, 5, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0]]),
        self._npPad(
            np.array([[3, 3], [4, 4], [5, 5]]),
            [[1, 2], [1, 3]],
            mode="constant"))

    self.assertAllEqual(
        np.array([[4, 3, 4, 9, 4, 3],
                  [1, 0, 1, 2, 1, 0],
                  [4, 3, 4, 9, 4, 3],
                  [1, 0, 1, 2, 1, 0]]),
        self._npPad(
            np.array([[0, 1, 2], [3, 4, 9]]),
            [[1, 1], [1, 2]],
            mode="reflect"))

    self.assertAllEqual(
        np.array([[0, 0, 1, 2, 2, 1],
                  [0, 0, 1, 2, 2, 1],
                  [3, 3, 4, 9, 9, 4],
                  [3, 3, 4, 9, 9, 4]]),
        self._npPad(
            np.array([[0, 1, 2], [3, 4, 9]]),
            [[1, 1], [1, 2]],
            mode="symmetric"))

  def _testPad(self, np_inputs, paddings, mode, use_gpu=False):
    # Compares the TF op output with the NumPy reference for one mode.
    np_val = self._npPad(np_inputs, paddings, mode=mode)
    with self.test_session(use_gpu=use_gpu):
      tf_val = tf.pad(np_inputs, paddings, mode=mode)
      out = tf_val.eval()
    self.assertAllEqual(np_val, out)
    self.assertShapeEqual(np_val, tf_val)

  def _testGradient(self, x, a, mode):
    # Verifies analytic vs numeric Jacobians of the pad op.
    with self.test_session():
      inx = tf.convert_to_tensor(x)
      xs = list(x.shape)
      ina = tf.convert_to_tensor(a)
      y = tf.pad(inx, ina, mode=mode)
      # Expected y's shape to be:
      ys = list(np.array(x.shape) + np.sum(np.array(a), axis=1))
      jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                  xs,
                                                  y,
                                                  ys,
                                                  x_init_value=x)
      self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)

  def _testAll(self, np_inputs, paddings):
    # Runs every mode on CPU and GPU; gradients only for float32 inputs.
    for mode in ("CONSTANT", "REFLECT", "SYMMETRIC"):
      self._testPad(np_inputs, paddings, mode=mode, use_gpu=False)
      self._testPad(np_inputs, paddings, mode=mode, use_gpu=True)
      if np_inputs.dtype == np.float32:
        self._testGradient(np_inputs, paddings, mode=mode)

  def testInputDims(self):
    # paddings must have one row per input dimension.
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2, 1, 1, 1, 1]),
            tf.reshape([1, 2], shape=[1, 2]))

  def testPaddingsDim(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[2]))

  def testPaddingsDim2(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[2, 1]))

  def testPaddingsDim3(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2], shape=[1, 2]))

  def testPaddingsDim4(self):
    with self.test_session():
      with self.assertRaises(ValueError):
        tf.pad(
            tf.reshape([1, 2], shape=[1, 2]),
            tf.reshape([1, 2, 3, 4, 5, 6], shape=[3, 2]))

  def testPaddingsNonNegative(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
        tf.pad(
            tf.constant([1], shape=[1]),
            tf.constant([-1, 0], shape=[1, 2]))

  def testPaddingsNonNegative2(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "must be non-negative"):
        tf.pad(
            tf.constant([1], shape=[1]),
            tf.constant([-1, 0], shape=[1, 2]))

  def testPaddingsMaximum(self):
    # REFLECT/SYMMETRIC limit how much padding a dimension can take.
    with self.test_session():
      with self.assertRaises(Exception):
        tf.pad(
            tf.constant([1], shape=[2]),
            tf.constant([2, 0], shape=[1, 2]),
            mode="REFLECT").eval()
      with self.assertRaises(Exception):
        tf.pad(
            tf.constant([1], shape=[2]),
            tf.constant([0, 3], shape=[1, 2]),
            mode="SYMMETRIC").eval()

  def testIntTypes(self):
    # TODO(touts): Figure out why the padding tests do not work on GPU
    # for int types and rank > 2.
    for t in [np.int32, np.int64]:
      self._testAll((np.random.rand(4, 4, 3) * 100).astype(t),
                    [[1, 0], [2, 3], [0, 2]])

  def testFloatTypes(self):
    for t in [np.float32, np.float64, np.complex64]:
      self._testAll(np.random.rand(2, 5).astype(t),
                    [[1, 0], [2, 0]])

  def testShapeFunctionEdgeCases(self):
    # Unknown paddings shape.
    inp = tf.constant(0.0, shape=[4, 4, 4, 4])
    padded = tf.pad(inp, tf.placeholder(tf.int32))
    self.assertEqual([None, None, None, None], padded.get_shape().as_list())

    # Unknown input shape.
    inp = tf.placeholder(tf.float32)
    padded = tf.pad(inp, [[2, 2], [2, 2]])
    self.assertEqual([None, None], padded.get_shape().as_list())

    # Unknown input and paddings shape.
    inp = tf.placeholder(tf.float32)
    padded = tf.pad(inp, tf.placeholder(tf.int32))
    self.assertAllEqual(None, padded.get_shape().ndims)

  def testScalars(self):
    # Padding a scalar with an empty paddings matrix is the identity.
    paddings = np.zeros((0, 2), dtype=np.int32)
    inp = np.asarray(7)
    for use_gpu in False, True:
      with self.test_session(use_gpu=use_gpu):
        tf_val = tf.pad(inp, paddings)
        out = tf_val.eval()
      self.assertAllEqual(inp, out)
      self.assertShapeEqual(inp, tf_val)
# Run the pad-op test suite under the TensorFlow test runner.
if __name__ == "__main__":
  tf.test.main()
| [
"[email protected]"
]
| |
a5f111402661dc059cae8cb061839575063d1371 | e84feabf99ff6e15df9eeee7b7c2595853fe746d | /app/utils/rabbit.py | 34a303f35e2f07bacfe9fb45fc79e5897dccb18e | []
| no_license | Ravillatypov/freeswitch-intergration | f0536b3abf3982cb2051291dbe101d07e59809b8 | cac8cc829ca0d5734a5294847bf7587758b45eb1 | refs/heads/main | 2023-01-23T02:06:53.070823 | 2020-10-01T17:19:33 | 2020-10-01T17:19:33 | 300,362,493 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | from uuid import uuid4
from aio_pika import Message
from app.settings import MQ_CONVERTER_QUEUE_NAME, MQ_UPLOADS_QUEUE_NAME, ENVIRONMENT
from aio_pika import RobustConnection
async def send_message(rabbit_mq: RobustConnection, routing_key: str, body: bytes):
    """Publish *body* to the default exchange under *routing_key*.

    Skipped entirely in the test environment.
    """
    if ENVIRONMENT != 'test':
        async with rabbit_mq:
            ch = await rabbit_mq.channel()
            message = Message(body=body)
            await ch.default_exchange.publish(message, routing_key=routing_key)
async def need_convert(rabbit_mq: RobustConnection, call_id: uuid4, path: str):
    """Enqueue the recording at *path* for audio conversion."""
    payload = f'{call_id}\n{path}'.encode()
    await send_message(rabbit_mq, MQ_CONVERTER_QUEUE_NAME, payload)
async def need_upload(rabbit_mq: RobustConnection, call_id: uuid4, path: str):
    """Enqueue the recording at *path* for upload."""
    payload = f'{call_id}\n{path}'.encode()
    await send_message(rabbit_mq, MQ_UPLOADS_QUEUE_NAME, payload)
| [
"[email protected]"
]
| |
f7460f6641108e37de035ca72ab022c1e7990a3d | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-3507.py | f4222a0786dc84c1b850189e6ca510c0b4caf33c | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,286 | py | # Binary-search trees
class TreeNode(object):
    """Node of an integer binary search tree."""
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None

    def insert(self:"TreeNode", x:int) -> bool:
        """Insert x below this node; True when a new node was created."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True

    def contains(self:"TreeNode", x:int) -> bool:
        """Membership test for x in the subtree rooted at this node."""
        if x == self.value:
            return True
        branch = self.left if x < self.value else self.right
        return branch is not None and branch.contains(x)
class TreeNode2(object):
    """BST node with two parallel value/child slots; only slot 1 drives
    the tree, the numbered method variants ignore their extra arguments."""
    value:int = 0
    value2:int = 0
    left:"TreeNode2" = None
    left2:"TreeNode2" = None
    right:"TreeNode2" = None
    right2:"TreeNode2" = None

    def insert(self:"TreeNode2", x:int) -> bool:
        """Insert x; True when a new node was created."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode2(x, x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode2(x, x)
        return True

    def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
        # identical traversal to insert(); x2 was unused in the original
        return self.insert(x)

    def contains(self:"TreeNode2", x:int) -> bool:
        """Membership test for x."""
        if x == self.value:
            return True
        branch = self.left if x < self.value else self.right
        return branch is not None and branch.contains(x)

    def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
        # identical traversal to contains(); x2 was unused in the original
        return self.contains(x)
class TreeNode3(object):
    """BST node with three parallel value/child slots; only slot 1 drives
    the tree, the numbered method variants ignore their extra arguments."""
    value:int = 0
    value2:int = 0
    value3:int = 0
    left:"TreeNode3" = None
    left2:"TreeNode3" = None
    left3:"TreeNode3" = None
    right:"TreeNode3" = None
    right2:"TreeNode3" = None
    right3:"TreeNode3" = None

    def insert(self:"TreeNode3", x:int) -> bool:
        """Insert x; True when a new node was created."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode3(x, x, x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode3(x, x, x)
        return True

    def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
        # identical traversal to insert(); extra arguments were unused
        return self.insert(x)

    def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        return self.insert(x)

    def contains(self:"TreeNode3", x:int) -> bool:
        """Membership test for x."""
        if x == self.value:
            return True
        branch = self.left if x < self.value else self.right
        return branch is not None and branch.contains(x)

    def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
        # identical traversal to contains(); extra arguments were unused
        return self.contains(x)

    def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
        return self.contains(x)
class TreeNode4(object):
    """BST node with four parallel value/child slots; only slot 1 drives
    the tree, the numbered method variants ignore their extra arguments."""
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    left:"TreeNode4" = None
    left2:"TreeNode4" = None
    left3:"TreeNode4" = None
    left4:"TreeNode4" = None
    right:"TreeNode4" = None
    right2:"TreeNode4" = None
    right3:"TreeNode4" = None
    right4:"TreeNode4" = None

    def insert(self:"TreeNode4", x:int) -> bool:
        """Insert x; True when a new node was created."""
        if x == self.value:
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode4(x, x, x, x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode4(x, x, x, x)
        return True

    def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
        # identical traversal to insert(); extra arguments were unused
        return self.insert(x)

    def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        return self.insert(x)

    def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        return self.insert(x)

    def contains(self:"TreeNode4", x:int) -> bool:
        """Membership test for x."""
        if x == self.value:
            return True
        branch = self.left if x < self.value else self.right
        return branch is not None and branch.contains(x)

    def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
        # identical traversal to contains(); extra arguments were unused
        return self.contains(x)

    def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
        return self.contains(x)

    def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
        return self.contains(x)
class TreeNode5(object):
    """BST node with five parallel value/child slots.

    Only ``value``/``left``/``right`` drive the tree structure; the numbered
    method variants mirror slot 1 and ignore their extra arguments, matching
    the behaviour of the sibling TreeNode2-4 classes.
    """
    value:int = 0
    value2:int = 0
    value3:int = 0
    value4:int = 0
    value5:int = 0
    left:"TreeNode5" = None
    left2:"TreeNode5" = None
    left3:"TreeNode5" = None
    left4:"TreeNode5" = None
    left5:"TreeNode5" = None
    right:"TreeNode5" = None
    right2:"TreeNode5" = None
    right3:"TreeNode5" = None
    right4:"TreeNode5" = None
    right5:"TreeNode5" = None

    def insert(self:"TreeNode5", x:int) -> bool:
        """Insert x; return True when a new node was created."""
        if x < self.value:
            if self.left is None:
                self.left = makeNode5(x, x, x, x, x)
                return True
            return self.left.insert(x)
        if x > self.value:
            if self.right is None:
                self.right = makeNode5(x, x, x, x, x)
                return True
            return self.right.insert(x)
        return False

    def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
        # BUG FIX: the original body compared ``x < $Member`` -- an
        # unexpanded template token that made this file a syntax error.
        # The sibling classes (TreeNode2-4) compare against self.value and
        # otherwise duplicate insert() exactly, so delegate to insert().
        return self.insert(x)

    def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        # identical traversal to insert(); extra arguments were unused
        return self.insert(x)

    def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        return self.insert(x)

    def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        return self.insert(x)

    def contains(self:"TreeNode5", x:int) -> bool:
        """Return True when x is present in the subtree rooted here."""
        if x < self.value:
            return self.left is not None and self.left.contains(x)
        if x > self.value:
            return self.right is not None and self.right.contains(x)
        return True

    def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
        # identical traversal to contains(); extra arguments were unused
        return self.contains(x)

    def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
        return self.contains(x)

    def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
        return self.contains(x)

    def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
        return self.contains(x)
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
]
| |
dc4e68ac5b189ea2e0119cedefbd33e0a5c254e5 | 0cce9a9d9b9da4a820e9ed5fc674d06f0be9810a | /ch10_first_exercises.py | 61b5a7953658f4cd352dcef9cd381f374e582ead | []
| no_license | wbroach/python_work | 3f4a85e998805f50b2400e64c5b7cbc31780b245 | 7e1842b317539d61bab0f04d72e71db893c865ff | refs/heads/master | 2020-04-14T22:06:43.164595 | 2019-10-02T01:25:00 | 2019-10-02T01:25:00 | 164,151,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py |
#~ file_name = 'python_can.txt'
#~ with open(file_name) as file_object:
#~ lines = file_object.readlines()
#~ learning_summary = ''
#~ for line in lines:
#~ learning_summary += line.strip() + " \n"
#~ print(learning_summary)
file_name = 'python_can.txt'
with open(file_name) as file_object:
lines = file_object.readlines()
for i in range(len(lines)):
lines[i] = lines[i].replace('Python', 'C').strip()
for line in lines:
print(line)
| [
"[email protected]"
]
| |
d2dded48a1d604bda8edaeb28fbb63865106133a | e406487b60cf9a220c4164a2e6f6e612ec98843f | /mall/apps/goods/migrations/0002_auto_20190117_1555.py | 7fdd947712c4473763b67eca18cb03688ec0a703 | []
| no_license | L-huihui/duoduo | 3d7caad17664b045882043afacb3dcbc13008fb2 | 16bb64e1216a82a35867516e4351dffe68247415 | refs/heads/master | 2020-04-18T05:49:21.281388 | 2019-01-25T01:44:29 | 2019-01-25T01:44:29 | 167,293,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2019-01-17 07:55
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('goods', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='brand',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='goods',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='goodscategory',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='goodschannel',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='goodsspecification',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='sku',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='skuimage',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='skuspecification',
old_name='updata_time',
new_name='update_time',
),
migrations.RenameField(
model_name='specificationoption',
old_name='updata_time',
new_name='update_time',
),
]
| [
"[email protected]"
]
| |
0ba8e6cf08ec403a39f4114cd07dae91ac4183bf | f572e0a4b843ed3fd2cd8edec2ad3aab7a0019d3 | /ows/wms/v13/test_encoders.py | d57cc1d1d58cfd386dcf9b086e8f9df557098abf | [
"MIT"
]
| permissive | EOxServer/pyows | 9039c8ed7358c98d736e2b8fd9f47be944f0b0a1 | e09310f992d6e69088940e9b5dbd7302f697344b | refs/heads/master | 2022-10-09T23:27:43.884159 | 2022-10-04T10:03:25 | 2022-10-04T10:03:25 | 218,005,699 | 1 | 1 | null | 2022-01-04T13:36:06 | 2019-10-28T09:01:51 | Python | UTF-8 | Python | false | false | 7,902 | py | # -------------------------------------------------------------------------------
#
# Project: pyows <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
# -------------------------------------------------------------------------------
# Copyright (C) 2019 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -------------------------------------------------------------------------------
from datetime import datetime, timedelta
from ows.util import Version, year, month
from ows.common.types import WGS84BoundingBox, BoundingBox
from ..types import (
ServiceCapabilities, FormatOnlineResource, Layer, Style, LegendURL,
Dimension, Range,
GetMapRequest, GetFeatureInfoRequest
)
from .encoders import xml_encode_capabilities, kvp_encode_get_map_request
def test_encode_capabilities():
capabilities = ServiceCapabilities()
print(xml_encode_capabilities(capabilities, pretty_print=True).value.decode('utf-8'))
capabilities = ServiceCapabilities.with_defaults(
'http://provider.org',
['image/png', 'image/jpeg'],
['text/html', 'application/json'],
update_sequence='2018-05-08',
title='Title',
abstract='Description',
keywords=[
'test', 'WMS',
],
fees='None',
access_constraints=['None'],
provider_name='Provider Inc',
provider_site='http://provider.org',
individual_name='John Doe',
organisation_name='Provider Inc',
position_name='CTO',
phone_voice='+99/9008820',
phone_facsimile='+99/9008821',
delivery_point='Point du Hoc',
city='City',
administrative_area='Adminity',
postal_code='12345',
country='Cooontry',
electronic_mail_address='[email protected]',
online_resource='http://provider.org',
hours_of_service='09:00AM - 18:00PM',
contact_instructions='Just send a mail or a carrier pidgeon',
role='Chief',
layer=Layer(
title='root layer',
abstract='Some abstract',
keywords=['Root', 'right?'],
crss=['EPSG:4326', 'EPSG:3857'],
wgs84_bounding_box=WGS84BoundingBox([-180, -90, 180, 90]),
bounding_boxes=[
BoundingBox('EPSG:3857', [
-20026376.39, -20048966.10,
20026376.39, 20048966.10,
])
],
attribution='root attribution',
authority_urls={
'root-auth': 'http://provider.org',
},
identifiers={
'root-auth': 'myId',
},
metadata_urls=[
FormatOnlineResource(
format='text/xml',
href='http://provider.com/metadata.xml',
)
],
data_urls=[
FormatOnlineResource(
format='image/tiff',
href='http://provider.com/data.tif',
)
],
min_scale_denominator=5,
max_scale_denominator=10,
layers=[
Layer(
name='sublayer',
title='My Sub-layer',
queryable=True,
styles=[
Style(
name='styli',
title='Styli',
abstract='stylisch Style',
legend_urls=[
LegendURL(
width=500,
height=300,
format='image/jpeg',
href='http://provider.com/legend.jpg',
)
],
style_sheet_url=FormatOnlineResource(
'text/xml',
href='http://provider.com/stylesheet.xml',
),
style_url=FormatOnlineResource(
'text/xml',
href='http://provider.com/style.xml',
)
)
],
dimensions=[
Dimension(
name='time',
units='seconds',
values=Range(
datetime(2018, 5, 10),
datetime(2018, 5, 12),
timedelta(hours=1),
),
unit_symbol='s',
default='',
multiple_values=False,
nearest_value=True,
current=False
),
Dimension(
name='elevation',
units='meters',
values=[5, 10, 500, 1000, 15000],
unit_symbol='m',
default='',
multiple_values=False,
nearest_value=True,
current=False
)
]
)
]
),
)
# print(xml_encode_capabilities(capabilities, pretty_print=True).value.decode('utf-8'))
def test_encode_getmap():
print(kvp_encode_get_map_request(GetMapRequest(
Version(1, 3, 0),
layers=['a', 'b', 'c'],
styles=['s1', 's2', None],
bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
width=256,
height=256,
format='image/jpeg',
dimensions={}
)))
print(kvp_encode_get_map_request(GetMapRequest(
Version(1, 3, 0),
layers=['a', 'b', 'c'],
styles=['s1', 's2', None],
bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
width=256,
height=256,
format='image/jpeg',
time=Range(year(2012), year(2013)),
elevation=1000,
dimensions={
'wavelength': '2456.2',
'pressure': ['123', '234'],
'range': [Range('0', '1'), Range('2', '4')]
}
)))
def test_encode_getfeatureinfo():
GetFeatureInfoRequest(
Version(1, 3, 0),
layers=['a', 'b', 'c'],
styles=['s1', 's2', None],
bounding_box=BoundingBox('EPSG:4326', [0, 0, 10, 10]),
width=256,
height=256,
format='image/jpeg',
dimensions={},
query_layers=['a', 'b'],
info_format='text/xml',
i=12,
j=12,
feature_count=15,
)
| [
"[email protected]"
]
| |
2ce97362db302bca16c4ce50d46abe8cef6a939d | e2e39726195c7bc075b9bd56e757acd136527d5c | /typings/vtkmodules/vtkCommonCore/vtkDenseArray_IhE.pyi | 1e566c0d11ab4d5b928576e9018c81f9d2190df0 | [
"BSD-3-Clause"
]
| permissive | gen4438/vtk-python-stubs | a652272183d2d1ee48d4639e86bcffc1ac454af0 | c9abd76362adf387af64ce5ddbd04c5d3bebe9da | refs/heads/main | 2023-04-04T02:13:15.459241 | 2021-04-15T10:47:28 | 2021-04-15T10:53:59 | 358,224,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,465 | pyi | """
This type stub file was generated by pyright.
"""
from .vtkTypedArray_IhE import vtkTypedArray_IhE
class vtkDenseArray_IhE(vtkTypedArray_IhE):
"""
vtkDenseArray<unsigned char> - Contiguous storage for N-way arrays.
Superclass: vtkTypedArray[uint8]
vtkDenseArray is a concrete vtkArray implementation that stores
values using a contiguous block of memory. Values are stored with
fortran ordering, meaning that if you iterated over the memory block,
the left-most coordinates would vary the fastest.
In addition to the retrieval and update methods provided by
vtkTypedArray, vtkDenseArray provides methods to:
Fill the entire array with a specific value.
Retrieve a pointer to the storage memory block.
@sa
vtkArray, vtkTypedArray, vtkSparseArray
@par Thanks: Developed by Timothy M. Shead ([email protected]) at
Sandia National Laboratories.
"""
def DeepCopy(self):
"""
V.DeepCopy() -> vtkArray
C++: vtkArray *DeepCopy() override;
Returns a new array that is a deep copy of this array.
"""
...
def Fill(self, p_int):
"""
V.Fill(int)
C++: void Fill(const unsigned char &value)
Fills every element in the array with the given value.
"""
...
def GetCoordinatesN(self, p_int, vtkArrayCoordinates):
"""
V.GetCoordinatesN(int, vtkArrayCoordinates)
C++: void GetCoordinatesN(const SizeT n,
vtkArrayCoordinates &coordinates) override;
Returns the coordinates of the n-th value in the array, where n
is in the range [0, GetNonNullSize()). Note that the order in
which coordinates are visited is undefined, but is guaranteed to
match the order in which values are visited using
vtkTypedArray::GetValueN() and vtkTypedArray::SetValueN().
"""
...
def GetExtents(self):
"""
V.GetExtents() -> vtkArrayExtents
C++: const vtkArrayExtents &GetExtents() override;
Returns the extents (the number of dimensions and size along each
dimension) of the array.
"""
...
def GetNonNullSize(self):
"""
V.GetNonNullSize() -> int
C++: SizeT GetNonNullSize() override;
Returns the number of non-null values stored in the array. Note
that this value will equal GetSize() for dense arrays, and will
be less-than-or-equal to GetSize() for sparse arrays.
"""
...
def GetNumberOfGenerationsFromBase(self, string):
"""
V.GetNumberOfGenerationsFromBase(string) -> int
C++: vtkIdType GetNumberOfGenerationsFromBase(const char *type)
override;
Given a the name of a base class of this class type, return the
distance of inheritance between this class type and the named
class (how many generations of inheritance are there between this
class and the named class). If the named class is not in this
class's inheritance tree, return a negative value. Valid
responses will always be nonnegative. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def GetNumberOfGenerationsFromBaseType(self, string):
"""
V.GetNumberOfGenerationsFromBaseType(string) -> int
C++: static vtkIdType GetNumberOfGenerationsFromBaseType(
const char *type)
Given a the name of a base class of this class type, return the
distance of inheritance between this class type and the named
class (how many generations of inheritance are there between this
class and the named class). If the named class is not in this
class's inheritance tree, return a negative value. Valid
responses will always be nonnegative. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def GetStorage(self):
"""
V.GetStorage() -> (int, ...)
C++: unsigned char *GetStorage()
Returns a mutable reference to the underlying storage. Values
are stored contiguously with fortran ordering. Use at your own
risk!
"""
...
def GetValue(self, p_int):
"""
V.GetValue(int) -> int
C++: const unsigned char &GetValue(CoordinateT i) override;
V.GetValue(int, int) -> int
C++: const unsigned char &GetValue(CoordinateT i, CoordinateT j)
override;
V.GetValue(int, int, int) -> int
C++: const unsigned char &GetValue(CoordinateT i, CoordinateT j,
CoordinateT k) override;
V.GetValue(vtkArrayCoordinates) -> int
C++: const unsigned char &GetValue(
const vtkArrayCoordinates &coordinates) override;
Returns the value stored in the array at the given coordinates.
Note that the number of dimensions in the supplied coordinates
must match the number of dimensions in the array.
"""
...
def GetValueN(self, p_int):
"""
V.GetValueN(int) -> int
C++: const unsigned char &GetValueN(const SizeT n) override;
Returns the n-th value stored in the array, where n is in the
range [0, GetNonNullSize()). This is useful for efficiently
visiting every value in the array. Note that the order in which
values are visited is undefined, but is guaranteed to match the
order used by vtkArray::GetCoordinatesN().
"""
...
def IsA(self, string):
"""
V.IsA(string) -> int
C++: vtkTypeBool IsA(const char *type) override;
Return 1 if this class is the same type of (or a subclass of) the
named class. Returns 0 otherwise. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def IsDense(self):
"""
V.IsDense() -> bool
C++: bool IsDense() override;
Returns true iff the underlying array storage is "dense", i.e.
that GetSize() and GetNonNullSize() will always return the same
value. If not, the array is "sparse".
"""
...
def IsTypeOf(self, string):
"""
V.IsTypeOf(string) -> int
C++: static vtkTypeBool IsTypeOf(const char *type)
Return 1 if this class type is the same type of (or a subclass
of) the named class. Returns 0 otherwise. This method works in
combination with vtkTypeMacro found in vtkSetGet.h.
"""
...
def NewInstance(self):
"""
V.NewInstance() -> vtkDenseArray_IhE
C++: vtkDenseArray<unsigned char> *NewInstance()
"""
...
def SafeDownCast(self, vtkObjectBase):
"""
V.SafeDownCast(vtkObjectBase) -> vtkDenseArray_IhE
C++: static vtkDenseArray<unsigned char> *SafeDownCast(
vtkObjectBase *o)
"""
...
def SetValue(self, p_int, p_int_1):
"""
V.SetValue(int, int)
C++: void SetValue(CoordinateT i, const unsigned char &value)
override;
V.SetValue(int, int, int)
C++: void SetValue(CoordinateT i, CoordinateT j,
const unsigned char &value) override;
V.SetValue(int, int, int, int)
C++: void SetValue(CoordinateT i, CoordinateT j, CoordinateT k,
const unsigned char &value) override;
V.SetValue(vtkArrayCoordinates, int)
C++: void SetValue(const vtkArrayCoordinates &coordinates,
const unsigned char &value) override;
Overwrites the value stored in the array at the given
coordinates. Note that the number of dimensions in the supplied
coordinates must match the number of dimensions in the array.
"""
...
def SetValueN(self, p_int, p_int_1):
"""
V.SetValueN(int, int)
C++: void SetValueN(const SizeT n, const unsigned char &value)
override;
Overwrites the n-th value stored in the array, where n is in the
range [0, GetNonNullSize()). This is useful for efficiently
visiting every value in the array. Note that the order in which
values are visited is undefined, but is guaranteed to match the
order used by vtkArray::GetCoordinatesN().
"""
...
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
...
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
...
def __init__(self, *args, **kwargs) -> None:
...
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
...
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
...
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
...
def __str__(self, *args, **kwargs) -> str:
""" Return str(self). """
...
__this__ = ...
__dict__ = ...
__vtkname__ = ...
| [
"[email protected]"
]
| |
b72ad75306f31160948715d6656aedb45e1792c7 | e2e993962d6e5e30905d13f0532acdac9302c84b | /HTL.tab/Test.panel/Test.pushbutton/keyman/keyman/keys/models.py | a35c8dcb63e577c0174f8f38772822ae3ef1b000 | [
"MIT"
]
| permissive | htlcnn/pyrevitscripts | a1777fa0ba36003443d95e0f9fbbcadc4ffa5fe7 | b898a3a5e8d212570254772ae314f343498b1398 | refs/heads/master | 2021-01-01T20:08:31.001558 | 2018-01-11T06:44:26 | 2018-01-11T06:44:26 | 98,773,971 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | from django.db import models
from django.core.urlresolvers import reverse
class Software(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('software_edit', kwargs={'pk': self.pk})
| [
"[email protected]"
]
| |
849cad464c0731a0afd6cb94a57fdc9007ce5036 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_200/2122.py | c31ad8bdbc94a1aa0c0f79101990a413684bb5df | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py |
def resolve_case():
num = int(input())
num_list = list(str(num))
assend_index = 0
same=1
for assend_index in range(1, len(num_list)):
if num_list[assend_index - 1] > num_list[assend_index]:
break;
elif num_list[assend_index - 1] == num_list[assend_index]:
same += 1
else:
same = 1
print("".join(num_list[:assend_index-same]), end="")
num_list_tmp = num_list[assend_index-same:]
num_list_sorted = num_list[assend_index-same:]
num_list_sorted.sort()
length = len(num_list_tmp)
for x in range(0, length):
if num_list_tmp[x] is num_list_sorted[x]:
print(num_list_tmp[x], end="")
else:
print(int(str(int(num_list_tmp[x]) - 1) + ("9" * (length - x - 1))), end="")
break
return
cases = int(input())
for case in range(0, cases):
print("Case #" + str(case + 1), end=": ")
resolve_case()
print()
| [
"[email protected]"
]
| |
edc76c891adea130c96d0cc048da2c771c18b92a | 4dda597dac544b237cf8f8b04b3c9e662b988a92 | /moot/moot/urls.py | 8bbcabe60d7122ead34e89c6f5032e63ab65b2e7 | []
| no_license | beriuta/history | 02ac9414c0475fde59f6a455c23c56235fe3c4bc | 026c965f694e84120825069bedf7bfac235318b5 | refs/heads/master | 2020-04-11T17:24:39.558174 | 2018-12-16T02:08:18 | 2018-12-16T02:08:18 | 161,959,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | """moot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from app01 import views
from django.contrib import admin
urlpatterns = [
url(r'^login/', views.login),
url(r'^index', views.indexView.as_view()),
url(r'^upload/$',views.upload)
]
| [
"[email protected]"
]
| |
d03578c423b75b0a881d10b32f169ad9a5075cbb | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005394.py | e5781c7991690423aaa123393d0e5d46def81e37 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,559 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher33668(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1, 1: 1}), [
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 0
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher33668._instance is None:
CommutativeMatcher33668._instance = CommutativeMatcher33668()
return CommutativeMatcher33668._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 33667
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 33669
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.2.2.1', tmp3)
except ValueError:
pass
else:
pass
# State 33670
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.2.2.2', tmp5)
except ValueError:
pass
else:
pass
# State 33671
if len(subjects2) == 0:
pass
# State 33672
if len(subjects) == 0:
pass
# 0: x**m
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
if len(subjects) >= 1 and isinstance(subjects[0], Add):
tmp7 = subjects.popleft()
associative1 = tmp7
associative_type1 = type(tmp7)
subjects8 = deque(tmp7._args)
matcher = CommutativeMatcher33674.get()
tmp9 = subjects8
subjects8 = []
for s in tmp9:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp9, subst0):
pass
if pattern_index == 0:
pass
# State 33691
if len(subjects) == 0:
pass
# 1: f + e*x**r
yield 1, subst1
subjects.appendleft(tmp7)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part005395 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
]
| |
0e7ff815cea27a8a05cb59b0e158e66ee68977ce | 1896685500833ba2e4e25400a03e10536dc57ad5 | /apimpe/wsgi.py | 7ebc7b896b1a99ceded6af9f77e7fbd203d23e58 | []
| no_license | Oswaldinho24k/MPE-API | 04e5d134199083300c2eac2b2d21e206e4306eeb | 2e1d7b282f994867c04d31b09395785d73d90b9d | refs/heads/master | 2020-06-21T14:53:18.903127 | 2016-12-02T18:57:25 | 2016-12-02T18:57:25 | 74,783,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | """
WSGI config for apimpe project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# Point Django at the project's settings module before the application
# object is created (setdefault keeps any value already in the environment).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "apimpe.settings")
# Standard Django WSGI application, wrapped with WhiteNoise so static files
# are served directly from the WSGI layer (no separate web server needed).
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| [
"[email protected]"
]
| |
2dc917a66e8ef838946a5fcb757890bce77a7963 | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-kms/aliyunsdkkms/request/v20160120/GenerateDataKeyWithoutPlaintextRequest.py | 681ae27d16cff738aea6473faf406f81efbeea7d | [
"Apache-2.0"
]
| permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 2,008 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkkms.endpoint import endpoint_data
class GenerateDataKeyWithoutPlaintextRequest(RpcRequest):
    """Request for the KMS (2016-01-20) ``GenerateDataKeyWithoutPlaintext`` API.

    Generates a data key under a customer master key and returns only the
    ciphertext blob, never the plaintext key material.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Kms', '2016-01-20', 'GenerateDataKeyWithoutPlaintext', 'kms')
        self.set_protocol_type('https')
        # Newer SDK cores expose endpoint tables on the request object;
        # populate them only when present so older cores keep working.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_EncryptionContext(self):
        """Return the ``EncryptionContext`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('EncryptionContext')

    def set_EncryptionContext(self, EncryptionContext):
        """Set the ``EncryptionContext`` query parameter."""
        self.add_query_param('EncryptionContext', EncryptionContext)

    def get_KeyId(self):
        """Return the ``KeyId`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('KeyId')

    def set_KeyId(self, KeyId):
        """Set the ``KeyId`` query parameter (the CMK to generate under)."""
        self.add_query_param('KeyId', KeyId)

    def get_KeySpec(self):
        """Return the ``KeySpec`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('KeySpec')

    def set_KeySpec(self, KeySpec):
        """Set the ``KeySpec`` query parameter."""
        self.add_query_param('KeySpec', KeySpec)

    def get_NumberOfBytes(self):
        """Return the ``NumberOfBytes`` query parameter, or None if unset."""
        params = self.get_query_params()
        return params.get('NumberOfBytes')

    def set_NumberOfBytes(self, NumberOfBytes):
        """Set the ``NumberOfBytes`` query parameter."""
        self.add_query_param('NumberOfBytes', NumberOfBytes)
"[email protected]"
]
| |
f6f69bd11a57079c1e860e060690db08820b94e3 | 67553d46a257631810f394908013b82c337e0fbd | /goat/chapter05/5.2/globals_test.py | cc3a84eb474eb4feacb54ede07ac205a359b442a | []
| no_license | bopopescu/goat-python | 3f9d79eb1a9c2733345d699c98d82f91968ca5fa | c139488e2b5286033954df50ae1ca834144446f5 | refs/heads/master | 2022-11-21T11:25:27.921210 | 2020-03-06T01:02:57 | 2020-03-06T01:02:57 | 281,066,748 | 0 | 0 | null | 2020-07-20T09:00:08 | 2020-07-20T09:00:08 | null | UTF-8 | Python | false | false | 154 | py | # coding: utf-8
name = 'Charlie'


def test():
    """Print the module-level ``name``, then rebind it to a new value."""
    # Bug fix: without this declaration the assignment below makes ``name``
    # local to the function, so the first print raises UnboundLocalError.
    # ``global`` lets the function both read and rebind the module global.
    global name
    print(name)  # Charlie
    name = '孙悟空'


test()
print(name)  # 孙悟空 — test() rebound the module-level name
| [
"[email protected]"
]
| |
51719366dd3b45e9da2070caf4e1ca0906df0c1b | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2409/base-trunk-2283/prototype/pyfox/setup.py | 58da3fb0318bb0b263de8f582d9d2d07804158ca | []
| no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,325 | py | import os
from distutils.core import setup, Extension
Moz="/home/djm/work/firefox/mozilla"
MozDist=Moz+"/dist"
setup(name='pyfoxutil',
version='1.0',
ext_modules=[Extension('_pyfoxutil', ['pyfoxutil.cpp'],
include_dirs=[MozDist+"/include/embed_base",
MozDist+"/include/string",
MozDist+"/include/xpcom",
MozDist+"/include/widget",
MozDist+"/include/nspr",
MozDist+"/include/gfx",
MozDist+"/include/webbrwsr",
MozDist+"/include/uriloader",
Moz+"/extensions/python/xpcom/src",
],
library_dirs = [Moz+"/extensions/python/xpcom/src",
MozDist+"/lib"],
libraries = ["nspr4", "plc4", "plds4",
"xpcomcomponents_s",
"embed_base_s", "_xpcom",
])
],
)
| [
"[email protected]"
]
| |
59cee6f8de1719cbc71645dcc02d79317246d9ea | ce083128fa87ca86c65059893aa8882d088461f5 | /python/python-salad-bdd/.venv/bin/subunit-filter | 317293fe0fb7c7007d0cf0b7e77ec7d6c6da347c | []
| no_license | marcosptf/fedora | 581a446e7f81d8ae9a260eafb92814bc486ee077 | 359db63ff1fa79696b7bc803bcfa0042bff8ab44 | refs/heads/master | 2023-04-06T14:53:40.378260 | 2023-03-26T00:47:52 | 2023-03-26T00:47:52 | 26,059,824 | 6 | 5 | null | 2022-12-08T00:43:21 | 2014-11-01T18:48:56 | null | UTF-8 | Python | false | false | 7,071 | #!/home/marcosptf/developer/python-bdd/.venv/bin/python2
# subunit: extensions to python unittest to get test results from subprocesses.
# Copyright (C) 200-2013 Robert Collins <[email protected]>
# (C) 2009 Martin Pool
#
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# license you chose for the specific language governing permissions and
# limitations under that license.
#
"""Filter a subunit stream to include/exclude tests.
The default is to strip successful tests.
Tests can be filtered by Python regular expressions with --with and --without,
which match both the test name and the error text (if any). The result
contains tests which match any of the --with expressions and none of the
--without expressions. For case-insensitive matching prepend '(?i)'.
Remember to quote shell metacharacters.
"""
from optparse import OptionParser
import sys
import re
from testtools import ExtendedToStreamDecorator, StreamToExtendedDecorator
from subunit import (
DiscardStream,
ProtocolTestCase,
StreamResultToBytes,
read_test_list,
)
from subunit.filters import filter_by_result, find_stream
from subunit.test_results import (
and_predicates,
make_tag_filter,
TestResultFilter,
)
def make_options(description):
    """Build the OptionParser for the subunit-filter command line.

    Args:
        description: help text shown in --help output (main() passes the
            module docstring).

    Returns:
        An optparse.OptionParser with all filtering options registered.
    """
    # Bug fix: the ``description`` parameter was previously ignored in
    # favour of the module-level __doc__ (same value for the one in-tree
    # caller, but wrong for any other user of this function).
    parser = OptionParser(description=description)
    # Outcome include/exclude flag pairs.  Each pair shares a dest, so the
    # flag given last on the command line wins.
    parser.add_option("--error", action="store_false",
        help="include errors", default=False, dest="error")
    parser.add_option("-e", "--no-error", action="store_true",
        help="exclude errors", dest="error")
    parser.add_option("--failure", action="store_false",
        help="include failures", default=False, dest="failure")
    parser.add_option("-f", "--no-failure", action="store_true",
        help="exclude failures", dest="failure")
    parser.add_option("--passthrough", action="store_false",
        help="Forward non-subunit input as 'stdout'.", default=False,
        dest="no_passthrough")
    parser.add_option("--no-passthrough", action="store_true",
        help="Discard all non subunit input.", default=False,
        dest="no_passthrough")
    parser.add_option("-s", "--success", action="store_false",
        help="include successes", dest="success")
    parser.add_option("--no-success", action="store_true",
        help="exclude successes", default=True, dest="success")
    parser.add_option("--no-skip", action="store_true",
        help="exclude skips", dest="skip")
    parser.add_option("--xfail", action="store_false",
        help="include expected failures", default=True, dest="xfail")
    parser.add_option("--no-xfail", action="store_true",
        help="exclude expected failures", default=True, dest="xfail")
    # Tag-based filtering (repeatable).
    parser.add_option(
        "--with-tag", type=str,
        help="include tests with these tags", action="append", dest="with_tags")
    parser.add_option(
        "--without-tag", type=str,
        help="exclude tests with these tags", action="append", dest="without_tags")
    # Regexp-based filtering (repeatable).
    parser.add_option("-m", "--with", type=str,
        help="regexp to include (case-sensitive by default)",
        action="append", dest="with_regexps")
    parser.add_option("--fixup-expected-failures", type=str,
        help="File with list of test ids that are expected to fail; on failure "
        "their result will be changed to xfail; on success they will be "
        "changed to error.", dest="fixup_expected_failures", action="append")
    parser.add_option("--without", type=str,
        help="regexp to exclude (case-sensitive by default)",
        action="append", dest="without_regexps")
    # -F expands into the four "failures only" flags via a callback.
    parser.add_option("-F", "--only-genuine-failures", action="callback",
        callback=only_genuine_failures_callback,
        help="Only pass through failures and exceptions.")
    # Bug fix: "subsitutions" typo in the user-facing help text.
    parser.add_option("--rename", action="append", nargs=2,
        help="Apply specified regex substitutions to test names.",
        dest="renames", default=[])
    return parser
def only_genuine_failures_callback(option, opt, value, parser):
    """optparse callback for -F: expand into the four 'failures only' flags.

    Prepends the equivalent long options to the remaining argument list so
    optparse processes them as if the user had typed them first.
    """
    parser.rargs[:0] = [
        '--no-success',
        '--no-skip',
        '--no-xfail',
        '--no-passthrough',
    ]
def _compile_re_from_list(l):
return re.compile("|".join(l), re.MULTILINE)
def _make_regexp_filter(with_regexps, without_regexps):
    """Make a callback that checks tests against regexps.

    with_regexps and without_regexps are each either a list of regexp strings,
    or None.  A test passes when it matches any include pattern (if given)
    and none of the exclude patterns (if given).
    """
    # Compile each side once, up front; a missing/empty list disables that side.
    include_re = re.compile("|".join(with_regexps), re.MULTILINE) if with_regexps else None
    exclude_re = re.compile("|".join(without_regexps), re.MULTILINE) if without_regexps else None

    def check_regexps(test, outcome, err, details, tags):
        """Return True when this test/error text passes both filters."""
        # Match against the concatenated test id, outcome and error text.
        haystack = str(test) + outcome + str(err) + str(details)
        if include_re is not None and not include_re.search(haystack):
            return False
        if exclude_re is not None and exclude_re.search(haystack):
            return False
        return True

    return check_regexps
def _compile_rename(patterns):
def rename(name):
for (from_pattern, to_pattern) in patterns:
name = re.sub(from_pattern, to_pattern, name)
return name
return rename
def _make_result(output, options, predicate):
    """Build the filtering result chain that receives the test outcomes.

    Outcomes flow through a TestResultFilter (applying the option-driven
    include/exclude rules plus ``predicate``) and are serialized as subunit
    bytes onto ``output``.
    """
    # Collect all test ids whose results should be fixed up (failure ->
    # xfail, success -> error) from the listed files.
    expected_failure_ids = set()
    for list_path in (options.fixup_expected_failures or ()):
        expected_failure_ids.update(read_test_list(list_path))

    byte_sink = StreamResultToBytes(output)
    filtered = TestResultFilter(
        ExtendedToStreamDecorator(byte_sink),
        filter_error=options.error,
        filter_failure=options.failure,
        filter_success=options.success,
        filter_skip=options.skip,
        filter_xfail=options.xfail,
        filter_predicate=predicate,
        fixup_expected_failures=expected_failure_ids,
        rename=_compile_rename(options.renames))
    return StreamToExtendedDecorator(filtered)
def main():
    """Entry point: parse options, build the filter chain, and run it."""
    parser = make_options(__doc__)
    options, args = parser.parse_args()
    # Tests must satisfy both the regexp filter and the tag filter.
    predicate = and_predicates([
        _make_regexp_filter(options.with_regexps, options.without_regexps),
        make_tag_filter(options.with_tags, options.without_tags),
    ])
    filter_by_result(
        lambda output_to: _make_result(sys.stdout, options, predicate),
        output_path=None,
        passthrough=(not options.no_passthrough),
        forward=False,
        protocol_version=2,
        input_stream=find_stream(sys.stdin, args))
    sys.exit(0)
# Allow use both as an importable module and as a command-line tool.
if __name__ == '__main__':
    main()
| [
"[email protected]"
]
| ||
56543401f8fff6de82cf72f6710a4d11cd322f0f | 265a07a2becd232b292872d1d7136789463874be | /lei练习/child.py | a18eb14e18c20325173e5d20365a90e17db5663c | []
| no_license | Lz0224/Python-exercise | f4918b8cd5f7911f0c35c0458c2269959937d07d | 3d09f54aebc653f4a5b36765b25c7241e3960764 | refs/heads/master | 2020-12-24T22:20:55.573019 | 2017-08-11T07:18:16 | 2017-08-11T07:18:16 | 100,005,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | #!/usr/bin/python
#coding=utf-8
class ParentClass(object):
    """docstring for ParentClass."""

    # Class-level attribute; shared by all instances and inherited by
    # subclasses that do not override it.
    name = "老张"

    def fun(self):
        # Bug fix for Python 3 compatibility: the original used the
        # Python-2-only print statement.  The single-argument parenthesized
        # form prints identically under Python 2 and Python 3.
        print("老子有钱")
class ChildClass(ParentClass):
    """这是什么玩意。。。。"""

    # Inherits ``name`` and ``fun`` from ParentClass unchanged; only adds
    # its own method.  (Docstring kept verbatim: the script prints
    # ChildClass.__doc__, so translating it would change runtime output.)
    def fun1(self):
        # Bug fix for Python 3 compatibility: parenthesized single-argument
        # print works the same under Python 2 and Python 3.
        print("哥也有钱")
class GrentChildClass(ChildClass):
    # Empty subclass: demonstrates that attributes and methods (``name``,
    # ``fun``, ``fun1``) are inherited through the two-level chain unchanged.
    pass
# Demonstration: attribute lookups and method calls resolve through the
# inheritance chain (instance -> ChildClass -> ParentClass).
# Bug fix for Python 3 compatibility: all prints converted to the
# parenthesized single-argument form, which behaves identically under
# Python 2 (print statement) and Python 3 (print function).
child = ChildClass()
print(child.name)   # inherited class attribute from ParentClass
child.fun()         # inherited method from ParentClass
grent_child = GrentChildClass()
print(grent_child.name)
print(dir(ParentClass))     # names defined in / inherited by ParentClass
print(ChildClass.__doc__)   # the class docstring is runtime-accessible
| [
"[email protected]"
]
| |
405530803fec0cadc01ebcb0fa2c1f53d630fa56 | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/chrome/chrome_common.gypi | a8ee3297b5a77f90ca6ad3a7c2b54682254341f2 | [
"MIT",
"BSD-3-Clause"
]
| permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 33,680 | gypi | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'common',
'type': 'static_library',
'variables': {
'chrome_common_target': 1,
'enable_wexit_time_destructors': 1,
},
'include_dirs': [
'..',
'<(SHARED_INTERMEDIATE_DIR)', # Needed by chrome_content_client.cc.
],
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'dependencies': [
# TODO(gregoryd): chrome_resources and chrome_strings could be
# shared with the 64-bit target, but it does not work due to a gyp
# issue.
'common_net',
'common_version',
'installer_util',
'metrics_proto',
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/base/base.gyp:base_i18n',
'<(DEPTH)/base/base.gyp:base_prefs',
'<(DEPTH)/base/base.gyp:base_static',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_resources',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_strings',
'<(DEPTH)/chrome/chrome_resources.gyp:theme_resources',
'<(DEPTH)/chrome/common_constants.gyp:common_constants',
'<(DEPTH)/components/components.gyp:json_schema',
'<(DEPTH)/components/components.gyp:policy_component',
'<(DEPTH)/components/components.gyp:variations',
'<(DEPTH)/content/content.gyp:content_common',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/skia/skia.gyp:skia',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
'<(DEPTH)/third_party/libxml/libxml.gyp:libxml',
'<(DEPTH)/third_party/sqlite/sqlite.gyp:sqlite',
'<(DEPTH)/third_party/zlib/google/zip.gyp:zip',
'<(DEPTH)/ui/resources/ui_resources.gyp:ui_resources',
'<(DEPTH)/url/url.gyp:url_lib',
'<(DEPTH)/webkit/common/user_agent/webkit_user_agent.gyp:user_agent',
],
'sources': [
'../apps/app_shim/app_shim_launch.h',
'../apps/app_shim/app_shim_messages.h',
'common/all_messages.h',
'common/attrition_experiments.h',
'common/auto_start_linux.cc',
'common/auto_start_linux.h',
'common/autocomplete_match_type.cc',
'common/autocomplete_match_type.h',
'common/automation_constants.cc',
'common/automation_constants.h',
'common/automation_messages.cc',
'common/automation_messages.h',
'common/automation_messages_internal.h',
'common/badge_util.cc',
'common/badge_util.h',
'common/cancelable_task_tracker.cc',
'common/cancelable_task_tracker.h',
'common/child_process_logging.h',
'common/child_process_logging_win.cc',
'common/chrome_content_client.cc',
'common/chrome_content_client.h',
'common/chrome_content_client_constants.cc',
'common/chrome_content_client_ios.mm',
'common/chrome_result_codes.h',
'common/chrome_utility_messages.h',
'common/chrome_version_info.cc',
'common/chrome_version_info_android.cc',
'common/chrome_version_info_chromeos.cc',
'common/chrome_version_info_posix.cc',
'common/chrome_version_info_mac.mm',
'common/chrome_version_info_win.cc',
'common/chrome_version_info.h',
'common/cloud_print/cloud_print_class_mac.h',
'common/cloud_print/cloud_print_class_mac.mm',
'common/cloud_print/cloud_print_constants.cc',
'common/cloud_print/cloud_print_constants.h',
'common/cloud_print/cloud_print_helpers.cc',
'common/cloud_print/cloud_print_helpers.h',
'common/cloud_print/cloud_print_proxy_info.cc',
'common/cloud_print/cloud_print_proxy_info.h',
'common/common_message_generator.cc',
'common/common_message_generator.h',
'common/common_param_traits.cc',
'common/common_param_traits.h',
'common/common_param_traits_macros.h',
'common/content_restriction.h',
'common/content_settings.cc',
'common/content_settings.h',
'common/content_settings_helper.cc',
'common/content_settings_helper.h',
'common/content_settings_pattern.cc',
'common/content_settings_pattern.h',
'common/content_settings_pattern_parser.cc',
'common/content_settings_pattern_parser.h',
'common/content_settings_types.h',
'common/crash_keys.cc',
'common/crash_keys.h',
'common/custom_handlers/protocol_handler.cc',
'common/custom_handlers/protocol_handler.h',
'common/descriptors_android.h',
'common/dump_without_crashing.cc',
'common/dump_without_crashing.h',
'common/encrypted_media_messages_android.h',
'common/extensions/api/commands/commands_handler.cc',
'common/extensions/api/commands/commands_handler.h',
'common/extensions/api/extension_action/action_info.cc',
'common/extensions/api/extension_action/action_info.h',
'common/extensions/api/extension_action/browser_action_handler.cc',
'common/extensions/api/extension_action/browser_action_handler.h',
'common/extensions/api/extension_action/page_action_handler.cc',
'common/extensions/api/extension_action/page_action_handler.h',
'common/extensions/api/extension_action/script_badge_handler.cc',
'common/extensions/api/extension_action/script_badge_handler.h',
'common/extensions/api/file_browser_handlers/file_browser_handler.cc',
'common/extensions/api/file_browser_handlers/file_browser_handler.h',
'common/extensions/api/file_handlers/file_handlers_parser.cc',
'common/extensions/api/file_handlers/file_handlers_parser.h',
'common/extensions/api/i18n/default_locale_handler.cc',
'common/extensions/api/i18n/default_locale_handler.h',
'common/extensions/api/identity/oauth2_manifest_handler.cc',
'common/extensions/api/identity/oauth2_manifest_handler.h',
'common/extensions/api/input_ime/input_components_handler.cc',
'common/extensions/api/input_ime/input_components_handler.h',
'common/extensions/api/managed_mode_private/managed_mode_handler.cc',
'common/extensions/api/managed_mode_private/managed_mode_handler.h',
'common/extensions/api/media_galleries_private/media_galleries_handler.h',
'common/extensions/api/media_galleries_private/media_galleries_handler.cc',
'common/extensions/api/messaging/message.h',
'common/extensions/api/omnibox/omnibox_handler.cc',
'common/extensions/api/omnibox/omnibox_handler.h',
'common/extensions/api/plugins/plugins_handler.cc',
'common/extensions/api/plugins/plugins_handler.h',
'common/extensions/api/sockets/sockets_manifest_handler.cc',
'common/extensions/api/sockets/sockets_manifest_handler.h',
'common/extensions/api/sockets/sockets_manifest_data.cc',
'common/extensions/api/sockets/sockets_manifest_data.h',
'common/extensions/api/sockets/sockets_manifest_permission.cc',
'common/extensions/api/sockets/sockets_manifest_permission.h',
'common/extensions/api/speech/tts_engine_manifest_handler.cc',
'common/extensions/api/speech/tts_engine_manifest_handler.h',
'common/extensions/api/spellcheck/spellcheck_handler.cc',
'common/extensions/api/spellcheck/spellcheck_handler.h',
'common/extensions/api/storage/storage_schema_manifest_handler.cc',
'common/extensions/api/storage/storage_schema_manifest_handler.h',
'common/extensions/api/system_indicator/system_indicator_handler.cc',
'common/extensions/api/system_indicator/system_indicator_handler.h',
'common/extensions/api/url_handlers/url_handlers_parser.cc',
'common/extensions/api/url_handlers/url_handlers_parser.h',
'common/extensions/chrome_extensions_client.cc',
'common/extensions/chrome_extensions_client.h',
'common/extensions/chrome_manifest_handlers.cc',
'common/extensions/chrome_manifest_handlers.h',
'common/extensions/command.cc',
'common/extensions/command.h',
'common/extensions/dom_action_types.h',
'common/extensions/extension_constants.cc',
'common/extensions/extension_constants.h',
'common/extensions/extension_file_util.cc',
'common/extensions/extension_file_util.h',
'common/extensions/extension_icon_set.cc',
'common/extensions/extension_icon_set.h',
'common/extensions/extension_l10n_util.cc',
'common/extensions/extension_l10n_util.h',
'common/extensions/extension_messages.cc',
'common/extensions/extension_messages.h',
'common/extensions/extension_process_policy.cc',
'common/extensions/extension_process_policy.h',
'common/extensions/extension_set.cc',
'common/extensions/extension_set.h',
'common/extensions/features/api_feature.cc',
'common/extensions/features/api_feature.h',
'common/extensions/features/base_feature_provider.cc',
'common/extensions/features/base_feature_provider.h',
'common/extensions/features/complex_feature.cc',
'common/extensions/features/complex_feature.h',
'common/extensions/features/feature_channel.cc',
'common/extensions/features/feature_channel.h',
'common/extensions/features/manifest_feature.cc',
'common/extensions/features/manifest_feature.h',
'common/extensions/features/permission_feature.cc',
'common/extensions/features/permission_feature.h',
'common/extensions/features/simple_feature.cc',
'common/extensions/features/simple_feature.h',
'common/extensions/manifest_handler_helpers.cc',
'common/extensions/manifest_handler_helpers.h',
'common/extensions/manifest_handlers/app_isolation_info.cc',
'common/extensions/manifest_handlers/app_isolation_info.h',
'common/extensions/manifest_handlers/app_launch_info.cc',
'common/extensions/manifest_handlers/app_launch_info.h',
'common/extensions/manifest_handlers/content_scripts_handler.cc',
'common/extensions/manifest_handlers/content_scripts_handler.h',
'common/extensions/manifest_handlers/externally_connectable.cc',
'common/extensions/manifest_handlers/externally_connectable.h',
'common/extensions/manifest_handlers/icons_handler.cc',
'common/extensions/manifest_handlers/icons_handler.h',
'common/extensions/manifest_handlers/minimum_chrome_version_checker.cc',
'common/extensions/manifest_handlers/minimum_chrome_version_checker.h',
'common/extensions/manifest_handlers/nacl_modules_handler.cc',
'common/extensions/manifest_handlers/nacl_modules_handler.h',
'common/extensions/manifest_handlers/settings_overrides_handler.cc',
'common/extensions/manifest_handlers/settings_overrides_handler.h',
'common/extensions/manifest_handlers/theme_handler.cc',
'common/extensions/manifest_handlers/theme_handler.h',
'common/extensions/manifest_url_handler.cc',
'common/extensions/manifest_url_handler.h',
'common/extensions/message_bundle.cc',
'common/extensions/message_bundle.h',
'common/extensions/mime_types_handler.cc',
'common/extensions/mime_types_handler.h',
'common/extensions/permissions/bluetooth_permission.cc',
'common/extensions/permissions/bluetooth_permission.h',
'common/extensions/permissions/bluetooth_permission_data.cc',
'common/extensions/permissions/bluetooth_permission_data.h',
'common/extensions/permissions/chrome_api_permissions.cc',
'common/extensions/permissions/chrome_api_permissions.h',
'common/extensions/permissions/chrome_permission_message_provider.cc',
'common/extensions/permissions/chrome_permission_message_provider.h',
'common/extensions/permissions/media_galleries_permission.cc',
'common/extensions/permissions/media_galleries_permission.h',
'common/extensions/permissions/media_galleries_permission_data.cc',
'common/extensions/permissions/media_galleries_permission_data.h',
'common/extensions/permissions/permission_message_util.cc',
'common/extensions/permissions/permission_message_util.h',
'common/extensions/permissions/set_disjunction_permission.h',
'common/extensions/permissions/settings_override_permission.cc',
'common/extensions/permissions/settings_override_permission.h',
'common/extensions/permissions/socket_permission.cc',
'common/extensions/permissions/socket_permission.h',
'common/extensions/permissions/socket_permission_data.cc',
'common/extensions/permissions/socket_permission_data.h',
'common/extensions/permissions/socket_permission_entry.cc',
'common/extensions/permissions/socket_permission_entry.h',
'common/extensions/permissions/usb_device_permission.cc',
'common/extensions/permissions/usb_device_permission.h',
'common/extensions/permissions/usb_device_permission_data.cc',
'common/extensions/permissions/usb_device_permission_data.h',
'common/extensions/sync_helper.cc',
'common/extensions/sync_helper.h',
'common/extensions/update_manifest.cc',
'common/extensions/update_manifest.h',
'common/extensions/value_counter.cc',
'common/extensions/value_counter.h',
'common/extensions/web_accessible_resources_handler.cc',
'common/extensions/web_accessible_resources_handler.h',
'common/extensions/webview_handler.cc',
'common/extensions/webview_handler.h',
'common/favicon/favicon_types.cc',
'common/favicon/favicon_types.h',
'common/favicon/favicon_url_parser.cc',
'common/favicon/favicon_url_parser.h',
'common/icon_with_badge_image_source.cc',
'common/icon_with_badge_image_source.h',
'common/importer/firefox_importer_utils.cc',
'common/importer/firefox_importer_utils.h',
'common/importer/firefox_importer_utils_linux.cc',
'common/importer/firefox_importer_utils_mac.mm',
'common/importer/firefox_importer_utils_win.cc',
'common/importer/ie_importer_test_registry_overrider_win.cc',
'common/importer/ie_importer_test_registry_overrider_win.h',
'common/importer/ie_importer_utils_win.cc',
'common/importer/ie_importer_utils_win.h',
'common/importer/imported_bookmark_entry.cc',
'common/importer/imported_bookmark_entry.h',
'common/importer/imported_favicon_usage.cc',
'common/importer/imported_favicon_usage.h',
'common/importer/importer_bridge.cc',
'common/importer/importer_bridge.h',
'common/importer/importer_data_types.cc',
'common/importer/importer_data_types.h',
'common/importer/importer_type.h',
'common/importer/importer_url_row.cc',
'common/importer/importer_url_row.h',
'common/importer/profile_import_process_messages.cc',
'common/importer/profile_import_process_messages.h',
'common/importer/safari_importer_utils.h',
'common/importer/safari_importer_utils.mm',
'common/instant_restricted_id_cache.h',
'common/instant_types.cc',
'common/instant_types.h',
'common/localized_error.cc',
'common/localized_error.h',
'common/local_discovery/service_discovery_client.cc',
'common/local_discovery/service_discovery_client.h',
'common/logging_chrome.cc',
'common/logging_chrome.h',
'common/mac/app_mode_common.h',
'common/mac/app_mode_common.mm',
'common/mac/cfbundle_blocker.h',
'common/mac/cfbundle_blocker.mm',
'common/mac/launchd.h',
'common/mac/launchd.mm',
'common/mac/objc_method_swizzle.h',
'common/mac/objc_method_swizzle.mm',
'common/mac/objc_zombie.h',
'common/mac/objc_zombie.mm',
'common/media/webrtc_logging_messages.h',
'common/metrics/caching_permuted_entropy_provider.cc',
'common/metrics/caching_permuted_entropy_provider.h',
'common/metrics/metrics_log_base.cc',
'common/metrics/metrics_log_base.h',
'common/metrics/metrics_log_manager.cc',
'common/metrics/metrics_log_manager.h',
'common/metrics/metrics_service_base.cc',
'common/metrics/metrics_service_base.h',
'common/metrics/variations/experiment_labels.cc',
'common/metrics/variations/experiment_labels.h',
'common/metrics/variations/uniformity_field_trials.cc',
'common/metrics/variations/uniformity_field_trials.h',
'common/metrics/variations/variations_util.cc',
'common/metrics/variations/variations_util.h',
'common/multi_process_lock.h',
'common/multi_process_lock_linux.cc',
'common/multi_process_lock_mac.cc',
'common/multi_process_lock_win.cc',
'common/omaha_query_params/omaha_query_params.cc',
'common/omaha_query_params/omaha_query_params.h',
'common/omnibox_focus_state.h',
'common/partial_circular_buffer.cc',
'common/partial_circular_buffer.h',
'common/pepper_flash.cc',
'common/pepper_flash.h',
'common/pepper_permission_util.cc',
'common/pepper_permission_util.h',
'common/pref_names_util.cc',
'common/pref_names_util.h',
'common/print_messages.cc',
'common/print_messages.h',
'common/profiling.cc',
'common/profiling.h',
'common/ref_counted_util.h',
'common/render_messages.cc',
'common/render_messages.h',
'common/safe_browsing/download_protection_util.cc',
'common/safe_browsing/download_protection_util.h',
'common/safe_browsing/safebrowsing_messages.h',
'common/safe_browsing/zip_analyzer.cc',
'common/safe_browsing/zip_analyzer.h',
'common/search_provider.h',
'common/search_types.h',
'common/search_urls.cc',
'common/search_urls.h',
'common/service_messages.h',
'common/service_process_util.cc',
'common/service_process_util.h',
'common/service_process_util_linux.cc',
'common/service_process_util_mac.mm',
'common/service_process_util_posix.cc',
'common/service_process_util_posix.h',
'common/service_process_util_win.cc',
'common/spellcheck_common.cc',
'common/spellcheck_common.h',
'common/spellcheck_marker.h',
'common/spellcheck_messages.h',
'common/spellcheck_result.h',
'common/switch_utils.cc',
'common/switch_utils.h',
'common/thumbnail_score.cc',
'common/thumbnail_score.h',
'common/translate/language_detection_details.cc',
'common/translate/language_detection_details.h',
'common/translate/translate_errors.h',
'common/tts_messages.h',
'common/tts_utterance_request.cc',
'common/tts_utterance_request.h',
'common/url_constants.cc',
'common/url_constants.h',
'common/web_application_info.cc',
'common/web_application_info.h',
'common/worker_thread_ticker.cc',
'common/worker_thread_ticker.h',
],
'conditions': [
['enable_extensions==1', {
'dependencies': [
'../device/bluetooth/bluetooth.gyp:device_bluetooth',
'../device/usb/usb.gyp:device_usb',
],
}, { # enable_extensions == 0
'sources/': [
['exclude', '^common/extensions/api/'],
['include', 'common/extensions/api/extension_action/action_info.cc'],
['include', 'common/extensions/api/extension_action/action_info.h'],
['include', 'common/extensions/api/i18n/default_locale_handler.cc'],
['include', 'common/extensions/api/i18n/default_locale_handler.h'],
['include', 'common/extensions/api/identity/oauth2_manifest_handler.cc'],
['include', 'common/extensions/api/identity/oauth2_manifest_handler.h'],
['include', 'common/extensions/api/managed_mode_private/managed_mode_handler.cc'],
['include', 'common/extensions/api/managed_mode_private/managed_mode_handler.h'],
['include', 'common/extensions/api/plugins/plugins_handler.cc'],
['include', 'common/extensions/api/plugins/plugins_handler.h'],
['include', 'common/extensions/api/storage/storage_schema_manifest_handler.cc'],
['include', 'common/extensions/api/storage/storage_schema_manifest_handler.h'],
],
}],
['OS=="win" or OS=="mac"', {
'sources': [
'common/media_galleries/itunes_library.cc',
'common/media_galleries/itunes_library.h',
'common/media_galleries/picasa_types.cc',
'common/media_galleries/picasa_types.h',
'common/media_galleries/pmp_constants.h',
],
}],
['OS=="mac"', {
'sources': [
'common/media_galleries/iphoto_library.cc',
'common/media_galleries/iphoto_library.h',
],
}],
['OS != "ios"', {
'dependencies': [
'<(DEPTH)/chrome/common/extensions/api/api.gyp:api',
'<(DEPTH)/components/components.gyp:autofill_core_common',
'<(DEPTH)/components/components.gyp:autofill_content_common',
'<(DEPTH)/components/nacl.gyp:nacl_common',
'<(DEPTH)/components/components.gyp:visitedlink_common',
'<(DEPTH)/extensions/extensions.gyp:extensions_common',
'<(DEPTH)/ipc/ipc.gyp:ipc',
'<(DEPTH)/third_party/adobe/flash/flash_player.gyp:flapper_version_h',
'<(DEPTH)/third_party/re2/re2.gyp:re2',
'<(DEPTH)/third_party/widevine/cdm/widevine_cdm.gyp:widevine_cdm_version_h',
],
}, { # OS == ios
'sources/': [
['exclude', '^common/child_process_'],
['exclude', '^common/chrome_content_client\\.cc$'],
['exclude', '^common/chrome_version_info_posix\\.cc$'],
['exclude', '^common/common_message_generator\\.cc$'],
['exclude', '^common/common_param_traits'],
['exclude', '^common/custom_handlers/'],
['exclude', '^common/extensions/'],
['exclude', '^common/logging_chrome\\.'],
['exclude', '^common/multi_process_'],
['exclude', '^common/nacl_'],
['exclude', '^common/pepper_flash\\.'],
['exclude', '^common/profiling\\.'],
['exclude', '^common/service_process_util_'],
['exclude', '^common/spellcheck_'],
['exclude', '^common/validation_message_'],
['exclude', '^common/web_apps\\.'],
# TODO(ios): Include files here as they are made to work; once
# everything is online, remove everything below here and just
# use the exclusions above.
['exclude', '\\.(cc|mm)$'],
['include', '_ios\\.(cc|mm)$'],
['include', '(^|/)ios/'],
['include', '^common/chrome_version_info\\.cc$'],
['include', '^common/translate'],
['include', '^common/zip'],
],
'include_dirs': [
'<(DEPTH)/breakpad/src',
],
}],
['enable_printing!=0', {
'dependencies': [
'<(DEPTH)/printing/printing.gyp:printing',
],
}],
['OS!="ios" and chrome_multiple_dll!=1', {
'dependencies': [
'<(DEPTH)/webkit/glue/webkit_glue.gyp:glue',
],
}],
['OS=="android"', {
'sources/': [
['exclude', '^common/chrome_version_info_posix.cc'],
['exclude', '^common/service_'],
],
'sources!': [
'common/badge_util.cc',
'common/extensions/api/extension_action/browser_action_handler.cc',
'common/extensions/api/extension_action/page_action_handler.cc',
'common/extensions/api/spellcheck/spellcheck_handler.cc',
'common/extensions/manifest_handlers/minimum_chrome_version_checker.cc',
'common/extensions/manifest_handlers/nacl_modules_handler.cc',
'common/icon_with_badge_image_source.cc',
'common/importer/imported_bookmark_entry.cc',
'common/importer/importer_bridge.cc',
'common/importer/importer_data_types.cc',
'common/importer/importer_url_row.cc',
'common/net/url_util.cc',
'common/spellcheck_common.cc',
],
}],
['OS=="win"', {
'include_dirs': [
'<(DEPTH)/breakpad/src',
'<(DEPTH)/third_party/wtl/include',
],
}],
['enable_mdns == 1', {
'sources': [
'common/local_discovery/local_discovery_messages.h',
]
}],
['toolkit_uses_gtk == 1', {
'dependencies': [
'../build/linux/system.gyp:gtk',
],
'export_dependent_settings': [
'../third_party/sqlite/sqlite.gyp:sqlite',
],
'link_settings': {
'libraries': [
'-lX11',
'-lXrender',
'-lXss',
'-lXext',
],
},
}],
['chromeos==1', {
'sources!': [
'common/chrome_version_info_linux.cc',
],
}],
['OS=="mac"', {
'dependencies': [
'../third_party/mach_override/mach_override.gyp:mach_override',
],
'include_dirs': [
'<(DEPTH)/breakpad/src',
'../third_party/GTM',
],
'sources!': [
'common/child_process_logging_posix.cc',
'common/chrome_version_info_posix.cc',
],
}],
['remoting==1', {
'dependencies': [
'../remoting/remoting.gyp:remoting_client_plugin',
],
}],
['enable_automation==0', {
'sources/': [
['exclude', '^common/automation_']
]
}],
['enable_plugins==0', {
'source!' : [
'common/pepper_permission_util.cc',
],
}],
['use_system_nspr==1', {
'dependencies': [
'<(DEPTH)/base/third_party/nspr/nspr.gyp:nspr',
],
}],
['enable_webrtc==0', {
'sources!': [
'common/media/webrtc_logging_messages.h',
]
}],
['enable_printing==0', {
'sources!': [
'common/print_messages.cc',
'common/print_messages.h',
]
}],
['configuration_policy==1', {
'dependencies': [
'<(DEPTH)/components/components.gyp:policy',
],
}],
],
'target_conditions': [
['OS == "ios"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^common/chrome_version_info_mac\\.mm$'],
],
}],
],
'export_dependent_settings': [
'../base/base.gyp:base',
'metrics_proto',
],
},
{
'target_name': 'common_version',
'type': 'none',
'conditions': [
['os_posix == 1 and OS != "mac" and OS != "ios"', {
'direct_dependent_settings': {
'include_dirs': [
'<(SHARED_INTERMEDIATE_DIR)',
],
},
# Because posix_version generates a header, we must set the
# hard_dependency flag.
'hard_dependency': 1,
'actions': [
{
'action_name': 'posix_version',
'variables': {
'lastchange_path':
'<(DEPTH)/build/util/LASTCHANGE',
'version_py_path': 'tools/build/version.py',
'version_path': 'VERSION',
'template_input_path': 'common/chrome_version_info_posix.h.version',
},
'conditions': [
[ 'branding == "Chrome"', {
'variables': {
'branding_path':
'app/theme/google_chrome/BRANDING',
},
}, { # else branding!="Chrome"
'variables': {
'branding_path':
'app/theme/chromium/BRANDING',
},
}],
],
'inputs': [
'<(template_input_path)',
'<(version_path)',
'<(branding_path)',
'<(lastchange_path)',
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/chrome/common/chrome_version_info_posix.h',
],
'action': [
'python',
'<(version_py_path)',
'-f', '<(version_path)',
'-f', '<(branding_path)',
'-f', '<(lastchange_path)',
'<(template_input_path)',
'<@(_outputs)',
],
'message': 'Generating version information',
},
],
}],
],
},
{
'target_name': 'common_net',
'type': 'static_library',
'sources': [
'common/net/net_error_info.cc',
'common/net/net_error_info.h',
'common/net/net_resource_provider.cc',
'common/net/net_resource_provider.h',
'common/net/predictor_common.h',
'common/net/url_fixer_upper.cc',
'common/net/url_fixer_upper.h',
'common/net/url_util.cc',
'common/net/url_util.h',
'common/net/x509_certificate_model.cc',
'common/net/x509_certificate_model_nss.cc',
'common/net/x509_certificate_model_openssl.cc',
'common/net/x509_certificate_model.h',
],
'dependencies': [
'<(DEPTH)/base/base.gyp:base',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_resources',
'<(DEPTH)/chrome/chrome_resources.gyp:chrome_strings',
'<(DEPTH)/crypto/crypto.gyp:crypto',
'<(DEPTH)/net/net.gyp:net_resources',
'<(DEPTH)/net/net.gyp:net',
'<(DEPTH)/third_party/icu/icu.gyp:icui18n',
'<(DEPTH)/third_party/icu/icu.gyp:icuuc',
],
'conditions': [
['OS != "ios"', {
'dependencies': [
'<(DEPTH)/gpu/gpu.gyp:gpu_ipc',
],
}, { # OS == ios
'sources!': [
'common/net/net_resource_provider.cc',
'common/net/x509_certificate_model.cc',
],
}],
['os_posix == 1 and OS != "mac" and OS != "ios" and OS != "android"', {
'dependencies': [
'../build/linux/system.gyp:ssl',
],
},
],
['os_posix != 1 or OS == "mac" or OS == "ios"', {
'sources!': [
'common/net/x509_certificate_model_nss.cc',
'common/net/x509_certificate_model_openssl.cc',
],
},
],
['OS == "android"', {
'dependencies': [
'../third_party/openssl/openssl.gyp:openssl',
],
},
],
['use_openssl==1', {
'sources!': [
'common/net/x509_certificate_model_nss.cc',
],
},
{ # else !use_openssl: remove the unneeded files
'sources!': [
'common/net/x509_certificate_model_openssl.cc',
],
},
],
['OS=="win"', {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [4267, ],
},
],
],
},
{
# Protobuf compiler / generator for the safebrowsing client
# model proto and the client-side detection (csd) request
# protocol buffer.
'target_name': 'safe_browsing_proto',
'type': 'static_library',
'sources': [
'common/safe_browsing/client_model.proto',
'common/safe_browsing/csd.proto'
],
'variables': {
'proto_in_dir': 'common/safe_browsing',
'proto_out_dir': 'chrome/common/safe_browsing',
},
'includes': [ '../build/protoc.gypi' ],
},
{
# Protobuf compiler / generator for UMA (User Metrics Analysis).
'target_name': 'metrics_proto',
'type': 'static_library',
'sources': [
'common/metrics/proto/chrome_experiments.proto',
'common/metrics/proto/chrome_user_metrics_extension.proto',
'common/metrics/proto/histogram_event.proto',
'common/metrics/proto/omnibox_event.proto',
'common/metrics/proto/perf_data.proto',
'common/metrics/proto/permuted_entropy_cache.proto',
'common/metrics/proto/profiler_event.proto',
'common/metrics/proto/system_profile.proto',
'common/metrics/proto/user_action_event.proto',
],
'variables': {
'proto_in_dir': 'common/metrics/proto',
'proto_out_dir': 'chrome/common/metrics/proto',
},
'includes': [ '../build/protoc.gypi' ],
},
],
}
| [
"[email protected]"
]
| |
374a204faa07f80ad6187a04e03dc1f385ef9168 | 7619aed8a311e2832634379762c373886f4354fb | /trace_pox_eel_l2_multi-BinaryLeafTreeTopology1-steps200/interactive_replay_config.py | 642e4a4db5b05f5ecefc4f4ffd682ee68f5bc5b3 | []
| no_license | jmiserez/sdnracer-traces | b60f8588277c4dc2dad9fe270c05418c47d229b3 | 8991eee19103c8ebffd6ffe15d88dd8c25e1aad5 | refs/heads/master | 2021-01-21T18:21:32.040221 | 2015-12-15T14:34:46 | 2015-12-15T14:34:46 | 39,391,225 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py |
from config.experiment_config_lib import ControllerConfig
from sts.topology import *
from sts.control_flow.interactive_replayer import InteractiveReplayer
from sts.simulation_state import SimulationConfig
from sts.input_traces.input_logger import InputLogger
# STS replay configuration: replays a recorded event trace (events.trace)
# against a POX controller running the l2_multi forwarding app on a
# 1-level binary-leaf-tree topology.
# NOTE(review): the STS runner presumably loads this module and looks up the
# module-level names ``simulation_config`` and ``control_flow`` by name —
# verify before renaming either of them.
simulation_config = SimulationConfig(controller_configs=[ControllerConfig(start_cmd=' ./pox.py --verbose openflow.of_01 --address=__address__ --port=__port__ openflow.discovery forwarding.l2_multi_orig', label='c1', address='127.0.0.1', cwd='/home/ahassany/repos/pox/')],
                     topology_class=BinaryLeafTreeTopology,
                     topology_params="num_levels=1",
                     patch_panel_class=BufferedPatchPanel,
                     multiplex_sockets=False,
                     ignore_interposition=False,
                     kill_controllers_on_exit=True)
# Interactive replay of the recorded trace against the simulation above.
control_flow = InteractiveReplayer(simulation_config, "traces/trace_pox_eel_l2_multi-BinaryLeafTreeTopology1-steps200/events.trace")
# wait_on_deterministic_values=False
# delay_flow_mods=False
# Invariant check: 'InvariantChecker.check_liveness'
# Bug signature: ""
| [
"[email protected]"
]
| |
01ce29ccc41608e7b9ab3c3e53c70e7465104cb8 | fd67592b2338105e0cd0b3503552d188b814ad95 | /egoi_api/paths/lists/post.pyi | e1cd61175409ba8d08a6a2aaadd81bc677ac36da | []
| no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,249 | pyi | # coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import typing_extensions
import urllib3
from urllib3._collections import HTTPHeaderDict
from egoi_api import api_client, exceptions
from datetime import date, datetime # noqa: F401
import decimal # noqa: F401
import functools # noqa: F401
import io # noqa: F401
import re # noqa: F401
import typing # noqa: F401
import typing_extensions # noqa: F401
import uuid # noqa: F401
import frozendict # noqa: F401
from egoi_api import schemas # noqa: F401
from egoi_api.model.request_timeout import RequestTimeout
from egoi_api.model.unauthorized import Unauthorized
from egoi_api.model.service_unavailable import ServiceUnavailable
from egoi_api.model.post_lists_conflict import PostListsConflict
from egoi_api.model.post_request_list import PostRequestList
from egoi_api.model.bad_request import BadRequest
from egoi_api.model.unprocessable_entity import UnprocessableEntity
from egoi_api.model.model_list import ModelList
from egoi_api.model.internal_server_error import InternalServerError
from egoi_api.model.too_many_requests import TooManyRequests
from egoi_api.model.forbidden import Forbidden
# body param
# Request body descriptor for this endpoint: a JSON-encoded PostRequestList.
# ``required=True`` marks the body as mandatory (the implementation below
# also rejects an unset body explicitly).
SchemaForRequestBodyApplicationJson = PostRequestList
request_body_post_request_list = api_client.RequestBody(
    content={
        'application/json': api_client.MediaType(
            schema=SchemaForRequestBodyApplicationJson),
    },
    required=True,
)
# Response descriptors, one trio per documented status code: a schema alias,
# a typed ApiResponse dataclass (raw response / deserialized body / headers),
# and an OpenApiResponse that knows how to deserialize the matching reply.
# 201 Created: the newly created list, deserialized as ModelList.
SchemaFor201ResponseBodyApplicationJson = ModelList
@dataclass
class ApiResponseFor201(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor201ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_201 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor201,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor201ResponseBodyApplicationJson),
    },
)
# 400 Bad Request.
SchemaFor400ResponseBodyApplicationJson = BadRequest
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor400ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor400ResponseBodyApplicationJson),
    },
)
# 401 Unauthorized.
SchemaFor401ResponseBodyApplicationJson = Unauthorized
@dataclass
class ApiResponseFor401(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor401ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_401 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor401,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor401ResponseBodyApplicationJson),
    },
)
# 403 Forbidden.
SchemaFor403ResponseBodyApplicationJson = Forbidden
@dataclass
class ApiResponseFor403(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor403ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_403 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor403,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor403ResponseBodyApplicationJson),
    },
)
# 408 Request Timeout.
SchemaFor408ResponseBodyApplicationJson = RequestTimeout
@dataclass
class ApiResponseFor408(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor408ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_408 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor408,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor408ResponseBodyApplicationJson),
    },
)
# 409 Conflict (e.g. a list that clashes with an existing one).
SchemaFor409ResponseBodyApplicationJson = PostListsConflict
@dataclass
class ApiResponseFor409(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor409ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_409 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor409,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor409ResponseBodyApplicationJson),
    },
)
# 422 Unprocessable Entity (validation failure).
SchemaFor422ResponseBodyApplicationJson = UnprocessableEntity
@dataclass
class ApiResponseFor422(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor422ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_422 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor422,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor422ResponseBodyApplicationJson),
    },
)
# 429 Too Many Requests (rate limiting).
SchemaFor429ResponseBodyApplicationJson = TooManyRequests
@dataclass
class ApiResponseFor429(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor429ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_429 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor429,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor429ResponseBodyApplicationJson),
    },
)
# 500 Internal Server Error.
SchemaFor500ResponseBodyApplicationJson = InternalServerError
@dataclass
class ApiResponseFor500(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor500ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_500 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor500,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor500ResponseBodyApplicationJson),
    },
)
# 503 Service Unavailable.
SchemaFor503ResponseBodyApplicationJson = ServiceUnavailable
@dataclass
class ApiResponseFor503(api_client.ApiResponse):
    response: urllib3.HTTPResponse
    body: typing.Union[
        SchemaFor503ResponseBodyApplicationJson,
    ]
    headers: schemas.Unset = schemas.unset
_response_for_503 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor503,
    content={
        'application/json': api_client.MediaType(
            schema=SchemaFor503ResponseBodyApplicationJson),
    },
)
# Accept header values this endpoint can negotiate.
_all_accept_content_types = (
    'application/json',
)
class BaseApi(api_client.Api):
    """Generated low-level binding for creating a list (``POST``).

    The ``@typing.overload`` stubs below only narrow the return type for
    static type checkers depending on ``skip_deserialization``; the final,
    un-decorated ``_create_list_oapg`` is the single runtime implementation.
    """
    @typing.overload
    def _create_list_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def _create_list_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def _create_list_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def _create_list_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor201,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    def _create_list_oapg(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        """
        Create new list
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        """
        used_path = path.value  # module-level Path for this endpoint (defined outside this chunk)
        _headers = HTTPHeaderDict()
        # TODO add cookie handling
        if accept_content_types:
            for accept_content_type in accept_content_types:
                _headers.add('Accept', accept_content_type)
        # The body is mandatory for this endpoint; fail fast before serializing.
        if body is schemas.unset:
            raise exceptions.ApiValueError(
                'The required body parameter has an invalid value of: unset. Set a valid value instead')
        _fields = None
        _body = None
        # Serialization yields either multipart 'fields' or a raw 'body',
        # depending on the chosen content type.
        serialized_data = request_body_post_request_list.serialize(body, content_type)
        _headers.add('Content-Type', content_type)
        if 'fields' in serialized_data:
            _fields = serialized_data['fields']
        elif 'body' in serialized_data:
            _body = serialized_data['body']
        response = self.api_client.call_api(
            resource_path=used_path,
            method='post'.upper(),
            headers=_headers,
            fields=_fields,
            body=_body,
            auth_settings=_auth,  # module-level auth config (defined outside this chunk)
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Pick the typed deserializer for this status code, if one exists.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # Non-2xx always raises, even when a typed error body (400, 401, ...)
        # was successfully deserialized above.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)
        return api_response
class CreateList(BaseApi):
    """Facade exposing the operation under its OpenAPI operationId,
    ``create_list``; all overloads delegate to ``BaseApi._create_list_oapg``."""
    # this class is used by api classes that refer to endpoints with operationId fn names
    @typing.overload
    def create_list(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def create_list(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def create_list(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def create_list(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor201,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    # Runtime implementation; the overloads above exist for type checking only.
    def create_list(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        return self._create_list_oapg(
            body=body,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
class ApiForpost(BaseApi):
    """Facade exposing the same operation under the HTTP verb name ``post``;
    all overloads delegate to ``BaseApi._create_list_oapg``."""
    # this class is used by api classes that refer to endpoints by path and http method names
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: typing_extensions.Literal["application/json"] = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: typing_extensions.Literal[False] = ...,
    ) -> typing.Union[
        ApiResponseFor201,
    ]: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        skip_deserialization: typing_extensions.Literal[True],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
    ) -> api_client.ApiResponseWithoutDeserialization: ...
    @typing.overload
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = ...,
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = ...,
    ) -> typing.Union[
        ApiResponseFor201,
        api_client.ApiResponseWithoutDeserialization,
    ]: ...
    # Runtime implementation; the overloads above exist for type checking only.
    def post(
        self,
        body: typing.Union[SchemaForRequestBodyApplicationJson,],
        content_type: str = 'application/json',
        accept_content_types: typing.Tuple[str] = _all_accept_content_types,
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ):
        return self._create_list_oapg(
            body=body,
            content_type=content_type,
            accept_content_types=accept_content_types,
            stream=stream,
            timeout=timeout,
            skip_deserialization=skip_deserialization
        )
| [
"[email protected]"
]
| |
176869291f6071bba98ea44848936d7ceec24616 | 9b3abcd9c207a015ed611da2a22b48957e46c22d | /18_Django框架网站/dailyfresh/apps/user/models.py | 7d2b0e2c38e7610873de8a89d398099678359f03 | []
| no_license | it-zyk/PythonCode | 6e22aff6705d0e66df74a7c43f62f8a3a8f3f70f | 1ee51edad768ff01ba8542b1ddea05aee533524b | refs/heads/master | 2020-04-23T05:47:33.691995 | 2019-05-08T14:07:34 | 2019-05-08T14:07:34 | 170,951,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | from django.db import models
from django.contrib.auth.models import AbstractUser
from db.base_model import BaseModel
# Create your models here.
class User(AbstractUser, BaseModel):
    '''User model.

    Combines Django's built-in authentication fields (AbstractUser) with
    the project-wide shared fields provided by BaseModel.
    '''
    class Meta:
        # Explicit table name instead of Django's default "<app>_user".
        db_table = 'df_user'
        verbose_name = '用户'  # human-readable name (e.g. in the admin)
        verbose_name_plural = verbose_name
class AddressManager(models.Manager):
    '''Custom manager for the Address model.

    A manager is the place to (1) tailor the default queryset and
    (2) wrap common table-level operations (CRUD helpers) for the model.
    '''
    def get_default_address(self, user):
        '''Return the default shipping address of *user*, or None.'''
        try:
            # self.model is the model class this manager is attached to.
            return self.get(user=user, is_default=True)
        except self.model.DoesNotExist:
            # The user has not marked any address as the default.
            return None
class Address(BaseModel):
    '''Shipping address model.

    Each address belongs to one User; ``AddressManager.get_default_address``
    assumes at most one address per user has ``is_default=True``.
    '''
    # on_delete=models.CASCADE matches the implicit default of Django < 2.0
    # and is mandatory from Django 2.0 on, so spelling it out keeps the model
    # forward-compatible without changing behaviour.
    user = models.ForeignKey('User', on_delete=models.CASCADE,
                             verbose_name='所属账户')
    receiver = models.CharField(max_length=20, verbose_name='收件人')
    addr = models.CharField(max_length=256, verbose_name='收件地址')
    zip_code = models.CharField(max_length=6, null=True, verbose_name='邮政编码')
    phone = models.CharField(max_length=11, verbose_name='联系电话')
    is_default = models.BooleanField(default=False, verbose_name='是否默认')
    # Custom manager exposing get_default_address().
    objects = AddressManager()
    class Meta:
        db_table = 'df_address'
        verbose_name = '地址'
        verbose_name_plural = verbose_name
| [
"[email protected]"
]
| |
1d82f772f429dc0332e5f8d1b7f993895f9f527b | b2e1d96c0551b6b31ef85353f9b6e5b6354d64e8 | /datafaucet/spark/rows.py | 97e9ffa97152917b124d55ef93a80b38b4716046 | [
"MIT"
]
| permissive | SylarCS/datafaucet-1 | 8bd7b96cecc5592e153b61367892e2a63a96119d | a63074ba1fb1a6d15f06e2bfff05df754aaaa452 | refs/heads/master | 2020-09-15T06:04:31.999012 | 2019-11-18T20:00:55 | 2019-11-18T20:00:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,062 | py | import sys
from random import randint
from pyspark.sql import DataFrame
from datafaucet.spark import dataframe
INT_MAX = sys.maxsize
INT_MIN = -sys.maxsize-1
def sample(df, n=1000, *col, seed=None):
# n 0<float<=1 -> fraction of samples
# n floor(int)>1 -> number of samples
# todo:
# n dict of key, value pairs or array of (key, value)
# cols = takes alist of columns for sampling if more than one column is provided
# if a stratum is not specified, provide equally with what is left over form the total of the other quota
if n>1:
count = df.count()
fraction = n/count
return df if fraction>1 else df.sample(False, fraction, seed=seed)
else:
return df.sample(False, n, seed=seed)
_sample = sample
class Rows:
    """Row-oriented facade over a Spark DataFrame.

    ``scols`` are the selected data columns and ``gcols`` the grouping
    columns; the two sets are kept disjoint.
    """
    def __init__(self, df, scols=None, gcols=None):
        self.df = df
        self.gcols = gcols or []
        self.scols = scols or df.columns
        # Keep the selected columns disjoint from the grouping columns.
        # NOTE: set arithmetic leaves the resulting order unspecified.
        self.scols = list(set(self.scols) - set(self.gcols))
    @property
    def columns(self):
        """Columns of the underlying frame that belong to this view, in the
        frame's own column order."""
        return [x for x in self.df.columns if x in (self.scols + self.gcols)]
    def overwrite(self, data):
        """Build a fresh DataFrame from ``data`` using this frame's schema."""
        df = self.df
        return df.sql_ctx.createDataFrame(data, df.schema)
    def append(self, data):
        """Return the frame with ``data`` (same schema) appended."""
        df = self.df
        return df.unionByName(df.sql_ctx.createDataFrame(data, df.schema))
    def sample(self, n=1000, *cols, random_state=True):
        """Sample rows; see the module-level ``sample`` for the meaning of
        ``n``. ``random_state=True`` (the default) means unseeded; any other
        value is forwarded as the sampler seed."""
        # BUG FIX: ``random_state`` used to be passed positionally, where the
        # module-level sampler's *cols varargs swallowed it and the seed
        # silently stayed None. Forward it as the ``seed`` keyword instead;
        # the sentinel default True preserves the old (unseeded) behaviour.
        seed = None if random_state is True else random_state
        return _sample(self.df, n, *cols, seed=seed)
    def filter_by_date(self, column=None, start=None, end=None, window=None):
        """Filter rows by a datetime column via the shared helper."""
        return dataframe.filter_by_datetime(self.df, column, start, end, window)
    def filter(self, *args, **kwargs):
        """Pass-through to ``DataFrame.filter``."""
        return self.df.filter(*args, **kwargs)
    @property
    def cols(self):
        # Imported lazily (presumably to avoid a circular import).
        from datafaucet.spark.cols import Cols
        return Cols(self.df, self.scols, self.gcols)
    @property
    def data(self):
        # Imported lazily (presumably to avoid a circular import).
        from datafaucet.spark.data import Data
        return Data(self.df, self.scols, self.gcols)
def _rows(self):
    # Accessor installed below as ``DataFrame.rows``; ``self`` here is the
    # raw DataFrame, not a Rows instance. Defined at module scope so the
    # bare reference below resolves at import time.
    return Rows(self)
# Monkey-patch: expose the Rows facade as ``df.rows`` on every DataFrame.
DataFrame.rows = property(_rows)
| [
"[email protected]"
]
| |
87a59bc42433821ff6c353a9e24ee2417a5de00f | a6fae33cdf3d3cb0b0d458c2825a8d8cc010cd25 | /l3/z3/.history/population_20200522014505.py | 3e8108e382a859c114f46c50cbcf52c462b42894 | []
| no_license | Qabrix/optimization_amh | 12aab7c7980b38812ec38b7e494e82452a4176b4 | 6a4f5b897a4bef25f6e2acf535ba20ace7351689 | refs/heads/main | 2022-12-28T10:57:00.064130 | 2020-10-17T22:57:27 | 2020-10-17T22:57:27 | 304,983,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,540 | py | import random
import numpy as np
from utils import calculate_value, decision
class Inhabitant:
    """A single member of the population: a candidate solution whose
    genotype is a list of characters and whose fitness is ``value``."""
    def __init__(self, gene, value=0):
        self.gene = gene
        # BUG FIX: the ``value`` argument used to be discarded —
        # ``self.value`` was unconditionally initialised to 0.
        self.value = value
    def __iter__(self):
        # Iterating an Inhabitant yields its genes in order.
        for char in self.gene:
            yield char
    def __len__(self):
        return len(self.gene)
    def __getitem__(self, item):
        return self.gene[item]
    def get_str_gene(self, up):
        """Return the first ``up`` genes joined into a single string."""
        return "".join(self.gene[:up])
class Population:
    """A generation of candidate character sequences for a permutation
    puzzle, evolved via elitist selection, one-point crossover
    (``recombinate``) and several mutation operators (``mutate``).

    Genes are lists of characters drawn without repetition from
    ``all_puzzle_keys``. Each Inhabitant's fitness lives in its ``value``
    attribute (presumably assigned externally, e.g. via ``calculate_value``
    — not visible in this chunk).
    """
    def __init__(self, population_size, all_puzzle_keys, starter_words):
        self.all_puzzle_keys = all_puzzle_keys
        self.best_res = None  # NOTE(review): set here but never updated in this chunk
        self.population_size = population_size
        self.generation = self._gen_generation(starter_words)
    def __iter__(self):
        # Iterating a Population yields its Inhabitants.
        for inhabitant in self.generation:
            yield inhabitant
    def _random_word(self):
        # A random permutation of *all* puzzle keys (no repeated characters).
        return random.sample(self.all_puzzle_keys, len(self.all_puzzle_keys))
    def _gen_generation(self, starter_words):
        """Seed the initial generation: the starter words verbatim, padded
        with random words whose lengths fall inside the starters' range."""
        min_size = min([len(word) for word in starter_words])
        max_size = max([len(word) for word in starter_words])
        generation = []
        for word in starter_words:
            generation.append(Inhabitant(list(word)))
        for _ in range(len(starter_words), self.population_size):
            word = self._random_word()[: random.randint(min_size, max_size)]
            generation.append(Inhabitant(word))
        return generation
    def sorted_generation(self):
        """Inhabitants ordered best-first (descending fitness)."""
        return sorted(self.generation, key=lambda x: x.value, reverse=True)
    def make_selection(self, elite_percentage, percentage=0.75):
        """Pick ``percentage`` of the population for breeding: the top
        ``elite_percentage`` share of that quota comes from the fittest
        individuals, and the remainder is taken from the tail of the
        ranking (keeping some low-fitness diversity in the pool)."""
        selection = []
        sorted_generation = self.sorted_generation()
        selection_size = int(self.population_size * percentage)
        elite_size = int(elite_percentage * selection_size)
        for inhabitant in sorted_generation[:elite_size]:
            selection.append(inhabitant)
        if elite_size - selection_size < 0:
            # Negative slice start: the last (selection_size - elite_size)
            # individuals, i.e. the worst-ranked ones.
            for inhabitant in sorted_generation[elite_size - selection_size :]:
                selection.append(inhabitant)
        return selection
    def _check_if_correct(self, word):
        """True iff ``word`` only uses characters from ``all_puzzle_keys``
        and uses no character more often than the key pool allows."""
        possible_chars = self.all_puzzle_keys.copy()
        for char in word:
            if char in possible_chars:
                possible_chars.remove(char)
            else:
                return False
        return True
    def recombinate(self, elite_percentage=0.6):
        """Breed the next generation by one-point crossover over a random
        pairing (permutation) of the selected parents; the two best parents
        are carried over unchanged (elitism). Offspring that would violate
        the character pool fall back to the corresponding parent."""
        selection = self.make_selection(elite_percentage)
        permutation = np.random.permutation(len(selection))
        new_generation = []
        # Elitism: copies of the two fittest individuals survive as-is.
        new_generation.append(Inhabitant(selection[0].gene.copy()))
        new_generation.append(Inhabitant(selection[1].gene.copy()))
        for i in range(1, len(permutation)):
            # Crossover point in the first half of the shorter parent.
            pivot = random.randint(
                0,
                min(
                    len(selection[permutation[i % len(permutation)]]),
                    len(selection[permutation[(i + 1) % len(permutation)]]),
                )
                // 2,
            )
            new_word = (
                selection[permutation[i % len(permutation)]][:pivot]
                + selection[permutation[(i + 1) % len(permutation)]][pivot:]
            )
            if self._check_if_correct(new_word):
                new_generation.append(Inhabitant(new_word))
            else:
                # NOTE(review): falls back to the parent's gene *without*
                # .copy(), so this child aliases the parent's list — later
                # in-place mutation affects both.
                new_generation.append(
                    Inhabitant(selection[permutation[i % len(permutation)]].gene)
                )
            # Mirror-image child: same pivot, parents swapped.
            new_word = (
                selection[permutation[(i + 1) % len(permutation)]][:pivot]
                + selection[permutation[i % len(permutation)]][pivot:]
            )
            if self._check_if_correct(new_word):
                new_generation.append(Inhabitant(new_word))
            else:
                # NOTE(review): same aliasing hazard as above (no .copy()).
                new_generation.append(
                    Inhabitant(selection[permutation[(i + 1) % len(permutation)]].gene)
                )
        self.generation = new_generation
    def mutate(
        self,
        min_swap_probability=0.2,
        max_swap_probability=0.7,
        inverse_probability=0.001,
        random_probability=0.05,
        shift_probability=0.001,
        insert_probability=0.9,
    ):
        """Mutate every inhabitant except the first (the elite at index 0 is
        protected). Operators, tried in priority order per inhabitant:
        gene insertion/removal, full re-randomisation, and otherwise a
        combination of cyclic shift, pairwise swaps and reversal."""
        # One swap probability drawn per call, shared by all inhabitants.
        swap_probability = random.uniform(min_swap_probability, max_swap_probability)
        for inhabitant in self.generation[1:]:
            if decision(insert_probability):
                insert_amount = random.randint(1, 2)
                if decision(0.5): # True -> insert genes, False -> drop genes
                    if(len(inhabitant)+insert_amount < len(self.all_puzzle_keys)):
                        # Characters still unused by this gene, random order.
                        possible_chars = self._random_word()
                        for char in inhabitant.gene:
                            if char in possible_chars:
                                possible_chars.remove(char)
                        # Append, prepend, or splice at a random position.
                        if decision(0.33):
                            inhabitant.gene += possible_chars[:insert_amount]
                        elif decision(0.5):
                            inhabitant.gene = possible_chars[:insert_amount] + inhabitant.gene
                        else:
                            insert_index = random.randint(1, len(inhabitant.gene))
                            inhabitant.gene = inhabitant.gene[:insert_index] + possible_chars[:insert_amount] + inhabitant.gene[insert_index:]
                else:
                    # Drop genes from the front or the back, but never empty
                    # the gene completely.
                    if(len(inhabitant)-insert_amount > 0):
                        if decision(0.5):
                            inhabitant.gene = inhabitant.gene[insert_amount:]
                        else:
                            inhabitant.gene = inhabitant.gene[:-insert_amount]
            elif decision(random_probability):
                # Catastrophic mutation: replace the gene wholesale.
                inhabitant.gene = self._random_word()
            else:
                if decision(shift_probability):
                    # Rotate the gene right by shift_range + 1 positions.
                    shift_range = random.randint(1, 3)
                    for _ in range(shift_range + 1):
                        inhabitant.gene = [inhabitant.gene[-1]] + inhabitant.gene[:-1]
                # Swap each position in the first half with a random position.
                for i in range(len(inhabitant.gene) // 2):
                    if decision(swap_probability):
                        random_id = random.randint(0, len(inhabitant) - 1)
                        inhabitant.gene[i], inhabitant.gene[random_id] = (
                            inhabitant.gene[random_id],
                            inhabitant.gene[i],
                        )
                if decision(inverse_probability):
                    inhabitant.gene = inhabitant.gene[::-1]
"[email protected]"
]
| |
8efeda44d905898ff678ae343caf148717963d54 | 38c606ed14564591c1aa6e65c7dab255aebf76f9 | /0x11-python-network_1/5-hbtn_header.py | 2c76d103cd07681c5295f2c7dd0ea62d4798e81a | []
| no_license | camilooob/holbertonschool-higher_level_programming | d7cee708b308bed86fcc384d7451de26fa9cafaa | db9b6760e7e4998c5f00a4f2cfeb17ec14e44cab | refs/heads/master | 2020-09-29T00:19:25.736344 | 2020-05-15T01:34:32 | 2020-05-15T01:34:32 | 226,900,553 | 1 | 8 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #!/usr/bin/python3
"""the package request"""
if __name__ == "__main__":
import requests
import sys
r = requests.get(sys.argv[1])
print(r.headers.get('X-Request-Id'))
| [
"[email protected]"
]
| |
0332091c980a247b508924dc4e03710be5f08839 | b0856a2d66cc4c71705b8c16c169848070294cf6 | /removeDupSortedArray.py | 3f60a7ee970822ff9418506693aa240504fabb51 | []
| no_license | jfriend08/LeetCode | 9e378ff015edc3102a4785b0832cf0eeb09f5fc2 | f76d3cf2e7fd91767f80bd60eed080a7bad06e62 | refs/heads/master | 2021-01-21T19:28:25.354537 | 2016-01-15T04:53:11 | 2016-01-15T04:53:11 | 28,518,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 832 | py | '''
Follow up for "Remove Duplicates":
What if duplicates are allowed at most twice?
For example,
Given sorted array nums = [1,1,1,2,2,3],
Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3.
It doesn't matter what you leave beyond the new length.
Subscribe to see which companies asked this question
'''
class Solution(object):
def removeDuplicates(self, nums):
if not nums:
return 0
mapCount = {}
maxNum = nums[-1]
for num in nums:
try:
mapCount[num] += 1
except:
mapCount[num] = 1
res = []
for num in xrange(maxNum+1):
if num in mapCount:
res += ( [num] if mapCount[num]==1 else [num, num])
return len(res)
sol = Solution()
print sol.removeDuplicates([1,1,1,2,2,3])
print sol.removeDuplicates([])
| [
"[email protected]"
]
| |
bfe8e4bc295bbf5a06577105e22905e15b024ebe | 1aec3c93eaa1fc271ea80141a3a41a24cd60c8d9 | /mcrouter/test/test_loadbalancer_route.py | 854b6970f6db6a6b80bfcb4620c6a999a5ebd5a3 | [
"BSD-3-Clause"
]
| permissive | boboozy/mcrouter | 810859b997ea2c687c67723a3ad94aa88e93b746 | d78f599bd3887a87d5785422a25e3ac07b0de169 | refs/heads/master | 2021-07-25T09:52:09.175808 | 2017-11-04T01:02:51 | 2017-11-04T01:11:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from mcrouter.test.MCProcess import Mcrouter
from mcrouter.test.McrouterTestCase import McrouterTestCase
class TestLoadBalancerRoute(McrouterTestCase):
config = './mcrouter/test/test_loadbalancer_route.json'
null_route_config = './mcrouter/test/test_nullroute.json'
mcrouter_server_extra_args = ['--server-load-interval-ms=50']
extra_args = []
def setUp(self):
self.mc = []
for _i in range(8):
self.mc.append(Mcrouter(self.null_route_config,
extra_args=self.mcrouter_server_extra_args))
self.add_server(self.mc[_i])
self.mcrouter = self.add_mcrouter(
self.config,
extra_args=self.extra_args)
def test_loadbalancer(self):
n = 20000
for i in range(0, n):
key = 'someprefix:{}:|#|id=123'.format(i)
self.assertTrue(not self.mcrouter.get(key))
self.assertTrue(self.mcrouter.stats()['cmd_get_count'] > 0)
lblrc = 'load_balancer_load_reset_count'
self.assertEqual(int(self.mcrouter.stats("all")[lblrc]), 0)
sum = 0
for i in range(8):
self.assertTrue(self.mc[i].stats()['cmd_get_count'] > 0)
sum += int(self.mc[i].stats()['cmd_get_count'])
self.assertEqual(sum, n)
| [
"[email protected]"
]
| |
2c198ce9caa80d3848e36c87c340082b71dfce04 | 4d37628a27c5a50a70fa06f78be346223c37ade0 | /jobs/migrations.py | 88f61681fe41fbe0da93397de6760842a9ab4e57 | [
"MIT"
]
| permissive | vinissimus/jobs | 93dbc0fd2c755b63d685165996b27a260e5e367c | 6e15749465f7da44e4dc0ad2f520ea6f7fbb67fe | refs/heads/master | 2023-01-01T01:29:50.332671 | 2020-10-23T15:27:49 | 2020-10-23T15:27:49 | 281,219,465 | 7 | 0 | MIT | 2020-10-23T15:31:47 | 2020-07-20T20:30:13 | Python | UTF-8 | Python | false | false | 1,978 | py | from .utils import setup_stdout_logging
from pathlib import Path
import asyncio
import asyncpg
import glob
import logging
import sys
import typing
logger = logging.getLogger("jobs")
current = Path(__file__)
def get_migrations_path() -> Path:
return current.parent / "sql"
def get_available():
files: typing.Dict[int, str] = {}
path = str(get_migrations_path())
for item in glob.glob(f"{path}/*.up.sql"):
file = item.replace(path + "/", "")
version = int(file.split("_")[0])
files[version] = file
return files
def load_migration(name: str):
file = get_migrations_path() / name
with file.open() as f:
return f.read()
async def migrate(db: asyncpg.Connection = None):
migrations = get_available()
try:
current = await db.fetchval("SELECT migration FROM jobs.migrations")
except asyncpg.exceptions.UndefinedTableError:
current = 0
logger.info("Current migration %s", current)
applied = current
async with db.transaction():
for avail in sorted(list(migrations.keys())):
if avail > current:
logger.info("Appling migration %s", migrations[avail])
data = load_migration(migrations[avail])
await db.execute(data)
applied = avail
if applied != current:
logger.info("Update migrations history version: %s", applied)
await db.execute("update jobs.migrations set migration=$1", applied)
else:
logger.info("No migrations applied. Your db it's at latest version")
async def main(dsn: str):
db = await asyncpg.connect(dsn=dsn)
await migrate(db)
usage = """
run it with:
job-migrations postgresql://xxx:xxxx@localhost:5432/db
"""
def run():
if len(sys.argv) != 2:
print(usage)
sys.exit(1)
setup_stdout_logging()
dsn = sys.argv[1]
asyncio.run(main(dsn))
if __name__ == "__main__":
run()
| [
"[email protected]"
]
| |
a98daa0410363b639ee81fc77a48ba3c678abf66 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/insights/get_guest_diagnostics_settings_association.py | 3440cdd68c76aa4250f607aaf13bbb8ba2ffb7dc | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,562 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGuestDiagnosticsSettingsAssociationResult',
'AwaitableGetGuestDiagnosticsSettingsAssociationResult',
'get_guest_diagnostics_settings_association',
'get_guest_diagnostics_settings_association_output',
]
@pulumi.output_type
class GetGuestDiagnosticsSettingsAssociationResult:
"""
Virtual machine guest diagnostic settings resource.
"""
def __init__(__self__, guest_diagnostic_settings_name=None, id=None, location=None, name=None, tags=None, type=None):
if guest_diagnostic_settings_name and not isinstance(guest_diagnostic_settings_name, str):
raise TypeError("Expected argument 'guest_diagnostic_settings_name' to be a str")
pulumi.set(__self__, "guest_diagnostic_settings_name", guest_diagnostic_settings_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="guestDiagnosticSettingsName")
def guest_diagnostic_settings_name(self) -> str:
"""
The guest diagnostic settings name.
"""
return pulumi.get(self, "guest_diagnostic_settings_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type
"""
return pulumi.get(self, "type")
class AwaitableGetGuestDiagnosticsSettingsAssociationResult(GetGuestDiagnosticsSettingsAssociationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGuestDiagnosticsSettingsAssociationResult(
guest_diagnostic_settings_name=self.guest_diagnostic_settings_name,
id=self.id,
location=self.location,
name=self.name,
tags=self.tags,
type=self.type)
def get_guest_diagnostics_settings_association(association_name: Optional[str] = None,
resource_uri: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGuestDiagnosticsSettingsAssociationResult:
"""
Virtual machine guest diagnostic settings resource.
API Version: 2018-06-01-preview.
:param str association_name: The name of the diagnostic settings association.
:param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
"""
__args__ = dict()
__args__['associationName'] = association_name
__args__['resourceUri'] = resource_uri
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:insights:getGuestDiagnosticsSettingsAssociation', __args__, opts=opts, typ=GetGuestDiagnosticsSettingsAssociationResult).value
return AwaitableGetGuestDiagnosticsSettingsAssociationResult(
guest_diagnostic_settings_name=__ret__.guest_diagnostic_settings_name,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_guest_diagnostics_settings_association)
def get_guest_diagnostics_settings_association_output(association_name: Optional[pulumi.Input[str]] = None,
resource_uri: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestDiagnosticsSettingsAssociationResult]:
"""
Virtual machine guest diagnostic settings resource.
API Version: 2018-06-01-preview.
:param str association_name: The name of the diagnostic settings association.
:param str resource_uri: The fully qualified ID of the resource, including the resource name and resource type.
"""
...
| [
"[email protected]"
]
| |
1fcab9c53a7ede0ecb7dfb6ee6e2ec6b84a8d078 | 0f44be0680ccbc2f8f96abfe97f5d1a094cd6e98 | /erokov.py | 494d719d07af47089ded91dc77709f24a452c75e | []
| no_license | kimihito/erokov | f75bf3199531af17a700dac854f00df19b59d3c1 | 32390edbade3d84f8be87367654ff1f6c229ca62 | refs/heads/master | 2016-09-05T23:20:41.926046 | 2012-08-27T10:33:11 | 2012-08-27T10:33:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,019 | py | #!/usr/bin/env python
# coding: utf-8
#AVのタイトルをマルコフ連鎖で作るスクリプトを書いてます
import random
import MeCab
def wakati(text):
t = MeCab.Tagger("-Owakati")
m = t.parse(text)
result = m.split(" ")
return result
if __name__ == "__main__":
filename = "title_sort_uniq.txt"
wordlist = []
src = open(filename,"r").read().split("\n")
for tmpsrc in src:
wordlist += wakati(tmpsrc)
erokov = {}
w1 = ""
w2 = ""
for word in wordlist:
if w1 and w2:
if (w1,w2) not in erokov:
erokov[(w1,w2)] = []
erokov[(w1,w2)].append(word)
w1,w2 = w2, word
count = 0
sentence = ""
w1,w2 = random.choice(erokov.keys())
while count < 11:
try:
tmp = random.choice(erokov[(w1,w2)])
sentence += tmp
w1,w2 = w2, tmp
count += 1
except KeyError:
print "Error!"
pass
print sentence
| [
"[email protected]"
]
| |
f80e0eb67f0790a4fdf274aeb6c73eb6e9eec19b | cdc996370837c00003296556afdb33e2f2fee884 | /devel_scripts/launcher.py | 5237995d7e1aaac822ae3a4d546bf7b117644b25 | []
| no_license | capitaneanu/borunte_robot | 1d4f14aadb2aa9e041ea0fdccc85d424cf155fb2 | 85e8765cbfae879f297c5254733a2dea48daeba0 | refs/heads/master | 2022-09-15T03:09:14.062484 | 2020-05-20T17:39:01 | 2020-05-20T17:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # -*- coding: utf-8 -*-
import os
import shlex
import sys
import time
TIMEOUT = 1.0
processes = [] # type: List[sh.RunningCommand]
class ProgramTerminated(Exception):
pass
def check_pid(pid):
""" Check For the existence of a unix pid. """
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def start_process(command, line):
"""
:type command: sh.Command
"""
processes.append(
command(shlex.split(line), _out=sys.stdout, _err=sys.stderr, _bg=True)
)
time.sleep(TIMEOUT)
def terminate_processes():
for process in processes:
if process is None:
continue
try:
process.terminate()
except OSError:
pass
process.wait()
def check_processes():
for process in processes:
if process is None:
continue
if not check_pid(process.pid):
raise ProgramTerminated()
def wait_loop():
try:
while True:
check_processes()
time.sleep(TIMEOUT)
except KeyboardInterrupt:
pass
except ProgramTerminated:
print('A program terminated, stopping other processes.')
| [
"[email protected]"
]
| |
59fe0e859dc6987503f7f78594e9789a09d02ae2 | cc60064828984edca97af87427159981e89f582d | /torch/_dynamo/output_graph.py | aee9bfebcf4d9f2f436a509da52fcbe1f879468d | [
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
]
| permissive | d4l3k/pytorch | fdd28e089aa77c1cd897da02c9dd765d53f5ab05 | 11890156e7d10d1fb72c41a38a1b94fc27004d2b | refs/heads/master | 2023-04-16T01:22:23.749295 | 2023-04-04T13:09:25 | 2023-04-06T01:51:10 | 153,366,388 | 0 | 0 | NOASSERTION | 2018-10-16T23:14:39 | 2018-10-16T23:14:38 | null | UTF-8 | Python | false | false | 34,350 | py | import collections
import copy
import functools
import itertools
import logging
import operator
import re
import sys
import traceback
from dataclasses import dataclass
from typing import Any, Dict, List, NamedTuple, Optional, OrderedDict, Set, Union
import torch._guards
import torch._logging
import torch.nn
from torch import fx
from torch._guards import (
Checkpointable,
Guard,
GuardsCheckpointState,
Source,
TracingContext,
)
from torch.fx.experimental.symbolic_shapes import ShapeEnv
from . import config, logging as torchdynamo_logging, variables
from .backends.registry import CompiledFn, CompilerFn
from .bytecode_transformation import (
create_call_function,
create_instruction,
Instruction,
unique_id,
)
from .codegen import PyCodegen
from .exc import BackendCompilerFailed, unimplemented
from .guards import GuardBuilder
from .mutation_guard import is_dynamic_nn_module
from .side_effects import SideEffects
from .source import (
ConstantSource,
DeterministicAlgorithmsSource,
is_constant_source,
LocalSource,
ParamBufferSource,
ShapeEnvSource,
)
from .utils import (
assert_no_fake_params_or_buffers,
checkpoint_params,
CleanupHook,
clone_inputs,
count_calls,
counters,
dynamo_timed,
format_graph_code,
format_graph_tabular,
same,
)
from .variables.base import VariableTracker
from .variables.builder import GraphArg, TrackedFake, VariableBuilder, wrap_fx_proxy
from .variables.nn_module import NNModuleVariable
from .variables.tensor import (
SymNodeVariable,
TensorVariable,
UnspecializedPythonVariable,
)
# Module logger plus artifact loggers; artifact loggers are gated by
# torch._logging so graph dumps can be enabled selectively.
log = logging.getLogger(__name__)

graph_tabular_log = torch._logging.getArtifactLogger(__name__, "graph")
graph_code_log = torch._logging.getArtifactLogger(__name__, "graph_code")
class OutputGraphState(NamedTuple):
    """Immutable snapshot of OutputGraph bookkeeping.

    Produced by OutputGraph.copy_graphstate and consumed by
    OutputGraph.restore_graphstate to roll tracing back to a checkpoint.
    """

    graphargs: List[GraphArg]
    tracked_fakes: List[TrackedFake]
    guard_state: GuardsCheckpointState
    nn_modules: Optional[Dict[str, torch.nn.Module]]
    param_name_to_source: Optional[Dict[str, Source]]
    side_effects: SideEffects
    timestamp: int

    def diff(self, other: "OutputGraphState", *, prefix: str = "") -> Optional[str]:
        """Describe the first field that differs from *other*, or None if equal."""
        for field_name in self._fields:
            if field_name in ("guard_state", "side_effects"):
                # These members carry their own diff logic; delegate to it.
                delta = getattr(self, field_name).diff(getattr(other, field_name))
                if delta is not None:
                    return delta
                continue
            mine = getattr(self, field_name)
            theirs = getattr(other, field_name)
            if mine != theirs:
                return f"{prefix}{field_name} mismatch: {mine} != {theirs}"
        return None

    # Back compat .guards api
    @property
    def guards(self):
        return self.guard_state.dynamo_guards
@functools.lru_cache(None)
def _step_logger():
    # Memoized so every caller shares a single step logger bound to this
    # module's `log` object.
    return torchdynamo_logging.get_step_logger(log)
@dataclass
class GraphCompileReason:
    """Records why a given output graph was compiled, i.e. what caused the
    graph break."""

    # Short human-readable description of the break cause.
    reason: str
    # User-code frames that were active when the break happened.
    user_stack: List[traceback.FrameSummary]
def _get_gen_rand_values_fn(random_calls):
def _gen_rand_values():
return [fn(*args, **kwargs) for fn, args, kwargs in random_calls]
return _gen_rand_values
class FakeRootModule(torch.nn.Module):
    """Stand-in root module so fx.GraphModule's constructor accepts the
    submodules collected during tracing (they are attached by name)."""

    def __init__(self, nn_modules: Dict[str, torch.nn.Module]):
        super().__init__()
        for attr_name, submodule in nn_modules.items():
            setattr(self, attr_name, submodule)

    def __repr__(self):
        return "FakeRootModule(...)"
class WrapperBackend:
    """Wraps a compiler backend, optionally checking the compiled graph's
    output against eager execution (``config.verify_correctness``).

    Fix: removed an unreachable ``return self.gm.forward`` that followed the
    ``raise RuntimeError`` in ``__call__`` (dead code).
    """

    def __init__(self, backend: CompilerFn, original_example_inputs):
        self.backend: CompilerFn = backend
        self.original_example_inputs = original_example_inputs

    @property
    def example_inputs(self):
        # Fresh clones on each access, so the eager and compiled runs cannot
        # interfere with each other through in-place mutation of the inputs.
        return clone_inputs(self.original_example_inputs)

    def __call__(self, gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]):
        """Compile ``gm`` with the wrapped backend and return a callable.

        Falls back to ``gm.forward`` when the backend declines (returns None
        or the unmodified forward). When ``config.verify_correctness`` is
        set, runs both eager and compiled versions on cloned inputs and
        raises ``RuntimeError`` if the results differ.
        """
        self.restore = checkpoint_params(gm)
        self.gm = gm
        copy_gm = copy.deepcopy(self.gm)
        self.candidate = self.backend(copy_gm, self.original_example_inputs)

        if self.candidate is None or self.candidate is self.gm.forward:
            return self.gm.forward

        if not config.verify_correctness:
            return self.candidate

        # if verify_correctness=True
        try:
            correct = self.gm.forward(*self.example_inputs)
            result = self.candidate(*self.example_inputs)

            # TODO: replace `same` function with the one in testing
            if same(correct, result):
                return self.candidate

            raise RuntimeError(f"incorrect results of backend {self}")
        except Exception:
            log.exception("error in verify_correctness")
            raise
        finally:
            # Parameters may have been mutated by either run; roll back to
            # the checkpoint taken before compilation.
            self.restore()
class OutputGraph(fx.Tracer, Checkpointable[OutputGraphState]):
"""
Wrapper class to hold outputs of InstructionTranslator. Mainly the
generated fx.Graph.
"""
    def __init__(
        self,
        f_globals: Dict[str, Any],
        code_options: Dict[str, Any],
        compiler_fn: CompilerFn,
        root_tx,
        export: bool,
        export_constraints,
    ):
        """Set up an empty FX graph plus the bookkeeping needed to trace into it.

        Args:
            f_globals: globals dict of the frame being traced; stored as
                ``root_globals`` (``register_attr_or_module`` installs
                constants into it).
            code_options: code attributes (``co_varnames`` etc.) of the frame;
                copied so they can be mutated safely.
            compiler_fn: backend compiler; stored for use when the captured
                graph is compiled.
            root_tx: the root InstructionTranslator driving this trace.
            export: True when tracing for export; also enables
                ``allow_non_fake_inputs`` on the fake mode below.
            export_constraints: user-provided constraints, stored verbatim.
        """
        super().__init__()
        self.graph = torch.fx.Graph()
        self.graphargs: List[GraphArg] = []
        self.export = export
        self.export_constraints = export_constraints
        # In export mode, we force the shape_env to strictly disallow any constraining
        # of the user marked dynamic dims
        fake_mode = torch._subclasses.FakeTensorMode(
            shape_env=ShapeEnv(
                allow_scalar_outputs=config.capture_scalar_outputs,
                allow_dynamic_output_shape_ops=config.capture_dynamic_output_shape_ops,
            )
            if config.dynamic_shapes
            else None,
            # TODO (tmanlaibaatar) Remove this once we always lift params and buffers
            allow_non_fake_inputs=True if self.export else False,
        )
        self.tracing_context: TracingContext = TracingContext(fake_mode)
        if config.dynamic_shapes:
            # Register a SHAPE_ENV guard to make sure we setup shape guards
            # that show up in ShapeEnv
            self.guards.add(ShapeEnvSource().make_guard(GuardBuilder.SHAPE_ENV))
        self.guards.add(
            DeterministicAlgorithmsSource().make_guard(
                GuardBuilder.DETERMINISTIC_ALGORITHMS
            )
        )
        # tracked_fakes says where any tensor that was wrapped to fake came
        # from. It is similar to GraphArg, in that all GraphArgs will get
        # added to TrackedFakes, but TrackedFakes also contains
        # GraphArgs that got pruned, and things like Tensor attributes which
        # aren't explicit graph inputs. Used by shape guard
        self.tracked_fakes: List[TrackedFake] = []
        # Although we prune unused graphargs before sending graphs to
        # compilers, we may have legitimately triggered shape guards
        # on "unused" inputs that we must keep track of. So after
        # remove_unused_graphargs is called, orig_graphargs and
        # graphargs no longer alias; orig_graphargs is the original
        # graphargs, and graphargs is the pruned list. Guard creation
        # should use original graphargs.
        self.orig_graphargs: List[GraphArg] = self.graphargs
        self.nn_modules: Optional[Dict[str, torch.nn.Module]] = dict()
        # Stores the full fqn of a param or buffer to the relevant source.
        self.param_name_to_source: Optional[Dict[str, Source]] = dict()
        self.side_effects = SideEffects()
        self.code_options = dict(code_options)
        self.output_instructions: List[Instruction] = []
        # used to track nodes that are added between calls of copy_graphstate
        # and restore_graphstate
        self.timestamp = 0
        # Node => computed real value (see utils.get_real_value)
        self.real_value_cache: Dict[fx.Node, torch.Tensor] = {}
        # Not checkpointed
        self.compiler_fn: CompilerFn = compiler_fn
        self.root_globals = f_globals
        self.root_tx = root_tx
        # Imported here (not at module top) -- presumably to avoid an import
        # cycle with symbolic_convert; confirm before moving it.
        from torch._dynamo.symbolic_convert import InstructionTranslatorBase

        self._current_tx: List[InstructionTranslatorBase] = []
        self.cleanups: List[CleanupHook] = []
        self.should_exit = False
        self.random_values_var = None
        self.initial_random_state = ()
        self.unspec_variable_map: Dict[str, UnspecializedPythonVariable] = {}
        # Enables creating unique node names by tracking
        # all current placeholder node names
        self.name_to_input: OrderedDict[
            str, Optional[fx.Proxy]
        ] = collections.OrderedDict()
    @property
    def output(self):
        # Identity alias: lets callers access `.output` uniformly whether they
        # hold a translator or this OutputGraph (NOTE(review): inferred from
        # the usage pattern -- confirm).
        return self
    @property
    def fake_mode(self):
        """The root translator's FakeTensorMode (``self.root_tx.fake_mode``)."""
        return self.root_tx.fake_mode
    @property
    def shape_env(self):
        """ShapeEnv of the tracing context's fake mode.

        None unless ``config.dynamic_shapes`` was set when the fake mode was
        created in ``__init__``.
        """
        return self.tracing_context.fake_mode.shape_env
    @property
    def guards(self) -> Set[Guard]:
        """The accumulated dynamo guards (stored on the TracingContext)."""
        return self.tracing_context.guards_context.dynamo_guards
    def push_tx(self, tx):
        """Push *tx* onto the stack of active translators (see ``current_tx``)."""
        self._current_tx.append(tx)
    def pop_tx(self):
        """Pop and return the most recently pushed translator."""
        return self._current_tx.pop()
@property
def current_tx(self):
return self.root_tx if not self._current_tx else self._current_tx[-1]
def copy_graphstate(self) -> OutputGraphState:
"""Create a checkpoint of the current state by copying everything"""
assert self.nn_modules is not None
assert self.param_name_to_source is not None
guards_graph_state = self.tracing_context.guards_context.copy_graphstate()
state = OutputGraphState(
list(self.graphargs),
list(self.tracked_fakes),
guards_graph_state,
dict(self.nn_modules),
dict(self.param_name_to_source),
self.side_effects.clone(),
self.timestamp,
)
self.timestamp += 1
return state
def restore_graphstate(self, state: OutputGraphState):
"""Restore a checkpoint created by self.copy_graphstate()"""
(
self.graphargs,
self.tracked_fakes,
guards_state,
self.nn_modules,
self.param_name_to_source,
self.side_effects,
self.timestamp,
) = state
self.tracing_context.guards_context.restore_graphstate(guards_state)
# FX deepcopy doesn't work for a partially created graph, so just remove new nodes
removed_nodes = 0
for node in reversed(list(self.graph.nodes)):
if node.meta["creation_timestamp"] > self.timestamp:
# Erasing node alone does not remove the meta information
# So, remove the help tensor explicitly
if "example_value" in node.meta:
del node.meta["example_value"]
self.remove_node(node)
self.real_value_cache.pop(node, None)
removed_nodes += 1
log.debug(f"restore_graphstate: removed {removed_nodes} nodes")
def add_grapharg(self, arg: GraphArg):
curr_pos = len(self.graphargs)
self.graphargs.append(arg)
    def count_calls(self):
        """Count of call nodes in ``self.graph`` (delegates to ``utils.count_calls``)."""
        return count_calls(self.graph)
def get_submodule(self, keys):
assert keys
obj = self.nn_modules
for k in keys.split("."):
if isinstance(obj, dict):
obj = obj[k]
else:
obj = getattr(obj, k)
return obj
def create_graph_input(self, name, type_expr=None):
# unique
if name in self.name_to_input:
for i in itertools.count():
if f"{name}_{i}" not in self.name_to_input:
name = f"{name}_{i}"
break
if self.name_to_input:
prev_name = next(reversed(self.name_to_input))
ctx = self.graph.inserting_after(self.name_to_input[prev_name])
else:
ctx = self.graph.inserting_before(None)
with ctx:
proxy = self.create_proxy("placeholder", name, (), {}, type_expr=type_expr)
self.name_to_input[name] = proxy.node
return proxy
def new_var(self, name="tmp"):
existing = set(self.code_options["co_varnames"])
for i in itertools.count():
var = f"___{name}_{i}"
if var not in existing:
self.code_options["co_varnames"] += (var,)
return var
    def update_co_names(self, name):
        """Ensure self.code_options.co_names contains name"""
        # co_names is a tuple (immutable), so extend by re-binding.
        if name not in self.code_options["co_names"]:
            self.code_options["co_names"] += (name,)
@staticmethod
def module_has_hooks(mod, only_check_unsupported=False):
supported_hooks = [
"_forward_pre_hooks",
"_forward_hooks",
]
unsupported_hooks = [
"_backward_pre_hooks",
"_backward_hooks",
"_state_dict_pre_hooks",
"_state_dict_hooks",
"_load_state_dict_pre_hooks",
"_load_state_dict_post_hooks",
]
check_hooks = unsupported_hooks
if not only_check_unsupported:
check_hooks += supported_hooks
return any(len(getattr(mod, x)) > 0 for x in check_hooks if hasattr(mod, x))
    def register_attr_or_module(
        self,
        target: Union[torch.nn.Module, torch.Tensor, Any],
        *names,
        **options,
    ):
        """Register *target* as an attribute of the traced root module.

        Returns a VariableTracker wrapping the registration. Dispatch by type:
        tensors become ``get_attr`` proxies (with a TENSOR_MATCH guard unless
        the source is constant); nn.Modules become NNModuleVariable (with an
        NN_MODULE guard); SymInt/SymFloat become SymNodeVariable; anything
        else is installed into ``root_globals`` as a constant.

        ``names`` are joined and sanitized into a valid attribute name; if
        *target* was already registered (identity match), its existing name
        is reused. ``options`` must include ``source`` and may carry
        ``guards``.
        """
        if is_dynamic_nn_module(target):
            return variables.UnspecializedNNModuleVariable(target, **options)

        # Copy before mutating so the caller's dict/guard set are untouched.
        options = dict(options)
        options["guards"] = set(options.get("guards", []))
        assert "source" in options
        source = options["source"]
        assert not isinstance(source, ParamBufferSource)

        # Each branch defines wrap_name(module_key) -> VariableTracker for the
        # kind of target being registered; it is invoked once a name is chosen.
        if isinstance(target, torch.Tensor):
            if not is_constant_source(source):
                options["guards"].add(source.make_guard(GuardBuilder.TENSOR_MATCH))

            def wrap_name(module_key):
                assert self.param_name_to_source is not None
                self.param_name_to_source[module_key] = source

                return wrap_fx_proxy(
                    self.root_tx,
                    self.create_proxy("get_attr", module_key, tuple(), {}),
                    example_value=target,
                    **options,
                )

        elif isinstance(target, torch.nn.Module):
            assert isinstance(target, torch.nn.Module)
            if self.module_has_hooks(target, only_check_unsupported=True):
                torch._logging.warning_once(
                    log, "nn.Module hooks are not fully supported, they may be ignored"
                )
            options["guards"].add(source.make_guard(GuardBuilder.NN_MODULE))

            def wrap_name(module_key):
                return NNModuleVariable(type(target), module_key, **options)

        elif isinstance(target, (torch.SymInt, torch.SymFloat)):
            # HACKY CODE REGION BEGIN
            # WE ARE PIGGYBACKING ON EXISTING INFRA TO REGISTER ATTRS
            # This ultimately gets written to self.nn_modules, which is unfortunate
            # Attrs that are tensors and symints and such need to be migrated to have their
            # own storage
            # alas, this is like this for now

            def wrap_name(module_key):
                return SymNodeVariable.create(
                    self,
                    self.create_proxy("get_attr", module_key, tuple(), {}),
                    sym_num=target,
                    **options,
                )

            # HACKY CODE REGION END
        else:

            def wrap_name(module_key):
                self.output.update_co_names(module_key)
                self.root_globals[module_key] = target
                return VariableBuilder(self, ConstantSource(source_name=module_key))(
                    target
                )

        assert self.nn_modules is not None
        # Reuse an existing registration (matched by identity) when possible.
        for k, v in self.nn_modules.items():
            if v is target:
                # it already exists
                return wrap_name(k)

        # create a new unique name
        name = "_".join(map(str, names))
        # Strip the guard lookup L/G access
        name = re.sub(r"^[GL]\['?(.*?)'?\]$", r"\1", name)
        # e.g. replace abc.xyz[123].qkv with abc.xyz_123.qkv
        name = re.sub(r"\[(\d+)\]", r"_\g<1>", name)
        # e.g. replace abc.xyz_123.qkv with abc_xyz_123_qkv
        name = re.sub(r"[^a-zA-Z0-9]", "_", name)

        if not name or not name[0].isalpha():
            # Attribute names must start with a letter.
            name = "sub" + name
        base = name
        for i in itertools.count():
            if name not in self.nn_modules:
                self.nn_modules[name] = target
                if isinstance(target, torch.nn.Module):
                    # Record a source for every parameter/buffer reachable from
                    # the module under its new fully-qualified name.

                    def register_leaf_name(leaf_name):
                        assert self.param_name_to_source is not None
                        new_source = ParamBufferSource(source, leaf_name)
                        new_name = f"{name}.{leaf_name}"
                        self.param_name_to_source[new_name] = new_source

                    # annoying, but there are cases when we do not have parameters
                    # see test_nn_moduledict_contains
                    if hasattr(target, "_parameters"):
                        for leaf_name, _ in target.named_parameters(
                            remove_duplicate=False
                        ):
                            register_leaf_name(leaf_name)
                    if hasattr(target, "_buffers"):
                        for leaf_name, _ in target.named_buffers(
                            remove_duplicate=False
                        ):
                            register_leaf_name(leaf_name)

                return wrap_name(name)
            name = f"{base}_{i}"

        raise AssertionError("unreachable")
def compile_subgraph(
    self, tx, partial_convert=False, reason: Optional[GraphCompileReason] = None
):
    """
    Generate a subgraph to continue execution on user code.
    Automatically restore live variables.

    Emits (a) the compiled FX graph call and (b) bytecode that restores the
    Python stack / live locals so execution can resume in the original frame.
    """
    from .eval_frame import disable

    self.partial_convert = partial_convert
    self.compile_subgraph_reason = reason
    log.debug(f"COMPILING GRAPH due to {reason}")
    # Graph breaks inside try/with blocks are not supported.
    if not all(block.can_restore() for block in tx.block_stack):
        unimplemented("compile_subgraph with block_depth != 0")
    prefix_insts: List[Instruction] = []
    if sys.version_info >= (3, 11):
        # prefix instructions (Python 3.11+): replay cell/free-var setup
        # so the generated code object sees the same closure layout.
        for inst in tx.prefix_insts:
            if inst.opname == "MAKE_CELL":
                prefix_insts.append(
                    create_instruction("MAKE_CELL", argval=inst.argval)
                )
            elif inst.opname == "COPY_FREE_VARS":
                prefix_insts.append(
                    create_instruction(
                        "COPY_FREE_VARS", arg=len(tx.code_options["co_freevars"])
                    )
                )
            else:
                prefix_insts.append(inst)

    def append_prefix_insts():
        # Flush the prefix instructions exactly once (list is cleared after).
        self.add_output_instructions(prefix_insts)
        prefix_insts.clear()

    for block in reversed(tx.block_stack):
        block.exit(tx)
    tx.prune_dead_locals()
    stack_values = list(tx.stack)
    assert self.nn_modules is not None
    root = FakeRootModule(self.nn_modules)
    # Add all the local vars to the "stack" so restore at the end
    restore_vars = []
    val_to_names: OrderedDict[
        VariableTracker, List[str]
    ] = collections.OrderedDict()
    if stack_values:
        val_to_names[stack_values[-1]] = list()
    for k, v in tx.symbolic_locals.items():
        # Note! this explicitly uses .local_name for matching
        # Failure to do so will cause spurious registrations in val_to_names.
        # This will in turn result in spurious variables showing up in the graph.
        # This was very tricky to debug. For an example, dump the graph at call_user_compiler
        # while running test_subgraphs.py
        if isinstance(v.source, LocalSource) and v.source.local_name == k:
            continue  # no need to restore initial state
        if v not in val_to_names:
            val_to_names[v] = list()
        val_to_names[v].append(k)
    for v in val_to_names.keys():
        restore_vars.extend(val_to_names[v])
        stack_values.extend([v] * len(val_to_names[v]))
    # to handle random calls: re-seed and precompute random values so the
    # compiled region reproduces eager's random sequence.
    if len(tx.random_calls) > 0:
        append_prefix_insts()
        random_calls_instructions = []
        self.random_values_var = self.new_var("random_values")
        rand_fn_name = unique_id("__gen_rand_values")
        rand_fn = disable(_get_gen_rand_values_fn(tx.random_calls))
        self.install_global(rand_fn_name, rand_fn)
        codegen = PyCodegen(tx, root)
        random_calls_instructions.extend(
            [
                codegen.create_load_global("random", True, add=True),
                codegen.create_load_attr("setstate"),
                codegen.create_load_const(tx.output.initial_random_state),
            ]
            + create_call_function(1, False),
        )
        random_calls_instructions.extend(
            codegen.load_function_name(rand_fn_name, True)
        )
        random_calls_instructions.extend(create_call_function(0, False))
        random_calls_instructions.append(
            codegen.create_store(tx.output.random_values_var),
        )
        self.add_output_instructions(random_calls_instructions)
    if (
        stack_values
        and all(
            not isinstance(v, UnspecializedPythonVariable) for v in stack_values
        )
        and all(isinstance(x, TensorVariable) for x in stack_values)
        and len(set(stack_values)) == len(stack_values)
        and self.side_effects.is_empty()
    ):
        append_prefix_insts()
        # optimization to generate better code in a common case
        self.add_output_instructions(
            self.compile_and_call_fx_graph(tx, list(reversed(stack_values)), root)
            + [create_instruction("UNPACK_SEQUENCE", arg=len(stack_values))]
        )
    else:
        graph_output_var = self.new_var("graph_out")
        # pass1 discovers how often each value is used ...
        pass1 = PyCodegen(tx, root, graph_output_var)
        self.side_effects.codegen_save_tempvars(pass1)
        pass1.foreach(stack_values)
        self.side_effects.codegen_update_mutated(pass1)
        # one more time now that we have established tempvars
        pass2 = PyCodegen(
            tx,
            root,
            graph_output_var,
            tempvars={val: None for val, count in pass1.uses.items() if count > 1},
        )
        self.side_effects.codegen_save_tempvars(pass2)
        pass2.foreach(stack_values)
        self.side_effects.codegen_update_mutated(pass2)
        output = []
        if count_calls(self.graph) != 0 or len(pass2.graph_outputs) != 0:
            output.extend(
                self.compile_and_call_fx_graph(tx, pass2.graph_output_vars(), root)
            )
            if len(pass2.graph_outputs) != 0:
                output.append(pass2.create_store(graph_output_var))
            else:
                # graph returned nothing we need; drop the call's result
                output.append(create_instruction("POP_TOP"))
        append_prefix_insts()
        self.add_output_instructions(output + pass2.get_instructions())
    # restore all the live local vars
    self.add_output_instructions(
        [PyCodegen(tx).create_store(var) for var in reversed(restore_vars)]
    )
def compile_and_call_fx_graph(self, tx, rv, root):
    """
    Generate code from self.graph and return the Instruction()s to
    call that generated code.

    `rv` is the list of output VariableTrackers; `root` is the fake root
    module the GraphModule is built against.
    """
    from .eval_frame import disable

    assert isinstance(rv, list)
    assert isinstance(root, FakeRootModule)
    for output in rv:
        self.guards.update(output.guards)
    self.create_node(
        "output", "output", (self.create_arg(tuple(x.as_proxy() for x in rv)),), {}
    )
    self.remove_unused_graphargs()
    ncalls = count_calls(self.graph)
    counters["stats"]["calls_captured"] += ncalls
    # free a bit of memory
    for node in self.graph.nodes:
        if "example_value" in node.meta:
            del node.meta["example_value"]
    self.real_value_cache.clear()
    gm = fx.GraphModule(root, self.graph)
    gm.recompile()
    gm.compile_subgraph_reason = self.compile_subgraph_reason
    name = unique_id("__compiled_fn")
    assert_no_fake_params_or_buffers(gm)
    compiled_fn = self.call_user_compiler(gm)
    # disable() prevents dynamo from re-tracing the compiled artifact
    compiled_fn = disable(compiled_fn)
    counters["stats"]["unique_graphs"] += 1
    self.install_global(name, compiled_fn)
    graph_code_log.debug(format_graph_code(name, gm))
    graph_tabular_log.debug(format_graph_tabular(name, gm))
    cg = PyCodegen(tx)
    cg.make_call_generated_code(name)
    return cg.get_instructions()
@dynamo_timed(phase_name="backend_compile")
def call_user_compiler(self, gm: fx.GraphModule) -> CompiledFn:
    """
    Invoke the user-selected backend compiler on `gm` and return the
    compiled callable.  Any exception from the backend is re-raised as
    BackendCompilerFailed (original traceback preserved).
    """
    tot = 0
    placeholders = []
    for node in gm.graph.nodes:
        if node.op in ("call_function", "call_method", "call_module"):
            tot += 1
        if node.op == "placeholder":
            placeholders.append(node)
    torch._dynamo.utils.increment_op_count(tot)
    # placeholders and graphargs must line up 1:1 so sources can be attached
    assert len(placeholders) == len(self.graphargs)
    for pl, arg in zip(placeholders, self.graphargs):
        pl._dynamo_source = arg.source
    gm._param_name_to_source = self.param_name_to_source
    try:
        name = (
            self.compiler_fn.__name__
            if hasattr(self.compiler_fn, "__name__")
            else ""
        )
        _step_logger()(logging.INFO, f"calling compiler function {name}")
        compiler_fn = self.compiler_fn
        # WrapperBackend needs real inputs, for now, to verify correctness
        if config.verify_correctness:
            compiler_fn = WrapperBackend(compiler_fn, self.example_inputs())
        # NOTE: [Real Tensors in Accuracy Evaluation]
        #
        # Today, tensors are passed to backends as fake at compile time. See the .fake_example_inputs()
        # call to compiler_fn below. At runtime, backends use real tensors.
        #
        # This should be a strong invariant we hold across all backends,
        # and generally, it is. However, for accuracy evaluation, we need real tensors at compile time,
        # for now, due to the unfortunate setup described below.
        #
        # Due to the nature of how we invoke comparison as a backend in two different ways:
        #
        # (1) Less bad, but still worth rewriting, WrapperBackend above, which takes
        # real inputs for its ctor. see the config.verify_correctnes above.
        #
        # (2) More bad, and very worth rewriting, the minifier installs accuracy comparison as
        # a true backend, and therefore needs to be compiled with real inputs. This is made trickier
        # by the fact that the minifier will spawn new processes during minification. As such, we have
        # created a global flag, MINIFIER_SPAWNED, that should be set IF AND ONLY IF this run was spawned
        # as part of accuracy minification. This flag is not a contract, and ideally will not be here long.
        #
        # The longer term PoR is to:
        # (A) Rewrite the minifier accuracy evaluation and verify_correctness code to share the same
        # correctness and accuracy logic, so as not to have two different ways of doing the same thing.
        #
        # (B) Refactor minifier accuracy backend to do its comparison fully at runtime, so as not to need to
        # pass real tensors to it at compile time.
        is_top_level_minifying = (
            config.repro_after is not None and config.repro_level == 4
        )
        if torch._dynamo.debug_utils.MINIFIER_SPAWNED or is_top_level_minifying:
            # Disable the tracing context so we don't pick up the ambient
            # fake tensor mode
            with torch._guards.tracing(None):
                compiled_fn = compiler_fn(gm, self.example_inputs())
        elif config.DO_NOT_USE_legacy_non_fake_example_inputs:
            compiled_fn = compiler_fn(gm, self.example_inputs())
        else:
            compiled_fn = compiler_fn(gm, self.fake_example_inputs())
        _step_logger()(logging.INFO, f"done compiler function {name}")
        assert callable(compiled_fn), "compiler_fn did not return callable"
    except Exception as e:
        # Wrap every backend failure; `from None` suppresses the chained
        # context while with_traceback keeps the original traceback.
        raise BackendCompilerFailed(self.compiler_fn, e).with_traceback(
            e.__traceback__
        ) from None
    return compiled_fn
def fake_example_inputs(self) -> List[torch.Tensor]:
    """Return fake example values for every graph arg, flattened in order.

    Falls back to an arg's real examples when no fake ones were recorded
    (e.g. graph args that are not tensors).
    """
    collected: List[torch.Tensor] = []
    for graph_arg in self.graphargs:
        fakes = graph_arg.get_fake_examples()
        if fakes is None:
            # Fallback, in case fake_tensor was not set
            # Particularly for graph args that are not tensors
            collected.extend(graph_arg.get_examples())
        else:
            collected.extend(fakes)
    return collected
def example_inputs(self) -> List[torch.Tensor]:
    """Return the real example values of all graph args, flattened in order."""
    flat: List[torch.Tensor] = []
    for graph_arg in self.graphargs:
        flat.extend(graph_arg.get_examples())
    return flat
def remove_unused_graphargs(self) -> None:
    """
    Drop dead nodes and unused placeholder inputs from self.graph, and
    filter self.graphargs down to the args that are actually used.
    """
    # First sweep: remove user-less get_attr / getitem nodes (reversed so
    # that removing a consumer can expose its producer as dead too).
    for node in reversed(list(self.graph.nodes)):
        if len(list(node.users)) == 0:
            if node.op == "get_attr":
                self.remove_node(node)
            elif node.op == "call_function" and node.target is operator.getitem:
                self.remove_node(node)
    # One GraphArg may expand to several placeholders; replicate each arg
    # len(arg) times so it zips 1:1 with the leading placeholder nodes.
    expanded_graphargs = []
    for arg in self.graphargs:
        expanded_graphargs.extend([arg] * len(arg))
        arg.uses = 0
    for node, arg in zip(self.graph.nodes, expanded_graphargs):
        assert node.op == "placeholder"
        arg.uses += len(node.users)
    # list() snapshot: we mutate the graph while walking the pairs.
    for node, arg in list(zip(self.graph.nodes, expanded_graphargs)):
        if arg.uses == 0:
            log.debug(f"REMOVE UNUSED GRAPHARG {arg.source.name()}")
            if "example_value" in node.meta:
                del node.meta["example_value"]
            self.remove_node(node)
            self.real_value_cache.pop(node, None)
    self.graphargs = [arg for arg in self.graphargs if arg.uses > 0]
def add_output_instructions(self, prefix: List[Instruction]) -> None:
    """
    We call this on the creation of a new compiled subgraph that is inserted
    before user code.  Appends the instructions and marks tracing as done.
    """
    self.should_exit = True
    self.output_instructions += prefix
def install_global(self, name, value) -> None:
    """Bind *value* as *name* in the root frame's globals; a CleanupHook is
    recorded so the binding is removed when this graph is cleaned up."""
    self.cleanups.append(CleanupHook.create(self.root_globals, name, value))
def cleanup(self) -> None:
    """Break reference cycles and drop cached tensors/metadata so memory
    is released promptly after compilation."""
    # There is a reference cycle between tracer and OutputGraph, causing
    # some of the tensor objects to be held alive for longer than necessary.
    self.root_tx = None
    # Note: generated fx graph will hold a reference to the nn_module,
    # So depending on the backend they may not be released
    self.nn_modules = None
    self.param_name_to_source = None
    # Cleanup graphargs
    for graph_arg in self.graphargs:
        graph_arg.erase()
    for node in self.graph.nodes:
        if "example_value" in node.meta:
            del node.meta["example_value"]
    self.real_value_cache.clear()
    self.name_to_input.clear()
    self.side_effects.keepalive = []
def create_proxy(
    self,
    kind,
    target,
    args,
    kwargs,
    name=None,
    type_expr=None,
    proxy_factory_fn=None,
):
    """
    Wrap Tracer.create_proxy to annotate the created fx node with
    nn_module_stack / source_fn metadata and the user stack trace.
    """
    rv = super().create_proxy(
        kind, target, args, kwargs, name, type_expr, proxy_factory_fn
    )
    # append stack trace to fx node
    tx = self.current_tx
    nn_module_stack = tx.nn_module_stack
    if nn_module_stack:
        rv.node.meta["nn_module_stack"] = nn_module_stack.copy()
    if kind in {"call_function", "call_method"}:
        rv.node.meta["source_fn"] = target
    elif kind == "call_module":
        # For modules we store the class
        rv.node.meta["source_fn"] = rv.node.meta["nn_module_stack"][target][1]
    frame_summaries: List[traceback.FrameSummary] = []
    # Walk up the chain of (possibly inlined) instruction translators.
    while tx:
        frame_summaries.append(tx.frame_summary())
        tx = getattr(tx, "parent", None)
    # Reverse the frame_summaries, such that the innermost frame is at the last
    frame_summaries.reverse()
    # official from_list stub doesn't have new-style type
    msgs = traceback.StackSummary.from_list(frame_summaries).format()  # type: ignore[arg-type]
    rv.node.stack_trace = "".join(msgs)
    return rv
def create_node(self, *args, **kwargs):
    """Create an fx node and stamp it with the current timestamp so node
    creation can be ordered later."""
    node = super().create_node(*args, **kwargs)
    node.meta["creation_timestamp"] = self.timestamp
    return node
# Note: we did not override erase_node since
# we call self.graph.erase_node elsewhere
def remove_node(self, node):
    """Erase *node* from the fx graph and drop its name_to_input entry."""
    self.graph.erase_node(node)
    self.name_to_input.pop(node.name, None)
| [
"[email protected]"
]
| |
85f5723a1b5f6ace5827d42b8a4f999504cb1d52 | 92e3840c3b799a8dffffa2aa5fc3ee9c4c3e3bea | /src/apps/video_tags/classification/en_vtag_process.py | 58b8abccecb8387790fdc8ab9f5d4b6aab2f13de | []
| no_license | ZouJoshua/nlp_server | a5c1de32b1fcce769fd70af71425897f0dd03abf | ef53a3dc5856aff5e6ba8ad449f0b21962acbd80 | refs/heads/master | 2022-12-05T14:51:45.816542 | 2019-09-19T10:32:54 | 2019-09-19T10:32:54 | 173,694,625 | 0 | 0 | null | 2022-11-21T21:38:00 | 2019-03-04T07:24:12 | Python | UTF-8 | Python | false | false | 19,874 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 2019/3/4 12:26
@File : predict.py
@Desc : 印度英语视频tag处理
"""
from pyquery import PyQuery
import re
import logging
import json
import requests
from nltk import ne_chunk, pos_tag, word_tokenize
from nltk.tree import Tree
class EnProcess(object):
    """Post-processing for (Indian) English video tags: cleaning, keyword
    mapping and tag extraction from title/description text."""

    def __init__(self, vtag2kwline, fix2list, word2fix, kw2vtag, stopwords, logger=None):
        """Store the lookup tables and select a logger.

        When no truthy *logger* is supplied, a default logger named
        "nlp_v_tags_process" at INFO level is used.
        """
        self.fix2list = fix2list
        self.vtag2kwline = vtag2kwline
        self.kw2vtag = kw2vtag
        self.stopwords = stopwords
        self.word2fix = word2fix
        if logger:
            self.log = logger
        else:
            default_logger = logging.getLogger("nlp_v_tags_process")
            default_logger.setLevel(logging.INFO)
            self.log = default_logger
def get_cleaned_tags(self, title, taglist):
    """Clean and merge a raw tag list against the video *title*.

    Each raw tag is normalized via trim_vtag(); candidates are then ranked
    through several heuristic passes (title-trunk match, term frequency,
    prefix/suffix overlap, period/language/type side results) and the
    merged tag names are returned in priority order.
    """
    self.log.info("Processing en video tag of taglist:【{}】".format(taglist))
    newtaglist = []
    # original author note: keep tags with tf >= 5 (actual thresholds below differ)
    resultdict = {}
    oldtagdict = {}
    title_lower = title.lower()
    tmp_title = ''
    tmp_title2 = ''
    old_tagdeleteset = set()
    # Greedily concatenate leading one-word tags that form a prefix of the
    # title; that concatenation becomes a candidate multi-word tag.
    for vtag in taglist:
        vtag = vtag.lower()
        token = vtag.split(' ')
        if len(token) == 1:
            tmp_title2 += vtag + ' '
            if title_lower.find(tmp_title2.strip()) >= 0:
                tmp_title = tmp_title2
                old_tagdeleteset.add(vtag)
                continue
            else:
                break
        else:
            break
    taglist2 = []
    if tmp_title != '' and len(tmp_title.strip().split(' ')) >= 2:
        # print(title_lower)
        # print(tmp_title.strip())
        for vtag in taglist:
            vtag = vtag.lower()
            if vtag not in old_tagdeleteset:
                taglist2.append(vtag)
    else:
        taglist2 = taglist
    taglist = taglist2
    # Normalize every remaining tag through trim_vtag and collect the
    # period/lang/type side results.
    for vtag in taglist:
        vtag = vtag.lower()
        if vtag.endswith('video') or vtag.endswith('song') or vtag.endswith('movie') or vtag.endswith('show'):
            vtag = vtag + 's'
        if vtag not in oldtagdict:
            oldtagdict[vtag] = 1
        else:
            oldtagdict[vtag] += 1
        vtag2, cresultdict, details = self.trim_vtag(vtag)
        # print(title)
        # print(vtag+'==>'+'#'.join(vtag2))
        # for debug_word in details:
        #     print('\t'+debug_word)
        for k, v in cresultdict.items():
            if k not in resultdict:
                resultdict[k] = v
            else:
                resultdict[k].extend(v)
        newtaglist.extend(vtag2)
    # newtaglist process
    x2list = []
    x2dict = {}
    mergetaglist = []
    mergetagdict = {}
    tmp_title = tmp_title.strip()
    if tmp_title != '' and len(tmp_title.split(' ')) >= 2:
        if tmp_title not in mergetagdict:
            mergetaglist.append((tmp_title, 30, 'onegramemerge'))
            mergetagdict[tmp_title] = 'onegramemerge'
    for ntag in newtaglist:
        ntag = ntag.strip()
        if ntag != '' and ntag not in self.fix2list:
            if ntag not in x2dict:
                x2dict[ntag] = 1
            else:
                x2dict[ntag] += 1
            x2list.append(ntag)
    # step0: title split — split the title on punctuation and keep trunks
    # that map to a known, sufficiently frequent vtag.
    pattern1 = r'(\||\-\s{1}|\s{1}\-|\(|\)|\?|!|–\s{1}|\s{1}–|│|' \
               r'\"|\'\s{1}|\s{1}\'|‘\s{1}|\s{1}‘|’\s{1}|\s{1}’|:|\s{1}\[|\]\s{1}|~|\/\s{1}|\s{1}\/|🔴|•)'
    res = re.compile(pattern1, flags=0)
    title2 = res.sub("#", title.lower())
    for trunk in title2.split('#'):
        trunk = trunk.strip()
        if trunk == '': continue
        ntaglist = []
        foundit = 0
        # NOTE(review): vtag2kwline values are indexed [0] (count?) and [4]
        # (flag?) — exact field meaning not visible here; verify upstream.
        if trunk in self.vtag2kwline:
            if self.vtag2kwline[trunk][4] == 0 and self.vtag2kwline[trunk][0] >= 2:
                ntaglist.append(trunk)
                foundit = 1
        if foundit == 0 and trunk in self.kw2vtag:
            tagset = self.kw2vtag[trunk]
            for ntag in tagset:
                if ntag in self.vtag2kwline:
                    if self.vtag2kwline[ntag][4] == 0 and self.vtag2kwline[ntag][0] >= 2:
                        ntaglist.append(ntag)
        for xtag in ntaglist:
            if xtag not in mergetagdict:
                mergetaglist.append((xtag, 25, 'trunk'))
                mergetagdict[xtag] = 'trunk'
        # if trunk in title_split_tag and trunk not in mergetagdict:
        #     trunkres = title_split_tag[trunk]
        #     mergetaglist.append((trunkres, 25, 'trunk'))
        #     mergetagdict[trunkres] = 'trunk'
    # step1: tags normalized more than once get a tf-based score
    for k, v in x2dict.items():
        if v >= 2 and k not in mergetagdict:
            mergetaglist.append((k, 10 * v, 'tf>=2'))
            mergetagdict[k] = 'tf>=2'
    # step2: score shorter tags that prefix/suffix longer ones
    step2_dict = {}
    for x in x2list:
        for y in x2list:
            if len(x) < len(y) and x in oldtagdict and (y.startswith(x + ' ') or y.endswith(' ' + x)):
                if x not in step2_dict:
                    step2_dict[x] = 1 + len(x.split(' '))
                else:
                    step2_dict[x] += 1 + len(x.split(' '))
    sortedtstep2_dict = sorted(step2_dict.items(), key=lambda k: k[1], reverse=True)
    for k, v in sortedtstep2_dict:
        if v >= 3:
            if k not in mergetagdict:
                mergetagdict[k] = 'fix'
                mergetaglist.append((k, v, 'fix'))
    # step3: what remains of x2list — keep tags known to vtag2kwline or
    # anchored at the start/end of the title (cap total output at 7)
    step3dict = {}
    for k in x2list:
        ff = 0
        if k in self.vtag2kwline:
            ff = 1
        elif title.lower().strip().startswith(k) or title.lower().strip().endswith(k):
            ff = 1
        else:
            pass
        if ff == 0: continue
        if k not in step3dict:
            step3dict[k] = ff
        else:
            step3dict[k] += ff
    sortedtstep3_dict = sorted(step3dict.items(), key=lambda k: k[1], reverse=True)
    for k, v in sortedtstep3_dict:
        if k not in mergetagdict:
            mergetagdict[k] = 'x2'
            if len(mergetaglist) < 7:
                mergetaglist.append((k, v, 'x2'))
    # step4: type period lang — emit the dominant side results as extra tags
    # ('p_'/'l_' prefixed for period/language).
    for k, vlist in resultdict.items():
        max_dict = {}
        for v in vlist:
            v = v.strip()
            if v not in max_dict:
                max_dict[v] = 1
            else:
                max_dict[v] += 1
        sortedmax_dict = sorted(max_dict.items(), key=lambda k: k[1], reverse=True)
        if k == 'period':
            for kk, vv in sortedmax_dict:
                if kk not in ['best', 'top', 'latest', 'updates', 'today', 'new']:
                    ptag = kk
                    if ptag != '' and ptag not in mergetagdict:
                        mergetagdict[ptag] = 'ptag'
                        mergetaglist.append(('p_' + ptag, 0.5, 'ptag'))
                    break
        if k == 'lang':
            for kk, vv in sortedmax_dict:
                ltag = kk
                if ltag != '' and ltag not in mergetagdict:
                    mergetagdict[ltag] = 'ltag'
                    mergetaglist.append(('l_' + ltag, 0.5, 'ltag'))
                break
        if k == 'type':
            if len(sortedmax_dict) > 0:
                cc_tag = sortedmax_dict[0][0]
                if cc_tag != '' and cc_tag not in mergetagdict:
                    mergetagdict[cc_tag] = 'ttag'
                    mergetaglist.append((cc_tag, 0.5, 'ttag'))
                for kk, vv in sortedmax_dict:
                    if len(kk.split(' ')) >= 2:
                        if kk != '' and kk not in mergetagdict:
                            mergetagdict[kk] = 'ttag'
                            mergetaglist.append((kk, 0.5, 'ttag'))
    return [item[0] for item in mergetaglist]
def extract_tag(self, title, text):
    """Extract up to five tags from a video *title* and description *text*.

    Title trunks that also appear in the text and map to known vtags are
    taken first; then named entities (NLTK NE chunks) from title and text
    are frequency-weighted and the frequent ones added.
    """
    self.log.info("extracting tags from title and text...")
    mergetaglist = []
    mergetagdict = {}
    lasttaglist = []
    # Split title on punctuation/separators into trunks.
    pattern1 = r'(\||\-\s{1}|\s{1}\-|\(|\)|\?|!|–\s{1}|\s{1}–|│|' \
               r'\"|\'\s{1}|\s{1}\'|‘\s{1}|\s{1}‘|’\s{1}|\s{1}’|:|\s{1}\[|\]\s{1}|~|\/\s{1}|\s{1}\/|🔴|•)'
    res = re.compile(pattern1, flags=0)
    title2 = res.sub("#", title)
    title2_lower = title2.lower()
    # Drop a duplicated title prefix from the description.
    if text.startswith(title):
        text = text[len(title):]
    text2 = text.replace('\\n', ' #')
    # Strip URLs and decorative separator characters from the text.
    pattern_http = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
    pattern_replace = re.compile(r'(▬|=)')
    text2 = pattern_http.sub("#", text2)
    text2 = pattern_replace.sub("#", text2)
    text2_lower = text2.lower()
    text2_ner_list = self.get_continuous_chunks(text2)
    debug_list1 = []
    debug_list2 = []
    title_nerlist = []
    for title_trunk in title2.split('#'):
        title_trunk = title_trunk.strip()
        title_trunk_lower = title_trunk.lower()
        if title_trunk == '': continue
        # A trunk repeated in the text is a strong tag candidate.
        if text2_lower.find(title_trunk_lower) >= 0 and title_trunk != title2:
            debug_list1.append(title_trunk_lower)
            if title_trunk_lower in self.vtag2kwline:
                if title_trunk_lower not in mergetagdict:
                    mergetaglist.append([title_trunk_lower, 'title_trunk_vtag'])
                    mergetagdict[title_trunk_lower] = None
            elif title_trunk_lower in self.kw2vtag:
                for vtag in self.kw2vtag[title_trunk_lower]:
                    if vtag not in mergetagdict:
                        mergetaglist.append([title_trunk_lower, 'title_trunk_kw'])
                        mergetagdict[title_trunk_lower] = None
        # debug_list2.append(title_trunk_lower)
        title_trunk_list = self.get_continuous_chunks(title_trunk)
        title_nerlist.extend(title_trunk_list)
    # Frequency-weight NE chunks: title chunks count 1.5 (2 if multi-word),
    # text chunks count 1 each; stopwords are ignored.
    tfdict = {}
    for trunk in title_nerlist:
        trunk_lower = trunk.lower()
        if trunk_lower == '': continue
        if trunk_lower in self.stopwords: continue
        n = len(trunk_lower.split(' '))
        x = 1.5
        if n >= 2:
            x = 2
        if trunk_lower not in tfdict:
            tfdict[trunk_lower] = x
        else:
            tfdict[trunk_lower] += x
    for trunk in text2_ner_list:
        trunk_lower = trunk.lower()
        if trunk_lower in self.stopwords: continue
        if trunk_lower == '': continue
        if trunk_lower not in tfdict:
            tfdict[trunk_lower] = 1
        else:
            tfdict[trunk_lower] += 1
    sorted_tfdict = sorted(tfdict.items(), key=lambda k: k[1], reverse=True)
    sorted_tfdict2 = [x for x in sorted_tfdict if x[1] >= 2]
    for c_tag, c_tf in sorted_tfdict2:
        # Keep frequent chunks that are known vtags or multi-word phrases.
        if c_tag in self.vtag2kwline or len(c_tag.split(' ')) >= 2:
            if c_tag not in mergetagdict:
                mergetaglist.append([c_tag, 'tf_vtag'])
                mergetagdict[c_tag] = None
    # Return at most the first five merged tags.
    for i, (tag, reason) in enumerate(mergetaglist):
        if i >= 5: break
        lasttaglist.append(tag)
    return lasttaglist
def trim_vtag(self, inputline):
# inputline = 'latest news 2019 news 2018'
inputraw = inputline
resultdict = {}
details = []
# 1. 预清洗
inputline = inputline.replace('#', ' ')
inputtoken = []
for w in inputline.split(' '):
w = w.strip()
if w != '':
inputtoken.append(w)
inputline = ' '.join(inputtoken)
details.append(inputraw + '0==>' + inputline)
# 2. 预判断:is in vtag2kwline or not
c_tag = []
if inputline in self.vtag2kwline:
c_tag = [inputline]
elif inputline in self.kw2vtag:
c_tag = list(self.kw2vtag[inputline])
if len(c_tag) >= 1:
details.append(inputline + '1==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
# 小于2元词不处理,直接返回
if len(inputtoken) < 2:
details.append(inputline + '2==>' + inputline)
return [inputline], resultdict, details
else:
pass
# 3.trim1 process: period trim 时间性单词 或修饰行状语
pattern_period = r'^top\s{1}\d.\s{1}|^best|^best of|^hit|2015|2016|2017|2018|2019|latest|updates|today| new$|new released|^new '
res_period = re.compile(pattern_period, flags=0)
res1 = res_period.sub('', inputline.strip())
res1_tokens = []
for w in res1.split(' '):
w = w.strip()
if w != '':
res1_tokens.append(w)
res1 = ' '.join(res1_tokens)
res1findall = res_period.findall(inputline.strip())
resultdict['period'] = res1findall
details.append(inputline + '3==>' + res1)
# 3. 预判断:is in vtag2kwline or not
c_tag = []
if res1 in self.vtag2kwline:
c_tag = [res1]
elif res1 in self.kw2vtag:
c_tag = list(self.kw2vtag[res1])
if len(c_tag) >= 1:
details.append(inputline + '4==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
# 小于2元词不处理,直接返回
if len(res1_tokens) < 2:
details.append(inputline + '5==>' + inputline)
return [inputline], resultdict, details
else:
pass
# 4.trim2 process: language trim
res1 = res1.replace('in english', 'english')
res1 = res1.replace('in hindi', 'hindi')
res1 = res1.replace('in hind', 'hindi')
res1 = res1.replace('in hinid', 'hindi')
res1 = res1.replace('in telugu', 'telugu')
res1 = res1.replace('in tamil', 'tamil')
res1 = res1.replace('in malayalam', 'malayalam')
res1 = res1.replace('in bhojpuri', 'bhojpuri')
res1 = res1.replace('in punjabi', 'punjabi')
res1 = res1.replace('bangla', 'bengali')
res1 = res1.replace('in bengali', 'bengali')
res1 = res1.replace('in marathi', 'marathi')
res1 = res1.replace('in kannada', 'kannada')
res1 = res1.replace('in gujarati', 'gujarati')
res1 = res1.replace('in rajasthani', 'rajasthani')
res1 = res1.replace('haryanavi', 'haryanvi')
res1 = res1.replace('in haryanvi', 'haryanvi')
res1 = res1.replace('in assamese', 'assamese')
res1 = res1.replace('in bodo', 'bodo')
res1 = res1.replace('in dogri', 'dogri')
res1 = res1.replace('in kashmiri', 'kashmiri')
res1 = res1.replace('in konkani', 'konkani')
res1 = res1.replace('in maithili', 'maithili')
res1 = res1.replace('in manipuri', 'manipuri')
res1 = res1.replace('in nepali', 'nepali')
res1 = res1.replace('in odia', 'odia')
res1 = res1.replace('in sanskrit', 'sanskrit')
res1 = res1.replace('in santali', 'santali')
res1 = res1.replace('in sindhi', 'sindhi')
res1 = res1.replace('in urdu', 'urdu')
# 4. 预判断:is in vtag2kwline or not
c_tag = []
if res1 in self.vtag2kwline:
c_tag = [res1]
elif res1 in self.kw2vtag:
c_tag = list(self.kw2vtag[res1])
if len(c_tag) >= 1:
details.append(res1 + '6==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
# 小于2元词不处理,直接返回
if len(res1.split(' ')) < 2:
details.append(res1 + '7==>' + res1)
return [res1], resultdict, details
else:
pass
pattern_lang = r'english|hindi|telugu|tamil|malayalam|' \
r'bhojpuri|punjabi|bengali|marathi|kannada|' \
r'gujarati|rajasthani|haryanvi|assamese|bodo|' \
r'dogri|kashmiri|konkani|maithili|manipuri|nepali|' \
r'odia|sanskrit|santali|sindhi|urdu|haryanavi'
res_lang = re.compile(pattern_lang, flags=0)
res2 = res_lang.sub('', res1.strip())
res2_tokens = []
for w in res2.split(' '):
w = w.strip()
if w != '':
res2_tokens.append(w)
res2 = ' '.join(res2_tokens)
if res2.endswith('video') or res2.endswith('song') or res2.endswith('movie') or res2.endswith('show'):
res2 = res2 + 's'
res2findall = res_lang.findall(res1.strip())
resultdict['lang'] = res2findall
details.append(res1 + '8==>' + res2)
# 4. 预判断:is in vtag2kwline or not
c_tag = []
if res2 in self.vtag2kwline:
c_tag = [res2]
elif res2 in self.kw2vtag:
c_tag = list(self.kw2vtag[res2])
if len(c_tag) > 1:
details.append(res2 + '9==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
# 小于等于2元词不处理,直接返回
if len(res2_tokens) < 2:
details.append(res1 + '10==>' + res1)
return [res1], resultdict, details
else:
pass
# 5.trim3 process: type
# trim2: type
word = res2
word2 = word
resultdict['type'] = []
for k, v in self.word2fix.items():
if word.find(k + ' ') >= 0 or word.find(' ' + k) >= 0 or word == k:
word2 = word.replace(k, '').strip()
resultdict['type'].append(k)
word = word2
if word2 in self.word2fix:
word2 = ''
res3_tokens = []
for x in word2.split(' '):
if x != '' and x != 's':
res3_tokens.append(x)
res3 = ' '.join(res3_tokens)
# 5. 预判断:is in vtag2kwline or not
c_tag = []
if res3 in self.vtag2kwline:
c_tag = [res3]
elif res3 in self.kw2vtag:
c_tag = list(self.kw2vtag[res3])
if len(c_tag) > 1:
details.append(res3 + '11==>' + '#'.join(c_tag))
return c_tag, resultdict, details
else:
pass
# 小于等于2元词不处理,直接返回
if len(res3_tokens) < 2:
details.append(res2 + '12==>' + res2)
return [res2], resultdict, details
else:
pass
details.append(res3 + '13==>' + res3)
return [res3], resultdict, details
def get_continuous_chunks(self, text):
    """Tokenize, POS-tag and NE-chunk *text* with NLTK and flatten the
    result into a list of strings.

    Consecutive named-entity subtrees are merged into a single
    space-joined string; non-entity tokens are appended individually.
    """
    chunked = ne_chunk(pos_tag(word_tokenize(text)))
    continuous_chunk = []
    current_chunk = []
    for i in chunked:
        if type(i) == Tree:
            # Named-entity subtree: accumulate its token text.
            current_chunk.append(" ".join([token for token, pos in i.leaves()]))
        elif current_chunk:
            # Entity run just ended: flush it, then emit the plain token.
            continuous_chunk.append(" ".join(current_chunk))
            continuous_chunk.append(i[0])
            current_chunk = []
        else:
            continuous_chunk.append(i[0])
            continue
    # Flush a trailing entity run.
    if current_chunk:
        continuous_chunk.append(" ".join(current_chunk))
        current_chunk = []
    return continuous_chunk
| [
"[email protected]"
]
| |
15c9096c932868854571f4061ed0c3a68eec026e | 6efc2eb23678741263da7ac6bd868a9f3a37d38b | /01.stock_investment/05.chart_analysis/test_boto3/test_s3_download.py | 363a8247b52c0222ee245d335c3d2c697ee9f4c1 | []
| no_license | predora005/business-research | c6272b129353a302673cf8a13c1629b5ade4a50e | 96743cc6a0b592c87e6d0f2de341fc3bbb3ef3b1 | refs/heads/main | 2023-06-18T08:08:24.537951 | 2021-07-22T04:19:09 | 2021-07-22T04:19:09 | 314,985,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | # coding: utf-8
import boto3
import tempfile
##################################################
# Main: four equivalent ways to download S3 objects with boto3
##################################################
if __name__ == '__main__':
    # NOTE(review): BUCKET_NAME is intentionally blank — fill in before running.
    BUCKET_NAME = ''
    OBJECT_NAME1 = 'dir1/file1.txt'
    FILE_NAME1 = 'file1.txt'
    OBJECT_NAME2 = 'dir1/file2.txt'
    FILE_NAME2 = 'file2.txt'
    OBJECT_NAME3 = 'dir2/file3.csv'
    FILE_NAME3 = 'file3.csv'
    OBJECT_NAME4 = 'dir2/file4.txt'
    FILE_NAME4 = 'file4.txt'
    ##############################
    # 1) resource API: Bucket.download_file
    s3 = boto3.resource('s3')
    s3.Bucket(BUCKET_NAME).download_file(OBJECT_NAME1, FILE_NAME1)
    ##############################
    # The download_file method
    # 2) client API: client.download_file
    s3 = boto3.client('s3')
    s3.download_file(BUCKET_NAME, OBJECT_NAME2, FILE_NAME2)
    ##############################
    # 3) resource API: Bucket.download_fileobj into an open file object
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(BUCKET_NAME)
    with open(FILE_NAME3, 'wb') as f:
        bucket.download_fileobj(OBJECT_NAME3, f)
    ##############################
    # The download_fileobj method
    # 4) client API: client.download_fileobj
    s3 = boto3.client('s3')
    with open(FILE_NAME4, 'wb') as f:
    #with tempfile.NamedTemporaryFile(mode='wb') as f:
        s3.download_fileobj(BUCKET_NAME, OBJECT_NAME4, f)
        print(f.name)
        # NOTE(review): prints the bound method object — probably meant f.tell()
        print(f.tell)
| [
"[email protected]"
]
| |
624e6493ba366cde8a495ba0effb21374417bbd1 | 4d0213e588149b9fa86fbe35faea8657052d9254 | /setup.py | 27b1a3f4ad3e7c71aeb236803df30c35aed1ff6d | [
"Apache-2.0"
]
| permissive | Pyligent/gen-efficientnet-pytorch | 1e492dec87fa33458e452472c65ed0f7afd1a876 | b3bc163478737924f508978a6f0c96e07046e025 | refs/heads/master | 2020-12-14T15:51:36.930259 | 2019-10-30T22:31:10 | 2019-10-30T22:31:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,688 | py | """ Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py, used to resolve data files.
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Defines __version__ in this namespace by executing geffnet/version.py.
# NOTE(review): exec-of-file is a common setup.py pattern but runs arbitrary code.
exec(open('geffnet/version.py').read())

setup(
    name='geffnet',
    version=__version__,
    description='(Generic) EfficientNets for PyTorch',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/rwightman/gen-efficientnet-pytorch',
    author='Ross Wightman',
    author_email='[email protected]',
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],

    # Note that this is a string of words separated by whitespace, not a list.
    keywords='pytorch pretrained models efficientnet mixnet mobilenetv3 mnasnet',
    packages=find_packages(exclude=['data']),
    install_requires=['torch >= 1.1', 'torchvision'],
    python_requires='>=3.6',
)
| [
"[email protected]"
]
| |
e3fccd35bcac0946969cbb7de0a9f8057ab2c8ee | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/BsPlanInviteConfig.py | f6e1ed05d6ca34d93fd5c47f5c13a9c375717bbe | [
"Apache-2.0"
]
| permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,362 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BsPlanInviteConfig(object):
    """Invite-plan time window config: an end_time / start_time pair with
    Alipay-style dict (de)serialization."""

    def __init__(self):
        self._end_time = None
        self._start_time = None

    @property
    def end_time(self):
        return self._end_time

    @end_time.setter
    def end_time(self, value):
        self._end_time = value

    @property
    def start_time(self):
        return self._start_time

    @start_time.setter
    def start_time(self, value):
        self._start_time = value

    def to_alipay_dict(self):
        """Serialize to a dict, skipping unset (falsy) fields and
        recursing into values that expose to_alipay_dict()."""
        result = dict()
        for field_name in ('end_time', 'start_time'):
            field_value = getattr(self, field_name)
            if field_value:
                if hasattr(field_value, 'to_alipay_dict'):
                    result[field_name] = field_value.to_alipay_dict()
                else:
                    result[field_name] = field_value
        return result

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a dict; returns None for a falsy input."""
        if not d:
            return None
        instance = BsPlanInviteConfig()
        for field_name in ('end_time', 'start_time'):
            if field_name in d:
                setattr(instance, field_name, d[field_name])
        return instance
| [
"[email protected]"
]
| |
3ecd4acf486810b559bb1eb756b9b32e70f99558 | f05084e33f189c3ca16982a6704fa808831fa71a | /yayo/cart/views.py | 5009693a7231de1285c2d32c1e33dd096dbdca83 | []
| no_license | willyowi/Yayo-maluku-shop | f7581ae4c5ca0a1dc6a9daa92701d0965d27914c | 7c8844bd3cbd97fdac01f991b45ca55b5f419c38 | refs/heads/master | 2021-01-06T16:06:36.397007 | 2020-02-18T15:20:46 | 2020-02-18T15:20:46 | 241,390,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from django.shortcuts import render,redirect,get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
# Create your views here.
@require_POST
def cart_add(request, product_id):
    """Validate the add-to-cart form and put the product in the session cart.

    Always redirects to the cart detail page, whether or not the form
    validated.
    """
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        cart.add(product=product,
                 quantity=data['quantity'],
                 update_quantity=data['update'])
    return redirect('cart:cart_detail')
def cart_remove(request, product_id):
    """Delete one product from the session cart, then show the cart page."""
    product = get_object_or_404(Product, id=product_id)
    Cart(request).remove(product)
    return redirect('cart:cart_detail')
def cart_detail(request):
    """Render the cart page with the current session cart."""
    return render(request, 'cart/detail.html', {'cart': Cart(request)})
| [
"[email protected]"
]
| |
a30781f84b1feca4e4a793f1a648138952c65940 | b2cefb7a2a83aa93ee1b15a780b5ddf6c498215b | /examples/nlp/duplex_text_normalization/data/data_split.py | 3b053a34419980bc0351c55707a288cbdab02f16 | [
"Apache-2.0"
]
| permissive | VahidooX/NeMo | bfde8c9b48c818342a9c6290fb9dee62fafeca38 | 866cc3f66fab3a796a6b74ef7a9e362c2282a976 | refs/heads/main | 2023-07-23T19:13:39.948228 | 2022-04-29T21:51:54 | 2022-04-29T21:51:54 | 227,733,473 | 1 | 2 | Apache-2.0 | 2022-09-15T15:30:13 | 2019-12-13T01:55:21 | Jupyter Notebook | UTF-8 | Python | false | false | 5,238 | py | # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script creates data splits of the Google Text Normalization dataset
of the format mentioned in the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will a folder named `en_with_types`.
3. Run this script
# python data_split.py \
--data_dir=en_with_types/ \
--output_dir=data_split/ \
--lang=en
In this example, the split files will be stored in the `data_split` folder.
The folder should contain three subfolders `train`, 'dev', and `test` with `.tsv` files.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import isdir, isfile, join
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
# Local Constants
TEST_SIZE_EN = 100002
TEST_SIZE_RUS = 100007
def read_google_data(data_file: str, lang: str, split: str):
    """
    Read one raw shard of the Google Text Normalization dataset
    (downloadable from https://www.kaggle.com/google-nlu/text-normalization).

    Args:
        data_file: Path to the shard. Should be of the form output-xxxxx-of-00100.
        lang: Selected language.
        split: data split ('train'/'dev'/'test').
    Return:
        examples: list of (classes, tokens, outputs) triples, one per sentence.
    """
    examples = []
    cur_classes, cur_tokens, cur_outputs = [], [], []
    with open(data_file, 'r', encoding='utf-8') as f:
        for line_idx, line in tqdm(enumerate(f)):
            fields = line.strip().split('\t')
            if split == "test":
                # Match the test sets reported in "RNN Approaches to Text
                # Normalization: A Challenge": English uses the first 100,002
                # lines and Russian the first 100,007 lines of
                # output-00099-of-00100.
                if lang == constants.ENGLISH and line_idx == TEST_SIZE_EN:
                    break
                if lang == constants.RUSSIAN and line_idx == TEST_SIZE_RUS:
                    break
            if len(fields) == 2 and fields[0] == '<eos>':
                # Sentence boundary: flush the accumulated example.
                examples.append((cur_classes, cur_tokens, cur_outputs))
                cur_classes, cur_tokens, cur_outputs = [], [], []
                continue
            # Remove _trans (for Russian)
            if lang == constants.RUSSIAN:
                fields[2] = fields[2].replace('_trans', '')
            # Update the current example
            assert len(fields) == 3
            cur_classes.append(fields[0])
            cur_tokens.append(fields[1])
            cur_outputs.append(fields[2])
    return examples
if __name__ == '__main__':
    parser = ArgumentParser(description='Preprocess Google text normalization dataset')
    parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
    parser.add_argument('--output_dir', type=str, default='preprocessed', help='Path to folder with preprocessed data')
    parser.add_argument(
        '--lang', type=str, default=constants.ENGLISH, choices=constants.SUPPORTED_LANGS, help='Language'
    )
    args = parser.parse_args()

    # Create the output dir and its per-split subfolders (if not exist)
    if not isdir(args.output_dir):
        mkdir(args.output_dir)
        mkdir(args.output_dir + '/train')
        mkdir(args.output_dir + '/dev')
        mkdir(args.output_dir + '/test')

    for fn in sorted(listdir(args.data_dir))[::-1]:
        fp = join(args.data_dir, fn)
        if not isfile(fp):
            continue
        if not fn.startswith('output'):
            continue

        # Determine the current split:
        #   output-00000..00089 -> train
        #   output-00090..00094 -> dev
        #   output-00099        -> test (truncated inside read_google_data)
        # Shards 00095..00098 belong to no split.
        split_nb = int(fn.split('-')[1])
        if split_nb < 90:
            cur_split = "train"
        elif split_nb < 95:
            cur_split = "dev"
        elif split_nb == 99:
            cur_split = "test"
        else:
            # BUGFIX: shards 95-98 previously fell through with `cur_split`
            # still holding the value from the previous iteration and were
            # silently written into the wrong split; skip them instead.
            continue
        data = read_google_data(data_file=fp, lang=args.lang, split=cur_split)
        # write out (BUGFIX: use a context manager so the file is closed)
        output_file = join(args.output_dir, cur_split, f'{fn}.tsv')
        print(fp)
        print(output_file)
        with open(output_file, 'w', encoding='utf-8') as output_f:
            for inst in data:
                cur_classes, cur_tokens, cur_outputs = inst
                for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                    output_f.write(f'{c}\t{t}\t{o}\n')
                output_f.write('<eos>\t<eos>\n')
        print(f'{cur_split}_sentences: {len(data)}')
| [
"[email protected]"
]
| |
b02a3215d5c955daec98e2db06f5171974b90720 | 05ec80585e500eb75baade82bada8f0c5a2a76dc | /Backtracking/GenerateIP.py | 4b339410a665caec82a4815768bb4049c6a8bab4 | []
| no_license | NenadPantelic/GeeksforGeeks-Must-Do-Interview-preparation | 24477da148d4b9fe8113f669f21984d081327563 | 180c6b1bc6a4b6e1b44c409c220368b391b672b8 | refs/heads/master | 2021-01-05T19:16:22.436554 | 2020-11-15T14:39:02 | 2020-11-15T14:39:02 | 241,113,139 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 21:19:12 2020
@author: nenad
"""
def is_valid(ip, pos, segments):
    """Return True when `ip` is a legal IPv4 octet string.

    Legal means: non-empty, numeric value in 0..255, no leading zero
    (e.g. "01" is rejected), and position `pos` not already consumed
    according to `segments`.
    """
    if not ip or segments[pos]:
        return False
    if len(ip) > 1 and ip.startswith("0"):
        # Octets such as "03" are not valid in a dotted-quad address.
        return False
    return 0 <= int(ip) <= 255
def genIP(string):
    """Print and return every valid dotted-quad IPv4 address formed from `string`.

    Improvement over the original: the list of addresses is returned (in
    discovery order) in addition to being printed, so callers can use the
    result programmatically. Callers that ignored the old None return are
    unaffected.
    """
    ips = []
    n = len(string)
    segments = [False] * n  # per-character "consumed" flags for solve()
    solve(string, n, 0, ips, segments, [])
    print(ips)
    return ips
def solve(string, n, pos, ips,segments, ip):
    """Backtracking worker: extend the partial address `ip` from `string[pos:]`.

    Appends every complete 4-octet address that consumes the whole string
    to `ips` (as dotted strings). `segments` marks which start positions
    are currently in use on the recursion path.
    """
    # a complete address has exactly 4 parts
    if len(ip) == 4:
        # accept only if we also reached the end of the string being processed
        if pos>=n:
            ips.append(".".join(ip))
        return
    # one part of an address has length from 1 to 3, both inclusive
    for i in range(1,min(4, n-pos+1)):
        # candidate octet: the next i characters
        substr = string[pos:pos+i]
        # extend only with a valid octet
        if is_valid(substr, pos,segments):
            # mark this start position as used on the current path
            segments[pos] = True
            # recurse: try to form the rest of the address from string[pos+i:]
            solve(string, n, pos+i, ips, segments, ip + [substr])
            # backtrack so other branches may reuse this position
            segments[pos] = False
    return
# Exercise the generator on sample strings of varying length
# (same cases, same order as the original individual test calls).
for sample in ("1111", "11211", "112112", "25500255"):
    genIP(sample)
| [
"[email protected]"
]
| |
907a3117714649a6d18e4ff188d89b213ab23196 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v39r1/tuplemaking/misidrestripping2015/runssmisidrestripping2012.py | 990ae9cec1042354b70a1ce99da16007f43cbb9e | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,035 | py | # $Id: $
# Test your line(s) of the stripping
#
# NOTE: Please make a copy of this file for your testing, and do NOT change this one!
#
##use CommonParticlesArchive
#from CommonParticlesArchive import CommonParticlesArchiveConf
#CommonParticlesArchiveConf().redirect("stripping21r1p1a")
#
#from Gaudi.Configuration import *
#from Configurables import DaVinci
#from StrippingConf.Configuration import StrippingConf
#
#
## Tighten Trk Chi2 to <3
#from CommonParticles.Utils import DefaultTrackingCuts
#DefaultTrackingCuts().Cuts = { "Chi2Cut" : [ 0, 3 ],
# "CloneDistCut" : [5000, 9e+99 ] }
#
##
##Raw event juggler to split Other/RawEvent into Velo/RawEvent and Tracker/RawEvent
##
#from Configurables import RawEventJuggler
#juggler = RawEventJuggler( DataOnDemand=True, Input=2.0, Output=4.2 )
#
##
##Fix for TrackEff lines
##
#from Configurables import DecodeRawEvent
#DecodeRawEvent().setProp("OverrideInputs",4.2)
#
## Specify the name of your configuration
#confname="B23MuNu" #FOR USERS
#
## NOTE: this will work only if you inserted correctly the
## default_config dictionary in the code where your LineBuilder
## is defined.
#from StrippingSelections import buildersConf
#confs = buildersConf()
#
#from StrippingSelections.Utils import lineBuilder, buildStreamsFromBuilder
##confs[confname]["CONFIG"]["SigmaPPi0CalPrescale"] = 0.5 ## FOR USERS, YOU ONLY NEED TO QUICKLY MODIFY CutName and NewValue (no need to recompile the package but please update the default_config before committing)
#streams = buildStreamsFromBuilder(confs,confname)
#
##clone lines for CommonParticles overhead-free timing
#print "Creating line clones for timing"
#for s in streams:
# for l in s.lines:
# if "_TIMING" not in l.name():
# cloned = l.clone(l.name().strip("Stripping")+"_TIMING")
# s.appendLines([cloned])
#
##define stream names
#leptonicMicroDSTname = 'Leptonic'
#charmMicroDSTname = 'Charm'
#pidMicroDSTname = 'PID'
#bhadronMicroDSTname = 'Bhadron'
#mdstStreams = [ leptonicMicroDSTname,charmMicroDSTname,pidMicroDSTname,bhadronMicroDSTname ]
#dstStreams = [ "BhadronCompleteEvent", "CharmCompleteEvent", "CharmToBeSwum", "Dimuon",
# "EW", "Semileptonic", "Calibration", "MiniBias", "Radiative" ]
#
#stripTESPrefix = 'Strip'
#
#from Configurables import ProcStatusCheck
#
#from PhysConf.Filters import LoKi_Filters
#flts = LoKi_Filters(VOID_Code = "( TrSource(TrSOURCE('/Event/Rec/Track/Best', TrLONG))"\
# " >> ( sum( TrPT,TrP < 1 * TeV ) > 1 * TeV ) )" ,
# VOID_Preambulo = ["from LoKiTracks.decorators import *" ,
# "from LoKiCore.functions import * ",
# "from GaudiKernel.SystemOfUnits import *"])
#filterBadEvents = GaudiSequencer("BadEventFilter",
# ModeOR = True,
# Members = [ flts.sequencer("GECFilter"),
# ProcStatusCheck() ] )
#streamFilter = { 'default' : filterBadEvents,
# 'MiniBias' : ProcStatusCheck() }
#
#
#sc = StrippingConf( Streams = streams,
# MaxCandidates = 2000,
# AcceptBadEvents = False,
# BadEventSelection = streamFilter,
# TESPrefix = stripTESPrefix,
# ActiveMDSTStream = True,
# Verbose = True,
# DSTStreams = dstStreams,
# MicroDSTStreams = mdstStreams )
#
from Configurables import DecayTreeTuple, FilterDesktop,CombineParticles,FitDecayTrees, TupleToolRecoStats, TupleToolTrigger, TupleToolTISTOS, CondDB
from DecayTreeTuple.Configuration import *
#ADDED for BDT reason
#from Configurables import LoKi__Hybrid__TupleTool
#from Configurables import LoKi__Hybrid__Tool as MyFactory
#mf = MyFactory("HybridFactory")
#mf.Modules.append( 'LoKiPhysMC.decorators' )
tuple = DecayTreeTuple("B_Tuple")
#tuple.Inputs = [location]
tuple.Inputs = ["/Event/Semileptonic/Phys/B23MuNu_TriFakeMuLine/Particles"]
#tuple.Inputs = ["Phys/DecayTreeFitterB"]
tuple.ToolList = [
"TupleToolKinematic",
"TupleToolEventInfo",
"TupleToolRecoStats",
"TupleToolANNPID"
]
tuple.addBranches({ # remove all "^" except where needed.
"Jpsi" : "^(J/psi(1S) -> mu+ mu-)",
"mu1" : " J/psi(1S) -> ^mu+ mu-",
"mu2" : " J/psi(1S) -> mu+ ^mu-"
})
tuple.Jpsi.ToolList += [ "TupleToolTISTOS" ]
tuple.Jpsi.addTool( TupleToolTISTOS, name = "TupleToolTISTOS" )
tuple.Jpsi.TupleToolTISTOS.Verbose = True
tuple.Jpsi.TupleToolTISTOS.TriggerList = [
"L0DiMuonDecision"
, "L0MuonDecision"
, "L0HadronDecision"
, "Hlt1TrackAllL0Decision"
, "Hlt1TrackMuonDecision"
, "Hlt1DiMuonHighMassDecision"
, "Hlt1SingleMuonHighPTDecision"
, "Hlt2TopoMu2BodyBBDTDecision"
, "Hlt2TopoMu3BodyBBDTDecision"
, "Hlt2Topo2BodyBBDTDecision"
, "Hlt2Topo3BodyBBDTDecision"
, "Hlt2DiMuonDetachedJPsiDecision"
, "Hlt2DiMuonDetachedDecision"
, "Hlt2SingleMuonDecision"
, "Hlt2DiMuonDetachedHeavyDecision"
]
LoKi_All1=tuple.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_All")
LoKi_All1.Variables = {
'MINIPCHI2' : "MIPCHI2DV(PRIMARY)",
'MINIP' : "MIPDV(PRIMARY)",
'ETA' : 'ETA',
'PHI' : 'PHI'
}
LoKi_Jpsi1=tuple.Jpsi.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bplus")
LoKi_Jpsi1.Variables = {
'TAU' : "BPVLTIME()",
'DIRA_OWNPV' : "BPVDIRA",
'FD_CHI2' : "BPVVDCHI2",
'ENDVERTEX_CHI2' : "VFASPF(VCHI2/VDOF)",
'X_travelled' : "VFASPF(VX)-BPV(VX)",
'Y_travelled' : "VFASPF(VY)-BPV(VY)",
'Z_travelled' : "VFASPF(VZ)-BPV(VZ)",
'P_Parallel' : "BPVDIRA*P",
'P_Perp' : "sin(acos(BPVDIRA))*P",
'BPVVDZ' : "BPVVDZ",
'Corrected_Mass' : "BPVCORRM"
}
LoKi_mu11=tuple.mu1.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu1")
LoKi_mu11.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu22=tuple.mu2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu2")
LoKi_mu22.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
tuple.Decay = "J/psi(1S) -> ^mu+ ^mu-"
tuple2 = DecayTreeTuple("B_Tuple2")
tuple2.Inputs = ["/Event/Semileptonic/Phys/B23MuNu_TriFakeMuLine/Particles"]
tuple2.ToolList = [
"TupleToolKinematic",
"TupleToolEventInfo",
"TupleToolRecoStats",
"TupleToolPid",
"TupleToolANNPID"
]
tuple2.addBranches({ # remove all "^" except where needed.
"Bplus" : "^([B+ -> (J/psi(1S) -> mu+ mu-) mu+]CC)",
"mu1" : "[B+ -> (J/psi(1S) -> ^mu+ mu-) mu+]CC",
"mu2" : "[B+ -> (J/psi(1S) -> mu+ ^mu-) mu+]CC ",
"mu3" : "[B+ -> (J/psi(1S) -> mu+ mu-) ^mu+]CC ",
})
tuple2.Bplus.ToolList += [ "TupleToolTISTOS" ]
tuple2.Bplus.addTool( TupleToolTISTOS, name = "TupleToolTISTOS" )
tuple2.Bplus.TupleToolTISTOS.Verbose = True
tuple2.Bplus.TupleToolTISTOS.TriggerList = [
"L0DiMuonDecision"
, "L0MuonDecision"
, "L0HadronDecision"
, "Hlt1TrackAllL0Decision"
, "Hlt1TrackMuonDecision"
, "Hlt1DiMuonHighMassDecision"
, "Hlt1SingleMuonHighPTDecision"
, "Hlt2TopoMu2BodyBBDTDecision"
, "Hlt2TopoMu3BodyBBDTDecision"
, "Hlt2Topo2BodyBBDTDecision"
, "Hlt2Topo3BodyBBDTDecision"
, "Hlt2DiMuonDetachedJPsiDecision"
, "Hlt2DiMuonDetachedDecision"
, "Hlt2SingleMuonDecision"
, "Hlt2DiMuonDetachedHeavyDecision"
]
LoKi_All=tuple2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_All")
LoKi_All.Variables = {
'MINIPCHI2' : "MIPCHI2DV(PRIMARY)",
'MINIP' : "MIPDV(PRIMARY)",
'ETA' : 'ETA',
'PHI' : 'PHI'
}
LoKi_Bplus=tuple2.Bplus.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_Bplus")
LoKi_Bplus.Variables = {
'TAU' : "BPVLTIME()",
'DIRA_OWNPV' : "BPVDIRA",
'FD_CHI2' : "BPVVDCHI2",
'ENDVERTEX_CHI2' : "VFASPF(VCHI2/VDOF)",
'X_travelled' : "VFASPF(VX)-BPV(VX)",
'Y_travelled' : "VFASPF(VY)-BPV(VY)",
'Z_travelled' : "VFASPF(VZ)-BPV(VZ)",
'P_Parallel' : "BPVDIRA*P",
'P_Perp' : "sin(acos(BPVDIRA))*P",
'BPVVDZ' : "BPVVDZ",
'Corrected_Mass' : "BPVCORRM"
}
LoKi_mu1=tuple2.mu1.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu1")
LoKi_mu1.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu2=tuple2.mu2.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu2")
LoKi_mu2.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
LoKi_mu3=tuple2.mu3.addTupleTool("LoKi::Hybrid::TupleTool/LoKi_mu3")
LoKi_mu3.Variables = {
'PIDmuLoki' : "PIDmu",
'PIDKLoki' : "PIDK",
'PIDpLoki' : "PIDp",
'ghost' : "TRGHP",
'TRACK_CHI2' : "TRCHI2DOF",
'NNK' : "PPINFO(PROBNNK)",
'NNpi' : "PPINFO(PROBNNpi)",
'NNmu' : "PPINFO(PROBNNmu)",
'isMuonLoose' : "switch(ISMUONLOOSE,1,0)",
'isMuonLoki' : "switch(ISMUON,1,0)",
'inMuon' : "switch(INMUON,1,0)",
'nShared' : "PPINFO(LHCb.ProtoParticle.MuonNShared,-1000)"
}
tuple2.Decay = "[B+ -> ^(J/psi(1S) -> ^mu+ ^mu-) ^mu+]CC" #^J/psi(1S)->
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMisppMuMu
tuple2.Bplus.addTool(TupleToolVertexMisppMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMisppMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMispmMuMu
tuple2.Bplus.addTool(TupleToolVertexMispmMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMispmMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMismpMuMu
tuple2.Bplus.addTool(TupleToolVertexMismpMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMismpMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolVertexMisMuMuMu
tuple2.Bplus.addTool(TupleToolVertexMisMuMuMu)
tuple2.Bplus.ToolList+=["TupleToolVertexMisMuMuMu"]
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolSallyvs3
tuple2.Bplus.addTool(TupleToolSallyvs3)
tuple2.Bplus.ToolList+=["TupleToolSallyvs3"]
#tuple2.Decay = "[B+ -> ^(J/psi(1S) -> ^mu+ ^mu-) ^mu+]CC" #^J/psi(1S)->
from DecayTreeTuple.Configuration import *
from Configurables import TupleToolApplypMuIsolation
tuple2.Bplus.addTool(TupleToolApplypMuIsolation)
tuple2.Bplus.TupleToolApplypMuIsolation.OutputSuffix="_weights"
tuple2.Bplus.TupleToolApplypMuIsolation.WeightsFile="weights_110614_Lc_pX.xml"
tuple2.Bplus.ToolList+=["TupleToolApplypMuIsolation"]
#Mysterious things to make isolation work
name="TupleToolApplypMuIsolation"
from Configurables import ChargedProtoParticleMaker
veloprotos = ChargedProtoParticleMaker(name+"ProtoPMaker")
veloprotos.Inputs = ["Rec/Track/Best"]
veloprotos.Output = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
from Configurables import DaVinci
DaVinci().appendToMainSequence( [ veloprotos ])
from Gaudi.Configuration import *
from Configurables import ProtoParticleCALOFilter, CombinedParticleMaker,NoPIDsParticleMaker
from CommonParticles.Utils import *
algorithm = NoPIDsParticleMaker('StdNoPIDsVeloPions', Particle = 'pion', )
algorithm.Input = "Rec/ProtoP/myProtoPMaker/ProtoParticles"
selector = trackSelector ( algorithm , trackTypes = ['Velo'] )
locations = updateDoD ( algorithm )
DaVinci().appendToMainSequence( [ algorithm ])
from Configurables import TimingAuditor, SequencerTimerTool
TimingAuditor().addTool(SequencerTimerTool,name="TIMER")
TimingAuditor().TIMER.NameSize = 60
from Configurables import AuditorSvc, ChronoAuditor
AuditorSvc().Auditors.append( ChronoAuditor("Chrono") )
#from Configurables import StrippingReport
#sr = StrippingReport(Selections = sc.selections())
#from Configurables import AlgorithmCorrelationsAlg
#ac = AlgorithmCorrelationsAlg(Algorithms = list(set(sc.selections())))
#DaVinci().HistogramFile = 'DV_stripping_histosnew2.root'
DaVinci().TupleFile = "B23MuNuFakeSS.root"
#DaVinci().HistogramFile = 'DVHistosnshared.root'
#DaVinci().TupleFile = "DVTuplesnshared.root"
#DaVinci().EvtMax = 10000
DaVinci().PrintFreq = 2000
#DaVinci().UserAlgorithms = [ tuple ]
#DaVinci().appendToMainSequence( [ tuple ] )
DaVinci().appendToMainSequence( [ tuple2 ] )
#DaVinci().appendToMainSequence( [ tuple2 ] )
#DaVinci().appendToMainSequence( [ sc.sequence() ] )
#DaVinci().appendToMainSequence( [ tuple] )
#DaVinci().appendToMainSequence( [ tuple2] )
#DaVinci().appendToMainSequence( [ sr ] )
#DaVinci().appendToMainSequence( [ ac ] )
#DaVinci().appendToMainSequence( [ tuple] )
#DaVinci().appendToMainSequence( [ tuple2] )
DaVinci().DataType = "2012"
DaVinci().InputType = "DST"
DaVinci().Lumi = True
DaVinci().Simulation = False
# change the column size of timing table
from Configurables import TimingAuditor, SequencerTimerTool
TimingAuditor().addTool(SequencerTimerTool,name="TIMER")
TimingAuditor().TIMER.NameSize = 60
#NTupleSvc().Output = ["FILE1 DATAFILE='trythis.root' TYP='ROOT' OPT='NEW'"]
MessageSvc().Format = "% F%60W%S%7W%R%T %0W%M"
#from GaudiConf import IOHelper
#IOHelper().inputFiles(['./00050733_00021988_1.semileptonic.dst'], clear=True)
# database
#DaVinci().DDDBtag = "dddb-20120831"
#DaVinci().CondDBtag = "cond-20121008"
#DaVinci().Lumi = True
# input file
#importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco15a_Run164668.py")
#importOptions("$STRIPPINGSELECTIONSROOT/tests/data/Reco14_Run125113.py")
| [
"[email protected]"
]
| |
540590ef128c7fc98cb5a28c475cbf774c51ff24 | d96787f92bd86c8d8bcf01a4e7ec8f7feec24194 | /kattis/nizovi/solution.py | 9c70b792c4c93471dd6c04868b1338089c92b9f2 | []
| no_license | iandioch/solutions | 133cbc3af58fadcde0b2e981fb0e7d05801070a7 | 8b3e458b3c01179ddf776bfbb897f263f22f3693 | refs/heads/master | 2023-04-09T03:39:16.952817 | 2023-03-15T20:00:53 | 2023-03-15T20:00:53 | 47,693,495 | 48 | 40 | null | 2019-10-22T14:52:59 | 2015-12-09T13:36:55 | Python | UTF-8 | Python | false | false | 721 | py | s = input()
curr = ''    # characters of the current, not-yet-emitted element
indent = 0   # current indentation width in spaces (2 per nesting level)
lines = []   # formatted output lines collected so far
# Scan the array literal character by character, emitting one line per
# opening brace, per closing brace, and per comma-terminated element.
for c in s:
    curr += c
    if curr == '{':
        # opening brace: emit it and increase the nesting level
        lines.append('{}{}'.format(' '*indent, c))
        curr = ''
        indent += 2
    elif curr.endswith('}') or curr.endswith('},'):
        # closing brace: first flush any element text that preceded it,
        # then emit the brace one level shallower
        d = curr.find('}')
        if len(curr[:d]) > 0:
            lines.append('{}{}'.format(' '*indent, curr[:d]))
        indent -= 2
        lines.append('{}{}'.format(' '*indent, curr[d:]))
        curr = ''
    elif curr[-1] == ',':
        # comma terminates a plain element: emit it as its own line
        lines.append('{}{}'.format(' '*indent, curr))
        curr = ''
# remove commas trailing after }s: a line holding only ',' belongs on the
# preceding '}' line, so merge it backwards (iterate in reverse so the
# deletions do not shift indices we still have to visit)
for j in range(len(lines)-1, -1, -1):
    if lines[j].strip() == ',':
        del lines[j]
        lines[j-1] += ','
print('\n'.join(lines))
| [
"[email protected]"
]
| |
2e618ec30de1abe5e786ff02bf4e9c6a5555c288 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2364/60623/305882.py | 64207e9aa3a3c85e563c2b8fdf16e3f809b357a9 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | a=input()
# Special-case the exact input "100" (print 10); echo anything else back.
print(10 if a == '100' else a)
"[email protected]"
]
| |
b632edb4abed10644c2eca37adee10ff3ebf2a1e | 080397d0e6d573ef6d7eb9c2bc6b1b5787cfe0d1 | /tests/twitter_learning_journal/builders/test_cache_path_builder.py | 2dfeb176343ff67367981b17880cefdbe6d09dac | [
"Beerware"
]
| permissive | DEV3L/twitter-learning-journal | ecd0eb922e369b10fd6e039d652eed7078601139 | a51d22a60a3d1249add352d8357975a7f2db585c | refs/heads/master | 2021-09-20T17:27:11.157096 | 2018-08-13T11:58:34 | 2018-08-13T11:58:34 | 114,556,953 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | from app.twitter_learning_journal.builders.cache_path_builder import build_cache_path
def test_build_cache_path():
    """The sub-directory should be appended under the pickle tweet cache root."""
    assert build_cache_path(sub_directory='test') == './data/pickle/tweets/test'
| [
"[email protected]"
]
| |
dc9388fcc7ecf66dabb9bc64fe98c2f689c370d6 | 20176bf4fbd8aec139c7b5a27f2c2e155e173e6e | /data/all-pratic/Anusha Koila/print_odd_num.py | d290d9c8c6e77770c4fb451217c46810fd11629d | []
| no_license | githubjyotiranjan/pytraining | 4ac4a1f83cc4270e2939d9d32c705019c5bc61c5 | 8b50c4ab7848bd4cbfdfbc06489768d577289c66 | refs/heads/master | 2020-03-19T06:22:20.793296 | 2018-06-15T20:08:11 | 2018-06-15T20:08:11 | 136,013,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | #print odd numbers
# Print the odd numbers below a user-supplied limit.
# Rejects negative numbers and non-integer input with a message.
# BUGFIX: the original `except erro1:` referenced an undefined name (so a
# bad input crashed with NameError), and its negative-number check lived
# inside the except clause where it could never run.
try:
    num = int(input("Enter an number :"))
except ValueError:
    print("Please enter a valid integer")
else:
    if num < 0:
        print("Negative numbers not allowed")
    else:
        print("ODD numbers list :\n ")
        # range(1, num, 2) yields exactly the odd numbers below num
        for i in range(1, num, 2):
            print(i)
| [
"[email protected]"
]
| |
93196c7e4c3d9aee7a600a779e6f089b06a181e0 | 13eae91d078c8b88c990bb6da1b9cdb8e3648b76 | /cogs/Downloader/lib/fontTools/misc/macRes.py | e8b3cbc20ed28d5048adec1ba0a12c560f11c715 | []
| no_license | skylarr1227/skybizzle | 98303c99a5ea897469e381e06dcda3725d6500d6 | 63c38995437d6880bd9bf0de52d406c904cbbd24 | refs/heads/master | 2023-05-13T00:12:46.827511 | 2019-11-12T01:03:45 | 2019-11-12T01:03:45 | 221,097,000 | 0 | 1 | null | 2023-05-07T06:22:44 | 2019-11-12T00:40:38 | Python | UTF-8 | Python | false | false | 6,591 | py | """ Tools for reading Mac resource forks. """
from fontTools.misc.py23 import *
import struct
from fontTools.misc import sstruct
from collections import OrderedDict
try:
from collections.abc import MutableMapping
except ImportError:
from UserDict import DictMixin as MutableMapping
class ResourceError(Exception):
    """Raised when resource data cannot be read (bad offset, truncated file)."""
    pass
class ResourceReader(MutableMapping):
    """Reader for Mac resource forks.

    Parses the resource map of a resource fork (read either from the
    actual fork on OS X or from a raw data fork) and exposes the result
    as a mapping of resource type (4-character string) to a list of
    Resource objects.
    """

    def __init__(self, fileOrPath):
        """fileOrPath: a readable binary file object, or a filesystem path."""
        self._resources = OrderedDict()
        if hasattr(fileOrPath, 'read'):
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        """Return a BytesIO over the file's resource fork (OS X only)."""
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + '/..namedfork/rsrc', 'rb') as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        """Return a BytesIO over the file's ordinary (data fork) contents."""
        with open(path, 'rb') as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        """Parse the fork header, the resource map, and every type list entry."""
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        """Read exactly numBytes (optionally seeking to offset first).

        Raises ResourceError if the seek or the read cannot be satisfied,
        so malformed/truncated forks fail with a well-defined exception.
        """
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError('Failed to seek offset (reached EOF)')
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError('Cannot read resource (not enough data)')
        return data

    def _readHeaderAndMap(self):
        """Unpack the fork header and the resource map header onto self.

        Sets dataOffset/mapOffset/... via sstruct, plus the absolute
        offsets of the type list and name list used by later parsing.
        """
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        """Read the type list and populate self._resources per type.

        The stored counts are off by one (0 means one entry), hence the
        `+ 1` on both the type count and the per-type resource count.
        """
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        self.numTypes, = struct.unpack('>H', numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item['type'], encoding='mac-roman')
            refListOffset = absTypeListOffset + item['refListOffset']
            numRes = item['numRes'] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        """Decompile numRes reference records of one type into Resource objects."""
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        """List of all resource types present, in file order."""
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        """Return the 1-based indices for the given type ([] if absent)."""
        numRes = self.countResources(resType)
        if numRes:
            return list(range(1, numRes+1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index-1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding='mac-roman')
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        """Close the underlying file object if it is still open."""
        if not self.file.closed:
            self.file.close()
class Resource(object):
    """A single Mac resource: type code, raw data, numeric id, name, attributes."""

    def __init__(self, resType=None, resData=None, resID=None, resName=None,
                 resAttr=None):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Populate id/name/attr/data from one reference-list record.

        refData is the raw ResourceRefItem bytes; reader (a ResourceReader)
        is used to fetch the resource data and, if present, its name.
        """
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        self.dataOffset, = struct.unpack('>L', bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        # the resource data is preceded by its 4-byte big-endian length
        dataLength, = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        if self.nameOffset == -1:
            # -1 marks "no name list entry" for this resource
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        # names are stored Pascal-style: a length byte followed by the bytes
        nameLength, = struct.unpack('B', reader._read(1, absNameOffset))
        name, = struct.unpack('>%ss' % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding='mac-roman')
ResourceForkHeader = """
> # big endian
dataOffset: L
mapOffset: L
dataLen: L
mapLen: L
"""
ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)
ResourceMapHeader = """
> # big endian
attr: H
typeListOffset: H
nameListOffset: H
"""
ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)
ResourceTypeItem = """
> # big endian
type: 4s
numRes: H
refListOffset: H
"""
ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)
ResourceRefItem = """
> # big endian
id: h
nameOffset: h
attr: B
dataOffset: 3s
reserved: L
"""
ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
| [
"[email protected] config --global user.email [email protected]"
]
| [email protected] config --global user.email [email protected] |
d4cb57f250e733e13d0676e9b5d25d710f3cafad | 7f52bb7c3a5ed3be6821306137c5217362d06dc3 | /manage.py | 3b50f6ea9203cf02974bbf61451c2a74f68e4d63 | []
| no_license | payush/cristianoronaldoyopmailcom-307 | 547f36250cf3c9c94bdea0fe8c7a1e3e1194294a | d2f2a1f76ab354e391bab8a628782c80a3b1c97a | refs/heads/master | 2020-03-23T14:24:53.428495 | 2018-07-20T06:44:00 | 2018-07-20T06:44:00 | 141,674,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | #!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings before dispatching the
    # command-line arguments to Django's management machinery.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cristianoronaldoyopmailcom_307.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django is importable, so the original failure was something else:
        # re-raise it unchanged.
        raise
    execute_from_command_line(sys.argv)
| [
"[email protected]"
]
| |
af9bfb5814f5f4141fc5fd9980c003da790129c1 | 2dbd4a34f6da93c0e70e8517971672a010db93dc | /py_m/lexer_.py | 2bc01b0157aa225fd69bd537af1b174f584f269a | []
| no_license | kawain/copy_interpreter | 44eebe43c6b9ddefa94066577dcd5779a933f426 | 94e7a6d5d03b528b9138c17a5a6828f6332fa98d | refs/heads/master | 2023-04-26T02:51:46.457263 | 2021-05-22T07:48:52 | 2021-05-22T07:48:52 | 356,544,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,363 | py | from token_ import TokenType, Token
class Lexer:
    """Lexical analyzer: turns an input string into a stream of Tokens."""

    def __init__(self, input, position=0, next_position=0, ch=""):
        # Full source text being tokenized.
        self.input = input
        # Index of the character currently held in self.ch.
        self.position = position
        # Index of the next character to read.
        self.next_position = next_position
        # Current character; "" signals end of input.
        self.ch = ch
        self.size = len(self.input)
        # Prime self.ch with the first character.
        self.read_char()

    def read_char(self):
        # Advance one character; past the end of input self.ch becomes "".
        if self.next_position >= self.size:
            self.ch = ""
        else:
            self.ch = self.input[self.next_position]
        self.position = self.next_position
        self.next_position += 1

    def skip_whitespace(self):
        # Consume spaces, tabs and line breaks between tokens.
        while self.ch == " " or self.ch == "\t" or self.ch == "\n" or self.ch == "\r":
            self.read_char()

    @staticmethod
    def is_letter(v):
        # Identifier characters: alphabetic or underscore.
        if v.isalpha():
            return True
        elif v == "_":
            return True
        else:
            return False

    @staticmethod
    def is_digit(v):
        # Number characters: digits or a decimal point.
        if v.isdigit():
            return True
        elif v == '.':
            return True
        else:
            return False

    def peek_char(self):
        # Look at the next character without consuming it.
        if self.next_position >= self.size:
            return ""
        else:
            return self.input[self.next_position]

    def read_identifier(self):
        # Consume a run of identifier characters and return the lexeme.
        position = self.position
        while self.is_letter(self.ch):
            self.read_char()
        return self.input[position:self.position]

    def read_number(self):
        # Consume a run of digits/dots and return the lexeme.
        position = self.position
        while self.is_digit(self.ch):
            self.read_char()
        return self.input[position:self.position]

    def read_string(self):
        # Consume up to the closing quote (or end of input); the quotes
        # themselves are not part of the returned literal.
        position = self.position + 1
        while True:
            self.read_char()
            if self.ch == '"' or self.ch == "":
                break
        return self.input[position:self.position]

    def next_token(self):
        """Return the next Token and advance past it."""
        tok = Token()
        self.skip_whitespace()
        # Single- and double-character operators and delimiters.
        if self.ch == "=":
            if self.peek_char() == "=":
                self.read_char()
                tok.token_type = TokenType.EQ
                tok.literal = "=="
            else:
                tok.token_type = TokenType.ASSIGN
                tok.literal = "="
        elif self.ch == "+":
            tok.token_type = TokenType.PLUS
            tok.literal = self.ch
        elif self.ch == "-":
            tok.token_type = TokenType.MINUS
            tok.literal = self.ch
        elif self.ch == "!":
            if self.peek_char() == "=":
                self.read_char()
                tok.token_type = TokenType.NOT_EQ
                tok.literal = "!="
            else:
                tok.token_type = TokenType.BANG
                tok.literal = "!"
        elif self.ch == "/":
            tok.token_type = TokenType.SLASH
            tok.literal = self.ch
        elif self.ch == "*":
            tok.token_type = TokenType.ASTERISK
            tok.literal = self.ch
        elif self.ch == "<":
            tok.token_type = TokenType.LT
            tok.literal = self.ch
        elif self.ch == ">":
            tok.token_type = TokenType.GT
            tok.literal = self.ch
        elif self.ch == ";":
            tok.token_type = TokenType.SEMICOLON
            tok.literal = self.ch
        elif self.ch == ",":
            tok.token_type = TokenType.COMMA
            tok.literal = self.ch
        elif self.ch == "{":
            tok.token_type = TokenType.LBRACE
            tok.literal = self.ch
        elif self.ch == "}":
            tok.token_type = TokenType.RBRACE
            tok.literal = self.ch
        elif self.ch == "(":
            tok.token_type = TokenType.LPAREN
            tok.literal = self.ch
        elif self.ch == ")":
            tok.token_type = TokenType.RPAREN
            tok.literal = self.ch
        elif self.ch == '"':
            tok.token_type = TokenType.STRING
            tok.literal = self.read_string()
        elif self.ch == "[":
            tok.token_type = TokenType.LBRACKET
            tok.literal = self.ch
        elif self.ch == "]":
            tok.token_type = TokenType.RBRACKET
            tok.literal = self.ch
        elif self.ch == "":
            tok.token_type = TokenType.EOF
            tok.literal = ""
        else:
            # Identifiers/keywords and numeric literals consume their own
            # characters, so these branches return without the trailing
            # read_char() below.
            if self.is_letter(self.ch):
                tok.literal = self.read_identifier()
                tok.token_type = tok.lookup_ident(tok.literal)
                return tok
            elif self.is_digit(self.ch):
                literal = self.read_number()
                if literal.count(".") == 0:
                    tok.token_type = TokenType.INT
                    tok.literal = literal
                    return tok
                elif literal.count(".") == 1:
                    tok.token_type = TokenType.FLOAT
                    tok.literal = literal
                    return tok
                else:
                    # More than one dot: malformed number.
                    # NOTE(review): this branch falls through to the
                    # read_char() below, which skips one extra character
                    # after the bad literal -- confirm that is intended.
                    tok.token_type = TokenType.ILLEGAL
                    tok.literal = literal
            else:
                tok.token_type = TokenType.ILLEGAL
                tok.literal = self.ch
        # Consume the single character belonging to the token just built.
        self.read_char()
        return tok

    def __str__(self):
        return "Lexer()"
# Nothing to do when executed directly; the Lexer is driven by the parser.
if __name__ == "__main__":
    pass
| [
"[email protected]"
]
| |
e96605d4527a4551d1105f8932434a99310e65b9 | 561c590ec93131ceb58c21912a375b6e0d50bedb | /jiang_fenci/hmm_segment/segment/model.py | f02feafaf66f8f9e98368fd143fd2570a3590bb7 | []
| no_license | chuanfanyoudong/nlp_learn | 3607555e59789240afd6c4a9620cc6e678e0afb3 | 9fbb6781640ab9aba561dc2de0066a1f1e5882a0 | refs/heads/master | 2020-04-07T13:25:16.118562 | 2019-04-24T07:18:33 | 2019-04-24T07:18:33 | 158,406,684 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,045 | py | # -*- coding: utf-8 -*-
"""
SEGMENT
--------
封装hmm分词模型
"""
import numpy as np
from hmmlearn.hmm import MultinomialHMM
from jiang_fenci.hmm_segment.segment.corpus import get_corpus
__model = None
class Segment:
    """HMM-based word segmenter built on hmmlearn's MultinomialHMM."""

    def __init__(self):
        # Corpus object supplying the estimated probability tables.
        self.corpus = get_corpus()
        self.states, self.init_p = self.get_init_state()
        self.trans_p = self.get_trans_state()
        self.vocabs, self.emit_p = self.get_emit_state()
        self.model = self.get_model()

    def get_init_state(self):
        """
        Get the initial-state probabilities, converted to the array form
        the hmm model accepts.
        """
        # S = single-char word, B = word begin, M = middle, E = end.
        states = ['S', 'B', 'M', 'E']
        init_state = self.corpus.get_state('init')
        init_p = np.array([init_state[s] for s in states])
        return states, init_p

    def get_trans_state(self):
        """
        Get the state-transition probabilities, converted to the array
        form the hmm model accepts.
        """
        trans_state = self.corpus.get_state('trans')
        trans_p = np.array([[trans_state[s][ss] for ss in self.states] for s in self.states])
        return trans_p

    def get_emit_state(self):
        """
        Get the emission probabilities, converted to the array form the
        hmm model accepts.
        """
        emit_state = self.corpus.get_state('emit')
        vocabs = []
        # Collect every character seen under any state, deduplicated.
        for s in self.states:
            vocabs.extend([k for k, v in emit_state[s].items()])
        vocabs = list(set(vocabs))
        emit_p = np.array([[emit_state[s][w] for w in vocabs] for s in self.states])
        return vocabs, emit_p

    def get_model(self):
        """
        Initialise the hmm model from the estimated probability tables.
        """
        model = MultinomialHMM(n_components=len(self.states))
        model.startprob_ = self.init_p
        model.transmat_ = self.trans_p
        model.emissionprob_ = self.emit_p
        return model

    def pre_process(self, word):
        """
        Handle out-of-vocabulary characters.
        """
        # Unknown characters are mapped to the last vocabulary index.
        if word in self.vocabs:
            return self.vocabs.index(word)
        else:
            return len(self.vocabs)-1

    def cut(self, sentence):
        """
        Segment a sentence: decode the state sequence with Viterbi and
        insert a space after every word-final (S/E) character.
        """
        # Observation sequence: one vocabulary index per character.
        seen_n = np.array([[self.pre_process(w) for w in sentence]]).T
        log_p, b = self.model.decode(seen_n, algorithm='viterbi')
        # print(len(sentence),len(b))
        # print(sentence,b)
        #print(self.states,len(b))
        states = list(map(lambda x: self.states[int(x)], b))
        #print(type(states),states)
        cut_sentence = ''
        for index in range(len(list(states))):
            # print(list(states))
            if list(states)[index] in ('S', 'E'):
                cut_sentence += sentence[index]+' '
            else:
                cut_sentence += sentence[index]
        return cut_sentence

    @staticmethod
    def stats(cut_corpus, gold_corpus):
        """
        Precision, recall and F1 of the segmentation against the gold
        corpus (corpora are space-separated, sentence-aligned lists).
        """
        success_count = 0
        cut_count = 0
        gold_count = 0
        for index in range(len(cut_corpus)):
            cut_sentence = cut_corpus[index].split(' ')
            gold_sentence = gold_corpus[index].split(' ')
            cut_count += len(cut_sentence)
            gold_count += len(gold_sentence)
            for word in cut_sentence:
                if word in gold_sentence:
                    success_count += 1
        recall = float(success_count)/float(gold_count)
        precision = float(success_count)/float(cut_count)
        f1 = (2*recall*precision)/(recall+precision)
        return [precision, recall, f1]

    def test(self):
        """
        Evaluate segmentation on the test corpus; returns
        [precision, recall, f1].
        """
        test_corpus = self.corpus.get_test_corpus('test')
        gold_corpus = [sentence.replace('　', ' ').strip() for sentence in self.corpus.get_test_corpus('test_gold') if sentence]
        cut_corpus = [self.cut(sentence).strip() for sentence in test_corpus if sentence]
        result = self.stats(cut_corpus, gold_corpus)
        # print(result)
        return result
def get_model():
    """
    Return the process-wide Segment singleton, creating it lazily on
    first use.
    """
    global __model
    if __model is None:
        __model = Segment()
    return __model
| [
"[email protected]"
]
| |
a6c3cf2f1f9a3458d0b562aef5935f76de142de7 | 1956883d52e4019bbf8bd7bbc3744cdd1376e128 | /abutton.py | 96ea6f9b2354e36232ba86e55ad6e83e85bbaeda | [
"MIT"
]
| permissive | Otumian-empire/tkinter-basic-gui | 5d7f7c697e9ac40f34b598b166186733d0931202 | 8a561fde8f770119bc3108511388371b1bdcabf5 | refs/heads/master | 2020-06-18T13:41:17.248470 | 2019-07-15T18:51:40 | 2019-07-15T18:51:40 | 196,320,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from tkinter import *
root = Tk()

# Current counter value shown in the label.
x = 0


def _step(delta):
    """Shift the counter by delta and refresh the label text."""
    global x
    x += delta
    label.configure(text=x)


def increase():
    """Increment the counter shown in the label."""
    _step(1)


def decrease():
    """Decrement the counter shown in the label."""
    _step(-1)


label = Label(text=x)
sendbutton = Button(text="increase", command=increase)
deletebutton = Button(text="decrease", command=decrease)

# Grid order determines on-screen stacking: button, label, button.
sendbutton.grid()
label.grid()
deletebutton.grid()
mainloop() | [
"[email protected]"
]
| |
47e01a8d79922beb1795fe91191db98c2627286b | 61a8f496dbe1880398b0156940b1789ddfe8e081 | /Week_7_Lab/Advanced/q10.py | 3c2bfdedd51b4b0239a8993191d0ee3ac329def6 | []
| no_license | luca2849/CM1103-Problem-Solving-With-Python | e369cdc032249e3625ae5dbbd926703e20d11dd9 | a10b7ee6b972b23528a983dd7fff78d097c08465 | refs/heads/master | 2020-04-04T15:07:01.179113 | 2018-12-13T12:07:19 | 2018-12-13T12:07:19 | 156,024,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | def rec_power(a,n):
def rec_power(a, n):
    """Return a raised to the positive integer power n (n >= 1).

    Uses exponentiation by squaring: a**n = (a**(n//2))**2, with one
    extra factor of a when n is odd, so only O(log n) multiplications
    are performed.

    Bug fix: the previous version halved n with true division (making
    it a float) and tested the parity of n/2 instead of n, so some
    recursive calls matched neither branch and returned None --
    rec_power(10, 4) raised a TypeError.
    """
    # Base case: a**1 is a.
    if n == 1:
        return a
    # Recurse on the integer half of the exponent.
    factor = rec_power(a, n // 2)
    if n % 2 == 0:
        # Even exponent: the square of the half power is the answer.
        return factor * factor
    # Odd exponent: square the half power and multiply in one more a.
    return factor * factor * a


print(rec_power(10, 4))
"="
]
| = |
af8010a1e412e867091d19bae06ae1b90c345783 | f993e252fc740471e71a6748685988fc0b5f2e34 | /backend/driver/migrations/0001_initial.py | 66dea496859c52c65d3e087e3245db062a3abc77 | []
| no_license | crowdbotics-apps/cobros-app-22778 | b9b9561d693fc979de0af693ffa9e4ca4d57873d | 0774fc76d1b8b484790ed1ec070c1f6455905c65 | refs/heads/master | 2023-01-12T16:46:05.469345 | 2020-11-19T17:57:14 | 2020-11-19T17:57:14 | 314,314,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # Generated by Django 2.2.17 on 2020-11-19 17:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial migration for the driver app: creates the DriverProfile and
    # DriverOrder tables. Auto-generated by Django 2.2.17 -- edit with care,
    # since applied migrations must stay consistent with this file.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("delivery_order", "0001_initial"),
    ]

    operations = [
        # DriverProfile: one-to-one extension of the auth user with a photo,
        # timestamps, and free-form details.
        migrations.CreateModel(
            name="DriverProfile",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("photo", models.URLField()),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                ("last_updated", models.DateTimeField(auto_now=True)),
                ("details", models.TextField(blank=True, null=True)),
                (
                    "user",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="driverprofile_user",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
        # DriverOrder: links one delivery order to (at most) one driver; the
        # driver link survives driver deletion via SET_NULL.
        migrations.CreateModel(
            name="DriverOrder",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("timestamp_created", models.DateTimeField(auto_now_add=True)),
                (
                    "driver",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="driverorder_driver",
                        to="driver.DriverProfile",
                    ),
                ),
                (
                    "order",
                    models.OneToOneField(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="driverorder_order",
                        to="delivery_order.Order",
                    ),
                ),
            ],
        ),
    ]
| [
"[email protected]"
]
| |
4c5aa5950353440cacb41eae8812b9ebad525a8f | 536656cd89e4fa3a92b5dcab28657d60d1d244bd | /chrome/test/enterprise/e2e/policy/extension_blacklist/extension_blacklist.py | d14b00fa20cb00fb2767361a0b60407fe2824f33 | [
"BSD-3-Clause"
]
| permissive | ECS-251-W2020/chromium | 79caebf50443f297557d9510620bf8d44a68399a | ac814e85cb870a6b569e184c7a60a70ff3cb19f9 | refs/heads/master | 2022-08-19T17:42:46.887573 | 2020-03-18T06:08:44 | 2020-03-18T06:08:44 | 248,141,336 | 7 | 8 | BSD-3-Clause | 2022-07-06T20:32:48 | 2020-03-18T04:52:18 | null | UTF-8 | Python | false | false | 2,267 | py | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from chrome_ent_test.infra.core import environment, before_all, test
from infra import ChromeEnterpriseTestCase
@environment(file="../policy_test.asset.textpb")
class ExtensionInstallBlacklistTest(ChromeEnterpriseTestCase):
  """Test the ExtensionInstallBlacklist policy.

  https://cloud.google.com/docs/chrome-enterprise/policies/?policy=ExtensionInstallBlacklist"""

  @before_all
  def setup(self):
    """Provision the client VM with Chrome and the webdriver harness."""
    self.InstallChrome('client2012')
    self.InstallWebDriver('client2012')

  def installExtension(self, url):
    """Attempt to install the extension at `url` on the client.

    Returns the text output of the install_extension.py webdriver script;
    the output contains 'blocked' when policy prevented the install.
    """
    args = ['--url', url, '--text_only', '--wait', '5']
    # Renamed from `dir`, which shadowed the builtin of the same name.
    script_dir = os.path.dirname(os.path.abspath(__file__))

    logging.info('Opening page: %s' % url)
    output = self.RunWebDriverTest(
        'client2012', os.path.join(script_dir, '../install_extension.py'),
        args)
    return output

  @test
  def test_ExtensionBlacklist_all(self):
    """With '*' blacklisted, any extension install must be blocked."""
    extension = '*'
    self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
                   'String')
    self.RunCommand('client2012', 'gpupdate /force')
    logging.info('Disabled extension install for ' + extension)

    test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
    output = self.installExtension(test_url)
    self.assertIn('blocked', output)

  @test
  def test_ExtensionBlacklist_hangout(self):
    """Blacklisting one extension id blocks it, but not other extensions."""
    extension = 'nckgahadagoaajjgafhacjanaoiihapd'
    self.SetPolicy('win2012-dc', r'ExtensionInstallBlacklist\1', extension,
                   'String')
    self.RunCommand('client2012', 'gpupdate /force')
    logging.info('Disabled extension install for ' + extension)

    # The blacklisted extension must be blocked.
    test_url = 'https://chrome.google.com/webstore/detail/google-hangouts/nckgahadagoaajjgafhacjanaoiihapd'
    output = self.installExtension(test_url)
    self.assertIn('blocked', output)

    # A different extension must still install normally.
    positive_test_url = 'https://chrome.google.com/webstore/detail/grammarly-for-chrome/kbfnbcaeplbcioakkpcpgfkobkghlhen'
    output = self.installExtension(positive_test_url)
    self.assertNotIn('blocked', output)
| [
"[email protected]"
]
| |
8ec3bf12cacdc47c54db00c9ea91520d09aa8fc4 | 747f759311d404af31c0f80029e88098193f6269 | /addons/email_account/wizard/wizard_send_email.py | 2ad2aa3b9779c2e69e1d813316fd08d361ac84a4 | []
| no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | /home/openerp/production/extra-addons/email_account/wizard/wizard_send_email.py | [
"[email protected]"
]
| |
5559ff3b8d6a987f15d3f19068b4c475668a7461 | a07ed3f4984e8153219ef25927a5784c127f43a4 | /arp_spoof/venv/bin/easy_install-3.7 | bb94d94b031d29105f2a3d2e40ac4cab54a11736 | []
| no_license | golan1202/Hacking-with-Python | 939333b4e86669527f2ccf846caa49601fc05848 | 98e0b1fef562d64b3e6ec8eab90ed75fb8c3f221 | refs/heads/master | 2020-08-21T23:41:08.979455 | 2019-10-19T21:31:11 | 2019-10-19T21:31:11 | 216,272,690 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | 7 | #!/root/PycharmProjects/arp_spoof/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
]
| |
365f109ecdc7bef348e0afda449d6ff9c1423a44 | 0892937e1ef77f110a05042fa49b9178221590a5 | /quiz_app/admin.py | c54e0446c5f2b40a946d0f3ec5d7d08897fbfaa7 | []
| no_license | dmswl0311/nh_hackathon_quiz | aa2e0cc51db3abe45bdb6aadb96855528a149d63 | c4fadf6a9249d6e8ad80d553f8c20a848bdfc851 | refs/heads/master | 2023-01-29T15:46:22.046038 | 2020-12-13T13:35:46 | 2020-12-13T13:35:46 | 320,609,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | from django.contrib import admin
from .models import Quiz, OX_Quiz
admin.site.register(Quiz)
admin.site.register(OX_Quiz)
| [
"[email protected]"
]
| |
d06ba3a39c1339b3301e652807885c2348d249aa | 75f3ddcebb39e1575d0e735090cbafae5bc05140 | /setup.py | 7d957bf60b9cd3b0c319e27de4efb82b1d33cecc | [
"BSD-3-Clause"
]
| permissive | ethen8181/ort_inference | 372b548e98f4e6e6e5fde2bf5533a31c6e6273ce | 2fdd7fe8479c4b8679f8e809fa2b3846ad96b3fe | refs/heads/main | 2023-05-15T07:42:48.247881 | 2021-06-21T04:30:06 | 2021-06-21T04:30:06 | 376,840,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,471 | py | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
# Convert distutils Windows platform specifiers to CMake -A arguments
# (the architecture value handed to the Visual Studio generator in
# CMakeBuild.build_extension).
PLAT_TO_CMAKE = {
    "win32": "Win32",
    "win-amd64": "x64",
    "win-arm32": "ARM",
    "win-arm64": "ARM64",
}
# A CMakeExtension needs a sourcedir instead of a file list.
# The name must be the _single_ output extension from the CMake build.
# If you need multiple extensions, see scikit-build.
class CMakeExtension(Extension):
    """setuptools Extension whose compilation is delegated to CMake."""

    def __init__(self, name, sourcedir=""):
        # No source files are listed: the CMake project owns the build.
        super().__init__(name, sources=[])
        self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
    """build_ext command that configures and builds extensions via CMake."""

    def build_extension(self, ext):
        """Configure and build one CMakeExtension into the package tree."""
        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        cfg = "Debug" if self.debug else "Release"

        # CMake lets you override the generator - we need to check this.
        # Can be set with Conda-Build, for example.
        cmake_generator = os.environ.get("CMAKE_GENERATOR", "")

        # Set Python_EXECUTABLE instead if you use PYBIND11_FINDPYTHON
        # EXAMPLE_VERSION_INFO shows you how to pass a value into the C++ code
        # from Python.
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(extdir),
            "-DPYTHON_EXECUTABLE={}".format(sys.executable),
            "-DEXAMPLE_VERSION_INFO={}".format(self.distribution.get_version()),
            "-DCMAKE_BUILD_TYPE={}".format(cfg),  # not used on MSVC, but no harm
        ]
        build_args = []

        if self.compiler.compiler_type != "msvc":
            # Using Ninja-build since it a) is available as a wheel and b)
            # multithreads automatically. MSVC would require all variables be
            # exported for Ninja to pick it up, which is a little tricky to do.
            # Users can override the generator with CMAKE_GENERATOR in CMake
            # 3.15+.
            if not cmake_generator:
                cmake_args += ["-GNinja"]

        else:

            # Single config generators are handled "normally"
            single_config = any(x in cmake_generator for x in {"NMake", "Ninja"})

            # CMake allows an arch-in-generator style for backward compatibility
            contains_arch = any(x in cmake_generator for x in {"ARM", "Win64"})

            # Specify the arch if using MSVC generator, but only if it doesn't
            # contain a backward-compatibility arch spec already in the
            # generator name.
            if not single_config and not contains_arch:
                cmake_args += ["-A", PLAT_TO_CMAKE[self.plat_name]]

            # Multi-config generators have a different way to specify configs
            if not single_config:
                cmake_args += [
                    "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format(cfg.upper(), extdir)
                ]
                build_args += ["--config", cfg]

        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
        # across all generators.
        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            # self.parallel is a Python 3 only way to set parallel jobs by hand
            # using -j in the build_ext call, not supported by pip or PyPA-build.
            if hasattr(self, "parallel") and self.parallel:
                # CMake 3.12+ only.
                build_args += ["-j{}".format(self.parallel)]

        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        # Configure, then build, inside the temporary build directory.
        subprocess.check_call(
            ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp
        )
        subprocess.check_call(
            ["cmake", "--build", "."] + build_args, cwd=self.build_temp
        )
# The information here can also be placed in setup.cfg - better separation of
# logic and declaration, and simpler if you include description/version in a file.
setup(
    name="ort_inference",
    version="0.0.1",
    author="MingYu (Ethen) Liu",
    author_email="[email protected]",
    description="CPU Inferencing with Onnxruntime",
    long_description="CPU Inferencing with Onnxruntime",
    # The native module is built by CMake via the custom build_ext above.
    ext_modules=[CMakeExtension("ort_inference")],
    cmdclass={"build_ext": CMakeBuild},
    zip_safe=False
)
| [
"[email protected]"
]
| |
4f529ded28f7f66f414f239e601ba06e9f1e7c18 | f8d7bf751f5596f46111f52f127a412c50b7c6a4 | /mhc2flurry/cluster_parallelism.py | 85bc3fc2d6e85625f2cd6c14a123b901ad645de5 | [
"Apache-2.0"
]
| permissive | luoyuan3316/mhc2flurry | 7e9ef790fbceebc34e8f432dc594fc3a12bddab7 | 914dddfd708801a83615d0cc3d41dd3b19e45919 | refs/heads/master | 2023-04-16T14:27:36.602561 | 2021-04-26T14:09:56 | 2021-04-26T14:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,651 | py | """
Simple, naive parallel map implementation for HPC clusters.
Used for training MHC2flurry models.
"""
import traceback
import sys
import os
import time
import signal
import argparse
import pickle
import subprocess
import shutil
from .local_parallelism import call_wrapped_kwargs
from .class1_affinity_predictor import Class1AffinityPredictor
try:
from shlex import quote
except ImportError:
from pipes import quote
def add_cluster_parallelism_args(parser):
"""
Add commandline arguments controlling cluster parallelism to an argparse
ArgumentParser.
Parameters
----------
parser : argparse.ArgumentParser
"""
group = parser.add_argument_group("Cluster parallelism")
group.add_argument(
"--cluster-parallelism",
default=False,
action="store_true")
group.add_argument(
"--cluster-submit-command",
default='sh',
help="Default: %(default)s")
group.add_argument(
"--cluster-results-workdir",
default='./cluster-workdir',
help="Default: %(default)s")
group.add_argument(
"--additional-complete-file",
default='STDERR',
help="Additional file to monitor for job completion. Default: %(default)s")
group.add_argument(
'--cluster-script-prefix-path',
help="",
)
group.add_argument(
'--cluster-max-retries',
type=int,
help="How many times to rerun failing jobs. Default: %(default)s",
default=3)
def cluster_results_from_args(
args,
work_function,
work_items,
constant_data=None,
input_serialization_method="pickle",
result_serialization_method="pickle",
clear_constant_data=False):
"""
Parallel map configurable using commandline arguments. See the
cluster_results() function for docs.
The `args` parameter should be an argparse.Namespace from an argparse parser
generated using the add_cluster_parallelism_args() function.
Parameters
----------
args
work_function
work_items
constant_data
result_serialization_method
clear_constant_data
Returns
-------
generator
"""
return cluster_results(
work_function=work_function,
work_items=work_items,
constant_data=constant_data,
submit_command=args.cluster_submit_command,
results_workdir=args.cluster_results_workdir,
additional_complete_file=args.additional_complete_file,
script_prefix_path=args.cluster_script_prefix_path,
input_serialization_method=input_serialization_method,
result_serialization_method=result_serialization_method,
max_retries=args.cluster_max_retries,
clear_constant_data=clear_constant_data
)
def cluster_results(
work_function,
work_items,
constant_data=None,
submit_command="sh",
results_workdir="./cluster-workdir",
additional_complete_file=None,
script_prefix_path=None,
input_serialization_method="pickle",
result_serialization_method="pickle",
max_retries=3,
clear_constant_data=False):
"""
Parallel map on an HPC cluster.
Returns [work_function(item) for item in work_items] where each invocation
of work_function is performed as a separate HPC cluster job. Order is
preserved.
Optionally, "constant data" can be specified, which will be passed to
each work_function() invocation as a keyword argument called constant_data.
This data is serialized once and all workers read it from the same source,
which is more efficient than serializing it separately for each worker.
Each worker's input is serialized to a shared NFS directory and the
submit_command is used to launch a job to process that input. The shared
filesystem is polled occasionally to watch for results, which are fed back
to the user.
Parameters
----------
work_function : A -> B
work_items : list of A
constant_data : object
submit_command : string
For running on LSF, we use "bsub" here.
results_workdir : string
Path to NFS shared directory where inputs and results can be written
script_prefix_path : string
Path to script that will be invoked to run each worker. A line calling
the _mhcflurry-cluster-worker-entry-point command will be appended to
the contents of this file.
result_serialization_method : string, one of "pickle" or "save_predictor"
The "save_predictor" works only when the return type of work_function
is Class2AffinityPredictor
max_retries : int
How many times to attempt to re-launch a failed worker
clear_constant_data : bool
If True, the constant data dict is cleared on the launching host after
it is serialized to disk.
Returns
-------
generator of B
"""
if input_serialization_method == "dill":
import dill
input_serialization_module = dill
else:
assert input_serialization_method == "pickle"
input_serialization_module = pickle
constant_payload = {
'constant_data': constant_data,
'function': work_function,
}
if not os.path.exists(results_workdir):
os.mkdir(results_workdir)
work_dir = os.path.join(
os.path.abspath(results_workdir),
str(int(time.time())))
os.mkdir(work_dir)
constant_payload_path = os.path.join(
work_dir,
"global_data." + input_serialization_method)
with open(constant_payload_path, "wb") as fd:
input_serialization_module.dump(
constant_payload,
fd,
protocol=input_serialization_module.HIGHEST_PROTOCOL)
print("Wrote:", constant_payload_path)
if clear_constant_data:
constant_data.clear()
print("Cleared constant data to free up memory.")
if script_prefix_path:
with open(script_prefix_path) as fd:
script_prefix = fd.read()
else:
script_prefix = "#!/bin/bash"
result_items = []
for (i, item) in enumerate(work_items):
item_workdir = os.path.join(
work_dir, "work-item.%03d-of-%03d" % (i, len(work_items)))
os.mkdir(item_workdir)
item_data_path = os.path.join(
item_workdir, "data." + input_serialization_method)
with open(item_data_path, "wb") as fd:
input_serialization_module.dump(
item, fd, protocol=input_serialization_module.HIGHEST_PROTOCOL)
print("Wrote:", item_data_path)
item_result_path = os.path.join(item_workdir, "result")
item_error_path = os.path.join(item_workdir, "error.pkl")
item_finished_path = os.path.join(item_workdir, "COMPLETE")
item_script_pieces = [
script_prefix.format(work_item_num=i, work_dir=item_workdir)
]
item_script_pieces.append(" ".join([
"_mhcflurry-cluster-worker-entry-point",
"--constant-data", quote(constant_payload_path),
"--worker-data", quote(item_data_path),
"--result-out", quote(item_result_path),
"--error-out", quote(item_error_path),
"--complete-dir", quote(item_finished_path),
"--input-serialization-method", input_serialization_method,
"--result-serialization-method", result_serialization_method,
]))
item_script = "\n".join(item_script_pieces)
item_script_path = os.path.join(
item_workdir,
"run.%d.sh" % i)
with open(item_script_path, "w") as fd:
fd.write(item_script)
print("Wrote:", item_script_path)
launch_command = " ".join([
submit_command, "<", quote(item_script_path)
])
subprocess.check_call(launch_command, shell=True)
print("Invoked", launch_command)
result_items.append({
'work_dir': item_workdir,
'finished_path': item_finished_path,
'result_path': item_result_path,
'error_path': item_error_path,
'retry_num': 0,
'launch_command': launch_command,
})
def result_generator():
additional_complete_file_path = None
start = time.time()
while result_items:
print("[%0.1f sec elapsed] waiting on %d / %d items." % (
time.time() - start, len(result_items), len(work_items)))
while True:
result_item = None
for d in result_items:
if additional_complete_file:
additional_complete_file_path = os.path.join(
d['work_dir'], additional_complete_file)
if os.path.exists(d['finished_path']):
result_item = d
break
if additional_complete_file and os.path.exists(
additional_complete_file_path):
result_item = d
print("Exists", additional_complete_file_path)
break
if result_item is None:
time.sleep(60)
else:
result_items.remove(result_item)
break
complete_dir = result_item['finished_path']
result_path = result_item['result_path']
error_path = result_item['error_path']
retry_num = result_item['retry_num']
launch_command = result_item['launch_command']
print("[%0.1f sec elapsed] processing item %s" % (
time.time() - start, result_item))
if os.path.exists(error_path) or not os.path.exists(result_path):
if os.path.exists(error_path):
print("Error path exists", error_path)
try:
with open(error_path, "rb") as fd:
exception = pickle.load(fd)
print(exception)
except Exception as e:
exception = RuntimeError(
"Error, but couldn't read error path: %s %s" % (
type(e), str(e)))
else:
exception = RuntimeError("Error, but no exception saved")
if not os.path.exists(result_path):
print("Result path does NOT exist", result_path)
if retry_num < max_retries:
print("Relaunching", launch_command)
attempt_dir = os.path.join(
result_item['work_dir'], "attempt.%d" % retry_num)
if os.path.exists(complete_dir):
shutil.move(complete_dir, attempt_dir) # directory
if additional_complete_file and os.path.exists(
additional_complete_file_path):
shutil.move(additional_complete_file_path, attempt_dir)
if os.path.exists(error_path):
shutil.move(error_path, attempt_dir)
subprocess.check_call(launch_command, shell=True)
print("Invoked", launch_command)
result_item['retry_num'] += 1
result_items.append(result_item)
continue
else:
print("Max retries exceeded", max_retries)
raise exception
if os.path.exists(result_path):
print("Result path exists", result_path)
if result_serialization_method == "save_predictor":
result = Class1AffinityPredictor.load(result_path)
elif result_serialization_method == "pickle":
with open(result_path, "rb") as fd:
result = pickle.load(fd)
else:
raise ValueError(
"Unsupported serialization method",
result_serialization_method)
yield result
else:
raise RuntimeError("Results do not exist", result_path)
return result_generator()
parser = argparse.ArgumentParser(
usage="Entry point for cluster workers")
parser.add_argument(
"--constant-data",
required=True,
)
parser.add_argument(
"--worker-data",
required=True,
)
parser.add_argument(
"--result-out",
required=True,
)
parser.add_argument(
"--error-out",
required=True,
)
parser.add_argument(
"--complete-dir",
)
parser.add_argument(
"--input-serialization-method",
choices=("pickle", "dill"),
default="pickle")
parser.add_argument(
"--result-serialization-method",
choices=("pickle", "save_predictor"),
default="pickle")
def worker_entry_point(argv=sys.argv[1:]):
    """
    Entry point for the worker command.

    Loads the constant payload and per-worker kwargs, runs the wrapped
    function, and serializes either the result (to --result-out) or the
    raised exception (to --error-out).  If --complete-dir is given, that
    directory is created on exit regardless of success or failure, so a
    supervisor can poll for it.

    NOTE(review): the default ``argv=sys.argv[1:]`` is evaluated once at
    import time; callers passing their own argv are unaffected.

    Parameters
    ----------
    argv : list of string
        Command-line arguments (parsed with the module-level ``parser``).
    """
    # On sigusr1 print stack trace
    print("To show stack trace, run:\nkill -s USR1 %d" % os.getpid())
    signal.signal(signal.SIGUSR1, lambda sig, frame: traceback.print_stack())
    args = parser.parse_args(argv)
    # Inputs may be dill-serialized (supports lambdas/closures); default is pickle.
    if args.input_serialization_method == "dill":
        import dill
        input_serialization_module = dill
    else:
        assert args.input_serialization_method == "pickle"
        input_serialization_module = pickle
    with open(args.constant_data, "rb") as fd:
        constant_payload = input_serialization_module.load(fd)
    with open(args.worker_data, "rb") as fd:
        worker_data = input_serialization_module.load(fd)
    # Per-item kwargs, optionally augmented with the shared constant data.
    kwargs = dict(worker_data)
    if constant_payload['constant_data'] is not None:
        kwargs['constant_data'] = constant_payload['constant_data']
    try:
        result = call_wrapped_kwargs(constant_payload['function'], kwargs)
        if args.result_serialization_method == 'save_predictor':
            # Result is expected to expose a .save(path) method in this mode.
            result.save(args.result_out)
        else:
            with open(args.result_out, "wb") as fd:
                pickle.dump(result, fd, pickle.HIGHEST_PROTOCOL)
        print("Wrote:", args.result_out)
    except Exception as e:
        # Persist the exception for the supervisor, then re-raise so the
        # process exits nonzero.
        print("Exception: ", e)
        with open(args.error_out, "wb") as fd:
            pickle.dump(e, fd, pickle.HIGHEST_PROTOCOL)
        print("Wrote:", args.error_out)
        raise
    finally:
        # Completion marker is created on both success and failure paths.
        if args.complete_dir:
            os.mkdir(args.complete_dir)
            print("Created: ", args.complete_dir)
| [
"[email protected]"
]
| |
b6863dd1e133f2d14e1c1aaa6a43917d8b01e00e | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-3429.py | 7e35c22deed3fcb33f0fb8c3ea44a31bea95a86c | []
| no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,289 | py | # Binary-search trees
class TreeNode(object):
    """A binary-search-tree node holding a single int key.

    Duplicate keys are rejected by insert(); children are created lazily
    via the module-level makeNode() factory.
    """
    value:int = 0
    left:"TreeNode" = None
    right:"TreeNode" = None
    def insert(self:"TreeNode", x:int) -> bool:
        """Insert x below this node; return True iff a node was added."""
        if x == self.value:
            # Duplicate key: tree is unchanged.
            return False
        if x < self.value:
            if self.left is not None:
                return self.left.insert(x)
            self.left = makeNode(x)
            return True
        if self.right is not None:
            return self.right.insert(x)
        self.right = makeNode(x)
        return True
    def contains(self:"TreeNode", x:int) -> bool:
        """Return True iff x is stored in this subtree."""
        if x == self.value:
            return True
        child = self.left if x < self.value else self.right
        return child is not None and child.contains(x)
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
$Var.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
    """Allocate a fresh leaf TreeNode whose key is x."""
    node:TreeNode = None
    node = TreeNode()
    node.value = x
    return node
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
    # Benchmark-generated factory: only x is stored (into .value).  The extra
    # parameters x2..x5 and locals b2..b5 are deliberate padding emitted by the
    # benchmark generator and are never used.
    b:TreeNode5 = None
    b2:TreeNode5 = None
    b3:TreeNode5 = None
    b4:TreeNode5 = None
    b5:TreeNode5 = None
    b = TreeNode5()
    b.value = x
    return b
# Input parameters
# Only n (iteration count) and c (skip modulus) are used below; the *2..*5
# variants are generator padding, unused in this script.
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# k seeds a multiplicative congruential pseudo-random sequence (mod 37831).
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
t = Tree()
while i < n:
    # Insert the pseudo-random key, then advance the sequence.
    t.insert(k)
    k = (k * 37813) % 37831
    # Also insert most small ints (3 out of every 4 loop indices).
    if i % c != 0:
        t.insert(i)
    i = i + 1
print(t.size)
# Report which of these probe values ended up in the tree.
# NOTE: reuses the loop variable i from above.
for i in [4, 8, 15, 16, 23, 42]:
    if t.contains(i):
        print(i)
| [
"[email protected]"
]
| |
494cf359b8f1efd02f67635b8b12933e562d71b4 | c106149cccfac8dd4f05f976253f529b3234828c | /zerver/management/commands/send_realm_reactivation_email.py | 39b1dd91a654be9dba3474577f165a89b79bc915 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
]
| permissive | kartikey54/zulip | cc685686af3bc1fbadc9ded260f62f45087df301 | e8b44f491f8967823273a6d5acd3d3d376e62b90 | refs/heads/master | 2021-01-23T02:59:24.396882 | 2019-10-08T19:46:43 | 2019-10-08T19:46:43 | 86,029,881 | 1 | 0 | Apache-2.0 | 2019-10-08T19:46:44 | 2017-03-24T05:16:51 | Python | UTF-8 | Python | false | false | 805 | py |
from argparse import ArgumentParser
from zerver.lib.management import ZulipBaseCommand, CommandError
from zerver.lib.send_email import send_email, FromAddress
from zerver.lib.actions import do_send_realm_reactivation_email
from typing import Any
class Command(ZulipBaseCommand):
    """Management command: email realm administrators a reactivation link."""
    help = """Sends realm reactivation email to admins"""
    def add_arguments(self, parser: ArgumentParser) -> None:
        # The realm argument is mandatory for this command.
        self.add_realm_args(parser, True)
    def handle(self, *args: Any, **options: str) -> None:
        realm = self.get_realm(options)
        assert realm is not None
        if realm.deactivated:
            print('Sending email to admins')
            do_send_realm_reactivation_email(realm)
            print('Done!')
        else:
            # Reactivation only makes sense for a deactivated realm.
            raise CommandError("The realm %s is already active." % (realm.name,))
| [
"[email protected]"
]
| |
35dfc784fce1e1f84a5c902dfaad6aa13b45a15b | b7f9d32bfd0ba147182a880de9b257355d3bc945 | /pyedi/grammar/jsonnocheck.py | 28444610964efc57bb6f31665ff2ce012f9fe561 | []
| no_license | jedrus2000/pyedi | 25cbb930d854e0dbe79d251b3215040c978410b2 | b5d291c5f8565137bb845835c8fe439730b2f2c7 | refs/heads/master | 2020-04-12T09:15:58.802275 | 2018-12-19T07:16:55 | 2018-12-19T07:16:55 | 162,396,916 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,690 | py | """
This is modified code of Bots project:
http://bots.sourceforge.net/en/index.shtml
ttp://bots.readthedocs.io
https://github.com/eppye-bots/bots
originally created by Henk-Jan Ebbers.
This code include also changes from other forks, specially from:
https://github.com/bots-edi
This project, as original Bots is licenced under GNU GENERAL PUBLIC LICENSE Version 3; for full
text: http://www.gnu.org/copyleft/gpl.html
"""
from .json import Json
class JsonNoCheck(Json):
    """JSON grammar variant that performs no structure/record checking.

    Differs from the base Json grammar only via this defaultsyntax mapping
    (note has_structure=False and checkunknownentities=False below, which
    disable grammar validation).
    """
    # Default syntax options for this editype; keys mirror the base grammar's
    # syntax table.
    defaultsyntax = {
        "charset": "utf-8",
        "checkcharsetin": "strict", # strict, ignore or botsreplace (replace with char as set in bots.ini).
        "checkcharsetout": "strict", # strict, ignore or botsreplace (replace with char as set in bots.ini).
        "checkunknownentities": False,
        "contenttype": "application/json",
        "decimaal": ".",
        "defaultBOTSIDroot": "ROOT",
        "envelope": "",
        "indented": False, # False: output is one string (no cr/lf); True: output is indented/human readable
        "merge": False,
        "triad": "",
        # settings needed as defaults, but not useful for this editype
        "add_crlfafterrecord_sep": "",
        "escape": "",
        "field_sep": "",
        "forcequote": 0, # csv only
        "quote_char": "",
        "record_sep": "",
        "record_tag_sep": "", # Tradacoms/GTDI
        "reserve": "",
        "sfield_sep": "",
        "skip_char": "",
        # bots internal, never change/overwrite
        "has_structure": False, # is True, read structure, recorddef, check these
        "checkcollision": False,
        "lengthnumericbare": False,
        "stripfield_sep": False,
    }
"[email protected]"
]
| |
28642c224abb07f03d6e3c0002d570ec3095e530 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/stack_data/serializing.py | fb67d2906a1d42c448f6b8f99c6e470900813a01 | [
"MIT"
]
| permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 6,441 | py | import inspect
import logging
import sys
import traceback
from collections import Counter
from html import escape as escape_html
from types import FrameType, TracebackType
from typing import Union, Iterable, List
from stack_data import (
style_with_executing_node,
Options,
Line,
FrameInfo,
Variable,
RepeatedFrames,
)
log = logging.getLogger(__name__)
class Serializer:
    """Renders stack_data frames/exceptions into JSON-friendly dicts.

    Configuration is fixed at construction time (pygments highlighting,
    HTML vs terminal escaping, exception chaining, variable display);
    the format_* methods then walk tracebacks/frames and emit plain
    dicts/lists/strings suitable for json.dumps.
    """
    def __init__(
            self,
            *,
            options=None,
            pygmented=False,
            show_executing_node=True,
            pygments_formatter_cls=None,
            pygments_formatter_kwargs=None,
            pygments_style="monokai",
            executing_node_modifier="bg:#005080",
            use_code_qualname=True,
            strip_leading_indent=True,
            html=False,
            chain=True,
            collapse_repeated_frames=True,
            show_variables=False,
    ):
        """All arguments are keyword-only; see class docstring for intent."""
        if options is None:
            options = Options()
        # Only build a pygments formatter if highlighting was requested and the
        # caller did not already supply one via options.
        if pygmented and not options.pygments_formatter:
            if show_executing_node:
                # Augment the style so the currently-executing node is marked.
                pygments_style = style_with_executing_node(
                    pygments_style, executing_node_modifier
                )
            if pygments_formatter_cls is None:
                # Default formatter depends on the output medium.
                if html:
                    from pygments.formatters.html import (
                        HtmlFormatter as pygments_formatter_cls,
                    )
                else:
                    from pygments.formatters.terminal256 import (
                        Terminal256Formatter as pygments_formatter_cls,
                    )
            options.pygments_formatter = pygments_formatter_cls(
                style=pygments_style,
                **pygments_formatter_kwargs or {},
            )
        self.pygmented = pygmented
        self.use_code_qualname = use_code_qualname
        self.strip_leading_indent = strip_leading_indent
        self.html = html
        self.chain = chain
        self.options = options
        self.collapse_repeated_frames = collapse_repeated_frames
        self.show_variables = show_variables
    def format_exception(self, e=None) -> List[dict]:
        """Format e (default: the active exception) and, if chaining is
        enabled, its __cause__/__context__ chain, oldest first.

        NOTE(review): relies on the private traceback._cause_message /
        _context_message strings — may break across CPython versions.
        """
        if e is None:
            e = sys.exc_info()[1]
        result = []
        if self.chain:
            if e.__cause__ is not None:
                result = self.format_exception(e.__cause__)
                result[-1]["tail"] = traceback._cause_message.strip()
            elif e.__context__ is not None and not e.__suppress_context__:
                result = self.format_exception(e.__context__)
                result[-1]["tail"] = traceback._context_message.strip()
        result.append(self.format_traceback_part(e))
        return result
    def format_traceback_part(self, e: BaseException) -> dict:
        """Format a single exception (frames + type/message), no chaining."""
        return dict(
            frames=self.format_stack(e.__traceback__ or sys.exc_info()[2]),
            exception=dict(
                type=type(e).__name__,
                # traceback._some_str is a private helper; see NOTE above.
                message=traceback._some_str(e),
            ),
            tail="",
        )
    def format_stack(self, frame_or_tb=None) -> List[dict]:
        """Format a frame/traceback (default: the caller's frame)."""
        if frame_or_tb is None:
            frame_or_tb = inspect.currentframe().f_back
        return list(
            self.format_stack_data(
                FrameInfo.stack_data(
                    frame_or_tb,
                    self.options,
                    collapse_repeated_frames=self.collapse_repeated_frames,
                )
            )
        )
    def format_stack_data(
            self, stack: Iterable[Union[FrameInfo, RepeatedFrames]]
    ) -> Iterable[dict]:
        """Yield a tagged dict per stack entry; frames may be filtered out
        via should_include_frame()."""
        for item in stack:
            if isinstance(item, FrameInfo):
                if not self.should_include_frame(item):
                    continue
                yield dict(type="frame", **self.format_frame(item))
            else:
                yield dict(type="repeated_frames", **self.format_repeated_frames(item))
    def format_repeated_frames(self, repeated_frames: RepeatedFrames) -> dict:
        """Summarize collapsed recursion: (code, lineno) pairs with counts,
        most frequent first (ties broken by function name)."""
        counts = sorted(
            Counter(repeated_frames.frame_keys).items(),
            key=lambda item: (-item[1], item[0][0].co_name),
        )
        return dict(
            frames=[
                dict(
                    name=code.co_name,
                    lineno=lineno,
                    count=count,
                )
                for (code, lineno), count in counts
            ]
        )
    def format_frame(self, frame: Union[FrameInfo, FrameType, TracebackType]) -> dict:
        """Format one frame: name, location, source lines, and (optionally)
        its local variables."""
        if not isinstance(frame, FrameInfo):
            frame = FrameInfo(frame, self.options)
        result = dict(
            name=(
                # Qualified name (e.g. Class.method) when configured.
                frame.executing.code_qualname()
                if self.use_code_qualname
                else frame.code.co_name
            ),
            filename=frame.filename,
            lineno=frame.lineno,
            lines=list(self.format_lines(frame.lines)),
        )
        if self.show_variables:
            result["variables"] = list(self.format_variables(frame))
        return result
    def format_lines(self, lines):
        """Yield a tagged dict per source line; non-Line entries mark gaps."""
        for line in lines:
            if isinstance(line, Line):
                yield dict(type="line", **self.format_line(line))
            else:
                yield dict(type="line_gap")
    def format_line(self, line: Line) -> dict:
        """Render one source line using this serializer's display options."""
        return dict(
            is_current=line.is_current,
            lineno=line.lineno,
            text=line.render(
                pygmented=self.pygmented,
                escape_html=self.html,
                strip_leading_indent=self.strip_leading_indent,
            ),
        )
    def format_variables(self, frame_info: FrameInfo) -> Iterable[dict]:
        """Yield formatted frame variables sorted by name; errors while
        collecting variables are logged, never raised."""
        try:
            for var in sorted(frame_info.variables, key=lambda v: v.name):
                yield self.format_variable(var)
        except Exception:  # pragma: no cover
            log.exception("Error in getting frame variables")
    def format_variable(self, var: Variable) -> dict:
        """Format one variable as escaped name/value strings."""
        return dict(
            name=self.format_variable_part(var.name),
            value=self.format_variable_part(self.format_variable_value(var.value)),
        )
    def format_variable_part(self, text):
        """Escape text for HTML output; pass through for terminal output."""
        if self.html:
            return escape_html(text)
        else:
            return text
    def format_variable_value(self, value) -> str:
        """Hook: stringify a variable value (default repr); override to
        customize or truncate."""
        return repr(value)
    def should_include_frame(self, frame_info: FrameInfo) -> bool:
        """Hook: return False from a subclass to hide a frame."""
        return True  # pragma: no cover
| [
"[email protected]"
]
| |
e0ac24619a342a1b4cf7f9e015cbbcec4a3161d4 | c9b5f49906e213c0c6edd25c063961b8226b67af | /compression/evaluate.py | a6d3d4e3f36ec271fc6ec3957c65c952d9ea8164 | []
| no_license | danielzgsilva/jetson_projects | c481ff505c97ac40089438f34ae24b74e265631c | 72ab79a2d4759fe51d107432aa9ff6ce2c728a53 | refs/heads/master | 2023-01-20T20:22:04.410741 | 2020-12-01T21:31:07 | 2020-12-01T21:31:07 | 294,460,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,723 | py | import os
#os.environ['CUDA_LAUNCH_BLOCKING']='1'
import config
import torch
import numpy as np
from dataloader import TrainDataset, ValidationDataset, DataLoader, get_cifar100_dataset
from model import VGGModel, VGGModel_old
import time
from basisModel import basisModel, display_stats
from options import Options
opts = Options().parse()
if opts.tensorRT:
from torch2trt import torch2trt
def get_accuracy(y_pred, y):
    """Fraction of samples whose argmax over the last dim of y_pred equals y.

    Returns a 0-d float32 tensor in [0, 1].
    """
    predicted = y_pred.argmax(dim=-1)
    correct = predicted.eq(y).float()
    return correct.mean()
def validation(model, data_loader, opts):
    """Benchmark `model` on `data_loader`, optionally compressing and/or
    TensorRT-optimizing it first.

    Runs opts.n full passes over the loader, timing inference per image and
    averaging batch accuracy.  Returns (avg_time_us_per_image, avg_acc_pct);
    the first iteration's timing is treated as warm-up and excluded from the
    time average (but its accuracy is still counted).

    BUGFIX: corrected the misspelled final output string 'reuslts'->'results'.

    NOTE(review): assumes `config.use_cuda`, `config.batch_size` are set and,
    when opts.tensorRT is on, that torch2trt was imported at module level.
    """
    model.eval()
    if opts.compress:
        # Replace conv filters with a learned basis to shrink the model.
        print('Compressing model with basis filter algorithm, compression factor of {}'.format(opts.compress_factor))
        model = basisModel(model, opts.use_weights, opts.add_bn, opts.fixed_basbs)
        model.update_channels(opts.compress_factor)
        display_stats(model, (64,64))
    else:
        print('No compression schema')
    if config.use_cuda:
        model.cuda()
    if opts.tensorRT:
        print('Optimizing model with TensorRT')
        # Get random input to pass as a sample to TensorRT
        x, _ = next(iter(data_loader))
        if config.use_cuda:
            x = x.cuda()
        else:
            raise RuntimeError('Cannot use TensorRT without CUDA')
        # Optimize, then free the original model/sample before benchmarking.
        trt_model = torch2trt(model, [x], max_batch_size=config.batch_size)
        del model
        del x
        torch.cuda.empty_cache()
        model = trt_model
        model.cuda()
    else:
        print('No TensorRT')
    print('memory usage:')
    print(torch.cuda.memory_allocated())
    print(torch.cuda.memory_summary())
    print('Evaluating model with {} iterations over {} images'.format(opts.n, len(data_loader)*config.batch_size))
    all_times, all_accs = [], []
    for i in range(opts.n):
        times, accs = [], []
        for _, sample in enumerate(data_loader):
            x, y = sample
            if config.use_cuda:
                x = x.cuda()
                y = y.cuda()
            with torch.no_grad():
                start_time = time.time()
                y_pred = model(x)
                end_time = time.time()
            times.append((end_time-start_time)/float(x.shape[0]) * 1000 * 1000) # saves the average time per image
            acc = get_accuracy(y_pred, y) # computes the accuracy per batch
            accs.append(acc.item())
        iteration_time, iteration_acc = float(np.mean(times)), float(np.mean(accs))*100
        all_times.append(iteration_time)
        all_accs.append(iteration_acc)
        print('Iteration %d: Avg Time per Image: %.4f (micro-sec) Accuracy: %.4f' % (i, iteration_time, iteration_acc), flush=True)
    # Skip iteration 0 in the time average (warm-up: caches, cudnn autotune).
    avg_time, avg_acc = float(np.mean(all_times[1:])), float(np.mean(all_accs))
    print('-'*70)
    print('Final results: Avg Time per Image: %.4f (micro-sec) Accuracy: %.4f' % (avg_time, avg_acc), flush=True)
    return avg_time, avg_acc
def evaluate(opts):
    """Load the CIFAR-100 validation set and a saved model, then benchmark it
    via validation().

    Depending on opts.load_state_dict, either rebuilds the architecture
    (VGGModel or the legacy VGGModel_old) and loads a state_dict, or
    unpickles a whole model object from disk.
    """
    val_dataset = get_cifar100_dataset('./data/', False, download=True)
    val_dataloader = DataLoader(val_dataset, batch_size=config.batch_size, shuffle=False, num_workers=config.workers)
    # Checkpoint path: <save_dir>/<model-file-name>.
    save_file_path = os.path.join(opts.save_dir, opts.model)
    if opts.load_state_dict:
        if opts.use_vgg_old:
            model = VGGModel_old(n_classes=config.n_classes)
        else:
            model = VGGModel(n_classes=config.n_classes)
        # Checkpoint is expected to be a dict with a 'state_dict' entry.
        model.load_state_dict(torch.load(save_file_path)['state_dict'])
    else:
        # Whole-model pickle (architecture + weights).
        model = torch.load(save_file_path)
    avg_time, avg_acc = validation(model, val_dataloader, opts)
if __name__ == '__main__':
    # opts comes from Options().parse() at module import time.
    evaluate(opts)
| [
"[email protected]"
]
| |
fb588a29ad13110bfe5be22b2eca5aff80ed72bc | 51bab842d885e6d5e6dc0892522ed8ce82f2be9d | /src/picktrue/sites/abstract.py | ca470e6512e31356d37569ab9e178342d9c4e097 | [
"MIT"
]
| permissive | winkidney/PickTrue | d941e5e9eb420f4a80bd0bbe1c06a0ab8ff3c861 | 772b105e4de3852bba41369221f47b8480bf1070 | refs/heads/master | 2023-03-12T01:40:54.780845 | 2023-03-10T17:23:55 | 2023-03-10T17:23:55 | 144,246,340 | 146 | 16 | MIT | 2020-07-24T13:02:24 | 2018-08-10T06:32:10 | Python | UTF-8 | Python | false | false | 2,353 | py | import os
from pathlib import Path
import requests
from picktrue.meta import UA, ImageItem
from picktrue.utils import retry
def normalize_proxy_string(proxy):
    """Upgrade a socks5:// proxy URL to socks5h:// (remote DNS resolution).

    Strings already using socks5h, and non-socks proxies, pass through
    unchanged.
    """
    needs_remote_dns = 'socks5' in proxy and 'socks5h' not in proxy
    if needs_remote_dns:
        return proxy.replace('socks5', 'socks5h')
    return proxy
def get_proxy(proxy_string=None):
    """Build requests-style kwargs: {'proxies': {...}} or {} when unset.

    The same (normalized) proxy URL is used for both http and https.
    """
    if proxy_string is None:
        return {}
    normalized = normalize_proxy_string(proxy_string)
    return {
        'proxies': {scheme: normalized for scheme in ('http', 'https')},
    }
class DummySite:
    """Abstract base for a downloadable site.

    Subclasses must implement all three properties below.
    """
    @property
    def dir_name(self):
        # Directory name downloads are stored under.
        raise NotImplementedError()
    @property
    def fetcher(self):
        # The fetcher object used to download/save items (see DummyFetcher).
        raise NotImplementedError()
    @property
    def tasks(self):
        # Iterable of download task items for this site.
        raise NotImplementedError()
class DummyFetcher:
    """Base HTTP fetcher: a shared requests session plus save-to-disk logic."""
    def __init__(self, proxies=None):
        # One session per fetcher; optional proxies dict in requests format.
        self.session = requests.session()
        if proxies is not None:
            self.session.proxies = proxies
        # UA comes from picktrue.meta (browser-like User-Agent headers).
        self.session.headers.update(UA)
    @staticmethod
    def _safe_name(name):
        # Make a name filesystem-safe: path separators -> spaces, then
        # collapse spaces to dashes.  NOTE(review): not referenced within
        # this class — presumably used by subclasses.
        name = name.replace("/", " ")
        name = name.replace("\\", " ")
        name = name.strip()
        name = name.replace(" ", '-')
        return name
    @staticmethod
    def _safe_path(path):
        # Absolute Path object for the given path string.
        return Path(path).absolute()
    @retry()
    def get(self, url, **kwargs):
        """GET url via the shared session (retried on failure).

        Any caller-supplied timeout is discarded in favor of the fixed
        (2s connect, 30s read) timeout.

        :rtype: requests.Response
        """
        if 'timeout' in kwargs:
            kwargs.pop('timeout')
        return self.session.get(url, timeout=(2, 30), **kwargs)
    def get_save_path(self, base_path, image_name, image: ImageItem):
        # Default layout: <base_path>/<image_name>.  `image` is unused here
        # but kept so subclasses can build paths from image metadata.
        save_path = os.path.join(
            base_path,
            image_name,
        )
        return save_path
    def save(self, content, task_item):
        """Write downloaded bytes to disk; skips files that already exist.

        :type content: bytearray
        :type task_item: picktrue.meta.TaskItem
        """
        image = task_item.image
        image_name = image.name
        # image.name may be a callable deriving the name from url + content.
        if callable(image.name):
            image_name = image.name(image.url, content)
        save_path = self.get_save_path(
            task_item.base_save_path,
            image_name,
            image,
        )
        save_path = self._safe_path(save_path)
        if os.path.exists(save_path):
            return
        with open(save_path, "wb") as f:
            f.write(content)
            f.flush()
| [
"[email protected]"
]
| |
f07fecfbe41fa6f5a0d071a0779023ddd9a066ad | 43268854505070471e0911bc0e5b280cadec8601 | /modeller9v8/examples/commands/all_hydrogen.py | 277f74f956357eaf0efaf2fe95d7ea9c4afac96a | []
# This will read a specified atom file, generate all hydrogen atoms,
# add atomic radii and charges, and write the model to a PDB file in
# the GRASP format. This can be used with GRASP to display electrostatic
# properties without assigning charges and radii in GRASP.
from modeller import *
from modeller.scripts import complete_pdb

log.verbose()
env = environ()
env.io.atom_files_directory = ['../atom_files']
# All-hydrogen topology so both polar and non-polar hydrogens get built.
env.libs.topology.read(file='$(LIB)/top_allh.lib')
env.libs.parameters.read(file='$(LIB)/par.lib')


def patch_disulfides(mdl):
    """Patch topology to remove sulfhydril hydrogens"""
    # Each pair names the two cysteine residues of one disulfide bridge.
    for ids in [ ('17', '39'),
                 ( '3', '22'),
                 ('53', '59'),
                 ('41', '52') ]:
        mdl.patch(residue_type='DISU', residues=[mdl.residues[r] for r in ids])


mdl = complete_pdb(env, "1fas", patch_disulfides)
# Write the completed model once in GRASP format and once as a plain PDB.
mdl.write(file='1fas.ini1', model_format='GRASP')
mdl.write(file='1fas.ini2', model_format='PDB')
| [
"[email protected]"
]
| |
5a11112058ae007b6764e25e44cccde6c87c2df1 | 77ab593ed55a6d46b1778f6d41bc70ced3f8cd46 | /face_into/face72/see_data.py | f713bea53d21afcdad492cd716761cea8e41e100 | []
import os
import cv2 as cv

# Directory containing paired <name>.txt annotation files and <name>.jpg images.
path_files = 'E:/dectect/dectect/face68'

# Fix: initialize before the loop -- the original raised NameError when 'e'
# (delete previous) was pressed before any image had been kept.
last_img = None

for file in os.listdir(path_files):
    if file[-4:] == '.txt':
        print(file)
        img = cv.imread(path_files + '/' + file[:-4] + '.jpg')
        # Annotation format: "<label> <value> x0 y0 x1 y1 ..." where the
        # coordinates are normalized to [0, 1] (presumably; confirm upstream).
        with open(path_files + '/' + file) as txt_open:
            txt_read = txt_open.read()
        txt_float = [float(i) for i in txt_read.split(' ')]
        # Label 0 / 2 select alternative smile categories; 1 is the default.
        biaoq = 'xiao'
        if txt_float[0] == 0:
            biaoq = 'buxiao'
        elif txt_float[0] == 2:
            biaoq = 'daxiao'
        biaoq += str(txt_float[1])
        img = cv.putText(img, biaoq, (0, 25), 2, cv.FONT_HERSHEY_PLAIN, (255, 0, 0))
        # Draw every landmark point, scaling normalized coords to pixels.
        for x in range(int(len(txt_float) / 2) - 1):
            img = cv.circle(img,
                            (int(txt_float[2 + x * 2] * img.shape[1]),
                             int(txt_float[2 + x * 2 + 1] * img.shape[0])),
                            1, (0, 255, 0), -1)
        cv.imshow('img', img)
        k = cv.waitKey(0) & 0xFF
        if k == ord('d'):
            # Delete the pair currently being shown.
            os.remove(path_files + '/' + file)
            os.remove(path_files + '/' + file[:-4] + '.jpg')
            print('删除成功', path_files + '/' + file)
        elif k == ord('e'):
            # Delete the previously kept pair; guard against no previous
            # entry and clear it so it cannot be deleted twice.
            if last_img is not None:
                os.remove(last_img)
                os.remove(last_img[:-4] + '.jpg')
                print('删除前一张', last_img)
                last_img = None
        else:
            # Any other key keeps the pair and remembers it for 'e'.
            last_img = path_files + '/' + file
"[email protected]"
]
| |
ed202550c399b038c7bfb0bf2e966d9f0662b5d4 | 2d05050d0ada29f7680b4df20c10bb85b0530e45 | /python/tvm/contrib/tf_op/module.py | bcff2741630c5308254ab8df9ed28a5875b956ff | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"Zlib",
"LLVM-exception",
"BSD-2-Clause"
]
| permissive | apache/tvm | 87cb617f9a131fa44e1693303aaddf70e7a4c403 | d75083cd97ede706338ab413dbc964009456d01b | refs/heads/main | 2023-09-04T11:24:26.263032 | 2023-09-04T07:26:00 | 2023-09-04T07:26:00 | 70,746,484 | 4,575 | 1,903 | Apache-2.0 | 2023-09-14T19:06:33 | 2016-10-12T22:20:28 | Python | UTF-8 | Python | false | false | 4,901 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Module container of TensorFlow TVMDSO op"""
import tensorflow as tf
from tensorflow.python.framework import load_library
from tensorflow.python import platform
class OpModule:
    """Handle to an exported TVM DSO op implementation library.

    Hands out :class:`TensorFunc` wrappers so the named TVM functions can
    be invoked from TensorFlow as tensor-to-tensor operations.
    """

    def __init__(self, lib_path):
        # Location of the compiled TVM op shared library on disk.
        self.lib_path = lib_path

    def func(self, name, output_dtype=None, output_shape=None):
        """Wrap the TVM op *name* as a TensorFlow tensor-to-tensor function.

        Parameters
        ----------
        name: str
            function name
        output_dtype: str or TensorFlow datatype
            Output datatype, default is float32
        output_shape: List of integer/tf scalar tensor or tf shape tensor
            Output shape, default the same with first input's shape

        Returns
        ----------
        Func object that acts as TensorFlow tensor to tensor function.
        """
        return TensorFunc(self.lib_path, name, output_dtype, output_shape)

    def __getitem__(self, func_name):
        # module["name"] is shorthand for module.func("name") with the
        # default dtype and shape.
        return TensorFunc(self.lib_path, func_name, None, None)
class TensorFunc:
    """Function object that acts as TensorFlow tensor to tensor function."""

    def __init__(self, lib_path, func_name, output_dtype, output_shape):
        self.lib_path = lib_path
        self.func_name = func_name
        self.output_dtype = output_dtype

        # const(0) indicate invalid dynamic shape
        self.dynamic_output_shape = tf.constant(0, tf.int64)
        self.static_output_shape = None
        self.has_static_output_shape = False  # extra flag is required

        # A fully static shape (plain list of non-negative ints) is passed
        # through unchanged; any other non-None shape is packed into a
        # single int64 shape tensor.
        if self._is_static_shape(output_shape):
            self.static_output_shape = output_shape
            self.has_static_output_shape = True
        elif output_shape is not None:
            self.dynamic_output_shape = self._pack_shape_tensor(output_shape)

        self.module = self._load_platform_specific_library("libtvm_dso_op")
        self.tvm_dso_op = self.module.tvm_dso_op

    def apply(self, *params):
        # Forward the input tensors plus shape/dtype metadata to the custom
        # TF op that dispatches into the TVM library.
        return self.tvm_dso_op(
            params,
            dynamic_output_shape=self.dynamic_output_shape,
            static_output_shape=self.static_output_shape,
            has_static_output_shape=self.has_static_output_shape,
            lib_path=self.lib_path,
            func_name=self.func_name,
            output_dtype=self.output_dtype,
        )

    def __call__(self, *params):
        return self.apply(*params)

    def _load_platform_specific_library(self, lib_name):
        # Pick the shared-library suffix for the current OS, then load the
        # custom op library through TensorFlow.
        system = platform.system()
        if system == "Darwin":
            lib_file_name = lib_name + ".dylib"
        elif system == "Windows":
            lib_file_name = lib_name + ".dll"
        else:
            lib_file_name = lib_name + ".so"
        return load_library.load_op_library(lib_file_name)

    def _is_static_shape(self, shape):
        # "Static" means a plain Python list of non-negative ints; negative
        # dimensions are rejected outright.
        if shape is None or not isinstance(shape, list):
            return False
        for dim_value in shape:
            if not isinstance(dim_value, int):
                return False
            if dim_value < 0:
                raise Exception(f"Negative dimension is illegal: {dim_value}")
        return True

    def _pack_shape_tensor(self, shape):
        # Normalize a dynamic shape -- either a shape tensor or a list
        # mixing ints and scalar tensors -- into one int64 tensor.
        if isinstance(shape, tf.Tensor):
            if shape.dtype == tf.int32:
                shape = tf.cast(shape, tf.int64)
        elif isinstance(shape, list):
            shape_dims = []
            for dim_value in shape:
                if isinstance(dim_value, int):
                    shape_dims.append(tf.constant(dim_value, tf.int64))
                elif isinstance(dim_value, tf.Tensor) and dim_value.shape.rank == 0:
                    if dim_value.dtype == tf.int32:
                        dim_value = tf.cast(dim_value, tf.int64)
                    shape_dims.append(dim_value)
                else:
                    raise TypeError("Input shape dimension is neither scalar tensor nor int")
            shape = tf.stack(shape_dims)
        else:
            raise TypeError("Input shape is neither tensor nor list")
        return shape
| [
"[email protected]"
]
| |
03e2d01099f8601a427ced9e76c0efe84bdc6d95 | 947af25b72b5b3037443fae3fb22fa3a2f1de363 | /nextgisweb_mapserver/mapfile/keyword_tests.py | 8857613f74b6be20b27ab8cb8421416a1f7d64c7 | []
| no_license | guardeivid/nextgisweb_mapserver | 2b527b160b6cb017ae9c6a663e4171783a9c89d2 | 34376442fe6d56794c32523050ceb338a902228f | refs/heads/master | 2020-03-30T02:50:50.893436 | 2014-04-14T09:19:49 | 2014-04-14T09:19:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # -*- coding: utf-8 -*-
from lxml.etree import tostring, fromstring, RelaxNG
from .keyword import registry
def _test_shema(cls):
    # Build the Relax NG schema element for one mapfile directive, dump it
    # with line numbers (useful when validation of the grammar fails), and
    # let lxml's RelaxNG constructor verify the grammar is well-formed.
    # NOTE: Python 2 syntax (print statements).
    root = cls.element_schema()
    root.set('datatypeLibrary', 'http://www.w3.org/2001/XMLSchema-datatypes')

    xml = tostring(root, pretty_print=True)

    idx = 1
    print ''
    for s in xml.split('\n'):
        print "%03d: %s" % (idx, s)
        idx += 1
    print ''

    # Raises if the generated schema is not a valid Relax NG grammar.
    RelaxNG(fromstring(xml))
def test_schema():
    # Nose-style generator test: yield one (check, directive) pair per
    # registered directive so every schema is validated independently.
    for entry in registry:
        yield (_test_shema, entry)
| [
"[email protected]"
]
| |
7f5d989cb77b8fbbb53231f3820afe5b56fbe207 | 18f0ad99e21e2e35126f8c3c28079d358fa2129a | /SnakeBot/buzzer/code.py | 6488f15e023e6bf5709d147c707884478c919297 | [
"MIT"
]
import time
import random

from adafruit_crickit import crickit

# Direction flags used by tack().
LEFT = False
RIGHT = True

# Seed from uptime so each power-on produces different random turns.
random.seed(int(time.monotonic()))

ss = crickit.seesaw

left_wheel = crickit.dc_motor_1
right_wheel = crickit.dc_motor_2

# Bumper switches on the Crickit signal pins; configured with pull-ups,
# so a pressed bumper reads low.
RIGHT_BUMPER = crickit.SIGNAL1
LEFT_BUMPER = crickit.SIGNAL2
CENTER_BUMPER = crickit.SIGNAL3

ss.pin_mode(RIGHT_BUMPER, ss.INPUT_PULLUP)
ss.pin_mode(LEFT_BUMPER, ss.INPUT_PULLUP)
ss.pin_mode(CENTER_BUMPER, ss.INPUT_PULLUP)
# These allow easy correction for motor speed variation.
# Factors are determined by observation and fiddling.
# Start with both having a factor of 1.0 (i.e. none) and
# adjust until the bot goes more or less straight
def set_right(speed):
    # Right motor is scaled down: it runs faster than the left on this bot.
    right_wheel.throttle = speed * 0.9


def set_left(speed):
    left_wheel.throttle = speed


# Uncomment this to find the above factors
# set_right(1.0)
# set_left(1.0)
# while True:
#     pass
# Check for bumper activation and move away accordingly
# Returns False if we got clear, True if we gave up
def react_to_bumpers():
    attempt_count = 0
    # keep trying to back away and turn until we're free
    while True:
        # give up after 3 tries
        if attempt_count == 3:
            return True

        # Bumpers are active-low (pull-ups): pressed reads as 0.
        bumped_left = not ss.digital_read(LEFT_BUMPER)
        bumped_right = not ss.digital_read(RIGHT_BUMPER)
        bumped_center = not ss.digital_read(CENTER_BUMPER)

        # Didn't bump into anything, we're done here
        if not bumped_left and not bumped_right and not bumped_center:
            return False

        # If the middle bumper was triggered, randomly pick a way to turn
        if bumped_center:
            # ~50/50 chance of treating it as a left bump; right is the complement.
            bumped_left |= random.randrange(10) < 5
            bumped_right = not bumped_left

        # Back away a bit
        set_left(-0.5)
        set_right(-0.5)
        time.sleep(0.5)

        # If we bumped on the left, turn to the right
        if bumped_left:
            set_left(1.0)
            set_right(0.0)
        # If we bumped on the right, turn left
        elif bumped_right:
            set_left(0.0)
            set_right(1.0)

        # time to turn for
        time.sleep(random.choice([0.2, 0.3, 0.4]))
        attempt_count += 1
def tack(direction, duration):
    # Veer in `direction` (LEFT/RIGHT) for `duration` seconds by running
    # the outer wheel at full speed, polling the bumpers the whole time.
    # Returns True if react_to_bumpers() gave up, False otherwise.
    target_time = time.monotonic() + duration
    if direction == LEFT:
        set_left(0.25)
        set_right(1.0)
    else:
        set_left(1.0)
        set_right(0.25)
    while time.monotonic() < target_time:
        # Any bumper reading low (pressed) interrupts the tack.
        if not(ss.digital_read(LEFT_BUMPER) and
               ss.digital_read(RIGHT_BUMPER) and
               ss.digital_read(CENTER_BUMPER)):
            return react_to_bumpers()
    return False
# Zig-zag forward, alternating left and right tacks, until
# react_to_bumpers() reports it gave up freeing the bot.
while True:
    if tack(LEFT, 0.75):
        break
    if tack(RIGHT, 0.75):
        break

# Stuck: stop both wheels.
set_left(0)
set_right(0)

# Signal for help forever: three short pulses on drive 2 (the buzzer),
# repeated every 10 seconds.
while True:
    for _ in range(3):
        crickit.drive_2.fraction = 1.0
        time.sleep(0.1)
        crickit.drive_2.fraction = 0.0
        time.sleep(.2)
    time.sleep(10.0)
| [
"[email protected]"
]
| |
d5a2f49110fd363deb27708c646b22143667b47c | 5e381364c2ab31ff3618369085afffba6caa8edb | /recipes/xtr/all/conanfile.py | e4caa905f0a2d8e333cbaa86e7345766dda819a1 | [
"MIT"
]
| permissive | CAMOBAP/conan-center-index | 16aea68a6d22da22831ba985773125e8eda08f00 | 67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1 | refs/heads/master | 2023-07-30T08:58:57.285571 | 2021-10-02T14:57:54 | 2021-10-02T14:57:54 | 323,262,699 | 1 | 0 | MIT | 2021-05-29T13:37:04 | 2020-12-21T07:30:02 | Python | UTF-8 | Python | false | false | 3,685 | py | from conans import ConanFile, AutoToolsBuildEnvironment, tools
from conans.errors import ConanInvalidConfiguration
import os
class XtrConan(ConanFile):
    """Conan recipe packaging XTR, a C++ logging library for
    low-latency / real-time environments."""

    name = "xtr"
    description = \
        "C++ Logging Library for Low-latency or Real-time Environments"
    topics = ("xtr", "logging", "logger")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/choll/xtr"
    license = "MIT"

    settings = "os", "arch", "compiler", "build_type"
    options = {
        "fPIC": [True, False],
        "enable_exceptions": [True, False],
        "enable_lto": [True, False],
    }
    default_options = {
        "fPIC": True,
        "enable_exceptions": True,
        "enable_lto": False,
    }
    # The upstream project builds with a GNU makefile.
    generators = "make"

    def requirements(self):
        self.requires("fmt/7.1.3")

    def validate(self):
        # Only x86-64 FreeBSD/Linux with gcc or clang is supported.
        if self.settings.os not in ("FreeBSD", "Linux"):
            raise ConanInvalidConfiguration(f"Unsupported os={self.settings.os}")
        if self.settings.compiler not in ("gcc", "clang"):
            raise ConanInvalidConfiguration(f"Unsupported compiler={self.settings.compiler}")
        if self.settings.arch not in ("x86_64", ):
            raise ConanInvalidConfiguration(f"Unsupported arch={self.settings.arch}")

        # XTR needs C++20 and a recent enough compiler.
        minimal_cpp_standard = 20
        if self.settings.compiler.cppstd:
            tools.check_min_cppstd(self, minimal_cpp_standard)
        minimum_version = {"gcc": 10, "clang": 12}
        compiler = str(self.settings.compiler)
        version = tools.Version(self.settings.compiler.version)
        if version < minimum_version[compiler]:
            raise ConanInvalidConfiguration(
                f"{self.name} requires {self.settings.compiler} version {minimum_version[compiler]} or later")

    def source(self):
        tools.get(**self.conan_data["sources"][self.version], strip_root=True)

    def build(self):
        # FIXME: should be done in validate (but version is not yet available there)
        if tools.Version(self.deps_cpp_info["fmt"].version) < 6:
            raise ConanInvalidConfiguration("The version of fmt must >= 6.0.0")
        if tools.Version(self.deps_cpp_info["fmt"].version) == "8.0.0" and self.settings.compiler == "clang":
            raise ConanInvalidConfiguration("fmt/8.0.0 is known to not work with clang (https://github.com/fmtlib/fmt/issues/2377)")

        autotools = AutoToolsBuildEnvironment(self)
        env_build_vars = autotools.vars
        # Conan uses LIBS, presumably following autotools conventions, while
        # the XTR makefile follows GNU make conventions and uses LDLIBS
        env_build_vars["LDLIBS"] = env_build_vars["LIBS"]
        # fPIC and Release/Debug/RelWithDebInfo etc are set via CXXFLAGS,
        # CPPFLAGS etc.
        env_build_vars["EXCEPTIONS"] = \
            str(int(bool(self.options.enable_exceptions)))
        env_build_vars["LTO"] = str(int(bool(self.options.enable_lto)))
        autotools.make(vars=env_build_vars)
        # xtrctl (the control CLI) is a separate make target.
        autotools.make(vars=env_build_vars, target="xtrctl")

    def package(self):
        # Install license, headers, the static library and the xtrctl tool.
        self.copy("LICENSE", dst="licenses")
        self.copy("*.hpp", src="include", dst="include")
        self.copy("*/libxtr.a", src="build", dst="lib", keep_path=False)
        self.copy("*/xtrctl", src="build", dst="bin", keep_path=False)
        # Man pages are not packaged.
        tools.rmdir(os.path.join(self.package_folder, "man"))

    def package_info(self):
        self.cpp_info.libs = ["xtr"]
        self.cpp_info.system_libs = ["pthread"]
        bin_path = os.path.join(self.package_folder, "bin")
        self.output.info(f"Appending PATH environment variable: {bin_path}")
        self.env_info.PATH.append(bin_path)
| [
"[email protected]"
]
| |
e061f62c09a8e8d7f78c9313d2d96595f9dbd27a | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/RUCKUS-WLAN-MIB.py | 0603b37cfd38c7a3260f7fc34fa80689d922e6f4 | [
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 48,959 | py | #
# PySNMP MIB module RUCKUS-WLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RUCKUS-WLAN-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:50:58 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint")
ifIndex, IpAddress, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "IpAddress", "InterfaceIndex")
ruckusCommonWLANModule, = mibBuilder.importSymbols("RUCKUS-ROOT-MIB", "ruckusCommonWLANModule")
RuckusSSID, RuckusdB, RuckusWEPKey, RuckusAdminStatus, RuckusRadioMode, RuckusWPAPassPhrase = mibBuilder.importSymbols("RUCKUS-TC-MIB", "RuckusSSID", "RuckusdB", "RuckusWEPKey", "RuckusAdminStatus", "RuckusRadioMode", "RuckusWPAPassPhrase")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, TimeTicks, ModuleIdentity, IpAddress, Gauge32, ObjectIdentity, NotificationType, MibIdentifier, Counter32, iso, Unsigned32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "TimeTicks", "ModuleIdentity", "IpAddress", "Gauge32", "ObjectIdentity", "NotificationType", "MibIdentifier", "Counter32", "iso", "Unsigned32", "Bits")
TextualConvention, MacAddress, DisplayString, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "MacAddress", "DisplayString", "RowStatus", "TruthValue")
ruckusWLANMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1))
if mibBuilder.loadTexts: ruckusWLANMIB.setLastUpdated('201010150800Z')
if mibBuilder.loadTexts: ruckusWLANMIB.setOrganization('Ruckus Wireless, Inc.')
ruckusWLANObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1))
ruckusWLANInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1))
ruckusWLANStaInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2))
ruckusWLANSecurityInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3))
ruckusWLANEvents = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 2))
ruckusWLANTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1), )
if mibBuilder.loadTexts: ruckusWLANTable.setStatus('current')
ruckusWLANEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusWLANEntry.setStatus('current')
ruckusWLANSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 1), RuckusSSID()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANSSID.setStatus('current')
ruckusWLANBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANBSSID.setStatus('current')
ruckusWLANBSSType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("station", 1), ("master", 2), ("independent", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANBSSType.setStatus('current')
ruckusWLANOperationalRateSet = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANOperationalRateSet.setStatus('current')
ruckusWLANBeaconPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(100, 1000))).setUnits('milli seconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANBeaconPeriod.setStatus('current')
ruckusWLANDTIMPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANDTIMPeriod.setStatus('current')
ruckusWLANRTSThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(256, 2346))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANRTSThreshold.setStatus('current')
ruckusWLANFragmentationThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(256, 2346))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANFragmentationThreshold.setStatus('current')
ruckusWLANRadioMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 9), RuckusRadioMode()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANRadioMode.setStatus('current')
ruckusWLANChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 14))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANChannel.setStatus('current')
ruckusWLANWDSEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 11), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWDSEnable.setStatus('current')
ruckusWLANAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANAdminStatus.setStatus('current')
ruckusWLANProtectionMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("ctsOnly", 2), ("ctsRts", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANProtectionMode.setStatus('current')
ruckusWLANName = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 14), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 16))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANName.setStatus('current')
ruckusWLANSSIDBcastDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 15), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANSSIDBcastDisable.setStatus('current')
ruckusWLANVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANVlanID.setStatus('current')
ruckusWLANIGMPSnooping = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANIGMPSnooping.setStatus('current')
ruckusWLANSuppDataRatesTxTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 2), )
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesTxTable.setStatus('current')
ruckusWLANSuppDataRatesTxEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANSuppDataRatesTxIndex"))
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesTxEntry.setStatus('current')
ruckusWLANSuppDataRatesTxIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 2, 1, 1), Integer32())
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesTxIndex.setStatus('current')
ruckusWLANSuppDataRatesTxValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesTxValue.setStatus('current')
ruckusWLANSuppDataRatesRxTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 3), )
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesRxTable.setStatus('current')
ruckusWLANSuppDataRatesRxEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANSuppDataRatesRxIndex"))
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesRxEntry.setStatus('current')
ruckusWLANSuppDataRatesRxIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesRxIndex.setStatus('current')
ruckusWLANSuppDataRatesRxValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 3, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANSuppDataRatesRxValue.setStatus('current')
ruckusWLANStaStatsTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1), )
if mibBuilder.loadTexts: ruckusWLANStaStatsTable.setStatus('current')
ruckusWLANStaStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANStaStatsMacAddr"))
if mibBuilder.loadTexts: ruckusWLANStaStatsEntry.setStatus('current')
ruckusWLANStaStatsMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 1), MacAddress())
if mibBuilder.loadTexts: ruckusWLANStaStatsMacAddr.setStatus('current')
ruckusWLANStaStatsSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 2), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsSSID.setStatus('current')
ruckusWLANStaStatsRxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxDataFrames.setStatus('current')
ruckusWLANStaStatsRxMgmtFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxMgmtFrames.setStatus('current')
ruckusWLANStaStatsRxCtrlFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxCtrlFrames.setStatus('current')
ruckusWLANStaStatsRxUnicastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxUnicastFrames.setStatus('current')
ruckusWLANStaStatsRxMulticastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxMulticastFrames.setStatus('current')
ruckusWLANStaStatsRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxBytes.setStatus('current')
ruckusWLANStaStatsRxDup = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxDup.setStatus('current')
ruckusWLANStaStatsRxNoPrivacy = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxNoPrivacy.setStatus('current')
ruckusWLANStaStatsRxWEPFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxWEPFail.setStatus('current')
ruckusWLANStaStatsRxDemicFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxDemicFail.setStatus('current')
ruckusWLANStaStatsTxDecap = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxDecap.setStatus('current')
ruckusWLANStaStatsRxDefrag = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxDefrag.setStatus('current')
ruckusWLANStaStatsTxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxDataFrames.setStatus('current')
ruckusWLANStaStatsTxMgmtFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxMgmtFrames.setStatus('current')
ruckusWLANStaStatsTxUnicastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxUnicastFrames.setStatus('current')
ruckusWLANStaStatsTxMulticastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxMulticastFrames.setStatus('current')
ruckusWLANStaStatsTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxBytes.setStatus('current')
ruckusWLANStaStatsTxAssoc = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxAssoc.setStatus('current')
ruckusWLANStaStatsTxAssocFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxAssocFail.setStatus('current')
ruckusWLANStaStatsTxAuth = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxAuth.setStatus('current')
ruckusWLANStaStatsTxAuthFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxAuthFail.setStatus('current')
ruckusWLANStaStatsRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRSSI.setStatus('current')
ruckusWLANStaStatsTxRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxRxBytes.setStatus('current')
ruckusWLANStaStatsTxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 26), Unsigned32()).setUnits('Bps').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxRate.setStatus('current')
ruckusWLANStaStatsRxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 27), Unsigned32()).setUnits('Bps').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsRxRate.setStatus('current')
ruckusWLANStaStatsTxDropRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 1, 1, 28), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaStatsTxDropRate.setStatus('current')
ruckusWLANStaTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2), )
if mibBuilder.loadTexts: ruckusWLANStaTable.setStatus('current')
ruckusWLANStaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANStaAddr"))
if mibBuilder.loadTexts: ruckusWLANStaEntry.setStatus('current')
ruckusWLANStaAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaAddr.setStatus('current')
ruckusWLANStaRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRssi.setStatus('current')
ruckusWLANStaErp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaErp.setStatus('current')
ruckusWLANState = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANState.setStatus('current')
ruckusWLANStaCapInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaCapInfo.setStatus('current')
ruckusWLANStaAssocid = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaAssocid.setStatus('current')
ruckusWLANStaOpMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaOpMode.setStatus('current')
ruckusWLANStaIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaIdle.setStatus('current')
ruckusWLANStaRates = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRates.setStatus('current')
ruckusWLANStaIpaddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 16), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaIpaddr.setStatus('current')
ruckusWLANStaAuthMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 2, 1, 20), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaAuthMode.setStatus('current')
ruckusWLANStaMQTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3), )
if mibBuilder.loadTexts: ruckusWLANStaMQTable.setStatus('current')
ruckusWLANStaMQEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANStaMQAddr"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANStaMQQIndex"))
if mibBuilder.loadTexts: ruckusWLANStaMQEntry.setStatus('current')
ruckusWLANStaMQAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 1), MacAddress())
if mibBuilder.loadTexts: ruckusWLANStaMQAddr.setStatus('current')
ruckusWLANStaMQQIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4)))
if mibBuilder.loadTexts: ruckusWLANStaMQQIndex.setStatus('current')
ruckusWLANStaMQPktsQueued = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQPktsQueued.setStatus('current')
ruckusWLANStaMQNumEnqueued = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQNumEnqueued.setStatus('current')
ruckusWLANStaMQNumDequeued = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQNumDequeued.setStatus('current')
ruckusWLANStaMQNumRequeued = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQNumRequeued.setStatus('current')
ruckusWLANStaMQNumDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQNumDropped.setStatus('current')
ruckusWLANStaMQNumDeactivateQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQNumDeactivateQueue.setStatus('current')
ruckusWLANStaMQAveIpg = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQAveIpg.setStatus('current')
ruckusWLANStaMQMinIpg = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQMinIpg.setStatus('current')
ruckusWLANStaMQMaxIpg = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQMaxIpg.setStatus('current')
ruckusWLANStaMQAveTxLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQAveTxLatency.setStatus('current')
ruckusWLANStaMQMinTxLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQMinTxLatency.setStatus('current')
ruckusWLANStaMQMaxTxLatency = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 3, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaMQMaxTxLatency.setStatus('current')
ruckusWLANStaRksTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4), )
if mibBuilder.loadTexts: ruckusWLANStaRksTable.setStatus('current')
ruckusWLANStaRksEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANStaRksAddr"))
if mibBuilder.loadTexts: ruckusWLANStaRksEntry.setStatus('current')
ruckusWLANStaRksAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 1), MacAddress())
if mibBuilder.loadTexts: ruckusWLANStaRksAddr.setStatus('current')
ruckusWLANStaRksRxGoodFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksRxGoodFrames.setStatus('current')
ruckusWLANStaRksRxCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksRxCrcErrors.setStatus('current')
ruckusWLANStaRksTxGoodFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxGoodFrames.setStatus('current')
ruckusWLANStaRksTxRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxRetries.setStatus('current')
ruckusWLANStaRksTxDiscardExRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxDiscardExRetries.setStatus('current')
ruckusWLANStaRksTxRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxRate.setStatus('current')
ruckusWLANStaRksTxKbps = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxKbps.setStatus('current')
ruckusWLANStaRksTxPer = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxPer.setStatus('current')
ruckusWLANStaRksTxRssi = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 2, 4, 1, 10), RuckusdB()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStaRksTxRssi.setStatus('current')
ruckusWLANSecurityTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 1), )
if mibBuilder.loadTexts: ruckusWLANSecurityTable.setStatus('current')
ruckusWLANSecurityEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusWLANSecurityEntry.setStatus('current')
ruckusWLANSecurityMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("wep", 2), ("wpa", 3))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANSecurityMode.setStatus('current')
ruckusWLANSecurityAuthMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("open", 1), ("wep-shared", 2), ("auto", 3), ("wpa-eap-802-1x", 4))).clone('open')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANSecurityAuthMode.setStatus('current')
ruckusWLANSecurityEncryMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("tkip", 2), ("aes", 3), ("auto", 4))).clone('none')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANSecurityEncryMode.setStatus('current')
ruckusWLANWEPTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 2), )
if mibBuilder.loadTexts: ruckusWLANWEPTable.setStatus('current')
ruckusWLANWEPEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusWLANWEPEntry.setStatus('current')
ruckusWLANWEPEncryLenType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("bit-64", 1), ("bit-128", 2))).clone('bit-128')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWEPEncryLenType.setStatus('current')
ruckusWLANWEPKeyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 2, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWEPKeyIndex.setStatus('current')
ruckusWLANWEPKey = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(3, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWEPKey.setStatus('current')
ruckusWLANWPATable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3), )
if mibBuilder.loadTexts: ruckusWLANWPATable.setStatus('current')
ruckusWLANWPAEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusWLANWPAEntry.setStatus('current')
ruckusWLANWPAVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("wpa", 1), ("wpa2", 2), ("auto", 3))).clone('wpa')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWPAVersion.setStatus('current')
ruckusWLANWPAKey = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 63))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWPAKey.setStatus('current')
ruckusWLANWPARadiusNasId = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3, 1, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWPARadiusNasId.setStatus('current')
ruckusWLANWPAReAuthenticationPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 3, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 3600)).clone(600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANWPAReAuthenticationPeriod.setStatus('current')
ruckusWLANAAAServerTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4), )
if mibBuilder.loadTexts: ruckusWLANAAAServerTable.setStatus('current')
ruckusWLANAAAServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "RUCKUS-WLAN-MIB", "ruckusWLANSeverMode"))
if mibBuilder.loadTexts: ruckusWLANAAAServerEntry.setStatus('current')
ruckusWLANSeverMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("auth", 1), ("account", 2))))
if mibBuilder.loadTexts: ruckusWLANSeverMode.setStatus('current')
ruckusWLANServerIpAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 40))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANServerIpAddress.setStatus('current')
ruckusWLANServerPort = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4, 1, 12), Integer32().clone(1812)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANServerPort.setStatus('current')
ruckusWLANServerSecret = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 3, 4, 1, 15), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusWLANServerSecret.setStatus('current')
ruckusWLANStatsTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4), )
if mibBuilder.loadTexts: ruckusWLANStatsTable.setStatus('current')
ruckusWLANStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusWLANStatsEntry.setStatus('current')
ruckusWLANStatsSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 1), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsSSID.setStatus('current')
ruckusWLANStatsBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsBSSID.setStatus('current')
ruckusWLANStatsNumSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumSta.setStatus('current')
ruckusWLANStatsNumAuthSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAuthSta.setStatus('current')
ruckusWLANStatsNumAuthReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAuthReq.setStatus('current')
ruckusWLANStatsNumAuthResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAuthResp.setStatus('current')
ruckusWLANStatsNumAuthSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAuthSuccess.setStatus('current')
ruckusWLANStatsNumAuthFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAuthFail.setStatus('current')
ruckusWLANStatsNumAssocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAssocReq.setStatus('current')
ruckusWLANStatsNumAssocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAssocResp.setStatus('current')
ruckusWLANStatsNumReAssocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumReAssocReq.setStatus('current')
ruckusWLANStatsNumReAssocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumReAssocResp.setStatus('current')
ruckusWLANStatsNumAssocSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAssocSuccess.setStatus('current')
ruckusWLANStatsNumAssocFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsNumAssocFail.setStatus('current')
ruckusWLANStatsAssocFailRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 15), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsAssocFailRate.setStatus('current')
ruckusWLANStatsAuthFailRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 16), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsAuthFailRate.setStatus('current')
ruckusWLANStatsAssocSuccessRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 17), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsAssocSuccessRate.setStatus('current')
ruckusWLANStatsRxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxDataFrames.setStatus('current')
ruckusWLANStatsRxMgmtFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxMgmtFrames.setStatus('current')
ruckusWLANStatsRxCtrlFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxCtrlFrames.setStatus('current')
ruckusWLANStatsRxUnicastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxUnicastFrames.setStatus('current')
ruckusWLANStatsRxMulticastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxMulticastFrames.setStatus('current')
ruckusWLANStatsRxBroadcastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxBroadcastFrames.setStatus('current')
ruckusWLANStatsRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxBytes.setStatus('current')
ruckusWLANStatsRxDup = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxDup.setStatus('current')
ruckusWLANStatsRxNoPrivacy = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxNoPrivacy.setStatus('current')
ruckusWLANStatsRxWEPFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxWEPFail.setStatus('current')
ruckusWLANStatsRxDecryptCRCError = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxDecryptCRCError.setStatus('current')
ruckusWLANStatsRxMICError = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxMICError.setStatus('current')
ruckusWLANStatsRxDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxDrops.setStatus('current')
ruckusWLANStatsRxErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxErrors.setStatus('current')
ruckusWLANStatsRxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxFrames.setStatus('current')
ruckusWLANStatsRxDropRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 33), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsRxDropRate.setStatus('current')
ruckusWLANStatsTxDataFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxDataFrames.setStatus('current')
ruckusWLANStatsTxMgmtFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxMgmtFrames.setStatus('current')
ruckusWLANStatsTxUnicastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 36), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxUnicastFrames.setStatus('current')
ruckusWLANStatsTxMulticastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 37), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxMulticastFrames.setStatus('current')
ruckusWLANStatsTxBroadcastFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 38), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxBroadcastFrames.setStatus('current')
ruckusWLANStatsTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxBytes.setStatus('current')
ruckusWLANStatsTxDrops = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 40), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxDrops.setStatus('current')
ruckusWLANStatsTxErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 41), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxErrors.setStatus('current')
ruckusWLANStatsTxFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 42), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsTxFrames.setStatus('current')
ruckusWLANStatsPeriodRxErrorRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 43), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsPeriodRxErrorRate.setStatus('current')
ruckusWLANStatsPeriodTxErrorRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 44), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsPeriodTxErrorRate.setStatus('current')
ruckusWLANStatsPeriodAssocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 45), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsPeriodAssocReq.setStatus('current')
ruckusWLANStatsPeriodAssocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 46), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsPeriodAssocResp.setStatus('current')
ruckusWLANStatsPeriodAssocSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 1, 6, 1, 1, 1, 4, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusWLANStatsPeriodAssocSuccess.setStatus('current')
mibBuilder.exportSymbols("RUCKUS-WLAN-MIB", ruckusWLANStaMQNumRequeued=ruckusWLANStaMQNumRequeued, ruckusWLANSSIDBcastDisable=ruckusWLANSSIDBcastDisable, ruckusWLANStatsNumReAssocResp=ruckusWLANStatsNumReAssocResp, ruckusWLANStaOpMode=ruckusWLANStaOpMode, ruckusWLANSuppDataRatesRxIndex=ruckusWLANSuppDataRatesRxIndex, ruckusWLANStatsTxDrops=ruckusWLANStatsTxDrops, ruckusWLANStatsRxMgmtFrames=ruckusWLANStatsRxMgmtFrames, ruckusWLANStaMQAveTxLatency=ruckusWLANStaMQAveTxLatency, ruckusWLANStaAuthMode=ruckusWLANStaAuthMode, ruckusWLANStaTable=ruckusWLANStaTable, ruckusWLANStaEntry=ruckusWLANStaEntry, ruckusWLANStaStatsTxAuth=ruckusWLANStaStatsTxAuth, ruckusWLANServerSecret=ruckusWLANServerSecret, ruckusWLANStaStatsTxAuthFail=ruckusWLANStaStatsTxAuthFail, ruckusWLANWPAVersion=ruckusWLANWPAVersion, ruckusWLANStaStatsRxRate=ruckusWLANStaStatsRxRate, ruckusWLANStatsTxUnicastFrames=ruckusWLANStatsTxUnicastFrames, ruckusWLANStatsTxMgmtFrames=ruckusWLANStatsTxMgmtFrames, ruckusWLANAAAServerEntry=ruckusWLANAAAServerEntry, ruckusWLANInfo=ruckusWLANInfo, ruckusWLANStaRates=ruckusWLANStaRates, ruckusWLANStatsSSID=ruckusWLANStatsSSID, ruckusWLANFragmentationThreshold=ruckusWLANFragmentationThreshold, ruckusWLANStatsBSSID=ruckusWLANStatsBSSID, ruckusWLANStaMQNumDeactivateQueue=ruckusWLANStaMQNumDeactivateQueue, ruckusWLANStatsNumAssocSuccess=ruckusWLANStatsNumAssocSuccess, ruckusWLANStaMQNumDropped=ruckusWLANStaMQNumDropped, ruckusWLANWEPKey=ruckusWLANWEPKey, ruckusWLANStatsRxFrames=ruckusWLANStatsRxFrames, ruckusWLANWPAReAuthenticationPeriod=ruckusWLANWPAReAuthenticationPeriod, ruckusWLANStatsTxMulticastFrames=ruckusWLANStatsTxMulticastFrames, ruckusWLANIGMPSnooping=ruckusWLANIGMPSnooping, ruckusWLANStatsRxDecryptCRCError=ruckusWLANStatsRxDecryptCRCError, ruckusWLANStaStatsMacAddr=ruckusWLANStaStatsMacAddr, ruckusWLANWEPTable=ruckusWLANWEPTable, ruckusWLANStatsRxDrops=ruckusWLANStatsRxDrops, ruckusWLANStaStatsRxMgmtFrames=ruckusWLANStaStatsRxMgmtFrames, 
ruckusWLANStaAssocid=ruckusWLANStaAssocid, ruckusWLANStatsRxDup=ruckusWLANStatsRxDup, ruckusWLANStatsNumReAssocReq=ruckusWLANStatsNumReAssocReq, ruckusWLANStatsRxUnicastFrames=ruckusWLANStatsRxUnicastFrames, ruckusWLANSSID=ruckusWLANSSID, ruckusWLANBSSType=ruckusWLANBSSType, ruckusWLANStatsRxErrors=ruckusWLANStatsRxErrors, ruckusWLANStaStatsTxBytes=ruckusWLANStaStatsTxBytes, ruckusWLANStatsTxFrames=ruckusWLANStatsTxFrames, ruckusWLANStatsPeriodRxErrorRate=ruckusWLANStatsPeriodRxErrorRate, ruckusWLANStaStatsTxDropRate=ruckusWLANStaStatsTxDropRate, ruckusWLANOperationalRateSet=ruckusWLANOperationalRateSet, ruckusWLANStaStatsRxDemicFail=ruckusWLANStaStatsRxDemicFail, ruckusWLANStatsRxBytes=ruckusWLANStatsRxBytes, ruckusWLANStatsPeriodAssocResp=ruckusWLANStatsPeriodAssocResp, ruckusWLANStatsPeriodAssocSuccess=ruckusWLANStatsPeriodAssocSuccess, ruckusWLANStaStatsRxBytes=ruckusWLANStaStatsRxBytes, ruckusWLANWEPEntry=ruckusWLANWEPEntry, ruckusWLANStaMQTable=ruckusWLANStaMQTable, ruckusWLANVlanID=ruckusWLANVlanID, ruckusWLANStaStatsTxAssocFail=ruckusWLANStaStatsTxAssocFail, ruckusWLANStaRksTxRetries=ruckusWLANStaRksTxRetries, ruckusWLANState=ruckusWLANState, ruckusWLANStaMQNumEnqueued=ruckusWLANStaMQNumEnqueued, ruckusWLANSecurityMode=ruckusWLANSecurityMode, ruckusWLANStaRksRxGoodFrames=ruckusWLANStaRksRxGoodFrames, ruckusWLANWDSEnable=ruckusWLANWDSEnable, ruckusWLANStaStatsRxWEPFail=ruckusWLANStaStatsRxWEPFail, ruckusWLANStaStatsTxUnicastFrames=ruckusWLANStaStatsTxUnicastFrames, ruckusWLANObjects=ruckusWLANObjects, ruckusWLANBeaconPeriod=ruckusWLANBeaconPeriod, ruckusWLANStaRksTxPer=ruckusWLANStaRksTxPer, ruckusWLANStatsRxNoPrivacy=ruckusWLANStatsRxNoPrivacy, ruckusWLANStatsTxBytes=ruckusWLANStatsTxBytes, ruckusWLANStaMQMaxIpg=ruckusWLANStaMQMaxIpg, ruckusWLANSuppDataRatesRxValue=ruckusWLANSuppDataRatesRxValue, ruckusWLANStaRksAddr=ruckusWLANStaRksAddr, ruckusWLANStatsNumAuthFail=ruckusWLANStatsNumAuthFail, ruckusWLANStaCapInfo=ruckusWLANStaCapInfo, 
ruckusWLANStatsRxBroadcastFrames=ruckusWLANStatsRxBroadcastFrames, ruckusWLANStatsAuthFailRate=ruckusWLANStatsAuthFailRate, ruckusWLANStatsEntry=ruckusWLANStatsEntry, ruckusWLANProtectionMode=ruckusWLANProtectionMode, ruckusWLANStatsRxDataFrames=ruckusWLANStatsRxDataFrames, ruckusWLANRadioMode=ruckusWLANRadioMode, ruckusWLANEntry=ruckusWLANEntry, ruckusWLANStatsRxMICError=ruckusWLANStatsRxMICError, ruckusWLANTable=ruckusWLANTable, ruckusWLANStaStatsEntry=ruckusWLANStaStatsEntry, ruckusWLANStatsTxErrors=ruckusWLANStatsTxErrors, ruckusWLANRTSThreshold=ruckusWLANRTSThreshold, ruckusWLANStaMQMaxTxLatency=ruckusWLANStaMQMaxTxLatency, ruckusWLANStaMQEntry=ruckusWLANStaMQEntry, ruckusWLANStaRksTxGoodFrames=ruckusWLANStaRksTxGoodFrames, ruckusWLANStatsTable=ruckusWLANStatsTable, ruckusWLANStatsAssocSuccessRate=ruckusWLANStatsAssocSuccessRate, ruckusWLANStaStatsTxRxBytes=ruckusWLANStaStatsTxRxBytes, ruckusWLANSecurityEntry=ruckusWLANSecurityEntry, ruckusWLANStaRssi=ruckusWLANStaRssi, ruckusWLANStatsNumAuthResp=ruckusWLANStatsNumAuthResp, ruckusWLANSuppDataRatesRxTable=ruckusWLANSuppDataRatesRxTable, ruckusWLANChannel=ruckusWLANChannel, ruckusWLANStaStatsTxAssoc=ruckusWLANStaStatsTxAssoc, ruckusWLANStaStatsRxDefrag=ruckusWLANStaStatsRxDefrag, ruckusWLANStatsRxWEPFail=ruckusWLANStatsRxWEPFail, ruckusWLANStatsNumAuthSta=ruckusWLANStatsNumAuthSta, ruckusWLANAdminStatus=ruckusWLANAdminStatus, ruckusWLANWEPKeyIndex=ruckusWLANWEPKeyIndex, ruckusWLANStaMQMinTxLatency=ruckusWLANStaMQMinTxLatency, ruckusWLANDTIMPeriod=ruckusWLANDTIMPeriod, ruckusWLANStaIdle=ruckusWLANStaIdle, ruckusWLANStaMQMinIpg=ruckusWLANStaMQMinIpg, ruckusWLANName=ruckusWLANName, ruckusWLANStaAddr=ruckusWLANStaAddr, ruckusWLANStatsPeriodAssocReq=ruckusWLANStatsPeriodAssocReq, ruckusWLANStaStatsTable=ruckusWLANStaStatsTable, ruckusWLANStatsAssocFailRate=ruckusWLANStatsAssocFailRate, ruckusWLANStaRksTable=ruckusWLANStaRksTable, ruckusWLANStaStatsRxDup=ruckusWLANStaStatsRxDup, 
ruckusWLANStatsNumAssocReq=ruckusWLANStatsNumAssocReq, ruckusWLANStaMQPktsQueued=ruckusWLANStaMQPktsQueued, ruckusWLANSecurityTable=ruckusWLANSecurityTable, ruckusWLANStatsTxDataFrames=ruckusWLANStatsTxDataFrames, ruckusWLANWEPEncryLenType=ruckusWLANWEPEncryLenType, ruckusWLANStaStatsTxMgmtFrames=ruckusWLANStaStatsTxMgmtFrames, ruckusWLANStatsPeriodTxErrorRate=ruckusWLANStatsPeriodTxErrorRate, ruckusWLANStaRksTxRate=ruckusWLANStaRksTxRate, ruckusWLANWPAEntry=ruckusWLANWPAEntry, ruckusWLANStaStatsRxCtrlFrames=ruckusWLANStaStatsRxCtrlFrames, ruckusWLANStaStatsRSSI=ruckusWLANStaStatsRSSI, ruckusWLANStaErp=ruckusWLANStaErp, ruckusWLANStaIpaddr=ruckusWLANStaIpaddr, ruckusWLANSecurityAuthMode=ruckusWLANSecurityAuthMode, ruckusWLANStaMQAveIpg=ruckusWLANStaMQAveIpg, ruckusWLANSecurityEncryMode=ruckusWLANSecurityEncryMode, ruckusWLANSecurityInfo=ruckusWLANSecurityInfo, PYSNMP_MODULE_ID=ruckusWLANMIB, ruckusWLANMIB=ruckusWLANMIB, ruckusWLANStatsNumAuthReq=ruckusWLANStatsNumAuthReq, ruckusWLANServerPort=ruckusWLANServerPort, ruckusWLANStatsNumAssocResp=ruckusWLANStatsNumAssocResp, ruckusWLANStaStatsRxDataFrames=ruckusWLANStaStatsRxDataFrames, ruckusWLANStaInfo=ruckusWLANStaInfo, ruckusWLANStaRksTxKbps=ruckusWLANStaRksTxKbps, ruckusWLANStatsNumSta=ruckusWLANStatsNumSta, ruckusWLANSuppDataRatesTxTable=ruckusWLANSuppDataRatesTxTable, ruckusWLANAAAServerTable=ruckusWLANAAAServerTable, ruckusWLANWPAKey=ruckusWLANWPAKey, ruckusWLANStaStatsRxMulticastFrames=ruckusWLANStaStatsRxMulticastFrames, ruckusWLANWPARadiusNasId=ruckusWLANWPARadiusNasId, ruckusWLANBSSID=ruckusWLANBSSID, ruckusWLANStaStatsTxRate=ruckusWLANStaStatsTxRate, ruckusWLANStaRksTxRssi=ruckusWLANStaRksTxRssi, ruckusWLANEvents=ruckusWLANEvents, ruckusWLANSuppDataRatesTxIndex=ruckusWLANSuppDataRatesTxIndex, ruckusWLANStatsRxCtrlFrames=ruckusWLANStatsRxCtrlFrames, ruckusWLANStatsTxBroadcastFrames=ruckusWLANStatsTxBroadcastFrames, ruckusWLANServerIpAddress=ruckusWLANServerIpAddress, 
ruckusWLANStaStatsTxMulticastFrames=ruckusWLANStaStatsTxMulticastFrames, ruckusWLANStaMQNumDequeued=ruckusWLANStaMQNumDequeued, ruckusWLANStaMQQIndex=ruckusWLANStaMQQIndex, ruckusWLANSuppDataRatesTxValue=ruckusWLANSuppDataRatesTxValue, ruckusWLANStaMQAddr=ruckusWLANStaMQAddr, ruckusWLANStatsRxDropRate=ruckusWLANStatsRxDropRate, ruckusWLANSeverMode=ruckusWLANSeverMode, ruckusWLANWPATable=ruckusWLANWPATable, ruckusWLANStaStatsSSID=ruckusWLANStaStatsSSID, ruckusWLANStaStatsTxDecap=ruckusWLANStaStatsTxDecap, ruckusWLANSuppDataRatesRxEntry=ruckusWLANSuppDataRatesRxEntry, ruckusWLANStaStatsTxDataFrames=ruckusWLANStaStatsTxDataFrames, ruckusWLANStaRksRxCrcErrors=ruckusWLANStaRksRxCrcErrors, ruckusWLANStaRksTxDiscardExRetries=ruckusWLANStaRksTxDiscardExRetries, ruckusWLANStatsNumAssocFail=ruckusWLANStatsNumAssocFail, ruckusWLANStatsRxMulticastFrames=ruckusWLANStatsRxMulticastFrames, ruckusWLANStatsNumAuthSuccess=ruckusWLANStatsNumAuthSuccess, ruckusWLANStaStatsRxUnicastFrames=ruckusWLANStaStatsRxUnicastFrames, ruckusWLANStaRksEntry=ruckusWLANStaRksEntry, ruckusWLANStaStatsRxNoPrivacy=ruckusWLANStaStatsRxNoPrivacy, ruckusWLANSuppDataRatesTxEntry=ruckusWLANSuppDataRatesTxEntry)
| [
"[email protected]"
]
| |
5404d94c2cb141aaa4fa36f139bbfc2f161ce03f | a8731ed73a1fbae2d1e490fc8951aa17873aa7d4 | /iga/population/standard_analysis.py | 163123558095eccca04cfb007d8470d1031bdae1 | []
| no_license | juancq/character-evolver | 452bf84afd52766502fbe6ba6471519d1a7635e1 | 5dcae96916dbfef03cc8f6625e4a4c31fe25224f | refs/heads/master | 2021-01-23T18:08:21.058617 | 2017-05-22T02:08:58 | 2017-05-22T02:08:58 | 129,113 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,637 | py | import nsga_ii
from iga.gacommon import gaParams
import copy
class Population(nsga_ii.Population):
def __init__(self, paramClass, paramDict):
nsga_ii.Population.__init__(self, paramClass, paramDict)
ops = gaParams.getVar('operators')
len_ops = len(ops)
op_prob = 1. / len_ops
for i in xrange(len_ops):
ops[i] = op_prob
self.op_len = len(ops)
print 'ops ', ops
#---------------------------------------#
def userInput(self, user_selection = []):
'''
Takes a list of the indices of the individuals selected
by the user during evaluation, and we save the actual
users on a list, to be used during fitness evaluation.
It is the programmer's responsibility to decide on
the meaning of the user feedback.
'''
# if feedback provided, otherwise the user selection was made
# on the injected individuals
if user_selection:
user_selected = []
ops = gaParams.getVar('operators')
inc_prob = .05
dec_prob = .05 / len(ops)-1
for i in user_selection:
ind = self.subset[i]
user_selected.append(ind)
ind_ops = ind.operators
ind_ops.count(1)
if ind_ops:
for j in xrange(self.op_len):
if ind_ops[j]:
ops[j] = min(ops[j] + 0.05, 0.9)
else:
ops[j] = max(ops[j] - 0.05, 0.1)
print 'ops ', ops
self.user_selected = user_selected
#---------------------------------------#
def nextgen(self):
'''
Create next generation from current population.
'''
best = copy.deepcopy(self.user_selected[0])
newPop = [best]
random = self.params.random
for i in xrange(0, self.popsize-1):
p = self.pop[i]
c1, c2 = self.crossover(p, best)
self.params.mutate(c1)
self.params.mutate(c2)
newPop.extend([c1,c2])
# evaluate children
self.eval(newPop)
new_fronts = self.createFronts(newPop)
self.crowdedDistance(new_fronts)
self.combinepop(new_fronts)
# explicitly copy the user selected individual into
# the next generation and set its rank to the highest
randPos = random.randrange(0, self.popsize)
self.pop[randPos] = copy.deepcopy(self.user_selected[0])
self.pop[randPos].rank = 0
#---------------------------------------#
| [
"juan@dragonite.(none)"
]
| juan@dragonite.(none) |
4608f91e2fdbac43339f6c6d31842b17ae0dafae | 9607f45b501c62d0500536e14d134a1aca0a6982 | /datum/utils/binary_utils.py | 298f14ff5da4d3abe55ce11e5906ea9163bdccc3 | [
"Apache-2.0"
]
| permissive | shashank-subex/datum | b8fb552d4180ea3ee6345fa6bf4a2620231c7601 | 089b687fc569c8c6ce613349297997c67ce40c7a | refs/heads/master | 2023-05-28T01:00:04.701429 | 2021-06-10T11:13:38 | 2021-06-10T11:13:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | # Copyright 2020 The OpenAGI Datum Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple
import tensorflow as tf
def is_binary_image(string: tf.Tensor) -> Tuple[bool, str]:
"""Determine image compression type using a binary string tensor/object.
Args:
string: binary string, can be `tf.Tensor` or python format..
Returns:
a tuple containing a flag denoting whether input string is an image and the corresponding
extension (if its an image, else empty).
"""
if not isinstance(string, (bytes, tf.Tensor)):
raise ValueError(f'Input {string} is not a bytes string or `tf.Tensor`.')
if isinstance(string, tf.Tensor):
string = string.numpy()
if string.startswith(b'\xff\xd8\xff'):
return True, 'jpg'
elif string.startswith(b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'):
return True, 'png'
elif string.startswith(b'bm'):
return True, 'bmp'
else:
return False, ''
| [
"[email protected]"
]
| |
62cdd2136986368bbf1b0b11e0bd9c2b67467903 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ysgbRFTPujx8v37yF_9.py | 6636e68c4e204f96c2ae4dff5513f35eb4a94f15 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py |
class Triangle:
def __init__(self):
self.r = {1: [1]}
self.last_row = 1
self.last_num = 1
def advance(self):
new_row = self.last_row + 1
nr = [n for n in range(self.last_num+1, self.last_num + new_row + 1)]
self.last_num = nr[-1]
self.last_row += 1
self.r[new_row] = nr
return True
def advance_to_row(self, row_goal):
while self.last_row < row_goal:
self.advance()
return True
def advance_to_num(self, num_goal):
while self.last_num < num_goal:
self.advance()
return True
def search_by_row(self, row):
return self.r[row]
def search_by_num(self, num):
return self.r[[k for k in self.r.keys() if num in self.r[k]][0]]
t = Triangle()
t.advance_to_row(1000)
def row_sum(n):
return sum(t.search_by_row(n))
| [
"[email protected]"
]
| |
957e5920703f3c85f244e9d1b7a3969fb8b9b5b2 | b3586235dc1e1acbd49fab996f581269a808480b | /sistema/producao/migrations/0102_bobine_diam.py | b042cee2ccaad255cc483cecad77993e16a08807 | []
| no_license | gonfersilva/Sistema | 37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3 | 4c6d9ade22040972efbe892eae0130939d7b5c46 | refs/heads/master | 2021-10-23T23:21:51.262723 | 2021-10-13T19:45:49 | 2021-10-13T19:45:49 | 155,545,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | # Generated by Django 2.2.7 on 2020-06-16 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('producao', '0101_auto_20200616_1506'),
]
operations = [
migrations.AddField(
model_name='bobine',
name='diam',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Diametro'),
),
]
| [
"[email protected]"
]
| |
0d0efe5aebd2fc8deddd6b033513330081a63629 | 72cdc45a345fe47c525468ff82ef8ce845d9f800 | /Python/django_ajax/ajax_post/app/models.py | db2ea4bd5133f6de68fbf6b39f46e364a5fbe072 | []
| no_license | bopopescu/Coding-Dojo-assignments | 474242e14371e729b5948602ffc0a9328f1e43cb | 0598d7162b37d9472c6f1b82acc51d625ac871ca | refs/heads/master | 2022-11-23T18:55:36.393073 | 2018-07-20T07:43:56 | 2018-07-20T07:43:56 | 281,670,452 | 0 | 0 | null | 2020-07-22T12:24:30 | 2020-07-22T12:24:30 | null | UTF-8 | Python | false | false | 414 | py | from django.db import models
# Create your models here.
class NoteManager(models.Manager):
def create_note(self,post_data): self.create(note=post_data['note'])
def notes_rev(self): return self.all().order_by('-id')
class Note(models.Model):
note = models.TextField()
create_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
obj = NoteManager() | [
"[email protected]"
]
| |
5c6bb3335f57b10132f77850a43d0ba321816f36 | d7cfe98faeb0fe1b4ce02d54d8bbedaca82764f7 | /study/정올/Language_Coder/j_504_자가진단4.py | f31b55453217d08896730ef9b8cd88efaa777e8b | []
| no_license | Anseik/algorithm | 27cb5c8ec9692cf705a8cea1d60e079a7d78ef72 | 925404006b84178682206fbbb3b989dcf4c3dee9 | refs/heads/master | 2023-02-26T00:02:01.696624 | 2021-02-03T14:10:28 | 2021-02-03T14:10:28 | 301,753,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | line1 = '(@) (@)'
line2 = '(=^.^=)'
line3 = '(-m-m-)'
print(line1, line2, line3, sep='\n')
| [
"[email protected]"
]
| |
bcb9eb4ee3f78a2f529aeb211382936a1af9215f | 4262dcafe190db05852c7e1cfafc687031d23367 | /bin/easy_install | e469a8b93b622d8b2b9ebe5516a43986fad42ee1 | []
| no_license | ShunnoSaiful/JobPortal | b39930fcdb1bc30567f8a2c91d80786ab497afd5 | c8f3064b87c5d967b8f415fc5f080e167fc0c77d | refs/heads/main | 2023-01-07T02:44:33.831589 | 2020-11-11T11:47:46 | 2020-11-11T11:47:46 | 308,109,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | #!/home/slash/Desktop/SocialBook/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
218ddcad43477775f0a864743f17dad426889cb9 | 5456502f97627278cbd6e16d002d50f1de3da7bb | /components/browser_watcher/DEPS | d488e82a2acc8b591a8a58b50277b1ce2e1d7b9c | [
"BSD-3-Clause"
]
| permissive | TrellixVulnTeam/Chromium_7C66 | 72d108a413909eb3bd36c73a6c2f98de1573b6e5 | c8649ab2a0f5a747369ed50351209a42f59672ee | refs/heads/master | 2023-03-16T12:51:40.231959 | 2017-12-20T10:38:26 | 2017-12-20T10:38:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | include_rules = [
"+components/metrics",
"+third_party/crashpad/crashpad",
]
| [
"[email protected]"
]
| ||
037fc634a6c96cca0b1264ed15b267b1d4a37fa3 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/2243.py | 3f2ecb9e374f1d2bd6ced2658ba82fc35ee1861b | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | # 0123
# 0 ....
# 1 ....
# 2 ....
# 3 ....
class Game:
def __init__(self, size = 4):
self.size = 4
self.rows = { 'X': 0, 'O': 0 }
self.columns = {}
self.columns['X'] = [True] * size
self.columns['O'] = [True] * size
self.diagonals = {'X': [True, True], 'O': [True, True]}
self.empty = 0
self.result = None
def add_empty(self):
self.empty += 1
def add_to_row(self, symbol):
self.rows[symbol] += 1
def discard_diagonal(self, symbol, i, j):
if i == j:
self.diagonals[symbol][0] = False
if (self.size - i - 1) == j:
self.diagonals[symbol][1] = False
def discard_column(self, symbol, column):
self.columns[symbol][column] = False
def discard_row(self, symbol):
self.rows[symbol] = 0
def check_and_clean_rows(self):
for symbol in self.rows.keys():
if self.rows[symbol] == self.size:
self.result = symbol
self.rows[symbol] = 0
def game_over(self):
if not self.result:
for symbol in self.columns.keys():
if any(self.columns[symbol]):
self.result = symbol
break
for symbol in self.diagonals.keys():
if any(self.diagonals[symbol]):
self.result = symbol
break
if not self.result:
if self.empty:
self.result = 'N'
else:
self.result = 'D'
def print_result(self, game_count):
if self.result in ('X', 'O'):
print "Case #%i: %s won" % (game_count, self.result)
elif self.result == 'D':
print "Case #%i: Draw" % (game_count)
elif self.result == 'N':
print "Case #%i: Game has not completed" % (game_count)
def get_stats(input):
n = int(input.readline().strip())
counts = {}
other = { 'X': 'O', 'O': 'X' }
g = Game()
game_count = 1
i = 0
j = 0
while game_count <= n:
line = input.readline().strip()
i = 0
for cell in line:
if cell == 'T':
g.add_to_row('X')
g.add_to_row('O')
elif cell == '.':
g.discard_column('X', i)
g.discard_column('O', i)
g.discard_diagonal('X', i, j)
g.discard_diagonal('O', i, j)
g.add_empty()
elif cell in ('X', 'O'):
g.add_to_row(cell)
g.discard_row(other[cell])
g.discard_column(other[cell], i)
g.discard_diagonal(other[cell], i, j)
i += 1
g.check_and_clean_rows()
j += 1
if j >= g.size:
g.game_over()
if g.result:
g.print_result(game_count)
if i == 0: input.readline()
while input.readline().strip(): pass
g = Game()
game_count += 1
i = 0
j = 0
if __name__ == '__main__':
import sys
get_stats(sys.stdin)
| [
"[email protected]"
]
| |
1fa634ea7182b12e6ce9021f8050dcc3492d90b5 | fe3ecb9b1ddd8de17b8cc93209134f86cd9c4a6f | /4_Python_ML/chap06_Regression/lecture/step01_regression.py | 9dd4e34d1e6a6aed096bba40c35b13ede650b6d2 | []
| no_license | nsh92/Bigdata-and-Machine-Learning-Education-at-ITWILL | d1a7292ee4865a3d0c664dd6ecf3afc0d6325847 | 3cb5661001597499178a2c85f4ccf70dcf0855d6 | refs/heads/master | 2022-11-21T23:10:51.421708 | 2020-07-23T12:49:11 | 2020-07-23T12:49:11 | 275,540,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,471 | py | # -*- coding: utf-8 -*-
"""
회귀방정식에서 기울기와 절편 식
기울기 = Cov(x,y) / Sxx(x의 편차 제곱의 평균)
절편 = y_mu - (기울기 * x_mu)
"""
from scipy import stats # 회귀모델
import pandas as pd
galton = pd.read_csv('C:/ITWILL/4_Python-II/data/galton.csv')
galton.info()
'''
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 928 entries, 0 to 927
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 child 928 non-null float64
1 parent 928 non-null float64
'''
# 부모의 키X가 자녀의 키Y에 미치는 영향
x = galton['parent']
y = galton['child']
model = stats.linregress(x, y)
# slope=0.6462905819936423, intercept=23.941530180412748,
# rvalue=0.4587623682928238, pvalue=1.7325092920142867e-49, stderr=0.04113588223793335
# rvalue : 부모의 키가 모든 것을 결정하지 않는구나
# Y = x*a + b
y_pred = x*model.slope + model.intercept
y_pred
y_true = y
# 예측치 vs 관측치(정답)
y_pred.mean()
y_true.mean() # 먼저 평균 비교 : 매우 유사
# 기울기 계산식
xu = x.mean()
yu = y.mean()
Cov_xy = sum((x-xu) * (y-yu)) / len(x)
Sxx = np.mean((x-xu)**2)
slope = Cov_xy / Sxx # 0.6462905819936413
# 절편 계산식
incept = yu - (slope * xu) # 23.94153018041171
# 설명력 rvalue
galton.corr() # 0.458762 : 이게 걍 rvalue구만
y_pred = x * slope + incept
y_pred.mean() # 68.08846982758423
| [
"[email protected]"
]
| |
48421324896cdf92d22394ec3216d8ebde4bb512 | cbedb18df0aaac810aeea87a2273edb15c1cf899 | /Strings/49. Group Anagrams(3).py | 376bd9d635f4dd8b8830d8cec0f586ee7d9eeefc | []
| no_license | kanglicheng/CodeBreakersCode | 71b833bb9f4c96d520c26f0044365dc62137a940 | 31f7f730227a0e10951e7468bad1b995cf2eafcb | refs/heads/master | 2023-08-07T20:32:05.267695 | 2020-09-14T14:36:25 | 2020-09-14T14:36:25 | 265,978,034 | 0 | 0 | null | 2020-05-22T00:05:29 | 2020-05-22T00:05:29 | null | UTF-8 | Python | false | false | 960 | py | # not sure about the solution at start
# sort each str => O(W logW), w is the length of each words -> O(N * W log W) or counting sort O(N*W)
# turn each word into count char list: each list length is fixed:26 -> need O(W) to turn to list
# but lst can't hash -> turn to str then use hash map
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words of *strs* into lists of mutual anagrams.

        Each word is reduced to its letter-count signature (26 counts for
        'a'..'z'); anagrams - and only anagrams - share the same signature.
        Overall O(N * W) where W is the word length, avoiding the
        O(W log W) per-word sort. Assumes lowercase ASCII input.

        Returns the groups in order of first appearance.
        """
        groups = {}  # signature tuple -> list of words with that signature
        for word in strs:
            counts = [0] * 26
            for ch in word:
                counts[ord(ch) - ord('a')] += 1
            # A tuple is hashable, so the signature can key the dict directly;
            # no need to serialise the counts into a "|"-separated string.
            groups.setdefault(tuple(counts), []).append(word)
        return list(groups.values())
| [
"[email protected]"
]
| |
706004ded5504282ced7c31b8a3c251769a0d8c8 | b00330d48bfe09da78e50694a72793fe128c6a01 | /27_머신러닝_붓꽃 품종 학습.py | e10c7892374db24c3f940ab357d31d34b02b1683 | []
| no_license | swj8905/2021_Hongik_Summer | c177d64c6f0326f00d974e20e1334d8ac0ede3e4 | e3c28d1bfeb4d6a55b152bd922b61b77a17bb84c | refs/heads/master | 2023-06-16T00:47:15.852607 | 2021-07-08T12:01:31 | 2021-07-08T12:01:31 | 378,916,618 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from sklearn.svm import SVC
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# 데이터 불러오기
df = pd.read_csv("./iris.csv")
label = df["variety"]
data = df[["sepal.length", "sepal.width", "petal.length", "petal.width"]]
train_data, valid_data, train_label, valid_label = train_test_split(data, label)
# 학습시키기
model = SVC() # Support Vector Machine Classifier
model.fit(train_data, train_label)
# 예측시켜보기
result = model.predict(valid_data)
# 정확도 확인하기
score = accuracy_score(result, valid_label)
print(score) | [
"[email protected]"
]
| |
e9f257c5fcb395ace5094f1d7f8a4c7089a59c4d | fdfffa8cacb572a157ead4a9723f90b25ecfe50c | /modules/ducktests/tests/ignitetest/tests/self_test.py | 5ebabde56c14a2f8f89d2c9ef23b989133e61a1a | [
"Apache-2.0",
"LicenseRef-scancode-gutenberg-2020",
"CC0-1.0",
"BSD-3-Clause"
]
| permissive | apache/ignite | 0bc83435a8db46d9c4df000fe05b1c70165b37d4 | dbf1c7825d74809cd6859c85a8ac9ed9ac071e39 | refs/heads/master | 2023-08-31T21:31:04.618489 | 2023-08-31T19:43:09 | 2023-08-31T19:43:09 | 31,006,158 | 4,806 | 2,308 | Apache-2.0 | 2023-09-14T18:56:33 | 2015-02-19T08:00:05 | Java | UTF-8 | Python | false | false | 11,141 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains smoke tests that checks that ducktape works as expected
"""
import os
from ducktape.mark import matrix
from ignitetest.services.ignite import IgniteService
from ignitetest.services.ignite_app import IgniteApplicationService
from ignitetest.services.ignite_execution_exception import IgniteExecutionException
from ignitetest.services.utils.control_utility import ControlUtility
from ignitetest.services.utils.ignite_configuration import DataStorageConfiguration, TransactionConfiguration, \
BinaryConfiguration, \
TcpCommunicationSpi
from ignitetest.services.utils.ignite_configuration import IgniteConfiguration, IgniteClientConfiguration
from ignitetest.services.utils.ignite_configuration.cache import CacheConfiguration
from ignitetest.services.utils.ignite_configuration.data_storage import DataRegionConfiguration
from ignitetest.services.utils.ignite_configuration.discovery import from_ignite_cluster
from ignitetest.services.utils.ssl.client_connector_configuration import ClientConnectorConfiguration, \
ThinClientConfiguration
from ignitetest.services.utils.ssl.connector_configuration import ConnectorConfiguration
from ignitetest.utils import ignite_versions, cluster
from ignitetest.utils.bean import Bean
from ignitetest.utils.ignite_test import IgniteTest
from ignitetest.utils.version import DEV_BRANCH, IgniteVersion
class SelfTest(IgniteTest):
    """
    Smoke tests validating the ducktape harness itself (service start/stop,
    log rotation, config rendering) rather than Ignite functionality.
    """
    @cluster(num_nodes=1)
    @ignite_versions(str(DEV_BRANCH))
    def test_assertion_convertion(self, ignite_version):
        """
        Test to make sure Java assertions are converted to python exceptions
        """
        server_configuration = IgniteConfiguration(version=IgniteVersion(ignite_version))
        app = IgniteApplicationService(
            self.test_context,
            server_configuration,
            java_class_name="org.apache.ignite.internal.ducktest.tests.smoke_test.AssertionApplication")
        try:
            app.start()
        except IgniteExecutionException as ex:
            assert str(ex) == "Java application execution failed. java.lang.AssertionError"
        else:
            # Reaching this branch means the Java AssertionError was not
            # surfaced as an IgniteExecutionException - fail the test.
            app.stop()
            assert False
    @cluster(num_nodes=4)
    @ignite_versions(str(DEV_BRANCH))
    def test_simple_services_start_stop(self, ignite_version):
        """
        Tests plain services start and stop (termination vs self-termination).
        """
        ignites = IgniteService(self.test_context, IgniteConfiguration(version=IgniteVersion(ignite_version)),
                                num_nodes=1)
        ignites.start()
        client = IgniteService(self.test_context, IgniteClientConfiguration(version=IgniteVersion(ignite_version)),
                               num_nodes=1)
        client.start()
        # node1 is killable from the outside; node2 terminates by itself
        # (see the respective Java application class names below).
        node1 = IgniteApplicationService(
            self.test_context,
            IgniteClientConfiguration(version=IgniteVersion(ignite_version),
                                      discovery_spi=from_ignite_cluster(ignites)),
            java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestKillableApplication",
            startup_timeout_sec=180)
        node2 = IgniteApplicationService(
            self.test_context,
            IgniteClientConfiguration(version=IgniteVersion(ignite_version),
                                      discovery_spi=from_ignite_cluster(ignites)),
            java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestSelfKillableApplication",
            startup_timeout_sec=180)
        node1.start()
        # run() blocks until the self-killable application exits on its own.
        node2.run()
        node1.stop()
        client.stop()
        ignites.stop()
    @cluster(num_nodes=1)
    @ignite_versions(str(DEV_BRANCH))
    def test_logs_rotation(self, ignite_version):
        """
        Test logs rotation after ignite service restart.
        """
        def get_log_lines_count(service, filename):
            # Number of lines currently in the given log file on the first node.
            node = service.nodes[0]
            log_file = os.path.join(service.log_dir, filename)
            log_cnt = list(node.account.ssh_capture(f'cat {log_file} | wc -l', callback=int))[0]
            return log_cnt
        def get_logs_count(service):
            # Number of ignite.log* files (current log plus rotated copies).
            node = service.nodes[0]
            return list(node.account.ssh_capture(f'ls {service.log_dir}/ignite.log* | wc -l', callback=int))[0]
        ignites = IgniteService(self.test_context, IgniteConfiguration(version=IgniteVersion(ignite_version)),
                                num_nodes=1)
        ignites.start()
        num_restarts = 6
        for i in range(num_restarts - 1):
            ignites.stop()
            old_cnt = get_log_lines_count(ignites, "ignite.log")
            assert old_cnt > 0
            # clean=False keeps the work/log directories so rotation can be observed.
            ignites.start(clean=False)
            new_cnt = get_log_lines_count(ignites, "ignite.log")
            assert new_cnt > 0
            # check that there is no new entry in rotated file
            assert old_cnt == get_log_lines_count(ignites, f"ignite.log.{i + 1}")
        assert get_logs_count(ignites) == num_restarts
    @cluster(num_nodes=1)
    @ignite_versions(str(DEV_BRANCH))
    @matrix(is_ignite_service=[True, False])
    def test_config_add_to_result(self, ignite_version, is_ignite_service):
        """
        Test that the config file is in config directory
        and Service.logs contains the config directory to add to the result.
        """
        ignite_cfg = IgniteConfiguration(version=IgniteVersion(ignite_version))
        if is_ignite_service:
            ignite = IgniteService(self.test_context, ignite_cfg, num_nodes=1)
        else:
            ignite = IgniteApplicationService(
                self.test_context, ignite_cfg,
                java_class_name="org.apache.ignite.internal.ducktest.tests.self_test.TestKillableApplication")
        ignite.start()
        assert ignite.logs.get('config').get('path') == ignite.config_dir
        assert ignite.config_file.startswith(ignite.config_dir)
        # Both the node config and the log config must be present on disk.
        ignite.nodes[0].account.ssh(f'ls {ignite.config_dir} | grep {os.path.basename(ignite.config_file)}')
        ignite.nodes[0].account.ssh(f'ls {ignite.config_dir} | grep {os.path.basename(ignite.log_config_file)}')
        ignite.stop()
    @cluster(num_nodes=1)
    @ignite_versions(str(DEV_BRANCH))
    def test_server_config_options(self, ignite_version):
        """
        Test to make sure non-default non-trivial ignite node configuration XML file is generated correctly.
        """
        # -DCELL=1 tags the node so the affinityBackupFilter attribute resolves.
        ignite = IgniteService(self.test_context, get_server_config(ignite_version), 1, jvm_opts="-DCELL=1")
        ignite.start()
        control_utility = ControlUtility(ignite)
        control_utility.activate()
        ignite.stop()
def get_server_config(ignite_version):
    """Build a deliberately non-trivial IgniteConfiguration that exercises
    rendering of many configuration options into the node XML."""
    # Rendezvous affinity with a cell-colocated backup filter; the "CELL"
    # node attribute is supplied via -DCELL by the caller.
    affinity = Bean("org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction",
                    partitions=16384,
                    affinityBackupFilter=Bean(
                        "org.apache.ignite.cache.affinity.rendezvous.ClusterNodeAttributeColocatedBackupFilter",
                        constructor_args=["CELL"]))
    # Two cache templates, the second with a custom affinity key mapper.
    cache_templates = [
        CacheConfiguration(name="PARTITIONED*", cache_mode="PARTITIONED", atomicity_mode="TRANSACTIONAL",
                           statistics_enabled=True, affinity=affinity),
        CacheConfiguration(name="AffinityTemplate*", cache_mode="PARTITIONED",
                           atomicity_mode="TRANSACTIONAL", statistics_enabled=True, affinity=affinity,
                           affinity_mapper=Bean(
                               "org.apache.ignite.internal.ducktest.tests.self_test.TestAffinityMapper")),
    ]
    return IgniteConfiguration(version=IgniteVersion(ignite_version),
                               data_storage=DataStorageConfiguration(
                                   checkpoint_frequency=10000,
                                   wal_history_size=2147483647,
                                   wal_segment_size=1024 * 1024 * 1024,
                                   wal_mode="LOG_ONLY",
                                   metrics_enabled=True,
                                   metrics_rate_time_interval=60000,
                                   wal_buffer_size=5242880,
                                   wal_compaction_enabled=True,
                                   default=DataRegionConfiguration(
                                       persistence_enabled=True,
                                       max_size=1024 * 1024 * 1024,
                                       metrics_enabled=True,
                                       metrics_rate_time_interval=1000
                                   )),
                               client_connector_configuration=ClientConnectorConfiguration(
                                   thread_pool_size=10,
                                   thin_client_configuration=ThinClientConfiguration(
                                       max_active_compute_tasks_per_connection=100)),
                               transaction_configuration=TransactionConfiguration(
                                   default_tx_timeout=300000,
                                   default_tx_isolation="READ_COMMITTED",
                                   tx_timeout_on_partition_map_exchange=120000),
                               sql_schemas=["schema1", "schema2"],
                               caches=cache_templates,
                               metrics_log_frequency=30000,
                               failure_detection_timeout=120000,
                               rebalance_thread_pool_size=8,
                               peer_class_loading_enabled=True,
                               auto_activation_enabled=False,
                               binary_configuration=BinaryConfiguration(compact_footer=True),
                               communication_spi=TcpCommunicationSpi(
                                   idle_connection_timeout=600000,
                                   socket_write_timeout=30000,
                                   selectors_count=18,
                                   connections_per_node=4,
                                   use_paired_connections=True,
                                   message_queue_limit=0),
                               connector_configuration=ConnectorConfiguration(idle_timeout=180000)
                               )
| [
"[email protected]"
]
| |
3beaca888a36dc002b9fa618283aacf86d72d4a1 | e7b20ed87e8402bb8f55f0bf2b91824c264e07b4 | /pyhypnolsd/hypnolsd.py | 76420c7b4a25359320b46780ee95a1c1dcd98b44 | [
"Unlicense"
]
| permissive | rclough/pyHypnoLSD | 1ebe10929e6067e219851239f6e5a27fafcdb4b4 | 6d8c0539633d47c7e59368fcc849ca9b79ac6db4 | refs/heads/master | 2016-09-05T16:51:15.803912 | 2014-02-14T21:27:58 | 2014-02-14T21:27:58 | 16,823,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,016 | py | """
Module that contains various functions, constants, and tools
for interacting with a hypnoLSD object
"""
import sys, serial
###############################################################################
# Constants
###############################################################################
NATIVE_BAUD = 9600        # baud rate the module boots at
MAX_BAUD = 12000000       # fastest supported rate (divisor 0)
COMMAND_MODE = 0
DRAW_MODE = 1
READLINE_TIMEOUT = 0.5    # seconds to wait for one response line
# Bug fix: the sync marker must be the single byte 0xFE, not the four ASCII
# characters "0xFE" that b'0xFE' denotes. Two sync bytes are written to
# re-enter command mode (see HypnoLSD.command_mode).
SYNC_BYTE = b'\xFE'
###############################################################################
# Utility methods
###############################################################################
def baud_from_divisor(divisor):
    """
    Returns the (integer) baud rate, given a divisor.

    Parameters
    ----------
    divisor : int
        baud rate divisor for the HypnoLSD. See HypnoLSD
        docs for more details
    """
    # Floor division keeps the historical Python 2 integer semantics;
    # a plain '/' would return a float on Python 3.
    return MAX_BAUD // (int(divisor) + 1)

def divisor_from_baud(baud):
    """
    Returns the divisor, given a (positive integer) baud rate.

    Parameters
    ----------
    baud : int
        baud rate you'd like to operate at, and need the divisor
        for to set speed on the HypnoLSD
    """
    return MAX_BAUD // int(baud) - 1
###############################################################################
# HypnoLSD class
###############################################################################
class HypnoLSD:
    """
    Class meant to symbolize a single HypnoLSD module. Tracks the state of
    the module (command mode vs draw mode) so that it may be used by
    different convenience interfaces, or used directly.

    By default, demo is turned off because it is useless when programming.
    """
    def __init__(self, port, baudrate=NATIVE_BAUD):
        """
        Open the serial link at the module's native speed, disable the demo,
        then optionally switch to the requested baud rate.

        Parameters
        ----------
        port : str
            Serial port the HypnoLSD is attached to.
        baudrate : int (optional)
            Desired operating baud rate (defaults to the native 9600).
        """
        # The module boots in command mode.
        self.mode = COMMAND_MODE
        # Initialize serial connection at the native speed.
        self.serial = serial.Serial(port=port, baudrate=NATIVE_BAUD, timeout=READLINE_TIMEOUT)
        self.serial.close()
        self.serial.open()
        # Turn off demo mode
        self.demo_off()
        # Update baud rate if necessary
        if baudrate != NATIVE_BAUD:
            self.change_baudrate(baudrate)
    def change_baudrate(self, baudrate):
        """ Change the baud rate used to speak to HypnoLSD """
        if baudrate == self.serial.baudrate:
            return ["Baudrate already set to " + str(baudrate)]
        divisor = divisor_from_baud(baudrate)
        return self.change_divisor(divisor, baudrate)
    def change_divisor(self, divisor, baudrate=False):
        """ Change the baud rate divisor on the HypnoLSD """
        if not baudrate:
            baudrate = baud_from_divisor(divisor)
        if baudrate == self.serial.baudrate:
            return ["Baudrate already set to " + str(baudrate)]
        # Send command (and read its response) at the old speed, then switch
        # the local port to the new speed.
        response = self.send_command("set speed " + str(divisor))
        self.serial.flush() # Flush command so we can read output with new baud rate
        self.serial.baudrate = baudrate
        return response
    def send_command(self, command, override=False, print_it=False):
        """
        Send a command to HypnoLSD, only available in Command Mode

        Parameters
        ----------
        command : string
            Command to send to HypnoLSD, with no return chars
        override : boolean (optional)
            Set true if you want to switch modes (if currently in draw mode)
            so you can send the command.
        print_it : boolean (optional)
            Set true if you want the response printed to stdout
        """
        # Check modes
        if self.mode == DRAW_MODE and not override:
            print("Currently in Draw Mode, cannot execute commands")
            return
        elif self.mode == DRAW_MODE and override:
            # Bug fix: switch back to COMMAND mode before sending. The
            # original called draw_mode(), which is a no-op when already in
            # draw mode, so the command would have been sent as pixel data.
            self.command_mode()
        # Execute command
        self.serial.write(command+"\r\n")
        return self.get_response(print_it)
    def get_response(self, print_it=False, break_on_OK=True):
        """
        Get one HypnoLSD response, a list of stripped lines.

        Parameters
        ----------
        print_it : boolean (optional)
            Print the output to stdout
        break_on_OK : boolean (optional)
            If set true, it will only print up to the last "OK".
            This can speed up program flow if you are retrieving responses
            for each command you send. When set to false, it will spit out
            everything available for it to spit out.
        """
        response = []
        has_serial = True
        while has_serial:
            # Keep reading lines from serial until you timeout
            from_serial = self.serial.readline()
            if not from_serial:
                has_serial = False
            else:
                response.append(from_serial.strip())
                if print_it:
                    sys.stdout.write(from_serial)
                if break_on_OK and from_serial == "OK\r\n":
                    break
        if print_it:
            sys.stdout.flush()
        return response
    def command_mode(self):
        """ Put HypnoLSD in Command Mode (two sync bytes leave draw mode) """
        self.serial.write(SYNC_BYTE+SYNC_BYTE)
        self.mode = COMMAND_MODE
    def draw_mode(self):
        """ Put HypnoLSD in Draw Mode """
        if self.mode == DRAW_MODE:
            return
        self.send_command("draw")
        self.mode = DRAW_MODE
    def demo_off(self):
        """ Turn off demo mode. Useless when coding. """
        self.send_command("demodelay 0")
        self.get_response(break_on_OK=False) # swallow response
    def close(self):
        """ Close connection to HypnoLSD, restoring the native baud rate """
        self.change_baudrate(NATIVE_BAUD)
        self.serial.close()
    def flush(self):
        """ Flush the serial output buffer of the device connection """
        self.serial.flush()
| [
"root@beaglebone.(none)"
]
| root@beaglebone.(none) |
40b38c9d62305e2dc9eb496685175c25a2a73a41 | 21dd7d56c370ea9a02b66654525fd96a398a9e49 | /apps/competition/urls.py | 8fda71942b35148c142e463894bd46fdd17e9acf | []
| no_license | hqpr/fame | fdad5d03bf9ee7ca31ae8a4701ff05bafd49540f | 8b77e3a822ae70ee6d79a8003e1d9f9bc5ba8355 | refs/heads/master | 2023-01-14T16:58:46.533090 | 2015-08-31T15:37:09 | 2015-08-31T15:37:09 | 35,205,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,243 | py | from django.conf.urls import url
from apps.competition.views import competitions, single_competition, single_competition_terms, pick_media_file, SingleCompetitionEnter, \
competition_add_audio, competition_add_video, entry_review
urlpatterns = [
    # Competition listing and per-competition pages; competitions are
    # addressed by slug, individual entries by a nested entry slug.
    url(r'^$', competitions, name='all_competitions'),
    url(r'^(?P<slug>[\w\-]+)/$', single_competition, {"display": "overview"}, name='single_competition'),
    url(r'^(?P<slug>[\w\-]+)/chart/$', single_competition, {"display": "chart"}, name='single_competition_chart'),
    url(r'^(?P<slug>[\w\-]+)/entry/(?P<entry_slug>[\w\-]+)$', single_competition, {"display": "chart"}, name='single_competition_entry'),
    url(r'^(?P<slug>[\w\-]+)/terms/$', single_competition_terms, {"display": "terms"}, name='single_competition_terms'),
    # Entering a competition and attaching media to an entry.
    url(r'^(?P<slug>[\w\-]+)/enter/$', SingleCompetitionEnter.as_view(), name='single_competition_enter'),
    url(r'^(?P<slug>[\w\-]+)/pick/$', pick_media_file, name='pick_media_file'),
    url(r'^add/(?P<object_id>\d+)/$', competition_add_audio, name='competition_add_audio'),
    url(r'^add/video/(?P<object_id>\d+)/$', competition_add_video, name='competition_add_video'),
    url(r'^(?P<slug>[\w\-]+)/review/$', entry_review, name='entry_review'),
]
| [
"[email protected]"
]
| |
5181e499b3220d7aa79460e74c4d532b634fb8bc | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /QgAwpaFWD2jtxZ2wG_8.py | c19c34476af79ec15c2595c52b9b91290b20f82c | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | """
Given an integer `n`. Your task is to find how many digits this integer
contains without using `str` or `len` methods!
### Examples
sum_digits(100) ➞ 3
sum_digits(1000) ➞ 4
sum_digits(1) ➞ 1
### Notes
N/A
"""
def sum_digits(n):
    """Count the digits of integer *n* without using str() or len().

    Bug fix: the original looped ``while n != 0: n //= 10`` which never
    terminates for negative n (floor division converges to -1, not 0).
    The sign carries no digits, so work on the absolute value.
    """
    n = abs(n)
    if n == 0:
        return 1  # zero is written with a single digit
    count = 0
    while n != 0:
        n //= 10
        count += 1
    return count
| [
"[email protected]"
]
| |
f5ef2c723c317a80eb8cad3ed47da968f1411d5f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_inferiors.py | acab87363dd1d78f2b03e3b0fd1313fda1eeab3e | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py |
from xai.brain.wordbase.adjectives._inferior import _INFERIOR
# class header
class _INFERIORS(_INFERIOR, ):
  # Plural adjective entry; inherits all behaviour from the singular _INFERIOR.
  def __init__(self,):
    _INFERIOR.__init__(self)
    self.name = "INFERIORS"      # canonical (upper-case) head word
    self.specie = 'adjectives'   # word class this entry belongs to
    self.basic = "inferior"      # base (singular) form
    self.jsondata = {}           # extra lexical data, populated elsewhere
| [
"[email protected]"
]
| |
d812823dcc5f741c075b136a22be8928175d68e6 | 5388e00d992050d515f72666e2f76c51b2ca56ee | /calc_angle_between_two_locs.py | 424c037ae9233d5870ce13b03f901b6b13ad4281 | [
"Apache-2.0"
]
| permissive | lonecrane/PyGuymer3 | df9ffc62d4f7fddf04ae3ea18f5487dec48472c7 | c7eb017dac18abb5eafe74e23a93bf7e68e48916 | refs/heads/master | 2020-09-11T23:44:14.036051 | 2019-11-20T07:01:22 | 2019-11-20T07:01:22 | 222,229,475 | 0 | 0 | Apache-2.0 | 2019-11-17T10:13:51 | 2019-11-17T10:13:51 | null | UTF-8 | Python | false | false | 1,049 | py | def calc_angle_between_two_locs(lon1_deg, lat1_deg, lon2_deg, lat2_deg):
"""
This function reads in two coordinates (in degrees) on the surface of a
sphere and calculates the angle (in degrees) between them.
"""
# Import modules ...
import math
# Convert to radians ...
lon1_rad = math.radians(lon1_deg) # [rad]
lat1_rad = math.radians(lat1_deg) # [rad]
lon2_rad = math.radians(lon2_deg) # [rad]
lat2_rad = math.radians(lat2_deg) # [rad]
# Calculate angle in radians ...
distance_rad = 2.0 * math.asin(
math.hypot(
math.sin((lat1_rad - lat2_rad) / 2.0),
math.cos(lat1_rad) * math.cos(lat2_rad) * math.sin((lon1_rad - lon2_rad) / 2.0)
)
) # [rad]
# Return angle ...
return math.degrees(distance_rad)
| [
"[email protected]"
]
| |
6f659c63d644601207b26f69956c91a1acf67b0c | 8734446b29c3424e25ef82c3ba65db61f1736a12 | /prngmgr/migrations/0007_auto_20161008_1643.py | 1ea7a65c037d1ef783856208172d4c2c0bbe84f8 | [
"Apache-2.0"
]
| permissive | decolnz/prngmgr | 25ef02a3c76c00864009c6470d532805a5b52af3 | 6ea19e0095c123d337e523f3c832f5688254c7f1 | refs/heads/master | 2021-01-19T00:51:26.798203 | 2016-10-11T09:16:04 | 2016-10-11T09:16:04 | 87,213,736 | 1 | 0 | null | 2017-04-04T17:05:25 | 2017-04-04T17:05:25 | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('prngmgr', '0006_auto_20161008_1336'),
]
operations = [
migrations.AlterField(
model_name='peeringsession',
name='previous_state',
field=models.CharField(default=b'None', max_length=12),
),
]
| [
"[email protected]"
]
| |
5bfde6f4620ebb1cad3fa1035a1c961d7bfff609 | c25be81a90291c9dd7eed076509729b1c730531e | /tests.py | 93f031029fc25e60902294dba580f390e688262c | [
"Apache-2.0"
]
| permissive | icYFTL/RTULAB_Service | 8707b7f9082494e7513c6afc0e4ede89f18cc320 | a16d0fc2ac9ac103f0a14e90824caded7156bf11 | refs/heads/main | 2023-03-18T15:26:56.586261 | 2021-03-18T09:23:30 | 2021-03-18T09:23:30 | 343,216,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,017 | py | import requests
import unittest
class Test(unittest.TestCase):
    """Black-box HTTP smoke tests for the deployed shop/purchases/factory services."""
    def setUp(self) -> None:
        # Base URLs of the services under test.
        self.__host = 'https://rulab.icyftl.ru/'
        self.__purchases_host = self.__host + 'purchases'
        self.__shop_host = self.__host + 'shop'
        self.__factory_host = self.__host + 'factory'
        # Shared secret sent in the XXX-CODE header for privileged shop endpoints.
        self.__shop_password = 'lol'
    def test_upstate(self):
        # Every service root must answer 200, i.e. all three services are up.
        try:
            shop_state = requests.get(self.__shop_host).status_code
            purchases_state = requests.get(self.__purchases_host).status_code
            factory_state = requests.get(self.__factory_host).status_code
            self.assertEqual(shop_state, 200)
            self.assertEqual(purchases_state, 200)
            self.assertEqual(factory_state, 200)
        except Exception as e:
            raise AssertionError('Some services are down or something went wrong\n' + str(e))
    def test_create_shop(self):
        # Creating a shop requires the XXX-CODE secret and must return 201.
        try:
            shop = requests.post(self.__shop_host + '/create', json={
                "name": "UTest",
                "address": "UTest",
                "number": "79167031312"
            }, headers={'XXX-CODE': self.__shop_password})
            self.assertEqual(shop.status_code, 201)
        except:
            raise AssertionError('Shop service is down or something went wrong')
    def test_add_items(self):
        # Restocks shop #1; relies on that shop already existing on the server.
        try:
            shop = requests.put(self.__shop_host + '/1/add_items', json={
                "items": [
                    {
                        "name": "TestCake",
                        "category": "TestCakes",
                        "count": 100
                    }
                ]
            }, headers={'XXX-CODE': self.__shop_password})
            self.assertEqual(shop.status_code, 201)
        except Exception as e:
            raise AssertionError('Shop service is down or something went wrong\n' + str(e))
    def test_new_purchase(self):
        try:
            slots = requests.get(self.__shop_host + '/1/get_slots').json()
            # The item added in test_add_items must be listed (the service
            # apparently lowercases item names - "TestCake" -> "testcake").
            self.assertTrue(any([x['name'] == 'testcake' for x in slots['response']['result']]))
            slots = slots['response']['result']
            slot = None
            for x in slots:
                if x['name'] == 'testcake':
                    slot = x
                    break
            # Buying within the available stock succeeds ...
            r = requests.post(self.__shop_host + '/1/new_purchase', json={
                "slot_id": slot['id'],
                "count": 1,
                "user_id": 1,
                "method": "card"
            })
            self.assertEqual(r.status_code, 200)
            # ... while over-buying must fail with 424 (failed dependency).
            r = requests.post(self.__shop_host + '/1/new_purchase', json={
                "slot_id": slot['id'],
                "count": 100,
                "user_id": 1,
                "method": "card"
            })
            self.assertEqual(r.status_code, 424)
        except Exception as e:
            raise AssertionError('Shop service is down or something went wrong\n' + str(e))
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
]
| |
9fda03d01e11adae9c1c4533fc5502a2d6fe71d9 | ba9e1fc7797ebc55a61a40ee66c51b467f353ff1 | /web_scraping_with_python_demos/2-cleangrams.py | 1c489e8f587b8315ebadba2f6d45592f570f395e | []
| no_license | sanpianye/the-little-python | 77c938164d43cbb120063a6d17d0705cc9e92e93 | c04898bf0812afb53b71567699ee523d1bc56a29 | refs/heads/master | 2021-06-14T01:55:31.452777 | 2017-03-09T13:31:59 | 2017-03-09T13:31:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
''''''
__author__ = 'Engine'
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import string
from collections import OrderedDict
def cleanInput(input):
    """Normalise raw wiki text into a list of cleaned words.

    Collapses whitespace, removes Wikipedia citation markers such as ``[1]``,
    strips non-ASCII characters and surrounding punctuation, and discards
    single-character tokens other than 'a'/'i'.
    """
    input = re.sub("\n+", " ", input)         # fold newlines into spaces
    input = re.sub(r"\[[0-9]*\]", "", input)  # drop citation markers like [1]
    input = re.sub(" +", " ", input)          # collapse runs of spaces
    # Round-trip through ASCII to discard non-ASCII (unicode) characters.
    input = bytes(input, "UTF-8")
    input = input.decode("ascii", "ignore")
    # Renamed from the original local `cleanInput`, which shadowed this
    # function's own name.
    cleaned = []
    for item in input.split(' '):
        # Strip punctuation from both ends of the token (deliberately crude).
        item = item.strip(string.punctuation)
        # Keep multi-character words, plus the one-letter words 'a' and 'i'.
        if len(item) > 1 or (item.lower() == 'a' or item.lower() == 'i'):
            cleaned.append(item)
    return cleaned

def getNgrams(input, n):
    """Return a dict mapping each n-gram (space-joined words) to its frequency."""
    words = cleanInput(input)  # data cleaning
    output = dict()
    for i in range(len(words) - n + 1):
        gram = ' '.join(words[i:i + n])  # n adjacent words form one n-gram
        output[gram] = output.get(gram, 0) + 1
    return output
request = "http://en.wikipedia.org/wiki/Python_(programming_language)"
response = urlopen(request)
bsObj = BeautifulSoup(response)
# Grab the main body text of the wiki article
input = bsObj.find("div", {"id": "mw-content-text"}).get_text() # str
ngrams = getNgrams(input, 2) # build the 2-grams
# Sort the n-grams by descending frequency
ngrams = OrderedDict(sorted(ngrams.items(), key=lambda t: t[1], reverse=True))
print(ngrams)
| [
"[email protected]"
]
| |
070b67571830fbc22ce4702fded26ee0e03f646a | 6d7c488d14cf2bc0322c955a53ec34cfd67e8c3b | /.history/plane_ticket/spiders/ticket_spider_20200709002008.py | 9258ec92ad4166dbf5107784494df0a8a3454021 | []
| no_license | byebyeyuchi/getPlaneTicket-web-crawl- | e247b7d015e35d1036e023c748764abb0ad66fe1 | 91a666659a537c053d8cd19c8214a54eab460800 | refs/heads/main | 2023-02-25T21:43:53.046865 | 2021-01-30T02:14:43 | 2021-01-30T02:14:43 | 334,308,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import scrapy
class TicketSpider(scrapy.Spider):
    """Spider for the consulate news listing page (work in progress)."""
    name = "tickets"
    def start_requests(self):
        urls = [
            "http://montreal.chineseconsulate.org/chn/zlgxw/"
        ]
        # Bug fix: the original built `urls` but never issued any request,
        # so parse() was never invoked.
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        # The original called .css() with no selector, which raises TypeError.
        # NOTE(review): the exact fields to scrape were unfinished upstream -
        # confirm the intended selectors against the target page.
        for entry in response.css('.Text_Center li'):
            yield {"text": entry.css('::text').get()}
| [
"[email protected]"
]
| |
9c0565a7a799b4d983060bea22a4462692fd3731 | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /helpers/mixins/store_data_with_default_value_by_key.py | e418e8fb8cbcfde82dbe0fe1b92fbd4096f04bed | []
| no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,174 | py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.core.exceptions import ValidationError
from core.models import MixeurBaseModel
class StoreDataWithDefaultValueByKey(MixeurBaseModel):
    """
    Mixin for storing value with a unique default value for all
    and a unique default value for each item associated by key
    """
    class Meta:
        abstract = True
        ordering = ("created_at",)
    # Marks this row as a default: either the single generic default
    # (key=None) or the single per-key default.
    is_default_value = models.BooleanField(
        _("est une valeur par défaut"), default=False
    )
    @classmethod
    def default_value(cls, key=None):
        """
        Return default value which is:
        - default value for the key if exists
        - else generic default value if exists
        - else None
        """
        # NOTE(review): these lookups filter only on `key`, not on
        # is_default_value=True, so with Meta.ordering ("created_at") the
        # oldest row for the key is returned even if it is not flagged as a
        # default - confirm this is the intended behaviour.
        if key:
            if cls.objects.filter(key=key).exists():
                return cls.objects.filter(key=key).first()
        if cls.objects.filter(key=None).exists():
            return cls.objects.filter(key=None).first()
        return None
    def clean(self):
        """
        verify that:
        - an unique value exists without a key
        - an unique value exists with a key
        """
        if self.is_default_value:
            # Only one generic default (key=None) may exist.
            if (
                self.key is None
                and self.__class__.objects.exclude(pk=self.pk)
                .filter(key=None, is_default_value=True)
                .exists()
            ):
                raise ValidationError(
                    "Une seule valeur par défaut générique est possible"
                )
            # Only one default per key may exist.
            if (
                self.key is not None
                and self.__class__.objects.exclude(pk=self.pk)
                .filter(key=self.key, is_default_value=True)
                .exists()
            ):
                raise ValidationError(
                    "Une seule valeur par défaut par clef est possible"
                )
        else:
            # Non-default values must always be attached to a key.
            if self.key is None:
                raise ValidationError(
                    "Une valeur non générique doit être associée à une clef"
                )
        return super().clean()
| [
"[email protected]"
]
| |
42ce348fe55b62181045120eb229a2509121b694 | 86f2eb787624e293be660fa97f6bbb35980f2e29 | /translate-app-tkinter/app/utils/thread.py | 11fc4e975c3224a345df30657371cadb3b9e9957 | [
"MIT"
]
| permissive | jadsonlucio/holidays-projects | 73e762b7d5669b8850f3fcecf59aa152430c2d19 | 136992f499d37640decf67072280ae87b83fe830 | refs/heads/master | 2023-05-09T03:55:24.433421 | 2020-10-27T13:13:01 | 2020-10-27T13:13:01 | 256,420,473 | 2 | 0 | null | 2021-06-02T01:30:37 | 2020-04-17T06:26:05 | Python | UTF-8 | Python | false | false | 273 | py | from threading import Thread
from functools import wraps
def run_async(func):
    """Decorator that launches *func* on a background thread.

    The wrapped callable returns the started ``Thread`` object, so callers
    may ``join()`` it when they need the side effects to have completed.
    """
    @wraps(func)
    def launcher(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return launcher
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.