blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 3-616) | content_id (stringlengths 40-40) | detected_licenses (sequencelengths 0-112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-115) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3-10.2M) | authors (sequencelengths 1-1) | author_id (stringlengths 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8ebea4a768a74346f669199a18f9034a5a72fcd3 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part006912.py | b626d2d86f3f2c0c06e456ec94bdbe994424a7eb | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | 
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher101446(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.1.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.1.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher101446._instance is None:
CommutativeMatcher101446._instance = CommutativeMatcher101446()
return CommutativeMatcher101446._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 101445
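        # 'return' followed by an unreachable 'yield' is a matchpy code-generation
        # idiom: the dead 'yield' keeps this function a generator that yields nothing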
return
yield
from collections import deque
| [
"[email protected]"
] | |
b66d2ae78a2abe5aa52b983e5773f4b95f0dfe9f | 11334e46d3575968de5062c7b0e8578af228265b | /Examples/OpenCV/PPOCV/C6/arithmetic.py | f062fdf7c94256bec4b4af759f4f493579109cb4 | [] | no_license | slowrunner/Carl | 99262f16eaf6d53423778448dee5e5186c2aaa1e | 1a3cfb16701b9a3798cd950e653506774c2df25e | refs/heads/master | 2023-06-08T05:55:55.338828 | 2023-06-04T02:39:18 | 2023-06-04T02:39:18 | 139,602,984 | 19 | 2 | null | 2023-06-04T02:39:20 | 2018-08-22T18:59:34 | Roff | UTF-8 | Python | false | false | 2,217 | py | 
#!/usr/bin/env python3
#
# arithmetic.py
"""
Documentation:
PPOCV C6
"""
# from __future__ import print_function # use python 3 syntax but make it compatible with python 2
# from __future__ import division # ''
import sys
try:
sys.path.append('/home/pi/Carl/plib')
import speak
import tiltpan
import status
import battery
import myDistSensor
import runLog
Carl = True
except:
Carl = False
import easygopigo3 # import the GoPiGo3 class
import numpy as np
import datetime as dt
import argparse
from time import sleep
import imutils
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to image file")
# ap.add_argument("-n", "--num", type=int, default=5, help="number")
args = vars(ap.parse_args())
# print("Started with args:",args)
# constants
# variables
def main():
if Carl: runLog.logger.info("Started")
egpg = easygopigo3.EasyGoPiGo3(use_mutex=True)
if Carl:
tiltpan.tiltpan_center()
sleep(0.5)
tiltpan.off()
try:
image = cv2.imread(args["image"])
cv2.imshow("Original", image)
print("max of 255: {}".format(cv2.add(np.uint8([200]), np.uint8([100]))))
print("min of 0: {}".format(cv2.subtract(np.uint8([50]), np.uint8([100]))))
print("wrap around: {}".format(np.uint8([200]) + np.uint8([100])))
print("wrap around: {}".format(np.uint8([50]) - np.uint8([100])))
M = np.ones(image.shape, dtype = "uint8") * 100
added = cv2.add(image, M)
cv2.imshow("Added", added)
M = np.ones(image.shape, dtype = "uint8") * 50
subtracted = cv2.subtract(image, M)
cv2.imshow("Subtracted", subtracted)
cv2.waitKey(0)
    except KeyboardInterrupt:  # in case the program gets interrupted by Ctrl+C on the keyboard
if (egpg != None): egpg.stop() # stop motors
print("\n*** Ctrl-C detected - Finishing up")
sleep(1)
if (egpg != None): egpg.stop()
if Carl: runLog.logger.info("Finished")
sleep(1)
if (__name__ == '__main__'): main()
| [
"[email protected]"
] | |
6b692d10b1730270b76669e1b35e9650edd7aaf2 | 01dfd817931803f5ca3a41832171082e84323d9e | /apps/applogReg/urls.py | fd05ed8fe6c674eb9ffb88c415767ef447c8705f | [] | no_license | alialwahish/rep92 | 41d715b043233a85ce968d835a3c7ba303593445 | 743e2684783f8f86df9e5b8493ab267679e250bc | refs/heads/master | 2022-11-26T21:33:23.643273 | 2018-05-23T01:22:42 | 2018-05-23T01:22:42 | 134,494,751 | 0 | 0 | null | 2022-11-22T01:06:15 | 2018-05-23T01:20:24 | Python | UTF-8 | Python | false | false | 484 | py | 
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',views.index),
url(r'^main$',views.main),
url(r'^login$',views.login),
url(r'^register$',views.register),
url(r'^quotes$',views.quotes),
url(r'logout$',views.logout),
url(r'^add_quote$',views.add_quote),
url(r'^add_fav/(?P<id>\d+)/$',views.add_fav),
url(r'^view_user/(?P<id>\d+)$',views.view_user),
url(r'^remove_fav_quote/(?P<id>\d+)/$',views.remove_fav_quote),
]
| [
"[email protected]"
] | |
d6466105ed7fcebea67a87df05fdc82930ef9b46 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/removeBrackets_20200908135618.py | 2cc917333227bb1d3ebec239f45ed72cebf22209 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | 
def remove(s):
    # find how many make a complete pair (assumed intent: scan with a counter)
    opened = pairs = 0
    for ch in s:
        if ch == '(': opened += 1
        elif ch == ')' and opened: opened -= 1; pairs += 1
    # subtract that from the total length to get how many brackets to remove
    return len(s) - 2 * pairs
| [
"[email protected]"
] | |
3569db6b3ac516591e08da38e3f8674310ee5db2 | 2e29ed138ab0fdb7e0a6e87b7c52c097b350fecf | /MechPro Tech/randomElementSets.py | be6c201ac4e516591e08da38e3f8674310ee5db2 | [] | no_license | ronniegeiger/Abaqus-Scripts | 1e9c66664bd7dc7e5264bf763f15936eadcff529 | c071bbfe0e6c54148dfd4a23f786f017dfef4ae4 | refs/heads/master | 2023-03-18T06:33:13.690549 | 2018-08-14T11:37:07 | 2018-08-14T11:37:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | 
# Script to create element sets based on % distribution
from textRepr import prettyPrint as pp
import random
N_150mm = 3591
N_100mm = 7271
N_50mm = 38778
n_200mm = 2020
n_150mm = 3591
n_100mm = 8079
n_50mm = 32315
N_total = float(N_150mm + N_100mm + N_50mm + n_200mm + n_150mm + n_100mm + n_50mm)
elset = {}
elset['N_150mm'] = []
elset['N_100mm'] = []
elset['N_50mm'] = []
elset['n_200mm'] = []
elset['n_150mm'] = []
elset['n_100mm'] = []
elset['n_50mm'] = []
for i in range(1, 95646): # elements 1..95645; the counts sum to N_total = 95645 and range's end is exclusive
rnd = random.random()
if rnd <= N_150mm/N_total:
elset['N_150mm'].append(i)
elif rnd > N_150mm/N_total and rnd <= (N_150mm+N_100mm)/N_total:
elset['N_100mm'].append(i)
elif rnd > (N_150mm+N_100mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm)/N_total:
elset['N_50mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm)/N_total:
elset['n_200mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm)/N_total:
elset['n_150mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm)/N_total:
elset['n_100mm'].append(i)
elif rnd > (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm)/N_total and rnd <= (N_150mm+N_100mm+N_50mm+n_200mm+n_150mm+n_100mm+n_50mm)/N_total:
elset['n_50mm'].append(i)
else:
print 'Number %i not assigned into an element set'%i
print 'N150mm %s, %s'%(len(elset['N_150mm'])/N_total, N_150mm/N_total)
print 'N100mm %s, %s'%(len(elset['N_100mm'])/N_total, N_100mm/N_total)
print 'N50mm %s, %s'%( len(elset['N_50mm'])/N_total, N_50mm/N_total)
print 'n200mm %s, %s'%(len(elset['n_200mm'])/N_total, n_200mm/N_total)
print 'n150mm %s, %s'%(len(elset['n_150mm'])/N_total, n_150mm/N_total)
print 'n100mm %s, %s'%(len(elset['n_100mm'])/N_total, n_100mm/N_total)
print 'n50mm %s, %s'%( len(elset['n_50mm'])/N_total, n_50mm/N_total)
file = open('elementSets.txt', 'w')
for key, elementList in elset.items():
if len(elementList) > 0:
file.write('*Elset, elset=%s \n'%key)
myList = [elementList[i:i+10] for i in range(0,len(elementList), 10)]
for subList in myList:
if len(subList) == 10:
file.write('%s, \n'%str(subList).strip('[]'))
else:
file.write('%s\n'%str(subList).strip('[]'))
file.close()
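# Illustrative excerpt of the generated file (element numbers vary run to run):
# *Elset, elset=N_150mm 
# 12, 57, 103, 241, 388, 402, 517, 699, 810, 934, 
# ...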
| [
"[email protected]"
] | |
343ba2e8c8dbc7bad29a57b1416f628800566367 | b0bd3342c244ebf30ae5ab29daa078f2b39010f7 | /SimpleEmbedModel.py | eea650c39544213933830a7ab48737a89881b223 | [] | no_license | naiqili/itime_learning | 30a8af7f1234277162ccdd4c69cd9f9a4a7ab412 | d9b191bb32a7e49cb99443d7dccea5bb392aee90 | refs/heads/master | 2021-06-19T04:54:06.239320 | 2017-06-26T13:35:39 | 2017-06-26T13:35:39 | 92,792,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | 
import tensorflow as tf
import numpy as np
class SimpleEmbedModel():
def __init__(self, conf):
self.conf = conf
self.uif_mat = np.load(conf.uif_path)
self.iur_mat = np.load(conf.iur_path)
def add_variables(self, reuse=False):
conf = self.conf
with tf.variable_scope('Fixed', reuse=reuse):
self.uif = tf.get_variable('uif',
[conf.user_size,
conf.item_size,
len(conf.recAlgos)],
initializer=tf.constant_initializer(self.uif_mat),
trainable=False)
self.iur = tf.get_variable('iur',
[conf.item_size,
conf.user_size+conf.feat_size],
initializer=tf.constant_initializer(self.iur_mat),
trainable=False)
with tf.variable_scope('Weights', reuse=reuse):
self.v1 = tf.get_variable('v1',
[len(conf.recAlgos), 1])
self.v2 = tf.get_variable('v2',
[conf.z_size, 1])
self.W_z = tf.get_variable('W_z',
[conf.z_size,
conf.embed_size,
conf.embed_size])
with tf.variable_scope('Embeddings', reuse=reuse):
self.embed = tf.get_variable('embed',
[conf.user_size+conf.feat_size, conf.embed_size])
self.ph_selected_items = tf.placeholder(tf.int32, shape=(None,))
self.ph_all_items = tf.placeholder(tf.int32, shape=(None,))
self.ph_groundtruth = tf.placeholder(tf.int32, shape=[])
self.ph_user = tf.placeholder(tf.int32, shape=[])
def build_model(self):
uif_u = self.uif[self.ph_user]
uif_u = tf.contrib.layers.dropout(uif_u, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
score1 = tf.matmul(uif_u, self.v1)
def fn_i0(): # (choices, score_sum) when i = 0
return (self.ph_all_items, tf.squeeze(score1))
def fn_not_i0(): # (choices, score_sum) when i != 0
selected_items = self.ph_selected_items
iur = self.iur
iur = tf.contrib.layers.dropout(iur, self.conf.keep_prob, is_training=self.conf.is_training) # Add dropout layer
iur_embed = tf.matmul(iur, self.embed)
se = tf.nn.embedding_lookup(iur, selected_items)
se_embed = tf.matmul(se, self.embed)
se_embed = tf.transpose(se_embed)
# see test/einsum_test.py
iur_w = tf.einsum('nu,zud->znd', iur_embed, self.W_z)
iur_w_se = tf.einsum('znu,uk->znk', iur_w, se_embed)
mp_iur_w_se = tf.reduce_max(iur_w_se, axis=2) # z x n
mp_iur_w_se = tf.transpose(mp_iur_w_se) # n x z
score2 = tf.matmul(mp_iur_w_se, self.v2) # n x 1
score_sum = tf.squeeze(score1 + score2) # vec of n
choices = tf.reshape(tf.sparse_tensor_to_dense(tf.sets.set_difference([self.ph_all_items], [selected_items])), [-1]) # vec of remaining choices
return (choices, score_sum)
i = tf.shape(self.ph_selected_items)[0]
choices, score_sum = tf.cond(tf.equal(i, 0),
lambda: fn_i0(),
lambda: fn_not_i0())
eff_score = tf.gather(score_sum, choices, validate_indices=False) # vec of choices
_argmax = tf.argmax(eff_score, axis=0)
_pred = tf.gather(choices, _argmax, validate_indices=False)
_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=score_sum, labels=self.ph_groundtruth)
self.loss = _loss
self.pred = _pred
self.loss_summary = tf.summary.scalar('Loss', self.loss)
if self.conf.is_training:
self.train_op = tf.train.AdamOptimizer(self.conf.lr).minimize(self.loss)
| [
"[email protected]"
] | |
ac2f389497f158fe59cc5e3f600d4fd7a0de203e | 2316ce8a21d44a5d09284968ef42530633dc10d2 | /sample_code/ep188/rev01/foo.py | 0576101a0190082ed83986d6e8f76bcf20481717 | [] | no_license | AlexanderWinkelmeier/explains | 160de2c41fc5fc0156b482b41f89644dc585c4f3 | d47ec53e384e4303a2d8e71fab9073a1a8d2d6bc | refs/heads/master | 2023-07-30T04:55:31.234482 | 2021-09-15T02:59:42 | 2021-09-15T02:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py |
x = 1
def increment_thing() -> None:
global x
x += 1
def get_thing() -> int:
return x
| [
"[email protected]"
] | |
0ca716046914007256d10bbb37b5f1a4cafa8580 | 71b7b6d84a61f514b038fac7741e6d16973fcaa9 | /build/object_manipulation_msgs/catkin_generated/pkg.installspace.context.pc.py | 3e6fd0a4ff97ee24b62a27bea0521aebb8561551 | [] | no_license | YiKangJ/perception_driven_ws | 15c02e523f1a708fe63b216d73019c8c2bde97a1 | 0a0f8fcbe3f5fed26439f449999b85f1e38c0f70 | refs/heads/master | 2020-04-01T19:47:48.372111 | 2018-10-18T06:17:57 | 2018-10-18T06:17:57 | 153,571,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | 
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/jyk/perception_driven_ws/install/include".split(';') if "/home/jyk/perception_driven_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;actionlib_msgs;std_msgs;geometry_msgs;sensor_msgs;trajectory_msgs;sensor_msgs;household_objects_database_msgs;shape_msgs;manipulation_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "object_manipulation_msgs"
PROJECT_SPACE_DIR = "/home/jyk/perception_driven_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
373e4f049cea8386fbd57e288660868ad6f2a5ab | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /tests/unit/modules/storage/netapp/test_na_ontap_net_port.py | 07ea6a104c088e71872248865edf07e619628ac8 | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,647 | py | 
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from ansible_collections.ansible.community.tests.unit.compat import unittest
from ansible_collections.ansible.community.tests.unit.compat.mock import patch, Mock
from ansible_collections.ansible.community.plugins.module_utils import basic
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes
import ansible_collections.ansible.community.plugins.module_utils.netapp as netapp_utils
from ansible_collections.ansible.community.plugins.modules.na_ontap_net_port \
import NetAppOntapNetPort as port_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'port':
xml = self.build_port_info(self.data)
self.xml_out = xml
return xml
@staticmethod
def build_port_info(port_details):
''' build xml data for net-port-info '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'net-port-info': {
# 'port': port_details['port'],
'mtu': port_details['mtu'],
'is-administrative-auto-negotiate': 'true',
'ipspace': 'default',
'administrative-flowcontrol': port_details['flowcontrol_admin'],
'node': port_details['node']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.server = MockONTAPConnection()
self.mock_port = {
'node': 'test',
'ports': 'a1',
'flowcontrol_admin': 'something',
'mtu': '1000'
}
def mock_args(self):
return {
'node': self.mock_port['node'],
'flowcontrol_admin': self.mock_port['flowcontrol_admin'],
'ports': [self.mock_port['ports']],
'mtu': self.mock_port['mtu'],
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!'
}
def get_port_mock_object(self, kind=None, data=None):
"""
Helper method to return an na_ontap_net_port object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_net_port object
"""
obj = port_module()
obj.autosupport_log = Mock(return_value=None)
if data is None:
data = self.mock_port
obj.server = MockONTAPConnection(kind=kind, data=data)
return obj
def test_module_fail_when_required_args_missing(self):
''' required arguments are reported as errors '''
with pytest.raises(AnsibleFailJson) as exc:
set_module_args({})
port_module()
print('Info: %s' % exc.value.args[0]['msg'])
def test_get_nonexistent_port(self):
''' Test if get_net_port returns None for non-existent port '''
set_module_args(self.mock_args())
result = self.get_port_mock_object().get_net_port('test')
assert result is None
def test_get_existing_port(self):
''' Test if get_net_port returns details for existing port '''
set_module_args(self.mock_args())
result = self.get_port_mock_object('port').get_net_port('test')
assert result['mtu'] == self.mock_port['mtu']
assert result['flowcontrol_admin'] == self.mock_port['flowcontrol_admin']
def test_successful_modify(self):
''' Test modify_net_port '''
data = self.mock_args()
data['mtu'] = '2000'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert exc.value.args[0]['changed']
def test_successful_modify_multiple_ports(self):
''' Test modify_net_port '''
data = self.mock_args()
data['ports'] = ['a1', 'a2']
data['mtu'] = '2000'
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert exc.value.args[0]['changed']
@patch('ansible_collections.ansible.community.plugins.modules.na_ontap_net_port.NetAppOntapNetPort.get_net_port')
def test_get_called(self, get_port):
''' Test get_net_port '''
data = self.mock_args()
data['ports'] = ['a1', 'a2']
set_module_args(data)
with pytest.raises(AnsibleExitJson) as exc:
self.get_port_mock_object('port').apply()
assert get_port.call_count == 2
| [
"[email protected]"
] | |
7885a77c550774f08a3cff1f19f60dcc326bf6ef | f780b05549bea9ecb0b80e4e9ea63376aa59f962 | /iplauction/migrations/0004_auto_20160305_1217.py | 8a77f654b1476c81a8d55f19d6e896d75eb98e18 | [] | no_license | PunitGr/django-kickstart | ebff3e7e60a27091468f45a35e3b562954609168 | 5e8a21b0408ade1be4860deb3bc2cb80f033b159 | refs/heads/master | 2021-01-24T20:26:06.970847 | 2016-03-07T14:33:17 | 2016-03-07T14:33:17 | 52,964,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | 
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-05 12:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iplauction', '0003_auto_20160305_1102'),
]
operations = [
migrations.AddField(
model_name='team',
name='slug',
field=models.SlugField(blank=True, default='', null=True),
),
migrations.AlterField(
model_name='player',
name='slug',
field=models.SlugField(blank=True, default='', null=True),
),
]
| [
"[email protected]"
] | |
34c8f2b1fad1c29421bec555fae6b807a7102a8e | 599709e7687a78f92b268315590d6ad750ce97d6 | /calc/opt_h_pi/1skp_l/gto_part_shift/w1_dense/search/calc.py | 55af763cbe118028143f347a61cdde0fcb874dc5 | [] | no_license | ReiMatsuzaki/cbasis2 | b99d096150d87f9301ed0e34f7be5f0203e4a81e | 86f21146fab6fc6f750d02fb2200ea94616ca896 | refs/heads/master | 2021-01-19T23:15:32.864686 | 2017-04-27T07:29:26 | 2017-04-27T07:29:26 | 88,953,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | 
import sys
import numpy as np
import pandas as pd
from itertools import product, combinations
import subprocess
sys.path.append("../../../../../../src_py/nnewton")
sys.path.append("../../../../../../r1basis")
from r1basis import *
from opt_green import *
## one GTO optimization value is 0.0105604,-0.0548991
with open("search.out", "w") as f:
print_timestamp("start", f)
yos = [0.0001, 0.00014, 0.0002, 0.0003, 0.0005, 0.0007,
0.001, 0.0014, 0.002, 0.003, 0.005, 0.007,
0.01, 0.014, 0.02, 0.03, 0.05, 0.07]
xos = [0.0001, 0.00014, 0.0002, 0.0003, 0.0005, 0.0007,
0.001, 0.0014, 0.002, 0.003, 0.005, 0.007,
0.01, 0.014, 0.02, 0.03, 0.05, 0.07]
zos = [ x-1.0j*y for y in yos for x in xos]
num = 20
numopt = 5
z0 = 0.005
z1 = 20.0
r = (z1/z0)**(1.0/(num-1))
zs = [z0*r**n for n in range(num)]
z0s = zs[0:numopt]
z1s = zs[numopt:]
f = open('search.csv', 'w')
for z0 in zos:
basis_info = [('shift', True, 2, z0s, z0),
('shift', False, 2, z1s, 0.0)]
res = opt_main(
basis_type = 'GTO',
basis_info = basis_info,
target = 'h_pi',
channel= '1s->kp',
dipole = 'length',
w0 = 1.0,
tol = pow(10.0, -5.0),
maxit = 50,
conv = 'grad',
fdif = 0.0001,
grad = False,
hess = False,
outfile = 'res.out',
print_level = 0)
opt_res = res['w_res_list'][0][1]
z0 = opt_res.x[0]
eps = 0.00001
if(z0.real > 0.0 and z0.imag < 0.0 and
100.0 > z0.real and z0.imag > -100.0 and
opt_res.success):
subprocess.call("cat res.out >> search.out", shell=True)
f.write("{0},{1}\n".format(z0.real, z0.imag))
f.close()
with open("search.out", "a") as f:
print_timestamp("end", f)
| [
"[email protected]"
] | |
020cc492835e3f6213e03d6a0067ede9641141eb | 9c73dd3043f7db7c9ec76d560484e99ad134fdb6 | /students/douglas_klos/lesson05/activity/code/rdbms_api.py | 0675fa79cb50d6f70ae4d1f4b9bfa46070797d0a | [] | no_license | UWPCE-PythonCert-ClassRepos/py220-online-201904-V2 | 546b316025b680ca28d24b523663095398616b13 | ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56 | refs/heads/master | 2022-12-10T03:14:25.514630 | 2019-06-11T02:14:17 | 2019-06-11T02:14:17 | 179,139,181 | 1 | 19 | null | 2022-12-08T01:43:38 | 2019-04-02T18:49:10 | Python | UTF-8 | Python | false | false | 985 | py | 
from peewee import *  # missing from the original snippet: provides Model, the field types, and SqliteDatabase

database = SqliteDatabase('personjob.db')  # assumed DB file name; the snippet uses 'database' without defining it

class BaseModel(Model):
class Meta:
database = database
class Person(BaseModel):
"""
This class defines Person, which maintains details of someone
for whom we want to research career to date.
"""
person_name = CharField(primary_key=True, max_length=30)
lives_in_town = CharField(max_length=40)
nickname = CharField(max_length=20, null=True)
class Job(BaseModel):
"""
This class defines Job, which maintains details of past Jobs
held by a Person.
"""
job_name = CharField(primary_key=True, max_length=30)
start_date = DateField(formats="YYYY-MM-DD")
end_date = DateField(formats="YYYY-MM-DD")
salary = DecimalField(max_digits=7, decimal_places=2)
person_employed = ForeignKeyField(Person, related_name="was_filled_by", null=False)
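
# Assumed setup step (not in the original snippet): the tables must exist
# before the inserts below will work.
database.create_tables([Person, Job])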
new_person = Person.create(
person_name="Fred", lives_in_town="Seattle", nickname="Fearless"
)
new_person.save()
aperson = Person.get(Person.person_name == "Fred")
| [
"[email protected]"
] | |
0ca1c1b2b6d4a5a92a1cf44d6e0600fa459c73fa | 78f43f8bd07ae0fc91738a63cd7bbca08ae26066 | /leetcode/interval/merge_interval.py | 9f4a681237c817d9390d7cf79195feeb3effddb0 | [] | no_license | hanrick2000/LeetcodePy | 2f3a841f696005e8f0bf4cd33fe586f97173731f | b24fb0e7403606127d26f91ff86ddf8d2b071318 | refs/heads/master | 2022-04-14T01:34:05.044542 | 2020-04-12T06:11:29 | 2020-04-12T06:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | 
from leetcode.interval.interval import Interval
class Solution(object):
def merge(self, intervals):
"""
:type intervals: List[Interval]
:rtype: List[Interval]
"""
if len(intervals) <= 1:
return intervals
def overlap(l1, l2):
"""
:type l1: Interval
:type l2: Interval
"""
return l2.start <= l1.end
def merge_one(l1, l2):
"""
:type l1: Interval
:type l2: Interval
"""
return Interval(l1.start, max(l1.end, l2.end))
def eq(l1, l2):
"""
:type l1: Interval
:type l2: Interval
"""
return l1.start == l2.start and l1.end == l2.end
ret = []
# Greedy: Earliest start time
sorted_itvs = sorted(intervals, key=lambda x: x.start)
merged_itv = sorted_itvs[0]
for i in range(1, len(sorted_itvs)):
itv = sorted_itvs[i]
if overlap(merged_itv, itv):
merged_itv = merge_one(merged_itv, itv)
else:
ret.append(merged_itv)
merged_itv = itv
if not ret or not eq(ret[-1], merged_itv):
ret.append(merged_itv)
return ret
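
# Usage sketch (assumes the sibling Interval class stores .start/.end, as used above):
#   s = Solution()
#   s.merge([Interval(1, 3), Interval(2, 6), Interval(8, 10)])
#   # -> intervals (1, 6) and (8, 10)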
| [
"[email protected]"
] | |
402200e5c6c3d708a3beb5e97137c59c1ddd8bd8 | cf5f24e5a32f8cafe90d4253d727b1c0457da6a4 | /algorithm/boj_21317.py | 55e763cbe118028143f347a61cdde0fcb874dc5b | [] | no_license | seoljeongwoo/learn | 537659ca942875f6846646c2e21e1e9f2e5b811e | 5b423e475c8f2bc47cb6dee09b8961d83ab08568 | refs/heads/main | 2023-05-04T18:07:27.592058 | 2021-05-05T17:32:50 | 2021-05-05T17:32:50 | 324,725,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | 
import sys
input = sys.stdin.readline
def solve(curr, ok):
if curr > n : return inf
if curr == n: return 0
if dp[curr][ok] != inf: return dp[curr][ok]
dp[curr][ok] = solve(curr+1,ok) + jump[curr][0]
dp[curr][ok] = min(dp[curr][ok] , solve(curr+2,ok) + jump[curr][1])
if ok: dp[curr][ok] = min(dp[curr][ok] , solve(curr+3,ok-1) + k)
return dp[curr][ok]
n = int(input())
jump = [[0]*2 for _ in range(n)]
for _ in range(n-1):
small, big = map(int,input().split())
jump[_+1][0] , jump[_+1][1] = small, big
k = int(input())
inf = int(1e9)+5
dp = [[inf]*2 for _ in range(n+1)]
print(solve(1,1))
| [
"[email protected]"
] | |
ef16a7892614d638cd0b989921e2c479a82bda61 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/pip-19.0.3-py3.6.egg/pip/_vendor/requests/api.py | abada96d4627a52bf0f6040ee2e08b0eff32c77c | [
"MIT"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 6,253 | py | 
# -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the body of the :class:`Request`.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'https://httpbin.org/get')
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary, list of tuples or bytes to send
in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs)
| [
"[email protected]"
] | |
74b88d61b79bd5ebc7c8ea4b42c60bfc7ba59bc5 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/official/utils/flags/_conventions.py | b6f248b443a689f82770c2a725791ec76dc46591 | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 1,808 | py | 
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Central location for shared argparse convention definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import codecs
import functools
from absl import app as absl_app
from absl import flags
# This codifies help string conventions and makes it easy to update them if
# necessary. Currently the only major effect is that help bodies start on the
# line after flags are listed. All flag definitions should wrap the text bodies
# with help wrap when calling DEFINE_*.
_help_wrap = functools.partial(
flags.text_wrap, length=80, indent="", firstline_indent="\n")
# Pretty formatting causes issues when utf-8 is not installed on a system.
def _stdout_utf8():
try:
codecs.lookup("utf-8")
except LookupError:
return False
return getattr(sys.stdout, "encoding", "") == "UTF-8"
if _stdout_utf8():
help_wrap = _help_wrap
else:
def help_wrap(text, *args, **kwargs):
return _help_wrap(text, *args, **kwargs).replace(u"\ufeff", u"")
# Replace None with h to also allow -h
absl_app.HelpshortFlag.SHORT_NAME = "h"
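
# Typical call site elsewhere in the flags package (sketch, not part of this file):
#   flags.DEFINE_string("data_dir", None, help_wrap("Directory with the input data."))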
| [
"[email protected]"
] | |
f01c7a44eccbf92d7cc1e3ecbafa94dc41b95918 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/easy/8_15.py | 86dea0108cdf7dbb420de013fbc38390cad29623 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,388 | py | 
Python – Reading last N lines of a file
**Prerequisite:** Read a file line-by-line in Python
Given a text file _fname_ , a number _N_ , the task is to read the last N
lines of the file.
As we know, Python provides multiple built-in features and modules for
handling files. Let's discuss different ways to read the last N lines of a
file using Python.
** _File:_** `File1.txt` is a ten-line sample file; its last three lines are
"Eighth line", "Ninth line" and "Tenth line".
**Method 1: Naive approach**
In this approach, the idea is to use a negative slice with the readlines()
function to pick out the last N lines requested by the user from the end of
the file.
# Python implementation to
# read last N lines of a file
# Function to read
# last N lines of the file
def LastNlines(fname, N):
# opening file using with() method
# so that file get closed
# after completing work
with open(fname) as file:
# loop to read iterate
# last n lines and print it
for line in (file.readlines() [-N:]):
print(line, end ='')
# Driver Code:
if __name__ == '__main__':
fname = 'File1.txt'
N = 3
try:
LastNlines(fname, N)
except:
        print('File not found')
**Output:**
Eighth line
Ninth line
Tenth line
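Note that the naive approach still reads the whole file into memory via
readlines(). A minimal memory-bounded sketch (not in the original article)
uses collections.deque with maxlen, which keeps only the most recent N lines
while streaming the file:

    from collections import deque

    def last_n_lines(fname, N):
        # the deque silently discards older lines once it holds N of them
        with open(fname) as f:
            return list(deque(f, maxlen=N))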
**Method 2: Using OS module and buffering policy**
In this approach, the idea is to work with Python's buffering policy. A
buffer temporarily stores part of the data received from the operating
system's file stream until it is consumed, after which more data comes in.
The buffer size determines the size of the data that can be stored at a time
until it is used. We have the option to pass an integer to buffering in order
to set buffering policy and if we do not specify any policy then the size of
the buffer depends upon the device’s block size. Usually, the buffer is 4096
or 8192 bytes long. In this approach size of the buffer is 8192 bytes.
Moreover, the **st_size** attribute of os.stat() method in the OS module is
used to represent the size of the file in bytes.
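In isolation, the two primitives this method relies on look like this (a
minimal sketch; 8192 bytes is just the common default mentioned above):

    import os

    print(os.stat('File1.txt').st_size)      # file size in bytes
    f = open('File1.txt', buffering=8192)    # explicit buffering policy
    f.close()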
Below is the implementation of the above approach.
# Python implementation to
# read last N lines of a file
# Using OS module and buffering policy
# importing os module
import os
# Function to read
# last N lines of the file
def LastNlines(fname, N):
# taking buffer size of 8192 bytes
bufsize = 8192
# calculating size of
# file in bytes
fsize = os.stat(fname).st_size
iter = 0
# opening file using with() method
# so that file get closed
# after completing work
with open(fname) as f:
if bufsize > fsize:
# adjusting buffer size
# according to size
# of file
bufsize = fsize-1
# list to store
# last N lines
fetched_lines = []
# while loop to
# fetch last N lines
while True:
iter += 1
# moving cursor to
# the last Nth line
# of file
f.seek(fsize-bufsize * iter)
# storing each line
# in list upto
# end of file
fetched_lines.extend(f.readlines())
# halting the program
# when size of list
# is equal or greater to
# the number of lines requested or
# when we reach end of file
if len(fetched_lines) >= N or f.tell() == 0:
print(''.join(fetched_lines[-N:]))
break
# Driver Code:
if __name__ == '__main__':
fname = 'File1.txt'
N = 3
try:
LastNlines(fname, N)
except:
print('File not found')
**Output:**
Eighth line
Ninth line
Tenth line
**Method 3: Through Exponential search**
In this method, the idea is to use the Exponential Search algorithm, which is
generally used for searching sorted, unbounded or infinite lists.
This approach uses the assert statement, which acts as a debugging tool that
checks a condition. The program continues to execute if the given statement
is true; otherwise it raises an **AssertionError** exception.
It also relies on the seek() method's ability to position the file cursor
relative to the end of the file.
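The key primitive here is an end-relative seek, f.seek(-pos, 2), where
whence=2 means "from the end of the file". A minimal sketch (opened in binary
mode, since Python 3 text streams only allow zero-offset end-relative seeks):

    with open('File1.txt', 'rb') as f:
        f.seek(-10, 2)    # position the cursor 10 bytes before the end
        print(f.read())   # the last 10 bytes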
Below is the implementation of the above approach.
# Python implementation to
# read last N lines of a file
# through Exponential search
# Function to read
# last N lines of the file
def LastNlines(fname, N):
# assert statement check
# a condition
assert N >= 0
# declaring variable
# to implement
# exponential search
pos = N + 1
# list to store
# last N lines
lines = []
# opening file using with() method
# so that file get closed
# after completing work
with open(fname) as f:
# loop which runs
# until size of list
# becomes equal to N
while len(lines) <= N:
# try block
try:
# moving cursor from
# left side to
# pos line from end
f.seek(-pos, 2)
# exception block
            # to handle any run
# time error
except IOError:
f.seek(0)
break
# finally block
# to add lines
# to list after
# each iteration
finally:
lines = list(f)
# increasing value
# of variable
# exponentially
pos *= 2
# returning the
# whole list
# which stores last
# N lines
return lines[-N:]
# Driver Code:
if __name__ == '__main__':
fname = 'File1.txt'
N = 3
try:
lines = LastNlines(fname, N)
for line in lines:
print (line, end ='')
except:
print('File not found')
**Output:**
Eighth line
Ninth line
Tenth line
| [
"[email protected]"
] | |
3f2ab7df5f4f7fc432c79ef2ed7a0604e20a3ceb | e3afad642b98f0c6cdda91b550c080f7cd6fdf4c | /epi/10-binary-trees-15-bst/BinaryTreeNode.py | dcca3a3d0241a37beb165c28593cec5c1728f1ca | [] | no_license | danielcodes/practice-problems | 50e7ceb71305f69eafcd50c1507f9aa8829a8a2c | b06dfaed6b71e5a86dc43940fb15bc2e7f07903f | refs/heads/master | 2020-04-12T06:19:25.552398 | 2016-11-30T02:13:06 | 2016-11-30T02:13:06 | 65,409,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py |
# module for binary trees
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
self.parent = None
def printInOrder(root):
nodes = []
printInOrderUtil(root, nodes)
return nodes
def printInOrderUtil(root, nodes):
if root:
printInOrderUtil(root.left, nodes)
nodes.append(root.value)
printInOrderUtil(root.right, nodes)
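
# usage sketch: build a small tree and collect its values in order
#   root = Node(2, Node(1), Node(3))
#   printInOrder(root)  # -> [1, 2, 3]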
| [
"[email protected]"
] | |
7b48d917637fb4d2a301c58da1464aef2f61fbe6 | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/tmpconsolecmds.py | 0c0a4a16136a52bf97658edd60418a96123dc691 | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,405 | py | 
# 2015.11.18 11:51:09 Central Europe Standard Time
# Embedded file name: scripts/client/TmpConsoleCmds.py
import BigWorld
from debug_utils import *
class LanServers:
def search(self):
BigWorld.serverDiscovery.searching = 1
def stop(self):
BigWorld.serverDiscovery.searching = 0
def show(self):
for server in BigWorld.serverDiscovery.servers:
print server
def searchAndConnect(self, owner, user):
self.search()
self.__owner = owner
self.__user = user
BigWorld.serverDiscovery.changeNotifier = self.__checkIfFound
def __checkIfFound(self):
for server in BigWorld.serverDiscovery.servers:
if server.ownerName == self.__owner:
self.__host = server.serverString
del self.__owner
self.stop()
self.__login()
break
def __login(self):
class LoginInfo:
pass
login = LoginInfo()
login.username = self.__user
BigWorld.connect(self.__host, login, self.__progressFn)
def __progressFn(self, stage, status, serverMsg):
print stage, status, serverMsg
def printPeriodTime():
arena = BigWorld.player().arena
print '%f / %f' % (arena.periodEndTime - BigWorld.serverTime(), arena.periodLength)
def printStatistics(byTotal = False, bots = True):
statistics = BigWorld.player().arena.statistics
teams = (None, [], [])
for (name, team), stats in statistics.iteritems():
if bots or not name.startswith('Bot'):
teams[team].append((name, stats))
key = 'totalFrags' if byTotal else 'frags'
    teams[1].sort(lambda x, y: cmp(x[1][key], y[1][key]))  # sort by the chosen stat, not the literal string 'key'
    teams[2].sort(lambda x, y: cmp(x[1][key], y[1][key]))
for i in xrange(1, 3):
print 'Team %d\n' % i
for name, stats in teams[i]:
print '%s\t%d\t%d' % (name, stats['frags'], stats['totalFrags'])
return
def printConst(module, prefix, value):
mod = __import__(module)
for c in dir(mod):
if c.startswith(prefix) and getattr(mod, c) == value:
print c
return
print 'Not found'
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\tmpconsolecmds.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:51:09 Central Europe Standard Time
| [
"[email protected]"
] | |
a08261dfb20e2f5f2b1ccac1c04c7469f46f54e4 | 25ebec4dd05334b63d62e238ccaa1700fdb7dcc4 | /Arase/PWE/DeleteDate.py | 54b02b15eff997334804e5fcbe1ec03e7b387862 | [
"MIT"
] | permissive | mattkjames7/Arase | 2513416f0211f82a75d6b963fc8d6ea081dbc4f0 | 996167be35a13bbb1fdddfbe75e3a06d124b1d25 | refs/heads/master | 2023-04-07T03:54:59.407811 | 2021-05-20T12:03:50 | 2021-05-20T12:03:50 | 187,637,689 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | 
import numpy as np
from ..Tools.Downloading._DeleteDate import _DeleteDate
from .. import Globals
def DeleteDate(Date,subcomp,L,prod,Confirm=True):
'''
delete all of the files from a given date
'''
if subcomp == 'hfa' and L == 3:
idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}.dat'.format(L,subcomp)
datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/'.format(subcomp,L)
else:
idxfname = Globals.DataPath + 'PWE/Index-L{:01d}-{:s}-{:s}.dat'.format(L,subcomp,prod)
datapath = Globals.DataPath + 'PWE/{:s}/L{:01d}/{:s}/'.format(subcomp,L,prod)
_DeleteDate(Date,idxfname,datapath,Confirm)
| [
"[email protected]"
] | |
041645ad029ae1da78cd282c6980c9e489f5b47e | 59cdb8b3995ee5938dc4710e32f29ac273410265 | /_archive/nosepoke_code/_old/cuedtaste_abu3.py | ca2954d2612ada74593f061d92279ad1919ac11a | [] | no_license | abuzarmahmood/firing_space_plot | 15ff667fada8f4e985a6a6c6f31261b72b0f4b60 | 9fe925d9b443fda96d8e23d6d2d2d2aa60b08f15 | refs/heads/master | 2023-07-25T01:39:31.942434 | 2023-07-15T14:24:38 | 2023-07-15T14:24:38 | 139,602,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,345 | py | """
Code to ONLY CONTROL cue and RESPOND to nosepoke
No recording --> Leave that to Intan Board
"""
from threading import Thread, Lock
import time
import RPi.GPIO as GPIO
import datetime
import numpy as np
class nosepoke_task:
"""
- Class to run nosepoke task
- Stores main variables and performs appropriate setup
- Will allow delivery of tastant and control of laser dependent on nosepoke parameters
- No need to involve INTAN board since laser is either on or off for all trials
and nosepoke parameters will stay the same
"""
class nosepoke_trigger:
"""
Class to control cue and activate outputs
"""
def __init__(self, nosepoke_gpio, cue_gpio, freq,
taste_output, laser_output, iti):
"""
nosepoke_gpio :: Which port to read from
freq :: Frequency of readings
"""
# Initialize board details
GPIO.setmode(GPIO.BOARD)
GPIO.setup(nosepoke_gpio, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.setup(cue_gpio, GPIO.OUT)
self.nosepoke_gpio = nosepoke_gpio
self.cue_gpio = cue_gpio
self.freq = np.float(freq)
self.poke_bool = 0
self.stopped = 0
self.taste_output = taste_output
self.laser_output = laser_output
self.freq = freq
self.iti_delta = datetime.timedelta(seconds = iti)
self.latest_trigger_time = 0
self.wait_till = datetime.datetime.now()
self.iti = iti
self.cue_freq = np.float(2)
self.cue_on = 1
def update(self):
# Keep looping indefinitely till thread is stopped
while True:
time.sleep(1/self.freq)
if self.stopped:
return
temp_read = GPIO.input(self.nosepoke_gpio)
#temp_read = np.random.choice([0,1], p = [0.9,0.1])
#print(temp_read)
if not temp_read: # Assuming 1 indicates poke
self.action_check()
def action_check(self):
"""
Checks whether action should be allowed to pass
"""
current_time = datetime.datetime.now()
#print("Check initiated")
#if current_time > self.wait_till:
self.cue_on = 0
#self.latest_trigger_time = current_time
#self.wait_till = current_time + self.iti
print("ACTION COMPLETED")
time.sleep(self.iti)
self.cue_on = 1
return
def cue_protocol(self):
while True:
time.sleep(1/self.cue_freq)
GPIO.output(self.cue_gpio, 0)
if not self.stopped:
time.sleep(0.5/self.cue_freq)
GPIO.output(self.cue_gpio, 1)
def start_update(self):
# Start thread to write from buffer
        t = Thread(target = self.update, name = 'check_thread', args = ())  # pass the method itself; calling it here would block instead of spawning
t.daemon = True
t.start()
return self
def start_cue(self):
# Start thread to write from buffer
        t = Thread(target = self.cue_protocol, name = 'cue_thread', args = ())  # pass the method, do not call it
t.daemon = True
t.start()
return self
def stop_all(self):
self.stopped = True
self.out_connect.close()
freq = 100
light = 36
beam = 11
test_poke_io = nosepoke_trigger(beam,light,freq,1,1,10)
test_poke_io.start_update()
test_poke_io.start_cue()
| [
"[email protected]"
] | |
311cc87a3b20d8b6748d58c460a77d6164fb510b | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/regressiontests/test_client_regress/urls.py | ede4a3681af80b4442b4d39b9ee1f20d85f729a7 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | 
/home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/regressiontests/test_client_regress/urls.py
| [
"[email protected]"
] | |
f0e71dab23c9d10b9ed81e223aeb65c5444569bd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02784/s847410997.py | fd50024c37c9f2ef3942f98bcea7d5ff189e9a6b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | 
import sys
from itertools import accumulate
H, N = map(int, next(sys.stdin.buffer).split())
A = map(int, next(sys.stdin.buffer).split())
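# total damage is enough iff some running total of A reaches H (initial=0 seeds the sum)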
ans = any(map(lambda v: v >= H, accumulate(A, initial=0)))
print('Yes' if ans else 'No')
| [
"[email protected]"
] | |
725ae5cb559adac73aa5ac94b2f193dbed895a91 | a2b7fba22a16f379ccca2e38d9d6291b9562abc3 | /Graph Theory/Connectivity/Biconnected_Components.py | 4967813f27bcb840b9dbc20d520f003f4d76fe17 | [] | no_license | neelamy/Algorithm | 565c1cea72715745653e90a3dabbba1e9e283fd8 | 7c9f53ff27bcb840b9dbc20d520f003f4d76fe17 | refs/heads/master | 2020-06-10T15:53:12.967832 | 2017-07-18T07:59:32 | 2017-07-18T07:59:32 | 75,953,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,445 | py | 
# Python program to find biconnected components in a given
# undirected graph
#Complexity : O(V+E)
from collections import defaultdict
#This class represents an directed graph using adjacency list representation
class Graph:
def __init__(self,vertices):
self.V= vertices #No. of vertices
self.graph = defaultdict(list) # default dictionary to store graph
self.Time = 0 # time is used to find discovery times
self.count = 0 # Count is number of biconnected components
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
self.graph[v].append(u)
'''A recursive function that finds and prints strongly connected
components using DFS traversal
u --> The vertex to be visited next
disc[] --> Stores discovery times of visited vertices
low[] -- >> earliest visited vertex (the vertex with minimum
discovery time) that can be reached from subtree
rooted with current vertex
st -- >> To store visited edges
result -->> To store all edges already printed
'''
def BCCUtil(self,u, parent, low, disc, st, result):
#Count of children in current node
children =0
# Initialize discovery time and low value
disc[u] = self.Time
low[u] = self.Time
self.Time += 1
#Recur for all the vertices adjacent to this vertex
for v in self.graph[u]:
# If v is not visited yet, then make it a child of u
# in DFS tree and recur for it
if disc[v] == -1 :
parent[v] = u
children += 1
st.append((u, v)) #store the edge in stack
self.BCCUtil(v, parent, low, disc, st, result)
# Check if the subtree rooted with v has a connection to
# one of the ancestors of u
# Case 1 -- per Strongly Connected Components Article
low[u] = min(low[u], low[v])
# If u is an articulation point,pop all edges from stack till (u, v)
if parent[u] == -1 and children > 1 or parent[u] != -1 and low[v] >= disc[u]:
self.count +=1 # increment count
w = -1
while w != (u,v):
w = st.pop()
result.append(w) # store output edges
print w,
print""
elif v != parent[u]:
#Update low value of 'u' only of 'v' is still in stack
low[u] = min(low [u], disc[v])
# add the edge if (u,v) and (v,u) are not already in stack
# or result
if ((u,v) not in st and (v,u) not in st and
(u,v) not in result and (v,u) not in result):
st.append((u,v))
#The function to do DFS traversal. It uses recursive BCCUtil()
def BCC(self):
# Initialize disc and low, and parent arrays
disc = [-1] * (self.V)
low = [-1] * (self.V)
parent = [-1] * (self.V)
st = []
result = []
# Call the recursive helper function to find articulation points
# in DFS tree rooted with vertex 'i'
for i in range(self.V):
if disc[i] == -1:
self.BCCUtil(i, parent, low, disc, st, result)
#If stack is not empty, pop all edges from stack
if st:
self.count = self.count + 1
while st:
w = st.pop()
result.append(w) # store output edges
print w,
print ""
# Create a graph given in the above diagram
g = Graph(12)
g.addEdge(0,1)
g.addEdge(1,2)
g.addEdge(1,3)
g.addEdge(2,3)
g.addEdge(2,4)
g.addEdge(3,4)
g.addEdge(1,5)
g.addEdge(0,6)
g.addEdge(5,6)
g.addEdge(5,7)
g.addEdge(5,8)
g.addEdge(7,8)
g.addEdge(8,9)
g.addEdge(10,11)
g.BCC();
print ("Above are %d biconnected components in graph" %(g.count)); | [
"[email protected]"
] | |
0675fa79cb50d6f70ae4d1f4b9bfa46070797d0a | 67b0379a12a60e9f26232b81047de3470c4a9ff9 | /slideshow/admin.py | 365cd2affc5fb4e985a6a6c6f31261b72b0f4b60 | [] | no_license | vintkor/whitemandarin | 8ea9022b889fac718e0858873a07c586cf8da729 | 5afcfc5eef1bb1cc2febf519b04a4819a7b9648f | refs/heads/master | 2021-05-06T03:35:09.367375 | 2017-12-20T15:43:08 | 2017-12-20T15:43:08 | 114,904,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | 
from django.contrib import admin
from .models import *
from content.fields import AdminImageWidget
from django.db import models
class SlidAdmin(admin.ModelAdmin):
list_display = ('name', 'pic', 'published', 'ordering')
list_editable = ('published', 'ordering')
formfield_overrides = {
models.ImageField: {'widget': AdminImageWidget},
}
admin.site.register(Slid, SlidAdmin)
admin.site.register(Category)
| [
"[email protected]"
] | |
82ae6081af92203e7de8a4eae701c34ca3048032 | fb0c02a5529f41384598dab941180152f39fa10e | /dopzad.py | d4825d0e0ef8fa4c07a4e4ec12b31c244ee1f015 | [] | no_license | Sezimm/Problem2 | 951200c92dcdbd43da3daefd32491c8850184745 | 0a3f392f025d22db4ef2ca8045c6d8a770b54f9f | refs/heads/main | 2023-02-22T02:03:25.424031 | 2021-01-26T15:49:45 | 2021-01-26T15:49:45 | 333,133,588 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py |
#2
'''
text = input()
l = text.split()
q=""
for i in sorted(l,key=lambda a: len(a)):
q = q + " " + i
print(q)
'''
p = input("vedite dannye cherez probel: \n")
s = int(input("vedite shag: "))
x = p.split()
for j in range(int(len(x))):
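    # The loop body was truncated in the source; a minimal completion
    # (assumption): print every s-th item of the input.
    if j % s == 0:
        print(x[j])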
| [
"[email protected]"
] | |
2e424e970f298b6e3f96fe5aaf6fdb58c9648820 | ee4265c3c6c99f189e2202e7f0d2b5a78475376d | /code_forces/Accommodation.py | 58c0a2f2021e7ef312b1bdda00f5506ed981caeb | [] | no_license | akashgkrishnan/HackerRank_Solutions | 96f762848a77b731748d3331d17314f5e343abae | 75eeb5bd0f6e81c62ecd2898eb7de9b540e98b46 | refs/heads/master | 2023-03-18T18:01:21.402781 | 2020-09-30T18:08:43 | 2020-09-30T18:08:43 | 271,615,533 | 1 | 0 | null | 2021-03-20T05:03:59 | 2020-06-11T18:09:23 | Python | UTF-8 | Python | false | false | 143 | py | count = 0
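# Likely Codeforces 467A "George and Accommodation" (assumption): count rooms
# whose capacity q exceeds the current occupancy p by at least two.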
for _ in range(int(input())):
    p, q = map(int, input().split())
if q - p >= 2:
count += 1
print(count)
| [
"[email protected]"
] | |
e71c26708fa1dac89ce425302946faba0f6cdd88 | e1a2c6ed4a4b93b4697974e3b0a32a4d67daa6f6 | /venv/Lib/site-packages/pybrain/structure/modules/biasunit.py | a85d6212c196f190500ec537e31bc49c033bddc8 | [
"MIT"
] | permissive | ishatserka/MachineLearningAndDataAnalysisCoursera | cdf0f23a58617e17d6b938e3a9df17daae8585e4 | e82e772df2f4aec162cb34ac6127df10d14a625a | refs/heads/master | 2021-09-11T01:39:26.228392 | 2018-04-05T14:33:39 | 2018-04-05T14:33:39 | 117,153,454 | 0 | 0 | MIT | 2018-03-27T05:20:37 | 2018-01-11T21:05:33 | Python | UTF-8 | Python | false | false | 387 | py | __author__ = 'Tom Schaul, [email protected]'
from neuronlayer import NeuronLayer
from module import Module
class BiasUnit(NeuronLayer):
"""A simple bias unit with a single constant output."""
dim = 1
def __init__(self, name=None):
        Module.__init__(self, 0, 1, name=name)
def _forwardImplementation(self, inbuf, outbuf):
outbuf[:] = 1 | [
"[email protected]"
] | |
c045d46e66a4f04621788b28c78761a1223a3cee | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/uon.py | 949b044bc5d13f037b1b044c161086ca0f3cc3ab | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
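# Interpreter for a tiny generated language: each input line must start with
# the keyword 'uON' followed by a payload whose double quotes are standalone,
# whitespace-separated tokens, e.g.:  uON " hello world "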
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
            data = line.split()
            if not data:  # skip blank lines instead of raising IndexError
                continue
            if data[0] == 'uON':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
83746c2f8f8c278e1647782695f53eca9c354ff6 | 249ef744ece1167f6e5bf7edc0b3ce5b1a31e66a | /EnvCmp.py | baea40b8a0e3cacfae52fdb69bb066dfb621da57 | [] | no_license | ftakanashi/EnvCmpWeb | dba2b9bec6a0c3dc5a2a623f52257c3a56ca957c | 692164d341ae7d6aeff946a7e89c3ceabc96b6e3 | refs/heads/master | 2020-03-21T17:41:47.209949 | 2018-07-11T11:04:25 | 2018-07-11T11:04:25 | 138,847,521 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,275 | py | #!/usr/bin/env python
# coding=utf8
# @author:weiyz
import os
import uuid
import threading
import tarfile
import shutil
import socket
import time
from filecmp import dircmp
### beg 20180111 reset the default encoding to GBK so Chinese characters can be read ###
import sys
reload(sys)
sys.setdefaultencoding('gbk')
### end 20180111 ###
def get_sub_dic(lst, key):
for dic in lst:
if not isinstance(dic, dict):
continue
if dic.has_key(key):
return dic
def path2json(path, root_dir, res, type):
simple_path = path.split(root_dir, 1)[1]
info = simple_path.lstrip(os.sep).split(os.sep, 1)
if len(info) > 1:
nlv, rest = info
sub_dic = get_sub_dic(res[root_dir], nlv)
if sub_dic:
path2json(simple_path, nlv, sub_dic, type)
else:
res[root_dir].append({nlv: []})
path2json(simple_path, nlv, get_sub_dic(res[root_dir], nlv), type)
else:
# print type,path
if type == 'f':
res[root_dir].append(info[0])
elif type == 'd':
res[root_dir].append({info[0]: []})
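# Worked example (POSIX separators assumed):
#   res = {'a': []}
#   path2json('a/b/c.txt', 'a', res, 'f')   # res becomes {'a': [{'b': ['c.txt']}]}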
class EnviromentCompare:
def __init__(self, dir1, dir2, dir1_flag, dir2_flag):
self.left_only_whole = []
self.right_only_whole = []
self.differing = []
self.dir1 = dir1
self.dir1_flag = dir1_flag
self.dir2 = dir2
self.dir2_flag = dir2_flag
# print repr(dir1),repr(dir2)
def getLeftOnly(self):
res = {self.getLeftDir(): []}
for item in self.left_only_whole:
type = 'f' if os.path.isfile(item) else 'd'
path2json(item, self.getLeftDir(), res, type)
if self.dir1_flag:
res[self.getFullLeftDir()] = res[self.getLeftDir()]
del res[self.getLeftDir()]
return res
def getRightOnly(self):
res = {self.getRightDir(): []}
for item in self.right_only_whole:
type = 'f' if os.path.isfile(item) else 'd'
path2json(item, self.getRightDir(), res, type)
if self.dir2_flag:
res[self.getFullRightDir()] = res[self.getRightDir()]
del res[self.getRightDir()]
return res
def getFormatDiffering(self):
res = {self.getLeftDir(): []}
for item, _ in self.differing:
type = 'f' if os.path.isfile(item) else 'd'
path2json(item, self.getLeftDir(), res, type)
        # path2json recurses by node name, so it cannot be given an absolute path;
        # compute with the relative root first, then rename the root key to the
        # absolute path afterwards.
if self.dir1_flag:
res[self.getFullLeftDir()] = res[self.getLeftDir()]
del res[self.getLeftDir()]
elif self.dir2_flag:
res[self.getFullRightDir()] = res[self.getLeftDir()]
del res[self.getLeftDir()]
else:
pass
return res
def getDiffering(self):
for item, _ in self.differing:
# print item.split(self.getLeftDir())[1]
yield item.split(self.getLeftDir())[1]
def getLeftDir(self):
return os.path.basename(self.dir1).encode('gbk')
def getFullLeftDir(self):
return self.dir1
def getRightDir(self):
return os.path.basename(self.dir2).encode('gbk')
def getFullRightDir(self):
return self.dir2
def textResult(self):
        txt = u''
        txt += u'=============================\nFiles/directories only in %s' % self.getLeftDir()
        if self.left_only_whole:
            for item in self.left_only_whole:
                txt += u'\n%s' % item
        else:
            txt += u'\nNone.'
        txt += u'\n============================\nFiles/directories only in %s' % self.getRightDir()
        if self.right_only_whole:
            for item in self.right_only_whole:
                try:
                    txt += u'\n%s' % item
                except Exception,e:
                    txt += u'\n%s' % repr(item)
        else:
            txt += u'\nNone.'
        txt += u'\n=============================\nFiles with the same name but different content (directories excluded)'
        differing = list(self.getDiffering())  # getDiffering() returns a generator, which is always truthy unless materialized
        if differing:
            for item in differing:
                txt += u'\n%s' % item
        else:
            txt += u'\nNone.'
        txt += u'\n\n\n============================================================'
        return txt
def compare(self, dir1, dir2):
dcmp = dircmp(dir1, dir2)
if dcmp.left_only:
for fi in dcmp.left_only:
try:
self.left_only_whole.append(os.path.join(dir1, fi).encode('gbk'))
except UnicodeEncodeError,e:
                    print repr(os.path.join(dir1,fi)) + u' has a complicated encoding'
raise
except Exception,e:
                    print repr(os.path.join(dir1,fi)) + u' failed'
raise
if dcmp.right_only:
for fi in dcmp.right_only:
try:
self.right_only_whole.append(os.path.join(dir2, fi).encode('gbk'))
except UnicodeEncodeError,e:
                    print repr(os.path.join(dir2,fi)) + u' has a complicated encoding'
except Exception,e:
                    print repr(os.path.join(dir2,fi)) + u' failed'
raise
if dcmp.diff_files:
for fi in dcmp.diff_files:
try:
self.differing.append((os.path.join(dir1, fi).encode('gbk'), os.path.join(dir2, fi).encode('gbk')))
except Exception,e:
                    print repr(os.path.join(dir1,fi)),repr(os.path.join(dir2,fi)) + u' failed'
raise
if dcmp.common_dirs:
for di in dcmp.common_dirs:
try:
self.compare(os.path.join(dir1, di), os.path.join(dir2, di))
except Exception,e:
                    # print repr(os.path.join(dir1,di)),repr(os.path.join(dir2,di))  # the error is already reported at the failing file; no need to log the directories recursed into above
raise
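# Usage sketch (paths are placeholders):
#   ec = EnviromentCompare(u'/tmp/envA', u'/tmp/envB', False, False)
#   ec.compare(ec.dir1, ec.dir2)
#   print ec.textResult()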
class DownloadThread(threading.Thread):
def __init__(self, dir):
threading.Thread.__init__(self)
self.dir = dir
self.id = str(uuid.uuid1())[:4]
def setTrans(self,trans):
self.trans = trans
return self
def setSftp(self, sftp):
self.sftp = sftp
return self
def setSsh(self, ssh):
self.ssh = ssh
return self
def setChannel(self,channel):
self.channel = channel
return self
def run(self):
ssh = self.ssh
sftp = self.sftp
basedir = os.path.basename(sftp.mydir)
remoteTarName = '/tmp/%s@@%s.tar.gz' % (basedir, sftp.myip)
with open(self.id, 'w') as f:
            f.write('packing...')
relativedir, base = os.path.dirname(sftp.mydir), os.path.basename(sftp.mydir)
# stdin, stdout, stderr = ssh.exec_command('cd %s;tar -cvzf \'%s\' %s' % (relativedir, remoteTarName, base),
# get_pty=True)
# err = stderr.read()
# if err != '':
# raise Exception(err)
cmd = 'cd %s;tar -cvzf \'%s\' %s' % (relativedir, remoteTarName, base)
self.channel.send(cmd+'\n')
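        # Drain remote output every 5s; the code treats a recv() timeout as a
        # sign that the remote tar command has finished.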
while 1:
time.sleep(5)
try:
self.channel.recv(65535)
except socket.timeout,e:
break
sftp.get(remoteTarName, os.path.join('Compare', os.path.basename(remoteTarName)), callback=self.writeProcess)
ssh.exec_command('rm -f %s' % remoteTarName)
        # extract the archive
        with open(self.id, 'w') as f:
            f.write('extracting...')
if os.path.isdir(os.path.join('Compare',os.path.basename(remoteTarName).rsplit('.',2)[0])):
shutil.rmtree(os.path.join('Compare',os.path.basename(remoteTarName).rsplit('.',2)[0]))
os.mkdir(os.path.join('Compare',os.path.basename(remoteTarName).rsplit('.',2)[0]))
tar = tarfile.open(os.path.join('Compare', os.path.basename(remoteTarName)))
for fi in tar.getnames():
            ##### 20180201 handle extraction failures of various causes and report the error -- begin #####
try:
tar.extract(fi, path=os.path.join('Compare',os.path.basename(remoteTarName).rsplit('.',2)[0]))
except Exception,e:
if os.path.splitext(fi)[1] in ('.txt','.doc','.docx'):
                    # ignore the errors these documentation-type files raise when they fail to extract
continue
with open(self.id,'w') as f:
                    f.write('EFF:failed to extract file %s: %s' % (repr(fi),str(e)))
shutil.rmtree(os.path.join('Compare',os.path.basename(remoteTarName).rsplit('.',2)[0]))
return
##### end #####
        self.ssh.close()
        self.sftp.close()
        self.channel.close()  # the original closed the channel twice; once is enough
with open(self.id, 'w') as f:
f.write('EFF')
def writeProcess(self, done, total):
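        # '%.4s' keeps at most four characters of the number, e.g. 12.3456 -> '12.3%'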
with open(self.id, 'w') as f:
percent = '%.4s%%' % (float(done) * 100 / total)
f.write(percent)
| [
"[email protected]"
] | |
a472871209ba36dddc71717632c40210ffcdb40d | ad0d092d26b126ebf9c5f79a0254e79320a4d732 | /avgtest_pico.py | 8801fc479c10cafaf34933348fe63907e7b0caf7 | [
"MIT"
] | permissive | peterhinch/micropython-filters | be9bcbc3ace92987a1ef23fca5faa90af2391d35 | 4e0b737574073bab36ec1c776e8dfb80b8fe5f9f | refs/heads/master | 2022-02-07T23:01:52.492287 | 2022-01-26T18:29:34 | 2022-01-26T18:29:34 | 30,693,766 | 59 | 13 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | # Demo program for moving average filter
# Author: Peter Hinch
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2021 Peter Hinch
# 16th Dec 2021
import array
from time import ticks_us, ticks_diff
from avg_pico import avg
data = array.array('i', (0 for _ in range(19))) # Average over 16 samples
data[0] = len(data)
def test():
for x in range(16):
print(avg(data, 1000, 4)) # Scale by 4 bits (divide by 16)
for x in range(18):
print(avg(data, 0, 4))
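# Expected behaviour (assuming avg() is the 16-sample moving average from
# avg_pico): the first loop ramps the output from ~62 toward 1000 as the
# window fills; the second loop decays it back toward 0.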
def timing():
t = ticks_us()
avg(data, 10, 4)
t1 = ticks_diff(ticks_us(), t) # Time for one call with timing overheads
t = ticks_us()
avg(data, 10, 4)
avg(data, 10, 4)
t2 = ticks_diff(ticks_us(), t) # Time for two calls with timing overheads
print(t2-t1,"uS") # Time to execute the avg() call
test()
print("Timing test")
timing()
| [
"[email protected]"
] | |
9ba88f9b386ad10642120c245525438f08dd9c0b | d15bdaddab59d1cfea76790004cbad3e5f0c2c55 | /batkin/build_isolated/move_slow_and_clear/catkin_generated/generate_cached_setup.py | e0ab70b36efd40a0bb19f5981e56650cf00a794b | [] | no_license | gychen-n/robot | 4265a1ff469d22550b6b537d1c81aa846ee7641a | 0663a33aea2c2de9e3ac5863307619091e5b5959 | refs/heads/main | 2023-04-10T13:32:06.623682 | 2021-04-16T00:41:04 | 2021-04-16T00:41:04 | 358,431,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,820 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/kinetic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/kinetic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/robot/batkin/devel_isolated/loop_path_planner;/home/robot/batkin/devel_isolated/dwa_local_planner;/home/robot/batkin/devel_isolated/clear_costmap_recovery;/home/robot/batkin/devel_isolated/carrot_planner;/home/robot/batkin/devel_isolated/base_local_planner;/home/robot/batkin/devel_isolated/nav_core;/home/robot/batkin/devel_isolated/costmap_2d;/home/robot/batkin/devel_isolated/voxel_grid;/home/robot/batkin/devel_isolated/turtlebot_teleop;/home/robot/batkin/devel_isolated/turtlebot_rapps;/home/robot/batkin/devel_isolated/turtlebot_navigation;/home/robot/batkin/devel_isolated/turtlebot_follower;/home/robot/batkin/devel_isolated/turtlebot_description;/home/robot/batkin/devel_isolated/turtlebot_capabilities;/home/robot/batkin/devel_isolated/turtlebot_calibration;/home/robot/batkin/devel_isolated/turtlebot_bringup;/home/robot/batkin/devel_isolated/turtlebot_apps;/home/robot/batkin/devel_isolated/turtlebot_actions;/home/robot/batkin/devel_isolated/turtlebot;/home/robot/batkin/devel_isolated/tl740d;/home/robot/batkin/devel_isolated/stim;/home/robot/batkin/devel_isolated/stereo_image_proc;/home/robot/batkin/devel_isolated/spacenav_node;/home/robot/batkin/devel_isolated/slam_gmapping;/home/robot/batkin/devel_isolated/simulation_launch;/home/robot/batkin/devel_isolated/rviz_imu_plugin;/home/robot/batkin/devel_isolated/rslidar_sync;/home/robot/batkin/devel_isolated/rslidar_pointcloud;/home/robot/batkin/devel_isolated/rslidar_driver;/home/robot/batkin/devel_isolated/rslidar_msgs;/home/robot/batkin/devel_isolated/rslidar;/home/robot/batkin/devel_isolated/rbx1_apps;/home/robot/batkin/devel_isolated/ps3joy;/home/robot/batkin/devel_isolated/pointcloud_to_laserscan;/home/robot/batkin/devel_isolated/path_rviz_plugin;/home/robot/batkin/devel_isolated/path_server;/home/robot/batkin/devel_isolated/gmapping;/home/robot/batkin/devel_isolated/openslam_gmapping;/home/robot/batkin/devel_isolated/navigation;/home/robot/batkin/devel_isolated/map_server;/home/robot/batkin/devel_isolated/location_fusion;/home/robot/batkin/devel_isolated/joystick_drivers;/home/robot/batkin/devel_isolated/joy_to_twist;/home/robot/batkin/devel_isolated/joy;/home/robot/batkin/devel_isolated/image_view;/home/robot/batkin/devel_isolated/image_rotate;/home/robot/batkin/devel_isolated/image_publisher;/home/robot/batkin/devel_isolated/image_proc;/home/robot/batkin/devel_isolated/image_pipeline;/home/robot/batkin/devel_isolated/freenect_stack;/home/robot/batkin/devel_isolated/freenect_launch;/home/robot/batkin/devel_isolated/freenect_camera;/home/robot/batkin/devel_isolated/fake_localization;/home/robot/batkin/devel_isolated/depth_image_proc;/home/robot/batkin/devel_isolated/dashgo_driver;/home/robot/batkin/devel_isolated/cartographer_rviz;/home/robot/batkin/devel_isolated/cartographer_ros;/home/robot/batkin/devel_isolated/cartographer_ros_msgs;/home/robot/batkin/devel_isolated/camera_calibration;/home/robot/batkin/devel_isolated/autolabor_test_launch;/home/robot/batkin/devel_isolated/autolabor_simulation_object;/home/robot/batkin/devel_isolated/autolabor_simulation_stage;/home/robot/batkin/devel_isolated/autolabor_simulation_location;/home/robot/batkin/devel_isolated/autolabor_simulation_lidar;/home/robot/batkin/devel_isolated/autolabor_simulation_base;/home/robot/batkin/devel_isolated/autolabor_navigation_launch;/home/robot/batkin/devel_isolated/autolabor_keyboard_control;/home/robot/batkin/devel_isolated/autolabor_description;/home/robot/batkin/devel_isolated/ah100b;/home/robot/catkin_ws/devel;/opt/ros/kinetic'.split(';'
):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/robot/batkin/devel_isolated/move_slow_and_clear/env.sh')
output_filename = '/home/robot/batkin/build_isolated/move_slow_and_clear/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"[email protected]"
] | |
577618aff1bb4740987b1b69a5f59941d0390c84 | c3897f243ee55949de5a12d5e0952c393b9166b2 | /v0.5.0/google/cloud_v2.512/resnet-tpuv2-512/code/resnet/model/staging/models/rough/nmt_gpu/nmt.py | 8b52d4d22d6aceee063d64099deb7ebe702f584e | [
"Apache-2.0"
] | permissive | myelintek/results | f189267475ee1fc2d02f6e0572d2185bfd8a1acd | 11c38436a158c453e3011f8684570f7a55c03330 | refs/heads/master | 2020-05-29T19:28:54.838290 | 2019-07-31T05:58:31 | 2019-07-31T05:58:31 | 189,329,589 | 0 | 0 | Apache-2.0 | 2019-05-30T02:14:08 | 2019-05-30T02:14:07 | null | UTF-8 | Python | false | false | 46,512 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import time
import tensorflow as tf
from mlperf_compliance import mlperf_log
import estimator
from utils import evaluation_utils
from utils import iterator_utils
from utils import misc_utils as utils
from utils import vocab_utils
from variable_mgr import constants
utils.check_tensorflow_version()
FLAGS = None
# LINT.IfChange
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument(
"--encoder_type",
type=str,
default="gnmt",
help="""\
uni | bi | gnmt.
For bi, we build num_encoder_layers/2 bi-directional layers.
For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
uni-directional layers.\
""")
parser.add_argument(
"--residual",
type="bool",
nargs="?",
const=True,
default=True,
help="Whether to add residual connections.")
parser.add_argument("--time_major", type="bool", nargs="?", const=True,
default=True,
help="Whether to use time-major mode for dynamic RNN.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# attention mechanisms
parser.add_argument(
"--attention",
type=str,
default="normed_bahdanau",
help="""\
luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
attention\
""")
parser.add_argument(
"--attention_architecture",
type=str,
default="gnmt_v2",
help="""\
standard | gnmt | gnmt_v2.
standard: use top layer to compute attention.
gnmt: GNMT style of computing attention, use previous bottom layer to
compute attention.
gnmt_v2: similar to gnmt, but use current bottom layer to compute
attention.\
""")
parser.add_argument(
"--output_attention", type="bool", nargs="?", const=True,
default=True,
help="""\
Only used in standard attention_architecture. Whether use attention as
the cell output at each timestep.
.\
""")
parser.add_argument(
"--pass_hidden_state", type="bool", nargs="?", const=True,
default=True,
help="""\
Whether to pass encoder's hidden state to decoder when using an attention
based model.\
""")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=5e-4,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument("--warmup_steps", type=int, default=0,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_scheme", type=str, default="", help="""\
How we decay learning rate. Options include:
luong234: after 2/3 num train steps, we start halving the learning rate
for 4 times before finishing.
luong5: after 1/2 num train steps, we start halving the learning rate
for 5 times before finishing.\
luong10: after 1/2 num train steps, we start halving the learning rate
for 10 times before finishing.\
""")
parser.add_argument(
"--num_train_steps", type=int, default=100000, help="Num steps to train.")
parser.add_argument(
"--max_train_epochs", type=int, default=8, help="Max number of epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=4068191,
help="Number of examples in one epoch")
parser.add_argument(
"--target_bleu", type=float, default=22.0, help="Target bleu.")
parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
const=True,
default=True,
help=("Whether try colocating gradients with "
"corresponding op"))
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="",
help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--dev_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Dev prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014.tok.bpe.32000",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--output_dir", type=str, default="",
help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument(
"--embed_prefix",
type=str,
default=None,
help="""\
Pretrained embedding prefix, expect files with src/tgt suffixes.
The embedding files should be Glove formatted txt files.\
""")
parser.add_argument("--sos", type=str, default="<s>",
help="Start-of-sentence symbol.")
parser.add_argument("--eos", type=str, default="</s>",
help="End-of-sentence symbol.")
parser.add_argument(
"--share_vocab",
type="bool",
nargs="?",
const=True,
default=True,
help="""\
Whether to use the source vocab and embeddings for both source and
target.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=50,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=50,
help="Max length of tgt sequences during training.")
parser.add_argument("--src_max_len_infer", type=int, default=None,
help="Max length of src sequences during inference.")
parser.add_argument("--tgt_max_len_infer", type=int, default=80,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--unit_type", type=str, default="lstm",
help="lstm | gru | layer_norm_lstm | nas")
parser.add_argument("--forget_bias", type=float, default=1.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=5,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument("--max_train", type=int, default=0,
help="Limit on the size of training data (0: no limit).")
parser.add_argument(
"--num_buckets",
type=int,
default=1,
help="Put data into similar-length buckets.")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Experimental encoding feature.
parser.add_argument("--use_char_encode", type="bool", default=False,
help="""\
Whether to split each word or bpe into character, and then
generate the word-level representation from the character
                      representation.
""")
# Misc
parser.add_argument(
"--save_checkpoints_steps", type=int, default=1000,
help="save_checkpoints_steps")
parser.add_argument(
"--num_gpus", type=int, default=1, help="Number of gpus in each worker.")
parser.add_argument(
"--log_device_placement",
type="bool",
nargs="?",
const=True,
default=True,
help="Debug GPU allocation.")
parser.add_argument("--steps_per_external_eval", type=int, default=None,
help="""\
How many training steps to do per external evaluation. Automatically set
based on data if None.\
""")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=1,
help="Random seed (>0, set a specific seed).")
parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
const=True, default=False,
help="Override loaded hparams with values specified")
parser.add_argument("--num_keep_ckpts", type=int, default=5,
help="Max number of checkpoints to keep.")
parser.add_argument("--avg_ckpts", type="bool", nargs="?",
const=True, default=False, help=("""\
Average the last N checkpoints for external evaluation.
N can be controlled by setting --num_keep_ckpts.\
"""))
parser.add_argument("--language_model", type="bool", nargs="?",
const=True, default=False,
help="True to train a language model, ignoring encoder")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument("--inference_input_file", type=str, default=None,
help="Set to the text to decode.")
parser.add_argument("--inference_list", type=str, default=None,
help=("A comma-separated list of sentence indices "
"(0-based) to decode."))
parser.add_argument(
"--infer_batch_size",
type=int,
default=64,
help="Batch size for inference mode.")
parser.add_argument("--detokenizer_file", type=str,
default="",
help=("""Detokenizer script file."""))
parser.add_argument("--use_borg", type="bool", default=False)
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
beam width when using beam search decoder. If 0 (default), use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
parser.add_argument("--sampling_temperature", type=float,
default=0.0,
help=("""\
Softmax sampling temperature for inference decoding, 0.0 means greedy
decoding. This option is ignored when using beam search.\
"""))
parser.add_argument("--num_translations_per_input", type=int, default=1,
help=("""\
Number of translations generated for each sentence. This is only used for
inference.\
"""))
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
parser.add_argument("--num_workers", type=int, default=1,
help="Number of workers (inference only).")
parser.add_argument("--num_inter_threads", type=int, default=0,
help="number of inter_op_parallelism_threads")
parser.add_argument("--num_intra_threads", type=int, default=0,
help="number of intra_op_parallelism_threads")
# Fp16
parser.add_argument("--use_fp16", type="bool", default=False,
help="use_fp16 for training and inference")
parser.add_argument(
"--fp16_loss_scale",
type=float,
default=128,
help="If fp16 is enabled, the loss is multiplied by this amount "
"right before gradients are computed, then each gradient "
"is divided by this amount. Mathematically, this has no "
"effect, but it helps avoid fp16 underflow. Set to 1 to "
"effectively disable.")
parser.add_argument(
"--enable_auto_loss_scale",
type="bool",
default=False,
help="If True and use_fp16 is True, automatically adjust the "
"loss scale during training.")
parser.add_argument(
"--fp16_inc_loss_scale_every_n",
type=int,
default=1000,
help="If fp16 is enabled and enable_auto_loss_scale is "
"True, increase the loss scale every n steps.")
parser.add_argument(
"--check_tower_loss_numerics",
type="bool",
default=False, # Set to false for xla.compile()
help="whether to check tower loss numerics")
parser.add_argument(
"--use_fp32_batch_matmul",
type="bool",
default=True,
help="Whether to use fp32 batch matmul")
# Performance
# XLA
parser.add_argument(
"--force_inputs_padding",
type="bool",
default=False,
help="Force padding input batch to src_max_len and tgt_max_len")
parser.add_argument(
"--use_xla",
type="bool",
default=False,
help="Use xla to compile a few selected locations, mostly Defuns.")
parser.add_argument(
"--xla_compile",
type="bool",
default=False,
help="Use xla.compile() for each tower's fwd and bak pass.")
parser.add_argument(
"--use_autojit_xla",
type="bool",
default=False,
help="Use auto jit xla.")
# GPU knobs
parser.add_argument(
"--use_pintohost_optimizer",
type="bool",
default=False,
help="whether to use PinToHost optimizer")
parser.add_argument(
"--use_cudnn_lstm",
type="bool",
default=False,
help="whether to use cudnn_lstm for encoder, non residual layers")
parser.add_argument(
"--use_loose_bidi_cudnn_lstm",
type="bool",
default=False,
help="whether to use loose bidi cudnn_lstm")
parser.add_argument(
"--use_fused_lstm",
type="bool",
default=False,
help="whether to use fused lstm and variant. If enabled, training will "
"use LSTMBlockFusedCell, infer will use LSTMBlockCell when appropriate.")
parser.add_argument(
"--use_fused_lstm_dec",
type="bool",
default=False,
help="whether to use fused lstm for decoder (training only).")
parser.add_argument(
"--gpu_indices",
type=str,
default="",
help="Indices of worker GPUs in ring order")
parser.add_argument(
"--gpu_thread_mode",
type=str,
default="global",
help="Methods to assign GPU host work to threads. "
"global: all GPUs and CPUs share the same global threads; "
"gpu_private: a private threadpool for each GPU; "
"gpu_shared: all GPUs share the same threadpool.")
parser.add_argument(
"--per_gpu_thread_count",
type=int,
default=0,
help="The number of threads to use for GPU. Only valid when "
"gpu_thread_mode is not global.")
parser.add_argument(
"--sync_on_finish",
type="bool",
default=False,
help="Enable/disable whether the devices are synced after each "
"step.")
parser.add_argument(
"--force_gpu_compatible",
type="bool",
default=False,
help="whether to enable force_gpu_compatible in GPU_Options")
# Graph knobs
parser.add_argument("--parallel_iterations", type=int, default=10,
help="number of parallel iterations in dynamic_rnn")
parser.add_argument("--use_dist_strategy", type="bool", default=False,
help="whether to use distribution strategy")
parser.add_argument(
"--hierarchical_copy",
type="bool",
default=False,
help="Use hierarchical copies. Currently only optimized for "
"use on a DGX-1 with 8 GPUs and may perform poorly on "
"other hardware. Requires --num_gpus > 1, and only "
"recommended when --num_gpus=8")
parser.add_argument(
"--network_topology",
type=constants.NetworkTopology,
default=constants.NetworkTopology.DGX1,
choices=list(constants.NetworkTopology))
parser.add_argument(
"--enable_layout_optimizer",
type="bool",
default=False,
help="whether to enable layout optimizer")
parser.add_argument(
"--use_block_lstm",
type="bool",
default=False,
help="whether to use block lstm")
parser.add_argument(
"--use_defun",
type="bool",
default=False,
help="whether to use Defun")
# Gradient tricks
parser.add_argument(
"--gradient_repacking",
type=int,
default=0,
help="Use gradient repacking. It"
"currently only works with replicated mode. At the end of"
"of each step, it repacks the gradients for more efficient"
"cross-device transportation. A non-zero value specifies"
"the number of split packs that will be formed.")
parser.add_argument(
"--compact_gradient_transfer",
type="bool",
default=True,
help="Compact gradient as much as possible for cross-device transfer and "
"aggregation.")
parser.add_argument(
"--all_reduce_spec",
type=str,
default="nccl",
help="A specification of the all_reduce algorithm to be used "
"for reducing gradients. For more details, see "
"parse_all_reduce_spec in variable_mgr.py. An "
"all_reduce_spec has BNF form:\n"
"int ::= positive whole number\n"
"g_int ::= int[KkMGT]?\n"
"alg_spec ::= alg | alg#int\n"
"range_spec ::= alg_spec | alg_spec/alg_spec\n"
"spec ::= range_spec | range_spec:g_int:range_spec\n"
"NOTE: not all syntactically correct constructs are "
"supported.\n\n"
"Examples:\n "
"\"xring\" == use one global ring reduction for all "
"tensors\n"
"\"pscpu\" == use CPU at worker 0 to reduce all tensors\n"
"\"nccl\" == use NCCL to locally reduce all tensors. "
"Limited to 1 worker.\n"
"\"nccl/xring\" == locally (to one worker) reduce values "
"using NCCL then ring reduce across workers.\n"
"\"pscpu:32k:xring\" == use pscpu algorithm for tensors of "
"size up to 32kB, then xring for larger tensors.")
parser.add_argument(
"--agg_small_grads_max_bytes",
type=int,
default=0,
help="If > 0, try to aggregate tensors of less than this "
"number of bytes prior to all-reduce.")
parser.add_argument(
"--agg_small_grads_max_group",
type=int,
default=10,
help="When aggregating small tensors for all-reduce do not "
"aggregate more than this many into one new tensor.")
parser.add_argument(
"--allreduce_merge_scope",
type=int,
default=1,
help="Establish a name scope around this many "
"gradients prior to creating the all-reduce operations. "
"It may affect the ability of the backend to merge "
"parallel ops.")
# Other knobs
parser.add_argument(
"--local_parameter_device",
type=str,
default="gpu",
help="Device to use as parameter server: cpu or gpu. For "
"distributed training, it can affect where caching of "
"variables happens.")
parser.add_argument(
"--autotune_threshold",
type=int,
default=0,
help="The autotune threshold for the models")
parser.add_argument(
"--datasets_num_private_threads",
type=int,
default=None,
help="Number of threads for a private threadpool created for "
"all datasets computation. By default, we pick an "
"appropriate number. If set to 0, we use the default "
"tf-Compute threads for dataset operations.")
parser.add_argument(
"--winograd_nonfused",
type="bool",
default=True,
help="Enable/disable using the Winograd non-fused algorithms.")
parser.add_argument(
"--batchnorm_persistent",
type="bool",
default=True,
help="Enable/disable using the CUDNN_BATCHNORM_SPATIAL_PERSISTENT "
"mode for batchnorm.")
parser.add_argument(
"--device",
type=str,
default="gpu",
help="Device to use for computation: cpu or gpu")
parser.add_argument(
"--allow_growth",
type="bool",
default=False,
help="whether to enable allow_growth in GPU_Options")
parser.add_argument(
"--use_resource_vars",
type="bool",
default=False,
help="Use resource variables instead of normal variables. "
"Resource variables are slower, but this option is useful "
"for debugging their performance.")
# Performance tuning specific to MKL.
parser.add_argument(
"--mkl",
type="bool",
default=False,
help="If true, set MKL environment variables.")
parser.add_argument(
"--kmp_blocktime",
type=int,
default=30,
help="The time, in milliseconds, that a thread should wait, "
"after completing the execution of a parallel region, "
"before sleeping")
parser.add_argument(
"--kmp_affinity",
type=str,
default="granularity=fine,verbose,compact,1,0",
help="Restricts execution of certain threads (virtual execution "
"units) to a subset of the physical processing units in a "
"multiprocessor computer.")
parser.add_argument(
"--kmp_settings", type=int, default=1,
help="If set to 1, MKL settings will be printed.")
# Debug
parser.add_argument("--debug", type="bool", default=False,
help="Debug train and eval")
parser.add_argument("--show_metrics", type="bool", default=True,
help="whether to show detailed metrics")
parser.add_argument("--build_graph_only", type="bool", default=False,
help="whehter or not just building the graph")
parser.add_argument("--clip_grads", type="bool", default=True,
help="whether to clip gradients")
parser.add_argument("--profile", type="bool", default=False,
help="If generate profile")
parser.add_argument("--profile_save_steps", type=int, default=10,
help="Save timeline every N steps.")
# TPU
parser.add_argument("--use_dynamic_rnn", type="bool", default=True)
parser.add_argument("--master", type=str, default="")
parser.add_argument("--use_synthetic_data", type="bool", default=False)
parser.add_argument(
"--iterations_per_loop",
type=int,
default=100,
help="the number of iterations to run on TPU before returning to host")
parser.add_argument(
"--mode", type=str, default="train_and_eval",
choices=["train", "train_and_eval", "infer"])
parser.add_argument(
"--run_name",
type=str,
default="",
help=
"if set, load ckpt from /gs://ij-d/home/mlperf-nmt/'run_name'"
)
def create_hparams(flags):
"""Create training hparams."""
return tf.contrib.training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=os.path.join(flags.data_dir, flags.train_prefix),
dev_prefix=os.path.join(flags.data_dir, flags.dev_prefix),
test_prefix=os.path.join(flags.data_dir, flags.test_prefix),
vocab_prefix=os.path.join(flags.data_dir, flags.vocab_prefix),
embed_prefix=flags.embed_prefix,
output_dir=flags.output_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
unit_type=flags.unit_type,
encoder_type=flags.encoder_type,
residual=flags.residual,
time_major=flags.time_major,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Attention mechanisms
attention=flags.attention,
attention_architecture=flags.attention_architecture,
output_attention=flags.output_attention,
pass_hidden_state=flags.pass_hidden_state,
# Train
optimizer=flags.optimizer,
num_train_steps=flags.num_train_steps,
max_train_epochs=flags.max_train_epochs,
num_examples_per_epoch=flags.num_examples_per_epoch,
target_bleu=flags.target_bleu,
label_smoothing=flags.label_smoothing,
batch_size=flags.batch_size,
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_scheme=flags.decay_scheme,
colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
# Data constraints
num_buckets=flags.num_buckets,
max_train=flags.max_train,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
detokenizer_file=flags.detokenizer_file,
use_borg=flags.use_borg,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
sampling_temperature=flags.sampling_temperature,
num_translations_per_input=flags.num_translations_per_input,
# Vocab
sos=flags.sos if flags.sos else vocab_utils.SOS,
eos=flags.eos if flags.eos else vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
use_char_encode=flags.use_char_encode,
# Misc
forget_bias=flags.forget_bias,
num_gpus=flags.num_gpus,
save_checkpoints_steps=flags.save_checkpoints_steps,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
steps_per_external_eval=flags.steps_per_external_eval,
share_vocab=flags.share_vocab,
log_device_placement=flags.log_device_placement,
random_seed=flags.random_seed,
override_loaded_hparams=flags.override_loaded_hparams,
num_keep_ckpts=flags.num_keep_ckpts,
avg_ckpts=flags.avg_ckpts,
language_model=flags.language_model,
num_intra_threads=flags.num_intra_threads,
num_inter_threads=flags.num_inter_threads,
# Fp16
use_fp16=flags.use_fp16,
fp16_loss_scale=flags.fp16_loss_scale,
enable_auto_loss_scale=flags.enable_auto_loss_scale,
fp16_inc_loss_scale_every_n=flags.fp16_inc_loss_scale_every_n,
check_tower_loss_numerics=flags.check_tower_loss_numerics,
use_fp32_batch_matmul=flags.use_fp32_batch_matmul,
# Performance
# GPU knbs
force_inputs_padding=flags.force_inputs_padding,
use_xla=flags.use_xla,
xla_compile=flags.xla_compile,
use_autojit_xla=flags.use_autojit_xla,
use_pintohost_optimizer=flags.use_pintohost_optimizer,
use_cudnn_lstm=flags.use_cudnn_lstm,
use_loose_bidi_cudnn_lstm=flags.use_loose_bidi_cudnn_lstm,
use_fused_lstm=flags.use_fused_lstm,
use_fused_lstm_dec=flags.use_fused_lstm_dec,
gpu_indices=flags.gpu_indices,
gpu_thread_mode=flags.gpu_thread_mode,
per_gpu_thread_count=flags.per_gpu_thread_count,
sync_on_finish=flags.sync_on_finish,
force_gpu_compatible=flags.force_gpu_compatible,
# Graph knobs
parallel_iterations=flags.parallel_iterations,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_dist_strategy=flags.use_dist_strategy,
hierarchical_copy=flags.hierarchical_copy,
network_topology=flags.network_topology,
enable_layout_optimizer=flags.enable_layout_optimizer,
use_block_lstm=flags.use_block_lstm,
# Grad tricks
gradient_repacking=flags.gradient_repacking,
compact_gradient_transfer=flags.compact_gradient_transfer,
all_reduce_spec=flags.all_reduce_spec,
agg_small_grads_max_bytes=flags.agg_small_grads_max_bytes,
agg_small_grads_max_group=flags.agg_small_grads_max_group,
allreduce_merge_scope=flags.allreduce_merge_scope,
# Other knobs
local_parameter_device=("cpu" if flags.num_gpus ==0
else flags.local_parameter_device),
autotune_threshold=flags.autotune_threshold,
datasets_num_private_threads=flags.datasets_num_private_threads,
winograd_nonfused=flags.winograd_nonfused,
batchnorm_persistent=flags.batchnorm_persistent,
device=flags.device,
allow_growth=flags.allow_growth,
use_resource_vars=flags.use_resource_vars,
mkl=flags.mkl,
kmp_blocktime=flags.kmp_blocktime,
kmp_affinity=flags.kmp_affinity,
kmp_settings=flags.kmp_settings,
# Debug
debug=flags.debug,
build_graph_only=flags.build_graph_only,
clip_grads=flags.clip_grads,
profile=flags.profile,
profile_save_steps=flags.profile_save_steps,
show_metrics=flags.show_metrics,
# TPU
master=flags.master,
use_synthetic_data=flags.use_synthetic_data,
iterations_per_loop=flags.iterations_per_loop,
mode=flags.mode,
run_name=flags.run_name)
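# Example invocation (sketch; data and output paths are placeholders):
#   python nmt.py --data_dir=/path/to/wmt16_de_en --output_dir=/tmp/gnmt \
#     --mode=train_and_eval --num_gpus=8 --batch_size=128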
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
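# e.g. _add_argument(h, "beam_width", 10) overwrites an existing value, while
# _add_argument(h, "best_bleu", 0, update=False) leaves an existing value alone.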
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
raise ValueError("For bi, num_encoder_layers %d should be even" %
hparams.num_encoder_layers)
if (hparams.attention_architecture in ["gnmt"] and
hparams.num_encoder_layers < 2):
raise ValueError("For gnmt attention architecture, "
"num_encoder_layers %d should be >= 2" %
hparams.num_encoder_layers)
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
raise ValueError("sampling_temperature must greater than 0.0 when using"
"sample decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers and hparams.num_decoder_layers
if hparams.num_encoder_layers != hparams.num_decoder_layers:
hparams.pass_hidden_state = False
utils.print_out("Num encoder layer %d is different from num decoder layer"
" %d, so set pass_hidden_state to False" % (
hparams.num_encoder_layers,
hparams.num_decoder_layers))
# Set residual layers
num_encoder_residual_layers = 0
num_decoder_residual_layers = 0
if hparams.residual:
if hparams.num_encoder_layers > 1:
num_encoder_residual_layers = hparams.num_encoder_layers - 1
if hparams.num_decoder_layers > 1:
num_decoder_residual_layers = hparams.num_decoder_layers - 1
if hparams.encoder_type == "gnmt":
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have residual connection due to the input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
# Compatible for GNMT models
if hparams.num_encoder_layers == hparams.num_decoder_layers:
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
# Language modeling
if hparams.language_model:
hparams.attention = ""
hparams.attention_architecture = ""
hparams.pass_hidden_state = False
hparams.share_vocab = True
hparams.src = hparams.tgt
utils.print_out("For language modeling, we turn off attention and "
"pass_hidden_state; turn on share_vocab; set src to tgt.")
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = hparams.vocab_prefix + "." + hparams.src
tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
if hparams.share_vocab:
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
else:
tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
tgt_vocab_file,
hparams.output_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
mlperf_log.gnmt_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
value={"src": src_vocab_size, "tgt": tgt_vocab_size})
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
if hparams.embed_prefix:
src_embed_file = hparams.embed_prefix + "." + hparams.src
tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt
if tf.gfile.Exists(src_embed_file):
utils.print_out(" src_embed_file %s exist" % src_embed_file)
hparams.src_embed_file = src_embed_file
utils.print_out(
"For pretrained embeddings, set num_enc_emb_partitions to 1")
hparams.num_enc_emb_partitions = 1
else:
utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)
if tf.gfile.Exists(tgt_embed_file):
utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
hparams.tgt_embed_file = tgt_embed_file
utils.print_out(
"For pretrained embeddings, set num_dec_emb_partitions to 1")
hparams.num_dec_emb_partitions = 1
else:
utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)
# Evaluation
metric = "bleu"
best_metric_dir = os.path.join(hparams.output_dir, "best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "best_" + metric, 0, update=False)
_add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)
if hparams.avg_ckpts:
best_metric_dir = os.path.join(hparams.output_dir, "avg_best_" + metric)
tf.gfile.MakeDirs(best_metric_dir)
_add_argument(hparams, "avg_best_" + metric, 0, update=False)
_add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from output_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Job
jobid = flags.jobid
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = flags.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
estimator_fn(hparams)
return hparams
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
if FLAGS.use_fp16 and FLAGS.use_dist_strategy:
raise ValueError("use_fp16 and use_dist_strategy aren't compatible")
# Set up hacky envvars.
# Hack that affects Defun in attention_wrapper.py
active_xla_option_nums = np.sum([FLAGS.use_xla, FLAGS.use_autojit_xla,
FLAGS.xla_compile])
if active_xla_option_nums > 1:
raise ValueError(
"Only one of use_xla, xla_compile, use_autojit_xla can be set")
os.environ["use_xla"] = str(FLAGS.use_xla).lower()
if FLAGS.use_xla:
os.environ["use_defun"] = str(True).lower()
else:
os.environ["use_defun"] = str(FLAGS.use_defun).lower()
utils.print_out("use_defun is %s for attention" % os.environ["use_defun"])
# TODO(jamesqin): retire this config after Cuda9.1
os.environ["use_fp32_batch_matmul"] = ("true" if FLAGS.use_fp32_batch_matmul
else "false")
os.environ["xla_compile"] = "true" if FLAGS.xla_compile else "false"
os.environ["force_inputs_padding"] = (
"true" if FLAGS.force_inputs_padding else "false")
if FLAGS.mode == "train":
utils.print_out("Running training mode.")
FLAGS.num_buckets = 5
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "infer":
utils.print_out("Running inference mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 1
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("infer_hparams:")
utils.print_hparams(hparams)
# Run evaluation when there's a new checkpoint
for i, ckpt in enumerate(
evaluation_utils.get_all_checkpoints(FLAGS.output_dir)):
tf.logging.info("Starting to evaluate...")
eval_start = time.time()
bleu_score = estimator.eval_fn(hparams, ckpt)
eval_end = time.time()
utils.print_out("eval time for %d th ckpt: %.2f mins" %
(i, (eval_end - eval_start) / 60.), f=sys.stderr)
else:
assert FLAGS.mode == "train_and_eval"
utils.print_out("Running train and eval mode.")
# Random
random_seed = FLAGS.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed)
np.random.seed(random_seed)
tf.set_random_seed(random_seed)
# Model output directory
output_dir = FLAGS.output_dir
if output_dir and not tf.gfile.Exists(output_dir):
utils.print_out("# Creating output directory %s ..." % output_dir)
tf.gfile.MakeDirs(output_dir)
# Load hparams.
default_hparams = create_hparams(FLAGS)
default_hparams.num_buckets = 5
hparams = create_or_load_hparams(default_hparams, FLAGS.hparams_path)
utils.print_out("training hparams:")
utils.print_hparams(hparams)
with tf.gfile.GFile(os.path.join(output_dir, "train_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(hparams) + "\n")
# The estimator model_fn is written in a way allowing train hparams to be
# passed in infer mode.
infer_hparams = tf.contrib.training.HParams(**hparams.values())
infer_hparams.num_buckets = 1
utils.print_out("infer_hparams:")
utils.print_hparams(infer_hparams)
with tf.gfile.GFile(os.path.join(output_dir, "infer_hparams.txt"), "w") as f:
f.write(utils.serialize_hparams(infer_hparams) + "\n")
epochs = 0
should_stop = epochs >= FLAGS.max_train_epochs
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_LOOP)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_TARGET, value=hparams.target_bleu)
while not should_stop:
utils.print_out("Starting epoch %d" % epochs)
mlperf_log.gnmt_print(key=mlperf_log.TRAIN_EPOCH, value=epochs)
mlperf_log.gnmt_print(
key=mlperf_log.INPUT_SIZE,
value=iterator_utils.get_effective_train_epoch_size(hparams))
mlperf_log.gnmt_print(
key=mlperf_log.TRAIN_CHECKPOINT,
value=("Under " + hparams.output_dir))
try:
train_start = time.time()
estimator.train_fn(hparams)
except tf.errors.OutOfRangeError:
utils.print_out("training hits OutOfRangeError", f=sys.stderr)
train_end = time.time()
utils.print_out("training time for epoch %d: %.2f mins" %
(epochs, (train_end - train_start) / 60.), f=sys.stderr)
# This is probably sub-optimal, doing eval per-epoch
mlperf_log.gnmt_print(key=mlperf_log.EVAL_START)
eval_start = time.time()
bleu_score = estimator.eval_fn(infer_hparams)
eval_end = time.time()
utils.print_out("eval time for epoch %d: %.2f mins" %
(epochs, (eval_end - eval_start) / 60.), f=sys.stderr)
mlperf_log.gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"epoch": epochs, "value": bleu_score})
mlperf_log.gnmt_print(key=mlperf_log.EVAL_STOP, value=epochs)
if FLAGS.debug or bleu_score > FLAGS.target_bleu:
should_stop = True
utils.print_out(
"Stop job since target bleu is reached at epoch %d ." % epochs,
f=sys.stderr)
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": True})
if epochs >= FLAGS.max_train_epochs:
should_stop = True
utils.print_out("Stop job since max_train_epochs is reached.",
f=sys.stderr)
mlperf_log.gnmt_print(mlperf_log.RUN_STOP, {"success": False})
epochs += 1
mlperf_log.gnmt_print(key=mlperf_log.RUN_FINAL)
if __name__ == "__main__":
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
mlperf_log.gnmt_print(key=mlperf_log.RUN_START)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
] | |
65f9e5229c6c9a6a1e020096dd3b187b66fcee09 | e86fa6b618822fc800bdc699b95efd404065509e | /python/practice/matrixMultiplication.py | 475810eb7ae26f8568fb5ce2c80ae20e1d8d212e | [] | no_license | KimYeong-su/programmers | 97b0ba53833176690bf5f87243d4e98402526f86 | caf3bcb824c4199832ca94b073340b8c49ada31d | refs/heads/master | 2021-08-29T07:06:12.936109 | 2021-08-06T12:40:02 | 2021-08-06T12:40:02 | 251,619,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | '''
# remember the difference between zip(a) and the unpacking form zip(*a)..
def solution(arr1, arr2):
return [[sum(a*b for a, b in zip(A_row,B_col)) for B_col in zip(*arr2)] for A_row in arr1]
'''
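# Quick check (added, illustrative): both the zip one-liner in the docstring
# and the triple loop below compute the ordinary matrix product, e.g.
# solution([[1, 2], [3, 4]], [[5, 6], [7, 8]]) == [[19, 22], [43, 50]].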
def solution(arr1, arr2):
answer = [[0 for _ in range(len(arr2[0]))]for _ in range(len(arr1))]
for i in range(len(arr1)):
for j in range(len(arr2[0])):
for k in range(len(arr2)):
answer[i][j] += arr1[i][k]*arr2[k][j]
return answer | [
"[email protected]"
] | |
2bc1edafefe685826dff39c42d7e69e9c67e1523 | 10f2f0c06d213d9272de256d5da47473c885b448 | /Demo3/data_preprocess.py | 898d19be2d4212d97eb9092141a34ab844b524fe | [
"MIT"
] | permissive | kinglai/compare-aggregate | 9cb3d60050dcd7903da1b1836c205694c2b355de | 2544468842ac0f991bede858c6cc4943ae22d640 | refs/heads/master | 2022-02-13T14:00:34.761738 | 2019-07-25T12:45:17 | 2019-07-25T12:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,840 | py | # -*- coding: utf-8 -*-
# @Time : 2019/1/23 14:35
# @Author : Alan
# @Email : [email protected]
# @File : data_preprocess.py
# @Software: PyCharm
import sys
import numpy as np
import random
from collections import namedtuple
import pickle
import time
random.seed(1337)
np.random.seed(1337)
ModelParam = namedtuple("ModelParam",
"hidden_dim,enc_timesteps,dec_timesteps,batch_size,random_size,k_value_ques,k_value_ans,lr")
UNKNOWN_TOKEN = '<UNK>'
PAD_TOKEN = '<PAD>'
class DataGenerator(object):
"""Dataset class
"""
def __init__(self, vocab, model_param, answer_file=""):
self.vocab = vocab
self.param = model_param
self.corpus_amount = 0
if answer_file != "":
            self.answers = pickle.load(open(answer_file, 'rb'))  # bad answers, type: dict
print(self.answers.values())
def padq(self, data):
return self.pad_q(data, self.param.ques_len)
def pada(self, data):
return self.pad_a(data, self.param.ans_len)
def pad_q(self, data, lens=None):
def _pad(seq, lens):
if (len(seq) >= lens):
return seq[:lens], lens
else:
return seq + [0 for i in range(lens - len(seq))], len(seq)
# return pad_sequences(data, maxlen=len, padding='post', truncating='post', value=0)
return map(lambda x: _pad(x, lens), data)
def pad_a(self, data, lens=None):
def _pad(seq, lens):
if (len(seq) >= lens):
return seq[:lens]
else:
return seq + [0 for i in range(lens - len(seq))]
# return pad_sequences(data, maxlen=len, padding='post', truncating='post', value=0)
return map(lambda x: _pad(x, lens), data)
def cut(self, x, y):
if x > y:
return y
else:
return x
def wikiQaGenerate(self, filename, out_file, flag="basic", verbose=True):
# data Type: [([511, 18], [64, 23637, 11818, 44, 23638, 30, 19447, 4486, 28657, 14], 0),(..),..]
data = pickle.load(open(filename, 'rb'))
print(data[:2])
print('raw_data:{}'.format(len(data)))
question_dic = {}
question = list()
question_mask = list()
answer = list()
answer_mask = []
label = list()
for item in data:
            # setdefault: if the dict already contains the given key, return its value;
            # otherwise insert the key with the given default and return that default.
            # Group the answers and labels that belong to the same question.
question_dic.setdefault(str(item[0]), {})
question_dic[str(item[0])].setdefault("question", [])
question_dic[str(item[0])].setdefault("answer", [])
question_dic[str(item[0])].setdefault("label", [])
question_dic[str(item[0])]["question"].append(item[0])
question_dic[str(item[0])]["answer"].append(item[1])
question_dic[str(item[0])]["label"].append(item[2])
delCount = 0
for key in list(question_dic.keys()):
question_dic[key]["question"] = question_dic[key]["question"]
question_dic[key]["answer"] = question_dic[key]["answer"]
if sum(question_dic[key]["label"]) == 0:
delCount += 1
del (question_dic[key])
bad_answers = []
for item in question_dic.values():
bad_ans = [item["answer"][i] for i in range(len(item["question"])) if item["label"][i] == 0]
bad_answers.extend(bad_ans)
        # listwise padding: build a fixed-size candidate list per question
for item in question_dic.values():
good_answer = [item["answer"][i] for i in range(len(item["question"])) if item["label"][i] == 1]
good_length = len(good_answer)
bad_answer = [item["answer"][i] for i in range(len(item["question"])) if item["label"][i] == 0]
bad_answers.extend(bad_answer)
if len(item["answer"]) >= self.param.list_size:
good_answer.extend(random.sample(bad_answer, self.param.list_size - good_length))
temp_answer = good_answer
temp_label = [1 / float(sum(item["label"])) for i in range(good_length)]
# temp_label = [1.0 for i in range(good_length)]
temp_label.extend([0.0 for i in range(self.param.list_size - good_length)])
else:
temp_answer = item["answer"]
temp_answer.extend(random.sample(bad_answers, self.param.list_size - len(item["question"])))
temp_label = [i / float(sum(item["label"])) for i in item["label"]]
# temp_label = item["label"]
temp_label.extend([0.0 for i in range(self.param.list_size - len(item["question"]))])
label.append(temp_label)
answer.append(list(self.pada(temp_answer)))
answer_mask.append([self.cut(len(single_ans), self.param.ans_len) for single_ans in temp_answer])
            # record the (truncated) length of each question
question += [[list(self.padq([item["question"][0]]))[0][0]] * self.param.list_size]
question_mask += [[list(self.padq([item["question"][0]]))[0][1]] * self.param.list_size]
question = np.array(question)
answer = np.array(answer)
label = np.array(label)
question_mask = np.array(question_mask)
answer_mask = np.array(answer_mask)
if (verbose):
print(question[23])
print(question.shape)
print(question_mask[23])
print(question_mask.shape)
print(answer[23])
print(answer.shape)
print(answer_mask[23])
print(answer_mask.shape)
print(label[23])
print(label.shape)
if flag == "size":
return question, answer, label, question_mask, answer_mask
all_out = (question, answer, label, question_mask, answer_mask)
with open(out_file, 'wb') as f:
pickle.dump(all_out, f)
return question, answer, label, question_mask, answer_mask
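    # (added note) wikiQaGenerate groups QA pairs by question, drops questions
    # without any positive answer, pads each candidate list to param.list_size
    # with sampled negative answers, and normalises each list's labels to sum to 1.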
# -------------------------------------------------------------------------------------------------
def trecQaGenerate(self, filename, flag="basic", verbose=False):
        data = pickle.load(open(filename, 'rb'))
question_dic = {}
question = list()
answer = list()
label = list()
question_len = list()
answer_len = list()
answer_size = list()
for item in data:
question_dic.setdefault(str(item[0]), {})
question_dic[str(item[0])].setdefault("question", [])
question_dic[str(item[0])].setdefault("answer", [])
question_dic[str(item[0])].setdefault("label", [])
question_dic[str(item[0])]["question"].append(item[0])
question_dic[str(item[0])]["answer"].append(item[1])
question_dic[str(item[0])]["label"].append(item[2])
delCount = 0
        for key in list(question_dic.keys()):
question_dic[key]["question"] = question_dic[key]["question"]
question_dic[key]["answer"] = question_dic[key]["answer"]
if sum(question_dic[key]["label"]) == 0:
delCount += 1
del (question_dic[key])
for item in question_dic.values():
good_answer = [item["answer"][i] for i in range(len(item["question"])) if item["label"][i] == 1]
good_length = len(good_answer)
            if good_length >= self.param.random_size // 2:
                good_answer = random.sample(good_answer, self.param.random_size // 2)
good_length = len(good_answer)
bad_answer = [item["answer"][i] for i in range(len(item["question"])) if item["label"][i] == 0]
trash_sample = self.param.random_size
if len(bad_answer) >= self.param.random_size - good_length:
good_answer.extend(random.sample(bad_answer, self.param.random_size - good_length))
temp_answer = good_answer
temp_label = [1 / float(good_length) for i in range(good_length)]
temp_label.extend([0.0 for i in range(self.param.random_size - good_length)])
else:
temp_answer = good_answer + bad_answer
trash_sample = len(temp_answer)
                temp_answer.extend(random.sample(list(self.answers.values()), self.param.random_size - len(temp_answer)))
temp_label = [1 / float(len(good_answer)) for i in range(len(good_answer))]
temp_label.extend([0.0 for i in range(self.param.random_size - len(good_answer))])
label.append(temp_label)
            answer.append(list(self.pada(temp_answer)))
length = [1 for i in range(len(item["question"][0]))]
ans_length = [[1 for i in range(len(single_ans))] for single_ans in temp_answer]
            answer_len.append(list(self.pada(ans_length)))
question_len += [[list(self.padq([length]))[0]] * self.param.list_size]
question += [[list(self.padq([item["question"][0]]))[0]] * self.param.list_size]
answer_size += [[1 for i in range(self.param.random_size) if i < trash_sample] + [0 for i in range(
self.param.random_size - trash_sample)]]
question = np.array(question)
answer = np.array(answer)
label = np.array(label)
question_len = np.array(question_len)
answer_len = np.array(answer_len)
answer_size = np.array(answer_size)
if (verbose):
print(question.shape)
print(question_len.shape)
print(answer.shape)
print(answer_len.shape)
print(label.shape)
if flag == "size":
return question, answer, label, question_len, answer_len, answer_size
return question, answer, question_len, answer_len, label
def EvaluateGenerate(self, filename):
        data = pickle.load(open(filename, 'rb'))
question_dic = {}
for item in data:
question_dic.setdefault(str(item[0]), {})
question_dic[str(item[0])].setdefault("question", [])
question_dic[str(item[0])].setdefault("answer", [])
question_dic[str(item[0])].setdefault("label", [])
question_dic[str(item[0])]["question"].append(item[0])
question_dic[str(item[0])]["answer"].append(item[1])
question_dic[str(item[0])]["label"].append(item[2])
delCount = 0
        for key in list(question_dic.keys()):
question_dic[key]["question"] = self.padq(question_dic[key]["question"])
question_dic[key]["answer"] = self.pada(question_dic[key]["answer"])
question_dic[key]["ques_len"] = self.padq(
[[1 for i in range(len(single_que))] for single_que in question_dic[key]["question"]])
question_dic[key]["ans_len"] = self.pada(
[[1 for i in range(len(single_ans))] for single_ans in question_dic[key]["answer"]])
if sum(question_dic[key]["label"]) == 0:
delCount += 1
del (question_dic[key])
print(delCount)
print(len(question_dic))
return question_dic
class DataGenerator2(object):
def __init__(self,params):
self.params = params
def padseq(self,seq_to_pad,pad_to_len):
if(len(seq_to_pad)>=pad_to_len):
return seq_to_pad[:pad_to_len]
else:
seq_to_pad.extend([0 for i in range(pad_to_len-len(seq_to_pad))])
return seq_to_pad
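    # (added) e.g. padseq([3, 1, 4], 5) -> [3, 1, 4, 0, 0]; padseq([3, 1, 4], 2) -> [3, 1]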
def test_listwise_clean(self, test_file, padding=True):
list_size = 30
test_f = pickle.load(open(test_file, 'rb'))
test_size = len(test_f)
question, answer, label = zip(*test_f)
print('raw questions:{}'.format(np.shape(question)))
print('raw answers:{}'.format(np.shape(answer)))
print('raw labels:{}'.format(np.shape(label)))
question_len= list(map(lambda x: [1 for _ in range(len(x))],question))
answer_len= list(map(lambda x: [1 for _ in range(len(x))],answer))
test_dic = dict()
for i, ques in enumerate(question):
test_dic.setdefault(str(ques), [])
test_dic[str(ques)].append([ques, answer[i], question_len[i], answer_len[i], label[i]])
print("size of test_dic: ", len(test_dic))
questions = []
answers = []
questions_len = []
answers_len = []
labels = []
answers_size = []
for k, v in test_dic.items():
ques, ans, ques_len, ans_len, label = zip(*v)
if (np.sum(label)==0): continue
ques = list(map(lambda x: self.padseq(x, self.params.ques_len), ques))
ans = list(map(lambda x: self.padseq(x, self.params.ans_len), ans))
ques_len = list(map(lambda x: self.padseq(x, self.params.ques_len), ques_len))
ans_len = list(map(lambda x: self.padseq(x, self.params.ans_len), ans_len))
            # pad the remaining sentences in each list with zeros
if(padding):
if list_size-len(label) < 0:
print(label)
ques_pad = [[0]*self.params.ques_len]*(list_size-len(label))
ans_pad = [[0]*self.params.ans_len]*(list_size-len(label))
ques.extend(ques_pad)
ans.extend(ans_pad)
ques_len.extend(ques_pad)
ans_len.extend(ans_pad)
label_pad = [0]*(list_size-len(label))
label = list(label)
label.extend(label_pad)
answer_size = [1]*len(label)+[0]*(list_size-len(label))
answers_size.append(answer_size)
questions.append(np.array(ques))
answers.append(np.array(ans))
questions_len.append(np.array(ques_len))
answers_len.append(np.array(ans_len))
labels.append(np.array(label))
questions = np.array(questions)
answers = np.array(answers)
labels = np.array(labels)
questions_len = np.array(questions_len)
answers_len = np.array(answers_len)
answers_size = np.array(answers_size)
#print np.array(questions[100]).shape
print ("questions: ",questions.shape)
print ("questions_len: ",questions_len.shape)
print ("answers: ",answers.shape)
print ("answers_len: ",answers_len.shape)
print ("labels: ",labels.shape)
print ("answers_size: ",answers_size.shape)
all_out = (questions, answers, labels)
with open('./data/wikiqa/self/raw/pre/pre_dev.pkl', 'wb') as f:
pickle.dump(all_out, f)
return (questions, answers, questions_len, answers_len, labels, answers_size) if padding else \
(questions, answers, questions_len, answers_len, labels)
# generate the embedding matrix for the vocabulary
def data_transform(embedding_size):
words = []
with open('data/wikiqa/self/raw/wiki_vocab.txt', 'r', encoding='utf-8') as f1:
for line in f1:
word = line.strip().split('\t')[1].lower()
words.append(word)
print(len(words))
raw_glove = 'D:/NLP_shiyan/MAN-WikiQA-V1-3/glove/glove.6B.300d.txt'
embedding_dic = {}
count = 1
rng = np.random.RandomState(None)
pad_embedding = rng.uniform(-0.25, 0.25, size=(1, embedding_size))
unk_embedding = rng.uniform(-0.25, 0.25, size=(1, embedding_size))
embeddings = []
clean_words = ['<PAD>', '<UNK>']
embeddings.append(pad_embedding.reshape(-1).tolist())
embeddings.append(unk_embedding.reshape(-1).tolist())
print('uniform_init...')
with open(raw_glove, 'r', encoding='utf-8') as fin:
for line in fin:
try:
line_info = line.strip().split()
word = line_info[0]
embedding = [float(val) for val in line_info[1:]]
embedding_dic[word] = embedding
if word in words:
count += 1
clean_words.append(word)
embeddings.append(embedding)
except:
print('Error while loading line: {}'.format(line.strip()))
print(count)
print(len(clean_words))
print(len(embeddings))
print(np.shape(embeddings))
with open('data/wikiqa/self/raw/wiki_clean_vocab.txt', 'w', encoding='utf-8') as f:
for i, j in enumerate(clean_words):
f.write('{}\t{}\n'.format(i, j))
with open('./data/wikiqa/self/raw/wiki_embedding.pkl', 'wb') as f2:
pickle.dump(embeddings, f2)
# collect every word that appears in train, dev and test
def gen_vocab():
words = []
data_sets = ['train', 'dev', 'test']
for set_name in data_sets:
fin_path = 'data/wikiqa/self/raw/WikiQA-{}.tsv'.format(set_name)
with open(fin_path, 'r', encoding='utf-8') as fin:
fin.readline()
for line in fin:
line_in = line.strip().split('\t')
question = line_in[1].split(' ')
answer = line_in[3].split(' ')
for r1 in question:
if r1 not in words:
words.append(r1)
for r2 in answer:
if r2 not in words:
words.append(r2)
with open('data/wikiqa/self/raw/wiki_vocab.txt', 'w', encoding='utf-8') as f:
for i, j in enumerate(words):
f.write('{}\t{}\n'.format(i, j))
# map the train, dev and test sets to integer id sequences
def gen_data():
clean_vocab = {}
with open('data/wikiqa/self/raw/wiki_clean_vocab.txt', 'r', encoding='utf-8') as f1:
for w in f1:
w_in = w.strip().split('\t')
clean_vocab[w_in[1]] = int(w_in[0])
# print(clean_vocab)
def trans(x, y):
tran = []
for i in x:
if i.lower() in y:
t = y[i.lower()]
else:
t = y['<UNK>']
tran.append(t)
return tran
data_sets = ['train', 'dev', 'test']
for loc,set_name in enumerate(data_sets):
all_trans = []
fin_path = 'data/wikiqa/self/raw/WikiQA-{}.tsv'.format(set_name)
with open(fin_path, 'r', encoding='utf-8') as fin:
fin.readline()
for line in fin:
line_in = line.strip().split('\t')
question = line_in[1].split(' ')
question_transed = trans(question,clean_vocab)
answer = line_in[3].split(' ')
answer_transed = trans(answer, clean_vocab)
lable = int(line_in[4].split(' ')[0])
res = (question_transed, answer_transed, lable)
all_trans.append(res)
print(all_trans)
if loc == 0:
with open('./data/wikiqa/self/raw/WikiQA_train.pkl', 'wb') as f:
pickle.dump(all_trans, f)
elif loc == 1:
with open('./data/wikiqa/self/raw/WikiQA_dev.pkl', 'wb') as f:
pickle.dump(all_trans, f)
elif loc == 2:
with open('./data/wikiqa/self/raw/WikiQA_test.pkl', 'wb') as f:
pickle.dump(all_trans, f)
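# Hedged sketch (added): padseq/pad_q/pad_a above all apply the same
# truncate-or-zero-pad rule. A minimal standalone version for plain lists:
def _pad_or_truncate(seq, target_len):
    # keep the first target_len items, or right-pad with zeros
    return seq[:target_len] if len(seq) >= target_len else seq + [0] * (target_len - len(seq))
# e.g. _pad_or_truncate([3, 1, 4], 5) -> [3, 1, 4, 0, 0]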
if __name__ == '__main__':
    # build the full vocabulary / build the embedding matrix and clean vocabulary / map text to id lists with the clean vocabulary
# gen_vocab()
# data_transform(300)
# gen_data()
    # data augmentation: build the listwise data format and apply padding
class M_P():
random_size = 15
list_size = 15
ans_len = 100
ques_len = 25
m_p = M_P()
dg = DataGenerator(1, m_p, '')
infile = './data/wikiqa/self/raw/WikiQA_train.pkl'
outfile = './data/wikiqa/self/raw/pre/15_15/float/pre_train.pkl'
train_data = dg.wikiQaGenerate(infile, outfile)
# raw_train: 20359
# raw_dev: 1129
# raw_test: 6164 | [
"[email protected]"
] | |
207a9991e8cb9429d32159efa22844a2b24c6035 | aa65d43b8775b1eb49fa5cf3c23e6ce6f7aefb7b | /src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/express_route/gateway/_show.py | e7ccfe86f71ea0e67350f48b228dc22a5f0f2027 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | yungezz/azure-cli | 8acef71d30d937f2fc48981804fa66fd9e1c980d | 5a80d3507270cc3b9313a3e6e60daf291489a80d | refs/heads/dev | 2023-03-15T14:22:26.085920 | 2023-03-03T08:13:04 | 2023-03-03T08:13:04 | 279,810,977 | 0 | 1 | MIT | 2021-02-08T08:09:06 | 2020-07-15T08:26:59 | Python | UTF-8 | Python | false | false | 11,694 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network express-route gateway show",
)
class Show(AAZCommand):
"""Get the details of an ExpressRoute gateway.
:example: Get the details of an ExpressRoute gateway. (autogenerated)
az network express-route gateway show --name MyExpressRouteGateway --resource-group MyResourceGroup
"""
_aaz_info = {
"version": "2022-01-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/expressroutegateways/{}", "2022-01-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="ExpressRoute gateway name.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ExpressRouteGatewaysGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ExpressRouteGatewaysGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"expressRouteGatewayName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-01-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.id = AAZStrType()
_schema_on_200.location = AAZStrType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.auto_scale_configuration = AAZObjectType(
serialized_name="autoScaleConfiguration",
)
properties.express_route_connections = AAZListType(
serialized_name="expressRouteConnections",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.virtual_hub = AAZObjectType(
serialized_name="virtualHub",
flags={"required": True},
)
auto_scale_configuration = cls._schema_on_200.properties.auto_scale_configuration
auto_scale_configuration.bounds = AAZObjectType()
bounds = cls._schema_on_200.properties.auto_scale_configuration.bounds
bounds.max = AAZIntType()
bounds.min = AAZIntType()
express_route_connections = cls._schema_on_200.properties.express_route_connections
express_route_connections.Element = AAZObjectType()
_element = cls._schema_on_200.properties.express_route_connections.Element
_element.id = AAZStrType()
_element.name = AAZStrType(
flags={"required": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.properties.express_route_connections.Element.properties
properties.authorization_key = AAZStrType(
serialized_name="authorizationKey",
)
properties.enable_internet_security = AAZBoolType(
serialized_name="enableInternetSecurity",
)
properties.express_route_circuit_peering = AAZObjectType(
serialized_name="expressRouteCircuitPeering",
flags={"required": True},
)
properties.express_route_gateway_bypass = AAZBoolType(
serialized_name="expressRouteGatewayBypass",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.routing_configuration = AAZObjectType(
serialized_name="routingConfiguration",
)
properties.routing_weight = AAZIntType(
serialized_name="routingWeight",
)
express_route_circuit_peering = cls._schema_on_200.properties.express_route_connections.Element.properties.express_route_circuit_peering
express_route_circuit_peering.id = AAZStrType()
routing_configuration = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration
routing_configuration.associated_route_table = AAZObjectType(
serialized_name="associatedRouteTable",
)
_build_schema_sub_resource_read(routing_configuration.associated_route_table)
routing_configuration.propagated_route_tables = AAZObjectType(
serialized_name="propagatedRouteTables",
)
routing_configuration.vnet_routes = AAZObjectType(
serialized_name="vnetRoutes",
)
propagated_route_tables = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.propagated_route_tables
propagated_route_tables.ids = AAZListType()
propagated_route_tables.labels = AAZListType()
ids = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.propagated_route_tables.ids
ids.Element = AAZObjectType()
_build_schema_sub_resource_read(ids.Element)
labels = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.propagated_route_tables.labels
labels.Element = AAZStrType()
vnet_routes = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.vnet_routes
vnet_routes.bgp_connections = AAZListType(
serialized_name="bgpConnections",
flags={"read_only": True},
)
vnet_routes.static_routes = AAZListType(
serialized_name="staticRoutes",
)
bgp_connections = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.vnet_routes.bgp_connections
bgp_connections.Element = AAZObjectType()
_build_schema_sub_resource_read(bgp_connections.Element)
static_routes = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.vnet_routes.static_routes
static_routes.Element = AAZObjectType()
_element = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.vnet_routes.static_routes.Element
_element.address_prefixes = AAZListType(
serialized_name="addressPrefixes",
)
_element.name = AAZStrType()
_element.next_hop_ip_address = AAZStrType(
serialized_name="nextHopIpAddress",
)
address_prefixes = cls._schema_on_200.properties.express_route_connections.Element.properties.routing_configuration.vnet_routes.static_routes.Element.address_prefixes
address_prefixes.Element = AAZStrType()
virtual_hub = cls._schema_on_200.properties.virtual_hub
virtual_hub.id = AAZStrType()
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
_schema_sub_resource_read = None
def _build_schema_sub_resource_read(_schema):
global _schema_sub_resource_read
if _schema_sub_resource_read is not None:
_schema.id = _schema_sub_resource_read.id
return
_schema_sub_resource_read = AAZObjectType()
sub_resource_read = _schema_sub_resource_read
sub_resource_read.id = AAZStrType()
_schema.id = _schema_sub_resource_read.id
__all__ = ["Show"]
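# Hedged illustration (added; not part of the generated client): the GET
# operation above fills a fixed URL template with path parameters. A naive
# stand-in for self.client.format_url, with example values:
def _format_url_example():
    template = ("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
                "/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}")
    return template.format(subscriptionId="00000000-0000-0000-0000-000000000000",
                           resourceGroupName="MyResourceGroup",
                           expressRouteGatewayName="MyExpressRouteGateway")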
| [
"[email protected]"
] | |
cde830d7a4274d28fb3f1d7f4f807e7245e65ec0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_omens.py | 6723d6d1487e28f10d04f20ab87de03a0fc7e444 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
#class header
class _OMENS():
def __init__(self,):
self.name = "OMENS"
		self.definitions = ['omen']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['omen']
| [
"[email protected]"
] | |
5c9fa7d4fd757aaaf8048f435a7047661ebea2c9 | 9de6d0967bd79a31073b9c6a9d4cd318b7d7b13e | /myside/rgnbgn/apps.py | 7babc5ceee213c3ad837eed08fdb79791a029bc8 | [] | no_license | FSchierok/schierok.de_alt | 9684af3a7147bb643306fa97d4c470c849c56407 | 7bb205599508203a8cbb76aa4bf962323b0adfa4 | refs/heads/master | 2021-10-18T06:07:34.331231 | 2019-02-13T06:51:24 | 2019-02-13T06:51:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class RgnbgnConfig(AppConfig):
name = 'rgnbgn'
| [
"[email protected]"
] | |
4067df6deabc0c09665985515f58a01b388540cf | 0fb9e72ca0e41a06b08dd03c49f340a5c3af583d | /main.py | e579bc240cf9a2dc0834118f9a3bd25c531290f8 | [] | no_license | osmlab/routerelationranger | 74146da70cad1f989f0bff764859e4b43112b998 | 1fcd00493ca6e612fcc0ce6b2878a0749fb6e869 | refs/heads/master | 2020-06-14T06:35:33.626186 | 2016-12-13T18:53:05 | 2016-12-13T18:53:05 | 75,222,534 | 8 | 1 | null | 2016-12-13T19:22:04 | 2016-11-30T20:05:44 | HTML | UTF-8 | Python | false | false | 3,206 | py | from flask import Flask, render_template, jsonify, request
import requests
import json
import pycountry
import us
app = Flask(__name__)
route_type = ''
@app.route("/")
def index():
return render_template('index.html')
@app.route('/countries')
def get_countries():
return json.dumps([[c.alpha_2, c.name] for c in pycountry.countries])
@app.route('/states/<countrycode>')
def get_states(countrycode):
try:
states = [[s.code, s.name] for s in pycountry.subdivisions.get(country_code=countrycode)]
return json.dumps(states)
except KeyError:
return jsonify([])
return jsonify([])
@app.route('/routes/interstate/<country_code>')
def get_interstate_relations(country_code):
# get route type parameter
overpass_query = '[out:json];relation[network="{country_code}:I"][ref];out meta;'.format(country_code=country_code)
print(overpass_query)
response = perform_overpass(overpass_query)
relations = response.json()
if 'elements' in relations and len(relations['elements']) > 0:
out = process_elements(relations['elements'])
return jsonify(out)
return jsonify([])
@app.route('/routes/bicycle/<country_code>/<state_code>')
def get_bicycle_relations(country_code, state_code):
# get route type parameter
overpass_query = '[out:json];area[name="{statename}"]->.a;relation[route=bicycle][network](area.a);out meta;'.format(
statename=pycountry.subdivisions.get(code='{}-{}'.format(country_code, state_code)).name)
print(overpass_query)
response = perform_overpass(overpass_query)
relations = response.json()
if 'elements' in relations and len(relations['elements']) > 0:
out = process_elements(relations['elements'])
return jsonify(out)
return jsonify([])
@app.route('/routes/state/<country_code>/<state_code>')
def get_relations(country_code, state_code):
overpass_query = '[out:json];relation[network="{country_code}:{state_code}"][ref];out meta;'.format(
country_code=country_code,
state_code=state_code)
print(overpass_query)
response = perform_overpass(overpass_query)
relations = response.json()
if 'elements' in relations and len(relations['elements']) > 0:
out = process_elements(relations['elements'])
return jsonify(out)
return jsonify([])
def process_elements(elements):
out = []
for element in elements:
element = cleanup_element(element)
out.append(element)
return out
def perform_overpass(query):
overpass_api_url = 'https://overpass-api.de/api/interpreter'
payload = {'data': query}
return requests.get(overpass_api_url, params=payload)
def cleanup_element(element):
#print(element)
osmid = element['id']
    # remove members; we don't need them
if 'members' in element:
del element['members']
# flatten tags
if 'tags' in element:
for tag in element['tags']:
element[tag] = element['tags'][tag]
# delete original tags
del element['tags']
return element
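# Hedged example (added, assumed input shape): cleanup_element drops the
# Overpass relation's member list and lifts its tags to top-level keys.
def _cleanup_example():
    # returns {'id': 1, 'ref': 'I 95'}
    return cleanup_element({'id': 1, 'members': [], 'tags': {'ref': 'I 95'}})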
def split_code(state_code):
# format is COUNTRY_CODE-STATE_CODE
return state_code.split('-')
if __name__ == "__main__":
app.run() | [
"[email protected]"
] | |
b0a72059e1c525bf44b28cac421173919bffd8ff | f531c56db4cd2776c765b9aca0c4cebaea864ec2 | /ABC180/a.py | 71af98fba89cb4bff73b9f900f0741c8dd163f1c | [] | no_license | SatoKeiju/AtCoder-Python3 | 1c76f8ec5d99470b6e316115f0433b4b3cb64024 | 9c2860e2cfda490d5848b0557876ef616eff01a2 | refs/heads/master | 2021-06-23T05:59:46.911733 | 2021-03-30T08:00:34 | 2021-03-30T08:00:34 | 212,088,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | def main() -> None:
n, a, b = map(int, input().split())
print(n - a + b)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
01fb37b7d5d16a167a3f98e0b2b0c9ed7a36cd06 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005250.py | a115bdd05a7d58de6b87e28027cd20e59675b0ad | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher94883(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.4.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.4.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher94883._instance is None:
CommutativeMatcher94883._instance = CommutativeMatcher94883()
return CommutativeMatcher94883._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 94882
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.4.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 94884
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.4.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 94885
if len(subjects) == 0:
pass
# 0: x*d
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.4.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 95169
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.4.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 95170
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp7 = subjects.popleft()
associative1 = tmp7
associative_type1 = type(tmp7)
subjects8 = deque(tmp7._args)
matcher = CommutativeMatcher94887.get()
tmp9 = subjects8
subjects8 = []
for s in tmp9:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp9, subst0):
pass
if pattern_index == 0:
pass
# State 94888
if len(subjects) == 0:
pass
# 0: x*d
yield 0, subst1
if pattern_index == 1:
pass
# State 95171
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
subjects.appendleft(tmp7)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from collections import deque
from .generated_part005251 import *
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
] | |
d5c9b86ee0c37f13f1cbb6d03e3008cd35a1b9bb | 1bdb0da31d14102ca03ee2df44f0ec522b0701a4 | /Lombardia/MM/ReteAcquedotti/AggiornamentoReteAcquedotti.py | c746357b05945e6e29e8a6e020d668da83aa3a93 | [] | no_license | figuriamoci/Acqua | dc073d90c3c5e5899b22005685847916de1dfd95 | aef22fcd0c80c92441e0e3df2468d7a2f23a848a | refs/heads/master | 2020-12-15T04:00:26.855139 | 2020-06-08T21:17:55 | 2020-06-08T21:17:55 | 234,986,179 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | import os, geojson, logging, acqua.labelCollection as lc
def create_water_supply_network(geoJsonfile, regione):
#logging.info( "reading %s from %s...", geoJsonfile, regione )
with open( geoJsonfile ) as f:
geo = geojson.load( f )
fc = []
for f in geo['features']:
logging.info(f)
if f['geometry']['type'] == 'Point':
logging.info('Skip %s',f)
else:
name = f['properties']['NIL']
new_properties = {'name': name,'comune':'MM', 'regione': regione}
f['properties'] = new_properties
fc.append( f )
feature_collection = geojson.FeatureCollection( fc )
logging.info( "Collected %s feature(s).", len( fc ) )
return feature_collection
os.chdir( '/Lombardia/MM/ReteAcquedotti' )
geoJsonFile = '../Medadata/Quartieri_Milano.geojson'
geoJsonFile_standardized = 'rete_acquedotti_milano.geojson'
regione = 'Lombardia'
fc = create_water_supply_network( geoJsonFile,regione)
lc.to_file( fc, geoJsonFile_standardized )
ll = lc.to_mongoDB_ReteAcuquedotti( geoJsonFile_standardized )
| [
"[email protected]"
] | |
1b4e0dba6cccc7a01d2f1f2473da5593ccf3a7ee | 6a68b230964540bc3f7eb19a9a5adc9b218370c7 | /MiddlewareScan/F-MiddlewareScan.py | c00c9bde26d9c4c15f126b1dc017a4972d1497ec | [] | no_license | chuxuantinh/hack-tools | 7705e485959d1612fee1786a80d98a3fe5500e20 | c2203e8fa5c42b26e23b9c3db5e88ec7d11ea120 | refs/heads/master | 2023-01-09T03:13:40.074380 | 2020-10-31T19:17:37 | 2020-10-31T19:17:37 | 308,955,506 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,741 | py | #coding:utf-8
#author:wolf@future-sec
import getopt
import sys
import Queue
import threading
import socket
import urllib2
import time
import ssl
import os
queue = Queue.Queue()
sys.path.append("plugins")
mutex = threading.Lock()
timeout = 10
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
class ThreadNum(threading.Thread):
def __init__(self,queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
while True:
try:
if queue.empty():break
queue_task = self.queue.get()
except:
break
try:
task_type,task_host,task_port = queue_task.split(":")
if task_type == 'portscan':
port_status = scan_port(task_type,task_host,task_port)
if port_status == True:
queue.put(":".join(['discern',task_host,task_port]))
elif task_type == 'discern':
discern_type = scan_discern(task_type,task_host,task_port)
if discern_type:
queue.put(":".join([discern_type,task_host,task_port]))
else:
scan_vul(task_type,task_host,task_port)
except:
continue
def scan_port(task_type,host,port):
try:
socket.setdefaulttimeout(timeout/2)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((str(host),int(port)))
log(task_type,host,port)
sock.close()
return True
except:
return False
def log(scan_type,host,port,info=''):
mutex.acquire()
time_str = time.strftime('%X', time.localtime( time.time()))
if scan_type == 'portscan':
print "[%s] %s:%d open"%(time_str,host,int(port))
elif scan_type == 'discern':
print "[%s] http://%s:%d is %s"%(time_str,host,int(port),info)
else:
if info:
print "[*%s] %s"%(time_str,info)
log_file = open('result.log','a')
log_file.write("[*%s] %s\r\n"%(time_str,info))
log_file.close()
else:
print "[%s] http://%s:%s call plugin %s"%(time_str,host,port,scan_type)
mutex.release()
def read_config(config_type):
if config_type == 'discern':
mark_list=[]
config_file = open('discern_config.ini','r')
for mark in config_file:
name,location,key,value = mark.strip().split("|")
mark_list.append([name,location,key,value])
config_file.close()
return mark_list
elif config_type == 'plugin':
plugin_list = {}
config_file = open('plugin_config.ini','r')
for plugin in config_file:
name,plugin_file_list = plugin.strip().split("|")
plugin_list[name]=[]
plugin_list[name] = plugin_file_list.split(",")
config_file.close()
return plugin_list
def scan_discern(scan_type,host,port):
mark_list = read_config('discern')
for mark_info in mark_list:
if mark_info[1] == 'header':
try:
header = urllib2.urlopen("http://%s:%d"%(host,int(port)),timeout=timeout).headers
except urllib2.HTTPError,e:
header = e.headers
except Exception,e:
return False
try:
if mark_info[3].lower() in header[mark_info[2]].lower():
log(scan_type,host,port,mark_info[0])
return mark_info[0]
except Exception,e:
continue
elif mark_info[1] == 'file':
try:
re_html = urllib2.urlopen("http://%s:%d/%s"%(host,int(port),mark_info[2]),timeout=timeout).read()
except urllib2.HTTPError,e:
re_html = e.read()
except Exception,e:
return False
if mark_info[3].lower() in re_html.lower():
log(scan_type,host,port,mark_info[0])
return mark_info[0]
def scan_vul(scan_type,host,port):
vul_plugin = read_config("plugin")
for plugin_name in vul_plugin[scan_type]:
try:
req = __import__(plugin_name)
log(plugin_name,host,port)
vul_data = req.check(host,port,timeout)
if vul_data.split("|")[0].upper()=="YES":
log(scan_type,host,port,vul_data.split("|")[1])
except:
continue
def get_ip_list(ip):
ip_list = []
iptonum = lambda x:sum([256**j*int(i) for j,i in enumerate(x.split('.')[::-1])])
numtoip = lambda x: '.'.join([str(x/(256**i)%256) for i in range(3,-1,-1)])
if '-' in ip:
ip_range = ip.split('-')
ip_start = long(iptonum(ip_range[0]))
ip_end = long(iptonum(ip_range[1]))
ip_count = ip_end - ip_start
if ip_count >= 0 and ip_count <= 65536:
for ip_num in range(ip_start,ip_end+1):
ip_list.append(numtoip(ip_num))
else:
print '-h wrong format'
elif '.ini' in ip:
ip_config = open(ip,'r')
for ip in ip_config:
ip_list.extend(get_ip_list(ip.strip()))
ip_config.close()
else:
ip_split=ip.split('.')
net = len(ip_split)
if net == 2:
for b in range(1,255):
for c in range(1,255):
ip = "%s.%s.%d.%d"%(ip_split[0],ip_split[1],b,c)
ip_list.append(ip)
elif net == 3:
for c in range(1,255):
ip = "%s.%s.%s.%d"%(ip_split[0],ip_split[1],ip_split[2],c)
ip_list.append(ip)
elif net ==4:
ip_list.append(ip)
else:
print "-h wrong format"
return ip_list
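# Hedged sanity check (added, illustrative): the iptonum/numtoip lambdas above
# convert dotted-quad strings to 32-bit integers and back.
def _iptonum_example():
    iptonum = lambda x: sum([256 ** j * int(i) for j, i in enumerate(x.split('.')[::-1])])
    return iptonum('192.168.1.1')  # 3232235777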
def t_join(m_count):
tmp_count = 0
i = 0
while True:
time.sleep(1)
ac_count = threading.activeCount()
if ac_count < m_count and ac_count == tmp_count:
i+=1
else:
i = 0
tmp_count = ac_count
#print ac_count,queue.qsize()
if (queue.empty() and threading.activeCount() <= 1) or i > 5:
break
def put_queue(ip_list,port_list):
for ip in ip_list:
for port in port_list:
queue.put(":".join(['portscan',ip,port]))
if __name__=="__main__":
msg = '''
A vulnerability detection scripts for middleware services author:wolf@future-sec
Usage: python F-MiddlewareScan.py -h 192.168.1 [-p 7001,8080] [-m 50] [-t 10]
'''
if len(sys.argv) < 2:
print msg
try:
options,args = getopt.getopt(sys.argv[1:],"h:p:m:t:")
ip = ''
port = '80,4848,7001,7002,8000,8001,8080,8081,8888,9999,9043,9080'
m_count = 100
for opt,arg in options:
if opt == '-h':
ip = arg
elif opt == '-p':
port = arg
elif opt == '-m':
m_count = int(arg)
elif opt == '-t':
timeout = int(arg)
if ip:
ip_list = get_ip_list(ip)
port_list = []
if '.ini' in port:
port_config = open(port,'r')
for port in port_config:
port_list.append(port.strip())
port_config.close()
else:
port_list = port.split(',')
put_queue(ip_list,port_list)
for i in range(m_count):
t = ThreadNum(queue)
t.setDaemon(True)
t.start()
t_join(m_count)
except Exception,e:
print msg
| [
"[email protected]"
] | |
7fe5bc22dac1909ed556d1c7bb7127e52a4d4dae | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | backup/user_189/ch20_2019_04_02_12_58_25_892579.py | fe0235bbac5d83917113e11d70f62f63d46439b1 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | nome = str(input('digite seu nome: '))
if nome == 'chris':
    print('todo mundo odeia o chris')
else:
    print('olá, {0}'.format(nome)) | [
"[email protected]"
] | |
c3c8c62c75b3a96cd5d71067e6269516d0c95ee0 | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/cache/4baf47439edb0ab2a6d043f8872032cbd5c6dee035b265d6f6b2b9443945c60a/pyexpat/errors.py | ead44496b49caa3d6d90ff34ba43efbfbac4b2f6 | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,532 | py | # encoding: utf-8
# module pyexpat.errors
# from C:\Users\Doly\Anaconda3\lib\site-packages\skimage\io\_plugins\_colormixer.cp37-win_amd64.pyd
# by generator 1.147
""" Constants used to describe error conditions. """
# no imports
# Variables with simple values
XML_ERROR_ABORTED = 'parsing aborted'
XML_ERROR_ASYNC_ENTITY = 'asynchronous entity'
XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF = 'reference to external entity in attribute'
XML_ERROR_BAD_CHAR_REF = 'reference to invalid character number'
XML_ERROR_BINARY_ENTITY_REF = 'reference to binary entity'
XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING = 'cannot change setting once parsing has begun'
XML_ERROR_DUPLICATE_ATTRIBUTE = 'duplicate attribute'
XML_ERROR_ENTITY_DECLARED_IN_PE = 'entity declared in parameter entity'
XML_ERROR_EXTERNAL_ENTITY_HANDLING = 'error in processing external entity reference'
XML_ERROR_FEATURE_REQUIRES_XML_DTD = 'requested feature requires XML_DTD support in Expat'
XML_ERROR_FINISHED = 'parsing finished'
XML_ERROR_INCOMPLETE_PE = 'incomplete markup in parameter entity'
XML_ERROR_INCORRECT_ENCODING = 'encoding specified in XML declaration is incorrect'
XML_ERROR_INVALID_TOKEN = 'not well-formed (invalid token)'
XML_ERROR_JUNK_AFTER_DOC_ELEMENT = 'junk after document element'
XML_ERROR_MISPLACED_XML_PI = 'XML or text declaration not at start of entity'
XML_ERROR_NOT_STANDALONE = 'document is not standalone'
XML_ERROR_NOT_SUSPENDED = 'parser not suspended'
XML_ERROR_NO_ELEMENTS = 'no element found'
XML_ERROR_NO_MEMORY = 'out of memory'
XML_ERROR_PARAM_ENTITY_REF = 'illegal parameter entity reference'
XML_ERROR_PARTIAL_CHAR = 'partial character'
XML_ERROR_PUBLICID = 'illegal character(s) in public id'
XML_ERROR_RECURSIVE_ENTITY_REF = 'recursive entity reference'
XML_ERROR_SUSPENDED = 'parser suspended'
XML_ERROR_SUSPEND_PE = 'cannot suspend in external parameter entity'
XML_ERROR_SYNTAX = 'syntax error'
XML_ERROR_TAG_MISMATCH = 'mismatched tag'
XML_ERROR_TEXT_DECL = 'text declaration not well-formed'
XML_ERROR_UNBOUND_PREFIX = 'unbound prefix'
XML_ERROR_UNCLOSED_CDATA_SECTION = 'unclosed CDATA section'
XML_ERROR_UNCLOSED_TOKEN = 'unclosed token'
XML_ERROR_UNDECLARING_PREFIX = 'must not undeclare prefix'
XML_ERROR_UNDEFINED_ENTITY = 'undefined entity'
XML_ERROR_UNEXPECTED_STATE = 'unexpected parser state - please send a bug report'
XML_ERROR_UNKNOWN_ENCODING = 'unknown encoding'
XML_ERROR_XML_DECL = 'XML declaration not well-formed'
__loader__ = None
__spec__ = None
# no functions
# no classes
# variables with complex values
codes = {
'XML declaration not well-formed': 30,
'XML or text declaration not at start of entity': 17,
'asynchronous entity': 13,
'cannot change setting once parsing has begun': 26,
'cannot suspend in external parameter entity': 37,
'document is not standalone': 22,
'duplicate attribute': 8,
'encoding specified in XML declaration is incorrect': 19,
'entity declared in parameter entity': 24,
'error in processing external entity reference': 21,
'illegal character(s) in public id': 32,
'illegal parameter entity reference': 10,
'incomplete markup in parameter entity': 29,
'junk after document element': 9,
'mismatched tag': 7,
'must not undeclare prefix': 28,
'no element found': 3,
'not well-formed (invalid token)': 4,
'out of memory': 1,
'parser not suspended': 34,
'parser suspended': 33,
'parsing aborted': 35,
'parsing finished': 36,
'partial character': 6,
'recursive entity reference': 12,
'reference to binary entity': 15,
'reference to external entity in attribute': 16,
'reference to invalid character number': 14,
'requested feature requires XML_DTD support in Expat': 25,
'syntax error': 2,
'text declaration not well-formed': 31,
'unbound prefix': 27,
'unclosed CDATA section': 20,
'unclosed token': 5,
'undefined entity': 11,
'unexpected parser state - please send a bug report': 23,
'unknown encoding': 18,
}
messages = {
1: 'out of memory',
2: 'syntax error',
3: 'no element found',
4: 'not well-formed (invalid token)',
5: 'unclosed token',
6: 'partial character',
7: 'mismatched tag',
8: 'duplicate attribute',
9: 'junk after document element',
10: 'illegal parameter entity reference',
11: 'undefined entity',
12: 'recursive entity reference',
13: 'asynchronous entity',
14: 'reference to invalid character number',
15: 'reference to binary entity',
16: 'reference to external entity in attribute',
17: 'XML or text declaration not at start of entity',
18: 'unknown encoding',
19: 'encoding specified in XML declaration is incorrect',
20: 'unclosed CDATA section',
21: 'error in processing external entity reference',
22: 'document is not standalone',
23: 'unexpected parser state - please send a bug report',
24: 'entity declared in parameter entity',
25: 'requested feature requires XML_DTD support in Expat',
26: 'cannot change setting once parsing has begun',
27: 'unbound prefix',
28: 'must not undeclare prefix',
29: 'incomplete markup in parameter entity',
30: 'XML declaration not well-formed',
31: 'text declaration not well-formed',
32: 'illegal character(s) in public id',
33: 'parser suspended',
34: 'parser not suspended',
35: 'parsing aborted',
36: 'parsing finished',
37: 'cannot suspend in external parameter entity',
}
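# Hedged usage note (added; not part of the generated stub): `codes` and
# `messages` are inverse mappings between expat error strings and numbers.
if __name__ == '__main__':
    assert messages[2] == 'syntax error'
    assert codes[messages[2]] == 2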
| [
"[email protected]"
] | |
1cc9928d529ad5736e478cc4ac402889c072e6f5 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /waf_write_f/xss-match-set_create.py | c7702ffe2384dfbda2f82dad83e597d3abc55b61 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
delete-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/delete-xss-match-set.html
get-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/get-xss-match-set.html
list-xss-match-sets : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/list-xss-match-sets.html
update-xss-match-set : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/waf/update-xss-match-set.html
"""
write_parameter("waf", "create-xss-match-set") | [
"[email protected]"
] | |
34c447b2702d0a89243b9206af1af98f49ae8b5b | 5832f65747e6142d1b8de9d46aa507092782aafc | /Codeforces/1304/d/d1.py | 1153970f359f16501943bc2d0f927e46b15fc1ac | [] | no_license | subhashreddykallam/Competitive-Programming | 64cc42c5b23c03536187a1bb54e2b2ed82ee7844 | 973b66b4eb81352b98409ca52fa3aa75c28d8b6f | refs/heads/master | 2022-05-28T21:07:43.012922 | 2020-05-05T20:34:20 | 2020-05-05T20:34:20 | 226,814,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | from itertools import permutations
def CeilIndex(A, l, r, key):
while (r - l > 1):
m = l + (r - l)//2
if (A[m] >= key):
r = m
else:
l = m
return r
def LongestIncreasingSubsequenceLength(A, size):
tailTable = [0 for i in range(size + 1)]
len = 0
tailTable[0] = A[0]
len = 1
for i in range(1, size):
if (A[i] < tailTable[0]):
tailTable[0] = A[i]
elif (A[i] > tailTable[len-1]):
tailTable[len] = A[i]
len += 1
else:
tailTable[CeilIndex(tailTable, -1, len-1, A[i])] = A[i]
return len
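# Added note: the two helpers above are the O(n log n) LIS routine --
# tailTable[i] holds the smallest possible tail of an increasing subsequence
# of length i + 1, and CeilIndex binary-searches the slot for each element.
assert LongestIncreasingSubsequenceLength([2, 5, 3, 7], 4) == 3  # e.g. [2, 3, 7]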
s = input()
n = len(s)+1
z = [i for i in range(1, n+1)]
z = list(permutations(z))
maxp, minp = [], []
maxpl, minpl = 0, 100
for perm in z:
flag = 1
for i in range(n-1):
if s[i] == '>':
if perm[i] < perm[i+1]:
flag = 0
else:
if perm[i] > perm[i+1]:
flag = 0
if flag:
if LongestIncreasingSubsequenceLength(perm, len(perm)) > maxpl:
maxp = [perm]
maxpl = LongestIncreasingSubsequenceLength(perm, len(perm))
elif LongestIncreasingSubsequenceLength(perm, len(perm)) == maxpl:
maxp.append(perm)
if LongestIncreasingSubsequenceLength(perm, len(perm)) < minpl:
minp = [perm]
minpl = LongestIncreasingSubsequenceLength(perm, len(perm))
elif LongestIncreasingSubsequenceLength(perm, len(perm)) == minpl:
minp.append(perm)
for i in maxp:
print(i, 'max')
print()
for i in minp:
print(i)
| [
"[email protected]"
] | |
b3463b6fc44f470bc1c49964be9a1293d2c14279 | 38d93c5fd72fee380ec431b2ca60a069eef8579d | /Baekjoon,SWEA, etc/프로그래머스/소수 찾기.py | d4f4170daebf494386f6c0311a6f986a270b6131 | [] | no_license | whgusdn321/Competitive-programming | 5d1b681f5bee90de5678219d91cd0fa764476ddd | 3ff8e6b1d2facd31a8210eddeef851ffd0dce02a | refs/heads/master | 2023-01-01T01:34:22.936373 | 2020-10-24T11:05:08 | 2020-10-24T11:05:08 | 299,181,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py | import itertools
# permutations = []
#
#
# def make_permutations(nn, n, permutation, visited):
# '''
# if n == 4, create [[0,1,2,3],[0,1,3,2],[0,2,3,4] . . .]
# '''
# if len(permutation) == nn:
# permutations.append(permutation)
# return
# for i in range(n):
# if not visited[i]:
# visited[i] = True
# permutation.append(i)
# make_permutations(nn, n, permutation.copy(), visited.copy())
# permutation.pop()
# visited[i] = False
#
# def isprime(str_num):
# num = int(str_num)
# if num == 1:
# return False
# flag = True
# for i in range(2, num//2+1):
# if num %i == 0:
# flag = False
# break
# return flag
#
#
# def solution(numbers):
# global permutations
# n = len(numbers)
# for nn in range(1, n+1):
# visited = [False] * n
# make_permutations(nn, n, [], visited)
#
# cnt = 0
#
# numberVisited = []
#
# for permutation in permutations:
# number =''
# for index in permutation:
# number += numbers[index]
#
# j = -1
# while j != len(number)-1 and number[j+1] == '0':
# j += 1
#
# if j == len(number)-1:
# continue
# else:
# number = number[j+1:]
# if number not in numberVisited:
# numberVisited.append(number)
# if isprime(number):
# cnt += 1
# return cnt
def isPrime(num):
    if num <= 1:
        return False
    for i in range(2, int(num ** 0.5) + 1):  # trial division up to sqrt(num)
if num % i == 0:
return False
return True
def solutions(numbers):
sett = set([])
N = len(numbers)
cnt = 0
for n in range(1, N+1):
for permu in itertools.permutations(numbers, n):
#print('permu : ', ''.join(permu))
candidate = int(''.join(permu))
sett.add(candidate)
for number in sett:
if isPrime(number):
cnt += 1
return cnt
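# expected output of the call below: 2 -- the distinct primes formed from '011' are 11 and 101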
print(solutions('011')) | [
"[email protected]"
] | |
b0ddb85d5d6654d0bbf42e807f0356fc1e877ba3 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AntfortuneContentCommunityContentEventSaveResponse.py | 460049c5dc88e8c5dfdbefc078a9cfe3196f03ef | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 491 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AntfortuneContentCommunityContentEventSaveResponse(AlipayResponse):
def __init__(self):
super(AntfortuneContentCommunityContentEventSaveResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AntfortuneContentCommunityContentEventSaveResponse, self).parse_response_content(response_content)
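        # added note: this response type declares no extra fields, so the
        # dict parsed by the base class is used as-is.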
| [
"[email protected]"
] | |
56bc7e285a243b20416b0354cc8bd2200989d3a0 | ac6e4102dfb49a4e49de0e2766feb6e80ab0b5c2 | /test/test_networking_project_netgw_attach.py | 74a8534b8692359526d9c3f667bbf6100f6e6b64 | [
"MIT"
] | permissive | hyperonecom/h1-client-python | df01f05ad295121e3dd391a3274c41e2f5b88e53 | 4ce355852ba3120ec1b8f509ab5894a5c08da730 | refs/heads/master | 2023-04-05T01:51:31.637002 | 2021-03-29T00:05:41 | 2021-03-29T00:05:41 | 319,309,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | """
HyperOne
HyperOne API # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import h1
from h1.model.netgw_private import NetgwPrivate
globals()['NetgwPrivate'] = NetgwPrivate
from h1.model.networking_project_netgw_attach import NetworkingProjectNetgwAttach
class TestNetworkingProjectNetgwAttach(unittest.TestCase):
"""NetworkingProjectNetgwAttach unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNetworkingProjectNetgwAttach(self):
"""Test NetworkingProjectNetgwAttach"""
# FIXME: construct object with mandatory attributes with example values
# model = NetworkingProjectNetgwAttach() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2e3c7259450c57b1ae6db5382c7f7aa424a5a2c9 | a5b6dfd04e49b86a4ec3ba1ca1fc941233fd1906 | /leetCode/46Permutations.py | 5ac95b13a5ac47b2245845689b668f8e2a71e15e | [] | no_license | JuDa-hku/ACM | c57423c4c619991ab5b8df170ace6c68fbe6bb48 | 3add05a6b07ec60ae148290f7f25d122336de47d | refs/heads/master | 2021-06-25T09:33:05.396914 | 2016-10-29T03:27:03 | 2016-10-29T03:27:03 | 21,881,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def permute(self, nums):
res, C = [], []
self.permuteHelp(nums, res, C)
return C
def permuteHelp(self, nums,res, C):
if len(nums) == 0:
C.append(res)
return
for num in nums:
tmpNum, tmpRes = nums[:], res[:]
tmpNum.remove(num)
tmpRes.append(num)
self.permuteHelp(tmpNum,tmpRes,C)
s = Solution()
print s.permute([1,2,3]) | [
"[email protected]"
] | |
47fe148cfd3970866527a76d900bd6a16e5902c0 | fb4fc6ca3b6ea208a2377325fd1d41e6fe068734 | /tests/test_speed.py | 40c06c1d58652a5c103c60146cc0bb2ed869b958 | [] | no_license | nadia-el/ontology-semsim-py | d15977cebe5d750c256a4ce3ff3fdb6c6c7098a1 | 216ee2a7b83b951b3bce7865e1dd7c94acc17211 | refs/heads/master | 2022-04-16T21:08:53.126204 | 2020-04-03T23:12:47 | 2020-04-03T23:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | from ontology_semsim.util import time_all_ancestors, time_all_jaccard
from ontology_semsim.fast_semsim import FastSemSimEngine
from pytest import approx
from typing import Optional, Set, List, Union, Dict, Any
from rdflib import URIRef, BNode, Literal, Graph, RDFS, OWL, Namespace
from ontology_semsim.semsim_rdflib import RdfSemSimEngine
from pytest import approx
import logging
g = Graph()
g.parse("tests/data/chromosome.owl", format="xml")
GO = Namespace("http://purl.obolibrary.org/obo/GO_")
BFO = Namespace("http://purl.obolibrary.org/obo/BFO_")
NC = GO['0000228'].toPython()
MC = GO['0000262'].toPython()
Ch = GO['0005694'].toPython()
Mt = GO['0005739'].toPython()
SUBCLASS_OF = RDFS['subClassOf'].toPython()
PART_OF = BFO['0000050'].toPython()
logging.basicConfig(level=logging.INFO)
def test_timings():
print('')
rdf_sse = RdfSemSimEngine(g)
rpt('rdf0', rdf_sse)
rpt('rdf1', rdf_sse)
rpt('rdf2', rdf_sse)
rpt('rdf3', rdf_sse)
rdf_sse = RdfSemSimEngine(g)
fast_sse = FastSemSimEngine(rdf_sse)
rpt('fast0', fast_sse)
rpt('fast1', fast_sse)
rpt('fast2', fast_sse)
rpt('fast3', fast_sse)
# to see output: pytest -s tests/test_speed.py
def rpt(n, sse):
t = time_all_ancestors(sse)
print(f'A {n} :: {t}')
t = time_all_jaccard(sse)
print(f'J {n} :: {t}')
| [
"[email protected]"
] | |
e90c2f5b3dafdf720e55ab32568ca0901b82703d | 1cedcf27afa82ada8eb9fb929bacc5233ecfb807 | /process_data/create_entity_annotated_corpus.py | 4ebe173f354ee7fccf225cc357946d2833e93497 | [] | no_license | lukuang/trec_news | b4ae20ea0a9d72524e92287212b9753727b2ae0d | 1a58e9ca9317dc16a327d2e5b259c257ddda6531 | refs/heads/master | 2020-03-17T06:59:43.204178 | 2019-04-04T13:25:48 | 2019-04-04T13:25:48 | 133,377,851 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,378 | py | """
Create corpus with entity annotation and each entity
will be replaced by its cannonical form with some changes.
More specifically, each entity will have 'ENT' annotated at
the begining and the end. Besides that, the underscores
and all other symbols to connect terms in the
cannonical form of the entity will be replaced with
this annotated as well
"""
import os
import json
import sys
import re
import argparse
import codecs
from collections import defaultdict
from string import Template
sys.path.append("/infolab/node4/lukuang/trec_news/trec_news/src")
from data import ParagraphDoc
from entity import dbpedia
paragraph_doc_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<published_date>$published_date</published_date>
\t<TEXT>
\t$body
\t</TEXT>
</DOC>\n""")
doc_template = Template("""
<DOC>
\t<DOCNO>$did</DOCNO>
\t<published_date>$published_date</published_date>
\t<TEXT>
\t\t<tt>$tt</tt>
\t\t<body>$body</body>
\t</TEXT>
</DOC>\n""")
def get_cannonical_form(url):
m = re.search("resource/(.+)$",url)
return m.group(1)
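# Illustrative example (assuming DBpedia-style resource URLs):
#   get_cannonical_form("http://dbpedia.org/resource/Barack_Obama") -> "Barack_Obama"
# since the regex captures everything after "resource/".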
def get_annotation_from_text(text,annotator):
text_entities = []
returned_json = annotator.annotate(text)
try:
entities = returned_json["Resources"]
except KeyError:
pass
else:
for entitiy_struct in entities:
surface_form = entitiy_struct["@surfaceForm"]
cannonical_form = get_cannonical_form(entitiy_struct["@URI"])
start = entitiy_struct["@offset"]
end = int(start)+len(surface_form)
single_entity = {
"cannonical":cannonical_form,
"start" : start,
"end" : end,
"string" : surface_form
}
text_entities.append(single_entity)
return text_entities
def read_title_entity_annotation(title_entity_file):
title_entities = defaultdict(list)
with open(title_entity_file) as f:
for line in f:
parts = line.split()
docid = parts[0]
entity_start = parts[1]
entity_end = parts[2]
entity_string = " ".join(parts[3:-1])
entity_cannonical = parts[-1]
single_entity = {
"cannonical":entity_cannonical,
"start" : int(entity_start),
"end" : int(entity_end),
"string" : entity_string
}
title_entities[docid].append(single_entity)
return title_entities
def read_paragraph_entity_annotation(entity_file):
pharagraph_entities = defaultdict(lambda: defaultdict(list))
with open(entity_file) as f:
for line in f:
parts = line.split()
phara_id = parts[0]
entity_start = parts[1]
entity_end = parts[2]
entity_string = " ".join(parts[3:-1])
entity_cannonical = parts[-1]
m = re.match("^(.+)-(\d+)",phara_id)
try:
docid = m.group(1)
pid = int(m.group(2))
except AttributeError:
print "Malformatted line!"
print line
else:
single_entity = {
"cannonical":entity_cannonical,
"start" : int(entity_start),
"end" : int(entity_end),
"string" : entity_string
}
pharagraph_entities[docid][pid].append(single_entity)
return pharagraph_entities
def annotating_entity(entity):
"""
    Annotate an entity with 'ENT' at the beginning and the end.
    Other non-word symbols ('_' included) are also replaced with it.
"""
entity = re.sub("[^0-9a-zA-Z]+", "ENT", entity)
return " ENT%sENT " %(entity)
def annotating_text(text,entity_info):
annotated_text = ""
last_index = 0
for e in entity_info:
annotated_text += text[last_index:e["start"]-1]
annotated_text += annotating_entity(e["cannonical"])
last_index = e["end"]
annotated_text += text[last_index:]
return annotated_text
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--collection_dump","-cd",default="/infolab/node4/lukuang/trec_news/data/washington_post/collection_json_dump/v2/dump")
parser.add_argument("--pharagraph_entity_dir","-ed",default="/infolab/node4/lukuang/trec_news/data/washington_post/paragraph_entities")
parser.add_argument("--title_entity_file","-tf",default="/infolab/node4/lukuang/trec_news/data/washington_post/title_entities/title_entities")
parser.add_argument("--doc_type","-dt",default=0,type=int,choices=range(2),
help="""
Choose document type:
0: document
1: paragraph_document
""")
parser.add_argument("dest_dir")
args=parser.parse_args()
collection = json.load(open(args.collection_dump))
print "collection loaded"
# print "entities loaded"
# print pharagraph_entities
# Test
# docid = "988b5c4e173959205822977aa244ce0d"
docids_with_entities = set()
if args.doc_type == 0:
title_entities = read_title_entity_annotation(args.title_entity_file)
for file_name in os.walk(args.pharagraph_entity_dir).next()[2]:
print "read from file %s" %(file_name)
entity_file = os.path.join(args.pharagraph_entity_dir,file_name)
dest_file = os.path.join(args.dest_dir,file_name)
pharagraph_entities = read_paragraph_entity_annotation(entity_file)
with codecs.open(dest_file,"w",'utf-8') as of:
for docid in pharagraph_entities:
# print "For document %s" %(docid)
doc = collection[docid]
docids_with_entities.add(docid)
# print "print results only for paragraphs with entities"
annotated_paragraphs = []
                for pid, para_text in enumerate(doc["paragraphs"]):
                    published_date = doc["published_date"]
if pid not in pharagraph_entities[docid]:
annotated_paragraphs.append(para_text)
else:
annotated_paragraphs.append(annotating_text(para_text,pharagraph_entities[docid][pid]))
if args.doc_type == 0:
# get annotated title
title_text = doc["title"]
if title_entities[docid]:
title_text = annotating_text(title_text, title_entities[docid])
# get annotated body
body = "\n".join(annotated_paragraphs)
text = doc_template.substitute(
did=docid,
published_date=published_date,
body=body,
tt=title_text
)
of.write(text)
else:
for pid,annotated_text in enumerate(annotated_paragraphs):
p_docid = "%s-%d" %(docid,pid)
text = paragraph_doc_template.substitute(did=p_docid,
published_date=published_date,
body=annotated_text)
of.write(text)
# print "Parsed text:"
# print "\n".join(annotated_paragraphs)
# print '='*20
# process the documents without entities in their paragraphs
leftover_file = os.path.join(args.dest_dir,"leftover")
with codecs.open(leftover_file,"w",'utf-8') as of:
for docid in collection:
if docid not in docids_with_entities:
                doc = collection[docid]
                published_date = doc["published_date"]
# print "print results only for paragraphs with entities"
if args.doc_type == 0:
# get annotated title
title_text = doc["title"]
if title_entities[docid]:
title_text = annotating_text(title_text, title_entities[docid])
# get annotated body
body = "\n".join(doc["paragraphs"])
text = doc_template.substitute(
did=docid,
published_date=published_date,
body=body,
tt=title_text
)
of.write(text)
else:
for pid,paragraph_text in enumerate(doc["paragraphs"]):
p_docid = "%s-%d" %(docid,pid)
text = paragraph_doc_template.substitute(did=p_docid,
published_date=published_date,
body=paragraph_text)
of.write(text)
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
6d60e2ec9b7cae3397a6fc0edadaa40037633b41 | 275c25b4a8f8009799dc0e7aebab21df1e43f542 | /FruitStore/main.py | e2bc96dcd13b8dfc1b3f07acd8704117a6564c5a | [] | no_license | aranooke/Python-Data-Structure | 9a81cdf39a7d9a865a43b73b4e7db1f4f8a1e570 | 3e4ad8488385823b0c36f453159b463831a19335 | refs/heads/master | 2023-06-05T06:34:55.095557 | 2021-06-29T09:46:51 | 2021-06-29T09:46:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,708 | py | from FStore import FStore
from FStore import Cart
import time
import getpass
import logging
import json
def getAvailableStock():
    with open(r"FruitStore\stock.json", "r") as stockInfo:
        return json.load(stockInfo)
openStore = FStore(getAvailableStock())
cartInstance = Cart()
def getUserInput(fromWhichMenu):
inputMessage = ''
if fromWhichMenu == "fromMainMenu":
inputMessage = "Please enter your choice : "
elif fromWhichMenu == "fruitMenu":
inputMessage = "Please enter fruit id : "
elif fromWhichMenu == "numbers":
inputMessage = "how many you need? "
elif fromWhichMenu == "addMoreItems":
try:
choice = input("Do you want to add more items to your cart? Y or N ").strip()
if choice == "Y" or choice == "y" or choice == "yes" or choice == "YES":
return True
else:
return False
except ValueError:
print("That's not an int!")
elif fromWhichMenu == "adminStuff":
try:
choice = getpass.getpass("Enter admin password")
if choice == "admin123":
return True
else:
return False
except ValueError:
print("That's not a valid password!")
try:
choice = input(inputMessage).strip()
except ValueError:
print("That's not an int!")
return choice
def displayMainMenu():
print("""
1. Show available fruits
2. Buy Fruits
3. Show Cart
4. Checkout
5. Exit
6. Display available Stocks (only store admin can access)
""")
def addMoreItems():
if (getUserInput("addMoreItems")):
displayFruitMenu()
choice = getUserInput("fruitMenu")
return choice
else:
print("purchase done")
def displayFruitMenu():
for i in enumerate(openStore.listOfFruits(), start=1):
print(i[0], i[1])
def billFormat(billObj):
for fruitName, price in billObj.items():
print(fruitName + " - " + str(price))
print("Total Bill amount to pay " + str(sum(billObj.values())) + " Rupees \n")
def checkOutCart():
billMap = {}
cartItems = cartInstance.showCart()
for fn,count in cartItems.items():
fruitPrice = openStore.getFruitPrice(fn)
billMap[fn] = fruitPrice * count
billFormat(billMap)
def showAvailableFruits():
availableFruits = openStore.listOfFruits()
print("Here's the available fruits, happy purchasing\n")
for id, fruit in availableFruits.items():
print(str(id) + " - " + fruit[0] + "(each " + fruit[0] + " cost " + str(fruit[1]) + " Rupees)")
def buyFruit(fruitId):
if int(fruitId) in openStore.getFruitsIDs():
fruitCount = int(getUserInput("numbers"))
if fruitCount <= openStore.getAvailableCountForFruit(fruitId):
cartInstance.addToCart(openStore.getFruitName(fruitId), fruitCount)
openStore.updateStock(openStore.getFruitName(fruitId), fruitCount)
print(str(fruitCount) + " " +openStore.getFruitName(fruitId) + " added to your cart \n")
else:
print("The count you entered is either exceeding or we nearing out of stock soon")
else:
print("ID which's entered isn't matching with any fruits which we have!")
if __name__ == "__main__":
while True:
displayMainMenu()
userChoice = getUserInput("fromMainMenu")
if userChoice == '1':
showAvailableFruits()
elif userChoice == '2':
showAvailableFruits()
choice = getUserInput("fruitMenu")
buyFruit(choice)
if(getUserInput("addMoreItems")):
for i in range(len(openStore.giveAvailableFruitsInStock())):
showAvailableFruits()
choice = getUserInput("fruitMenu")
buyFruit(choice)
else:
displayFruitMenu()
elif userChoice == '3':
cartItems = cartInstance.showCart()
print("Currently you have below items in your cart, ")
for itemName, itemCount in cartItems.items():
print(itemName + "-" + str(itemCount))
time.sleep(7)
elif userChoice == '4':
checkOutCart()
print("Enjoy Shopping at Ram's Fruit Store!\n")
break
elif userChoice == '5':
break
elif userChoice == '6':
if(getUserInput("adminStuff")):
openStore.displayStock()
break
else:
print("Invalid input. Please enter number between 1-6 ") | [
"[email protected]"
] | |
0a239158da7a4e929daf2fab7fc0257797715246 | 3fcb1d6e8566f1d31237c934a75ffbfa4b5742e0 | /app_case/migrations/0001_initial.py | 9d41d9298f47319eed946430f88a15e6ecd4fdd2 | [] | no_license | xuxushenlan/itest_platform | 42714bd9ee5dc776aefecb80fdeff3bfa654785e | 56ced10fc9fe5ba05f6b699c98b882b93e9982e3 | refs/heads/master | 2021-02-23T08:02:16.418178 | 2019-12-21T10:11:49 | 2019-12-21T10:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | # Generated by Django 2.2.6 on 2019-11-30 14:46
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('app_manage', '0002_module'),
]
operations = [
migrations.CreateModel(
name='TestCase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='名称')),
('url', models.TextField(verbose_name='URL')),
('method', models.IntegerField(verbose_name='请求方法')),
('header', models.TextField(verbose_name='请求头')),
('parameter_type', models.IntegerField(verbose_name='参数类型')),
('parameter_body', models.TextField(verbose_name='参数内容')),
('result', models.TextField(verbose_name='结果')),
('assert_type', models.IntegerField(verbose_name='断言类型')),
('assert_text', models.TextField(verbose_name='结果')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('module', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_manage.Module')),
],
),
]
| [
"[email protected]"
] | |
8f9f2b4084e6feae2c4e3634ec6c31e48e4bc526 | b64c45e75aa215ddcf7249fb92e047f3e7731187 | /projectdir/utils.py | a0dfa25016c6994023a415d114a185558e6928ca | [] | no_license | johngaitho05/CohMat | 6731b4dfb94475c75f1cd1d2ec55cc810729f939 | ff5b8e5eb877f68a0477f4f19b78c6e7c407af2c | refs/heads/master | 2022-12-12T15:55:53.363782 | 2021-04-04T13:17:05 | 2021-04-04T13:17:05 | 239,868,710 | 1 | 0 | null | 2022-11-04T19:31:50 | 2020-02-11T21:31:47 | Python | UTF-8 | Python | false | false | 2,081 | py | import random
from datetime import datetime, date, timedelta
from django.template.defaultfilters import timesince, register
from django.utils import timezone
import pytz
class AgoTime:
def __init__(self, date_time):
ago = get_ago_time(date_time)
if type(ago) != str or 'Yesterday' in ago:
self.time = ago
else:
self.time = ago.replace(u'\xa0', ' ')
def count(self):
if type(self.time) == str:
return int(self.time.split(' ')[0]) if 'Yesterday' not in self.time else None
return
def desc(self):
if type(self.time) == str:
return self.time[len(str(self.count())) + 1:] if 'Yesterday' not in self.time else None
return
def __str__(self):
if type(self.time) == datetime:
if timezone.now().year == self.time.year:
return self.time.strftime("%d/%m at %H:%M")
else:
return self.time.strftime("%m/%Y")
return self.time
@register.filter
def get_ago_time(passed_time):
yesterday = timezone.now().date() - timedelta(days=1)
diff = abs(passed_time - timezone.now())
d = diff.days
if d <= 30:
span = timesince(passed_time)
span = span.split(",")[0] # just the most significant digit
if passed_time.date() == yesterday:
return "Yesterday at %s" % passed_time.strftime('%H:%M')
return "%s ago" % span
return passed_time
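# Illustrative behaviour: a datetime five minutes old yields "5 minutes ago",
# yesterday yields "Yesterday at HH:MM", and anything more than 30 days old is
# returned unchanged as a datetime for AgoTime.__str__ to format.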
class CustomTimezoneMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
tzname = request.session.get('custom_timezone')
if tzname:
timezone.activate(pytz.timezone(tzname))
else:
timezone.deactivate()
return self.get_response(request)
def randomColor():
    letters = "0123456789ABCDEF"
    color = '#'
    for i in range(6):
        j = random.randint(0, len(letters) - 1)  # any of the 16 hex digits, 'F' included
        color += letters[j]
    return color if 'FF' not in color else randomColor()
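# Usage sketch (the value shown is only an illustration of the format):
#   randomColor() -> "#3A7B0C"
# any draw containing the substring "FF" is rejected and redrawn recursively.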
| [
"[email protected]"
] | |
ccfac22a5a38d03aaf49369740523ca537f17435 | f3806d9fb54773908cd9704121a543b114470aca | /angr/procedures/definitions/win32_compstui.py | e3654bf84bcd95b3c81d56795784dd736af69b89 | [
"BSD-2-Clause"
] | permissive | angr/angr | 8ae95fceca51b0a001de56477d984dd01193ac1d | 37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd | refs/heads/master | 2023-08-17T03:15:21.007865 | 2023-08-15T18:44:57 | 2023-08-15T18:44:57 | 40,328,394 | 7,184 | 1,306 | BSD-2-Clause | 2023-09-14T20:14:23 | 2015-08-06T21:46:55 | Python | UTF-8 | Python | false | false | 4,752 | py | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("compstui.dll")
prototypes = \
{
#
'CommonPropertySheetUIA': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeShort(signed=False, label="UInt16"), "Version": SimTypeShort(signed=False, label="UInt16"), "Flags": SimTypeShort(signed=False, label="UInt16"), "Reason": SimTypeShort(signed=False, label="UInt16"), "hComPropSheet": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pfnComPropSheet": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hComPropSheet", "Function", "lParam1", "lParam2"]), offset=0), "lParamInit": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "UserData": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Result": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="PROPSHEETUI_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pPSUIInfo", "lParam"]), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hWndOwner", "pfnPropSheetUI", "lParam", "pResult"]),
#
'CommonPropertySheetUIW': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeFunction([SimTypePointer(SimStruct({"cbSize": SimTypeShort(signed=False, label="UInt16"), "Version": SimTypeShort(signed=False, label="UInt16"), "Flags": SimTypeShort(signed=False, label="UInt16"), "Reason": SimTypeShort(signed=False, label="UInt16"), "hComPropSheet": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "pfnComPropSheet": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["hComPropSheet", "Function", "lParam1", "lParam2"]), offset=0), "lParamInit": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "UserData": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), "Result": SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)}, name="PROPSHEETUI_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pPSUIInfo", "lParam"]), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hWndOwner", "pfnPropSheetUI", "lParam", "pResult"]),
#
'GetCPSUIUserData': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), arg_names=["hDlg"]),
#
'SetCPSUIUserData': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hDlg", "CPSUIUserData"]),
}
lib.set_prototypes(prototypes)
| [
"[email protected]"
] | |
a1d57be50b7c6b7de643f9c7e0d3ee889b0adefe | cf7118bcfbde5d2bfae51f74bffb44a5f39b3961 | /examples/discoro_client3.py | ee9165856b1f16aa58adac8fc11d8cec2376b2f3 | [
"MIT"
] | permissive | tierralibre/asyncoro | b6aba8654a6c974898590f7814190c805704da1f | d0b7b4bb4f4e569235ee74ccc52810d74fe8af12 | refs/heads/master | 2021-01-17T07:56:01.467307 | 2016-08-06T19:24:56 | 2016-08-06T19:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,981 | py | # Run 'discoronode.py' program to start processes to execute
# computations sent by this client, along with this program.
# Example where this client sends computation to remote discoro process to run
# as remote coroutines. Computations are scheduled with custom scheduler
# (without using RemoteCoroScheduler). Remote coroutines and client can use
# message passing to exchange data.
import asyncoro.disasyncoro as asyncoro
from asyncoro.discoro import *
from asyncoro.discoro_schedulers import RemoteCoroScheduler
# objects of C are exchanged between client and servers
class C(object):
def __init__(self, i):
self.i = i
self.n = None
def __repr__(self):
return '%d: %s' % (self.i, self.n)
# this generator function is sent to remote discoro servers to run
# coroutines there
def compute(obj, client, coro=None):
# obj is an instance of C
import math
# this coroutine and client can use message passing; client sends
# data to this coro as a message
print('process at %s received: %s' % (coro.location, obj.n))
yield coro.sleep(obj.n)
obj.n = math.sqrt(obj.n)
# send result back to client
yield client.deliver(obj, timeout=5)
def client_proc(computation, njobs, coro=None):
# distribute computation to server
if (yield computation.schedule()):
raise Exception('schedule failed')
# create a separate coroutine to receive results, so they can be processed
# as soon as received
def recv_results(coro=None):
for i in range(njobs):
msg = yield coro.receive()
print(' result for job %d: %s' % (i, msg))
results_coro = asyncoro.Coro(recv_results)
# remote coroutines send replies as messages to this coro
for i in range(njobs):
cobj = C(i)
cobj.n = random.uniform(5, 10)
# as noted in 'discoro_client1.py', 'schedule' method is used to run
# jobs sequentially; use 'submit' to run multiple jobs on one server
# concurrently
print(' request %d: %s' % (i, cobj.n))
rcoro = yield rcoro_scheduler.schedule(compute, cobj, results_coro)
if not isinstance(rcoro, asyncoro.Coro):
print('failed to create rcoro %s: %s' % (i, rcoro))
# wait for all results
yield results_coro.finish()
yield computation.close()
if __name__ == '__main__':
import logging, random
asyncoro.logger.setLevel(logging.DEBUG)
# if scheduler is not already running (on a node as a program),
# start it (private scheduler):
Scheduler()
# send generator function and class C (as the computation uses
# objects of C)
computation = Computation([compute, C])
# use RemoteCoroScheduler to schedule/submit coroutines; scheduler must be
# created before computation is scheduled (next step below)
rcoro_scheduler = RemoteCoroScheduler(computation)
# create 10 remote coroutines (jobs)
asyncoro.Coro(client_proc, computation, 10)
| [
"[email protected]"
] | |
a1812f7f2ebf3080a7e2a9e6e77a45f739bd7ac5 | e65a428ca7ee11d2f62d702842d4afbd493f08a4 | /dictionaries/odd_occurrences.py | 94e312ac04aa7a670ac1baa72e8af5f96bccd5f7 | [] | no_license | NikiDimov/SoftUni-Python-Fundamentals | d8ba24a06c4366e76bdc69f1c5225dca29fe955e | 5bb1bf5928e40f2bac867d33566c8b9dac13f566 | refs/heads/main | 2023-07-15T05:57:57.085880 | 2021-08-19T10:27:45 | 2021-08-19T10:27:45 | 323,631,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | words = input().split()
words = [el.lower() for el in words]
dictionary = {}
final_list = []
value = 1
for index in range(len(words)):
key = words[index]
if key in dictionary:
dictionary[key] += 1
continue
dictionary[key] = value
for key, value in dictionary.items():
if dictionary[key] % 2 == 0:
continue
final_list.append(key)
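# Sample run (hypothetical input): for the line "a B b a A" the lower-cased
# counts are a=3 and b=2, so only "a" has an odd count and is printed.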
print(' '.join(final_list)) | [
"[email protected]"
] | |
63f0b3714d9174e540a1a9c7a40db8b81ca459e1 | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0074_digit_factorial_chains.py | 576fc3629b2ebe4479494eab79c28b6178f4a805 | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,196 | py | # Solution of;
# Project Euler Problem 74: Digit factorial chains
# https://projecteuler.net/problem=74
#
# The number 145 is well known for the property that the sum of the factorial
# of its digits is equal to 145:
#   1! + 4! + 5! = 1 + 24 + 120 = 145
# Perhaps less well known is 169, in that it produces the longest chain of
# numbers that link back to 169; it turns out that there are only three such
# loops that exist:
#   169 → 363601 → 1454 → 169
#   871 → 45361 → 871
#   872 → 45362 → 872
# It is not difficult to prove that EVERY starting number will eventually get
# stuck in a loop. For example,
#   69 → 363600 → 1454 → 169 → 363601 (→ 1454)
#   78 → 45360 → 871 → 45361 (→ 871)
#   540 → 145 (→ 145)
# Starting with 69 produces a chain of five non-repeating terms, but the
# longest non-repeating chain with a starting number below one million is
# sixty terms. How many chains, with a starting number below one million,
# contain exactly sixty non-repeating terms?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 74
timed.caller(dummy, n, i, prob_id)
| [
"[email protected]"
] | |
801a2b62b4da99a4dcc49110be6c373608da7381 | 7b1a2930931191444c76d0ee4863912dc811ff4f | /advertising/templatetags/image.py | 0d586ae2cdf441bfb2b19177898112acb045fa95 | [
"BSD-3-Clause"
] | permissive | django-ve/django-advertising | 97a68e49dc2102bfc2dc50eac4fbd443b44f3af8 | d369de379c224161e1c8d5eecde14bfb2b7423c3 | refs/heads/master | 2022-11-21T20:31:53.675161 | 2020-07-23T16:27:11 | 2020-07-23T16:27:11 | 281,835,835 | 0 | 0 | null | 2020-07-23T02:59:43 | 2020-07-23T02:59:42 | null | UTF-8 | Python | false | false | 2,527 | py | import sys
from django import template
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.safestring import mark_safe
from ..models import Advertising
register = template.Library()
@register.simple_tag
def get_images_advertising(height=100, campaign="", *args, **kwargs):
"""
@method: get_images_advertising
    @descrip: Method that returns the advertising images markup
    @param height: image height in pixels
"""
if sys.version_info >= (3, 0):
if not isinstance(height, str):
height = str(height)
else:
if not isinstance(height, basestring):
height = str(height)
if campaign is None:
try:
data = Advertising.objects.all()[0]
except Advertising.DoesNotExist:
data = None
else:
try:
data = Advertising.objects.get(id_advertising=campaign)
except Advertising.DoesNotExist:
data = None
html = ""
if data:
id_adv = data.id_advertising.strip()
if data.timeout:
timeout = data.timeout * 1000
html += """
<script>
window.TimeOutAdvertising_""" + id_adv + """ = """ + str(timeout) + """
</script>"""
# Style css
class_parent = "position: relative; min-height: "+height+"px;"
class_img = "position: absolute; width: 100%; height: auto;"
if hasattr(data, 'images'):
html += '<div class="img-advertising" id="images_advertising_' + id_adv + '"'
html += ' style="' + class_parent + '">'
counter = 0
for image in data.images.all():
html += '<div id="image_container_advertising_' + str(counter)
html += '_' + id_adv + '"'
html += '> <a target="_blank" href="' + image.url + '">'
html += '<img src="' + settings.MEDIA_URL + str(image.photo)
html += '" style="' + class_img + '"'
html += ' id="img_advertising_' + str(counter) + '_' + id_adv
html += '"></a>'
html += '</div>'
counter = counter + 1
html += '</div>'
html += """
<script>
document.addEventListener("DOMContentLoaded", function(event) {
advertisingModule.initialize('""" + id_adv + """');
});
</script>
"""
else:
html = ""
return mark_safe(html)
| [
"[email protected]"
] | |
a239a13d734f2f85c901126fa077257bd88ba16d | c8b541ea4fa7d159b80bef116e5cd232ac61b8c1 | /venv/Lib/test/libregrtest/runtest.py | 6b74a3bf598ab50b296434d0a0a67a0538e054f3 | [] | no_license | shengmenghui/knowledge_building | 7a2d8eef040c2d3a45726b3a908be301e922024b | 04fd7784f15535efed917cce44856526f1f0ce48 | refs/heads/master | 2022-12-31T14:18:05.282092 | 2020-10-23T02:51:37 | 2020-10-23T02:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,435 | py | import faulthandler
import importlib
import io
import os
import sys
import time
import traceback
import unittest
from sql_mode import support
from sql_mode.libregrtest.refleak import dash_R, clear_caches
from sql_mode.libregrtest.save_env import saved_test_environment
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
_FORMAT_TEST_RESULT = {
PASSED: '%s passed',
FAILED: '%s failed',
ENV_CHANGED: '%s failed (env changed)',
SKIPPED: '%s skipped',
RESOURCE_DENIED: '%s skipped (resource denied)',
INTERRUPTED: '%s interrupted',
CHILD_ERROR: '%s crashed',
}
# Minimum duration of a test to display its duration or to mention that
# the test is running in background
PROGRESS_MIN_TIME = 30.0 # seconds
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def format_test_result(test_name, result):
fmt = _FORMAT_TEST_RESULT.get(result, "%s")
return fmt % test_name
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
def get_abs_module(ns, test):
if test.startswith('test.') or ns.testdir:
return test
else:
# Always import it from the test package
return 'test.' + test
def runtest(ns, test):
"""Run a single test.
ns -- regrtest namespace of options
test -- the name of the test
Returns the tuple (result, test_time), where result is one of the
constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
output_on_failure = ns.verbose3
use_timeout = (ns.timeout is not None)
if use_timeout:
faulthandler.dump_traceback_later(ns.timeout, exit=True)
try:
support.match_tests = ns.match_tests
if ns.failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance to all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(ns, test, display_failure=False)
if result[0] != PASSED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = ns.verbose # Tell tests to be moderately quiet
result = runtest_inner(ns, test, display_failure=not ns.verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_traceback_later()
cleanup_test_droppings(test, ns.verbose)
runtest.stringio = None
def runtest_inner(ns, test, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
abstest = get_abs_module(ns, test)
clear_caches()
with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
start_time = time.time()
the_module = importlib.import_module(abstest)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
def test_runner():
loader = unittest.TestLoader()
tests = loader.loadTestsFromModule(the_module)
for error in loader.errors:
print(error, file=sys.stderr)
if loader.errors:
raise Exception("errors while loading tests")
support.run_unittest(tests)
test_runner()
if ns.huntrleaks:
refleak = dash_R(the_module, test, test_runner, ns.huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not ns.quiet and not ns.pgo:
print(test, "skipped --", msg, flush=True)
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if not ns.pgo:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr,
flush=True)
else:
print("test", test, "failed", file=sys.stderr, flush=True)
return FAILED, test_time
except:
msg = traceback.format_exc()
if not ns.pgo:
print("test", test, "crashed --", msg, file=sys.stderr,
flush=True)
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def findtestdir(path=None):
return path or os.path.dirname(os.path.dirname(__file__)) or os.curdir
| [
"[email protected]"
] | |
482f5d49eaccc3c22ece3c7ac66ee332cc9347d4 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/spiders/movies_20200627215602.py | ad264859fade5fe3cd565586e3a4f0428ad647bf | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 1,183 | py | # -*- coding: utf-8 -*-
import scrapy
from maoyanspiders.items import MaoyanspidersItem
# import xlml.etree
from bs4 import BeautifulSoup as bs
class MoviesSpider(scrapy.Spider):
name = 'movies'
allowed_domains = ['maoyan.com']
start_urls = ['http://maoyan.com/board/4']
# def parse(self, response):
# pass
def start_requests(self):
url = f'https://maoyan.com/board/4'
print(url)
yield scrapy.Request(url=url,callback=self.parse)
def parse(self, response):
soup = bs(response.text,'html.parser')
print(soup.text)
return soup
for i in soup.find_all('div',attrs={'class' : 'movie-item-info'}):\
item = MaoyanspidersItem()
title = i.find('p',attrs={'class':'name'}).find('a')
name = title.get('title')
link = 'https://maoyan.com/'+ title.get('href')
time = i.find('p',attrs={'class' : 'releasetime'}).text
item['films_name'] = name
item['release_time'] = t
yield scrapy.Request(url=link, meta={'item':item},callback=self.parse1)
return item
def parse1(self, response):
| [
"[email protected]"
] | |
374696dce42ea18decb6f012afe4ef136ea501a1 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /1/1.two-sum.333959566.Runtime-Error.leetcode.python3.py | 8a44ca6d16b23521fd72653ab990f48e7fc81b5b | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | class Solution:
def firstBadVersion(self, n):
MAX = n
MIN = 1
while MAX >= MIN:
MID = (MAX + MIN) // 2
if isBadVersion(MID):
MAX = MID - 1
else:
MIN = MID + 1
return MAX + 1
| [
"[email protected]"
] | |
93aa196eba09a4b348af0294abef4a924b0caa0e | 068d271e241d8cdb46dbf4243166e4b8ee7025b2 | /Django/进阶部分/day68orm/day68orm/app01/migrations/0007_auto_20180510_1228.py | bb626764f5aef8969f86f26f887e6cae0ca42e17 | [] | no_license | caiqinxiong/python | f6e226e76cb62aac970bcfbcb6c8adfc64858b60 | 9029f6c528d2cb742b600af224e803baa74cbe6a | refs/heads/master | 2023-05-26T19:41:34.911885 | 2020-05-15T09:02:08 | 2020-05-15T09:02:08 | 195,261,757 | 1 | 0 | null | 2021-06-10T23:33:33 | 2019-07-04T15:01:42 | JavaScript | UTF-8 | Python | false | false | 494 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-05-10 04:28
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('app01', '0006_auto_20180510_1227'),
]
operations = [
migrations.AlterModelTable(
name='book',
table='book',
),
migrations.AlterModelTable(
name='publisher',
table='publisher',
),
]
| [
"[email protected]"
] | |
15113c318997014e892984c93d19e78847c9149d | bc183f7357cda3ad064f8c2ff34a176c406446d3 | /pastepwn/analyzers/tests/alwaystrueanalyzer_test.py | 0973040b894968d1844e79f9968191617269cd59 | [
"MIT"
] | permissive | luton1507/pastepwn | b8a790168ce08f10c62574eeb0a68f0dedd5425d | 9b2fee22857e54a5312fdb3d388b472a7d271c50 | refs/heads/master | 2022-11-10T20:18:40.102277 | 2020-06-19T23:34:14 | 2020-06-19T23:34:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | # -*- coding: utf-8 -*-
import unittest
from unittest import mock
from pastepwn.actions.basicaction import BasicAction
from pastepwn.analyzers.alwaystrueanalyzer import AlwaysTrueAnalyzer
class TestAlwaysTrueAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = AlwaysTrueAnalyzer(None)
self.paste = mock.Mock()
def test_match(self):
self.paste.body = "Test"
self.assertTrue(self.analyzer.match(self.paste))
self.paste.body = None
self.assertTrue(self.analyzer.match(self.paste))
self.paste.body = ""
self.assertTrue(self.analyzer.match(self.paste))
self.paste = None
self.assertTrue(self.analyzer.match(self.paste))
def test_actions_present(self):
action = mock.MagicMock(spec=BasicAction)
analyzer = AlwaysTrueAnalyzer(action)
self.assertEqual([action], analyzer.actions)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
5a8709278627f8364d572381810393c18f7ade2c | 297497957c531d81ba286bc91253fbbb78b4d8be | /testing/web-platform/tests/tools/ci/run_tc.py | 52d2dcf6fda24d64f4ab918c097479efb08369d5 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | marco-c/gecko-dev-comments-removed | 7a9dd34045b07e6b22f0c636c0a836b9e639f9d3 | 61942784fb157763e65608e5a29b3729b0aa66fa | refs/heads/master | 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 | NOASSERTION | 2019-09-29T01:27:49 | 2019-09-27T10:44:24 | C++ | UTF-8 | Python | false | false | 14,441 | py |
"""Wrapper script for running jobs in Taskcluster
This is intended for running test jobs in Taskcluster. The script
takes two positional arguments, which are the name of the test job
and the script to actually run.
The name of the test job is used to determine whether the script should be run
for this push (this is in lieu of having a proper decision task). There are
several ways that the script can be scheduled to run
1. The output of wpt test-jobs includes the job name
2. The job name is included in a job declaration (see below)
3. The string "all" is included in the job declaration
4. The job name is set to "all"
A job declaration is a line appearing in the pull request body (for
pull requests) or first commit message (for pushes) of the form:
tc-jobs: job1,job2,[...]
In addition, there are a number of keyword arguments used to set options for the
environment in which the jobs run. Documentation for these is in the command help.
As well as running the script, the script sets two environment variables;
GITHUB_BRANCH which is the branch that the commits will merge into (if it's a PR)
or the branch that the commits are on (if it's a push), and GITHUB_PULL_REQUEST
which is the string "false" if the event triggering this job wasn't a pull request
or the pull request number if it was. The semantics of these variables are chosen
to match the corresponding TRAVIS_* variables.
Note: for local testing in the Docker image the script ought to still work, but
full functionality requires that the TASK_EVENT environment variable is set to
the serialization of a GitHub event payload.
"""
import argparse
import fnmatch
import json
import os
import subprocess
import sys
import tarfile
import tempfile
import zipfile
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))
from tools.wpt.utils import get_download_to_descriptor
root = os.path.abspath(
os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
def run(cmd, return_stdout=False, **kwargs):
print(" ".join(cmd))
if return_stdout:
f = subprocess.check_output
if "encoding" not in kwargs:
kwargs["encoding"] = "utf-8"
else:
f = subprocess.check_call
return f(cmd, **kwargs)
def start(cmd):
print(" ".join(cmd))
subprocess.Popen(cmd)
def get_parser():
p = argparse.ArgumentParser()
p.add_argument("--oom-killer",
action="store_true",
default=False,
help="Run userspace OOM killer")
p.add_argument("--hosts",
dest="hosts_file",
action="store_true",
default=True,
help="Setup wpt entries in hosts file")
p.add_argument("--no-hosts",
dest="hosts_file",
action="store_false",
help="Don't setup wpt entries in hosts file")
p.add_argument("--browser",
action="append",
default=[],
help="Browsers that will be used in the job")
p.add_argument("--channel",
default=None,
choices=["experimental", "dev", "nightly", "beta", "stable"],
help="Chrome browser channel")
p.add_argument("--xvfb",
action="store_true",
help="Start xvfb")
p.add_argument("--install-certificates", action="store_true", default=None,
help="Install web-platform.test certificates to UA store")
p.add_argument("--no-install-certificates", action="store_false", default=None,
help="Don't install web-platform.test certificates to UA store")
p.add_argument("--no-setup-repository", action="store_false", dest="setup_repository",
help="Don't run any repository setup steps, instead use the existing worktree. "
"This is useful for local testing.")
p.add_argument("--checkout",
help="Revision to checkout before starting job")
p.add_argument("--ref",
help="Git ref for the commit that should be run")
p.add_argument("--head-rev",
help="Commit at the head of the branch when the decision task ran")
p.add_argument("--merge-rev",
help="Provisional merge commit for PR when the decision task ran")
p.add_argument("script",
help="Script to run for the job")
p.add_argument("script_args",
nargs=argparse.REMAINDER,
help="Additional arguments to pass to the script")
return p
def start_userspace_oom_killer():
start(["sudo", "earlyoom", "-p", "-r", "60", "--prefer=(chrome|firefox)", "--avoid=python"])
def make_hosts_file():
run(["sudo", "sh", "-c", "./wpt make-hosts-file >> /etc/hosts"])
def checkout_revision(rev):
run(["git", "checkout", "--quiet", rev])
def install_certificates():
run(["sudo", "cp", "tools/certs/cacert.pem",
"/usr/local/share/ca-certificates/cacert.crt"])
run(["sudo", "update-ca-certificates"])
def install_chrome(channel):
if channel in ("experimental", "dev"):
deb_archive = "google-chrome-unstable_current_amd64.deb"
elif channel == "beta":
deb_archive = "google-chrome-beta_current_amd64.deb"
elif channel == "stable":
deb_archive = "google-chrome-stable_current_amd64.deb"
else:
raise ValueError("Unrecognized release channel: %s" % channel)
dest = os.path.join("/tmp", deb_archive)
deb_url = "https://dl.google.com/linux/direct/%s" % deb_archive
with open(dest, "wb") as f:
get_download_to_descriptor(f, deb_url)
run(["sudo", "apt-get", "-qqy", "update"])
run(["sudo", "gdebi", "-qn", "/tmp/%s" % deb_archive])
def start_xvfb():
start(["sudo", "Xvfb", os.environ["DISPLAY"], "-screen", "0",
"%sx%sx%s" % (os.environ["SCREEN_WIDTH"],
os.environ["SCREEN_HEIGHT"],
os.environ["SCREEN_DEPTH"])])
start(["sudo", "fluxbox", "-display", os.environ["DISPLAY"]])
def set_variables(event):
ref_prefix = "refs/heads/"
pull_request = "false"
branch = None
if "pull_request" in event:
pull_request = str(event["pull_request"]["number"])
branch = event["pull_request"]["base"]["ref"]
elif "ref" in event:
branch = event["ref"]
if branch.startswith(ref_prefix):
branch = branch[len(ref_prefix):]
os.environ["GITHUB_PULL_REQUEST"] = pull_request
if branch:
os.environ["GITHUB_BRANCH"] = branch
def task_url(task_id):
root_url = os.environ['TASKCLUSTER_ROOT_URL']
if root_url == 'https://taskcluster.net':
queue_base = "https://queue.taskcluster.net/v1/task"
else:
queue_base = root_url + "/api/queue/v1/task"
return "%s/%s" % (queue_base, task_id)
def download_artifacts(artifacts):
artifact_list_by_task = {}
for artifact in artifacts:
base_url = task_url(artifact["task"])
if artifact["task"] not in artifact_list_by_task:
with tempfile.TemporaryFile() as f:
get_download_to_descriptor(f, base_url + "/artifacts")
f.seek(0)
artifacts_data = json.load(f)
artifact_list_by_task[artifact["task"]] = artifacts_data
artifacts_data = artifact_list_by_task[artifact["task"]]
print("DEBUG: Got artifacts %s" % artifacts_data)
found = False
for candidate in artifacts_data["artifacts"]:
print("DEBUG: candidate: %s glob: %s" % (candidate["name"], artifact["glob"]))
if fnmatch.fnmatch(candidate["name"], artifact["glob"]):
found = True
print("INFO: Fetching aritfact %s from task %s" % (candidate["name"], artifact["task"]))
file_name = candidate["name"].rsplit("/", 1)[1]
url = base_url + "/artifacts/" + candidate["name"]
dest_path = os.path.expanduser(os.path.join("~", artifact["dest"], file_name))
dest_dir = os.path.dirname(dest_path)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
with open(dest_path, "wb") as f:
get_download_to_descriptor(f, url)
if artifact.get("extract"):
unpack(dest_path)
if not found:
print("WARNING: No artifact found matching %s in task %s" % (artifact["glob"], artifact["task"]))
def unpack(path):
dest = os.path.dirname(path)
if tarfile.is_tarfile(path):
run(["tar", "-xf", path], cwd=os.path.dirname(path))
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as archive:
archive.extractall(dest)
else:
print("ERROR: Don't know how to extract %s" % path)
raise Exception
def setup_environment(args):
if "TASK_ARTIFACTS" in os.environ:
artifacts = json.loads(os.environ["TASK_ARTIFACTS"])
download_artifacts(artifacts)
if args.hosts_file:
make_hosts_file()
if args.install_certificates:
install_certificates()
if "chrome" in args.browser:
assert args.channel is not None
install_chrome(args.channel)
if args.xvfb:
start_xvfb()
if args.oom_killer:
start_userspace_oom_killer()
def setup_repository(args):
is_pr = os.environ.get("GITHUB_PULL_REQUEST", "false") != "false"
if args.ref:
if is_pr:
assert args.ref.endswith("/merge")
expected_head = args.merge_rev
else:
expected_head = args.head_rev
task_head = run(["git", "rev-parse", "task_head"], return_stdout=True).strip()
if task_head != expected_head:
if not is_pr:
try:
run(["git", "fetch", "origin", expected_head])
run(["git", "reset", "--hard", expected_head])
except subprocess.CalledProcessError:
print("CRITICAL: task_head points at %s, expected %s and "
"unable to fetch expected commit.\n"
"This may be because the branch was updated" % (task_head, expected_head))
sys.exit(1)
else:
head_ref = args.ref.rsplit("/", 1)[0] + "/head"
try:
remote_head = run(["git", "ls-remote", "origin", head_ref],
return_stdout=True).split("\t")[0]
except subprocess.CalledProcessError:
print("CRITICAL: Failed to read remote ref %s" % head_ref)
sys.exit(1)
if remote_head != args.head_rev:
print("CRITICAL: task_head points at %s, expected %s. "
"This may be because the branch was updated" % (task_head, expected_head))
sys.exit(1)
print("INFO: Merge commit changed from %s to %s due to base branch changes. "
"Running task anyway." % (expected_head, task_head))
if os.environ.get("GITHUB_PULL_REQUEST", "false") != "false":
parents = run(["git", "rev-parse", "task_head^@"],
return_stdout=True).strip().split()
if len(parents) == 2:
base_head = parents[0]
pr_head = parents[1]
run(["git", "branch", "base_head", base_head])
run(["git", "branch", "pr_head", pr_head])
else:
print("ERROR: Pull request HEAD wasn't a 2-parent merge commit; "
"expected to test the merge of PR into the base")
commit = run(["git", "rev-parse", "task_head"],
return_stdout=True).strip()
print("HEAD: %s" % commit)
print("Parents: %s" % ", ".join(parents))
sys.exit(1)
branch = os.environ.get("GITHUB_BRANCH")
if branch:
run(["git", "fetch", "--quiet", "origin", "%s:%s" % (branch, branch)])
checkout_rev = args.checkout if args.checkout is not None else "task_head"
checkout_revision(checkout_rev)
refs = run(["git", "for-each-ref", "refs/heads"], return_stdout=True)
print("INFO: git refs:\n%s" % refs)
print("INFO: checked out commit:\n%s" % run(["git", "rev-parse", "HEAD"],
return_stdout=True))
def fetch_event_data():
try:
task_id = os.environ["TASK_ID"]
except KeyError:
print("WARNING: Missing TASK_ID environment variable")
return None
with tempfile.TemporaryFile() as f:
get_download_to_descriptor(f, task_url(task_id))
f.seek(0)
task_data = json.load(f)
event_data = task_data.get("extra", {}).get("github_event")
if event_data is not None:
return json.loads(event_data)
def include_job(job):
if "GITHUB_PULL_REQUEST" not in os.environ:
return True
if (os.environ["GITHUB_PULL_REQUEST"] == "false" and
job == "run-all"):
return True
jobs_str = run([os.path.join(root, "wpt"),
"test-jobs"], return_stdout=True)
print(jobs_str)
return job in set(jobs_str.splitlines())
def main():
args = get_parser().parse_args()
if "TASK_EVENT" in os.environ:
event = json.loads(os.environ["TASK_EVENT"])
else:
event = fetch_event_data()
if event:
set_variables(event)
if args.setup_repository:
setup_repository(args)
if args.script in ["run-all", "lint", "update_built", "tools_unittest",
"wpt_integration", "resources_unittest",
"wptrunner_infrastructure", "stability", "affected_tests"]:
job = args.script
if not include_job(job):
return
args.script = args.script_args[0]
args.script_args = args.script_args[1:]
setup_environment(args)
os.chdir(root)
cmd = [args.script] + args.script_args
print(" ".join(cmd))
sys.exit(subprocess.call(cmd))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4894236295787d0bb66c0692901d406cee22aea6 | 80b591ef2fc062b7b7935ac0ed21ab007adf2b69 | /auto_input.py | 45d7affb261a6143681ecdd9a54a1abe3979501a | [] | no_license | flysafely/Auto_input | 52677e492e3ec0d0177fc8f524940bd4b3b1dcf6 | fa55997616bcbee0276191e8087e57d634532f26 | refs/heads/master | 2022-01-09T08:48:34.014990 | 2019-06-05T10:57:29 | 2019-06-05T10:57:29 | 106,656,447 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,718 | py | from pymouse import PyMouse
from pykeyboard import PyKeyboard
from pykeyboard import PyKeyboardEvent
from openpyxl import Workbook  # used when writing Excel, since win32com can raise a "call was rejected by callee" error under multithreading
from openpyxl import load_workbook
from openpyxl.writer.excel import ExcelWriter
from win32com.client import GetObject
from win32com.client import Dispatch
from win32com.client import constants as constants
from oscrypto._win import symmetric
from PIL import Image, ImageTk
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import *
from tkinter.messagebox import *
import win32gui
import win32api
import win32con
import win32process
import tkinter
import time
import ctypes
import inspect
import win32com.client
import threading
import pythoncom  # for COM calls from multiple threads
import platform
import os
import time
import datetime
import uuid
import urllib.request
import re
import CheckRegister as ckr
#import CheckUpdate as cku
Version = "4.0"
Software_Name = "ae"
m = PyMouse()
k = PyKeyboard()
global time_stamp, isOk, isContinue, isStop, isRegistered, Status_label, data_array, hllDll, VK_CAPITAL, VK_NUMLOCK, VK_SCROLL, VK_ESCAPE, UserName, Company, Department
isContinue = True
isStop = False
isOk = False
isRegistered = False
isOver = False
time_stamp = 0
hllDll = ctypes.WinDLL("User32.dll")
key_hex = [0x1B, 0x23, 0x14, 0x90, 0x91]
VK_ESCAPE = key_hex[0]
VK_END = key_hex[1]
VK_CAPITAL = key_hex[2]
VK_NUMLOCK = key_hex[3]
VK_SCROLL = key_hex[4]
ADD_REG_EXP = "now : '(.*?) "  # extracts the timestamp string from the intranet OA page
server_ip = "http://130.130.200.49"
column_var = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z", "AA", "AB", "AC", "AD", "AE", "AF", "AG", "AH", "AI", "AJ", "AK", "AL", "AM", "AN", "AO", "AP", "AQ", "AR", "AS", "AT"]
title_var = ["专柜", "合同号", "时间"]
pay_num = ["B100", "B102", "B110", "B112", "B120", "B122", "B130", "B132", "B200", "B202",
"C100", "C102", "C110", "C112", "C120", "C122", "C130", "C132", "C140", "C142", "C150", "C152", "C200", "C202", "C210", "C212", "C220", "C222", "C230", "C232",
"C240", "C242", "C250", "C252", "C260", "C262", "C270", "C272", "C280", "C282", "C290", "C292", "C900",
"D010", "D020",
"E100", "E900",
"F100", "F102", "F110", "F112", "F120",
"V100", "V110", "V120", "V210", "V220", "V230", "V250", "V260", "V270", "V280", "V300"]
pay_mode = ["Z", "X", "C", "z", "x", "c"]
data_array = []
class myThread (threading.Thread):
def __init__(self, functions):
threading.Thread.__init__(self)
self.functions = functions
self.result = object
def run(self):
self.functions()
def get_result(self):
return self.result
def Add_Thread(function):
thread = myThread(function)
thread.setDaemon(True)
thread.start()
return thread
def operation_with_pause(mode, *args, **kwargs):
if mode == "pk":
for i in range(1, kwargs["num"] + 1):
k.tap_key(args[0])
elif mode == "ps":
for i in range(1, kwargs["num"] + 1):
k.type_string(args[0])
elif mode == "pks":
for i in range(1, kwargs["num"] + 1):
k.press_keys(list(args))
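# Usage sketch of the three modes (values mirror calls made later in this script):
#   operation_with_pause("pk", k.tab_key, num=3)             # tap Tab three times
#   operation_with_pause("ps", "12345", num=1)               # type a string once
#   operation_with_pause("pks", k.alt_key, "2", "A", num=1)  # press Alt+2+A as a chord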
def setAppWindowForeground(Appname, windowsize=3):  # bring the target app's window to the foreground
hForeWnd = win32gui.GetForegroundWindow()
dwCurID = win32api.GetCurrentThreadId()
dwForeID = win32process.GetWindowThreadProcessId(hForeWnd)
windowHandle = win32gui.FindWindow(0, Appname)
if hForeWnd != 0 and dwCurID != dwForeID[0]:
win32process.AttachThreadInput(dwCurID, dwForeID[0], True)
    # parameter reference: https://msdn.microsoft.com/en-us/library/windows/desktop/ms633548(v=vs.85).aspx
win32gui.ShowWindow(windowHandle, windowsize)
win32gui.SetWindowPos(windowHandle, -1, 0, 0, 0, 0, 0x0001 | 0x0002)
win32gui.SetWindowPos(windowHandle, -2, 0, 0, 0, 0, 0x0001 | 0x0002)
if hForeWnd != 0:
win32gui.SetForegroundWindow(windowHandle)
def ensure_App_Foreground(appname, windowsize=3):
if win32gui.FindWindow(0, appname) != win32gui.GetForegroundWindow():
setAppWindowForeground(appname, windowsize)
def ensure_CapsLock():
global hllDll, VK_CAPITAL, isContinue
if hllDll.GetKeyState(VK_CAPITAL) == 0:
k.tap_key(k.caps_lock_key)
def check_window(appname):
global isStop
if win32gui.GetForegroundWindow() != win32gui.FindWindow(0, appname):
isStop = True
def keyboard_monitor(hexnum, function):
global hllDll, isContinue
if hexnum in key_hex and hexnum != 0x23:
pre_status = hllDll.GetKeyState(hexnum)
while 1:
after_status = hllDll.GetKeyState(hexnum)
            # states 0/1 mean key up; 65408/65409 mean key down
if pre_status != after_status and (after_status == 65408 or after_status == 65409):
                function()  # the per-key action is passed in as a lambda
pre_status = hllDll.GetKeyState(hexnum)
else:
pre_status = hllDll.GetKeyState(hexnum)
while 1:
after_status = hllDll.GetKeyState(hexnum)
            # states 0/1 mean key up; -127/-128 mean key down (End key)
if pre_status != after_status and (after_status == -128 or after_status == -127):
                function()  # the per-key action is passed in as a lambda
pre_status = hllDll.GetKeyState(hexnum)
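# Wiring sketch: the monitors are started on daemon threads later in this file, e.g.
#   Add_thread(lambda: keyboard_monitor(VK_CAPITAL, lambda: Change_isContinue()))
# so CapsLock toggles the pause flag while the main loop keeps running.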
def Change_isContinue():
global isContinue, time_stamp
if time.time() - time_stamp > 0.05:
time_stamp = time.time()
isContinue = not isContinue
else:
pass
def Change_isStop():
global isStop
isStop = True
def Open_WorkBook_By_Openpyxl(path):
try:
Workbook = load_workbook(path)
return Workbook
except Exception as e:
tkinter.messagebox.showinfo(
"表格数据有误!", "请检查导出文件%s格式、内容是否正确,亦或是没有选择对应的正确文件!" % path)
raise
def Open_Sheet_By_Openpyxl(path, offset):  # openpyxl worksheet indices start at 0
try:
Workbook = load_workbook(path)
return (Workbook, Workbook.worksheets[0 + offset])
except Exception as e:
tkinter.messagebox.showinfo(
"表格数据有误!", "请检查导出文件%s格式、内容是否正确,亦或是没有选择对应的正确文件!" % path)
raise
def Get_Max_Range_Num(worksheet, method):
if method == "row":
num = 0
for i in range(1, worksheet.max_row + 1):
if worksheet.cell(row=i, column=1).value != None:
num += 1
return num
else:
num = 0
for i in range(1, worksheet.max_column + 1):
if worksheet.cell(row=1, column=i).value != None:
num += 1
return num
def is_num_by_except(num, message, v_num):
try:
float(num)
return True
    except Exception:
        v_num.set(message + "单元格内容为非数字!")
        tkinter.messagebox.showinfo("警告!", message + "单元格内容为非数字!")
        return False
def get_path(label):
label.set("正在处理导入数据......")
choose_path = tkinter.filedialog.askopenfilename()
if choose_path:
choose_path = choose_path.replace("/", "\\")
check_data(choose_path, 0, label)
else:
label.set("等待导入数据......")
def check_data(path, offset, label):
global isOk
isOk = True
Excel_WorkBook, Excel_WorkSheet = Open_Sheet_By_Openpyxl(path, offset)
Max_Column = Get_Max_Range_Num(Excel_WorkSheet, "column")
Max_Row = Get_Max_Range_Num(Excel_WorkSheet, "row")
    # make sure the sheet actually contains data
if Max_Row < 2 or Max_Column < 4:
tkinter.messagebox.showinfo("警告!", "未检测到导入表格中的有效数据!")
return
    # check for columns that contain data but lack a header
for column_num in range(1, Max_Column + 1):
Cell_Value = Excel_WorkSheet.cell(row=1, column=column_num).value
if Cell_Value == None:
tkinter.messagebox.showinfo("警告!", "%s列:'%s'<标题>为空!" % (
column_var[column_num - 1], Cell_Value))
isOk = False
return
else:
if column_num < 4:
if Cell_Value != title_var[column_num - 1]:
tkinter.messagebox.showinfo("警告!", "%s列 : '%s' <标题>错误!,正确为: '%s' " % (
column_var[column_num - 1], Cell_Value, title_var[column_num - 1]))
label.set("%s列 : '%s' <标题>错误!,正确为: '%s' " % (
column_var[column_num - 1], Cell_Value, title_var[column_num - 1]))
isOk = False
return
elif Cell_Value[0:4] not in pay_num:
tkinter.messagebox.showinfo("警告!", "%s列:'%s'<费用编码>错误!" % (
column_var[column_num - 1], Cell_Value))
label.set("%s列:'%s'<费用编码>错误!" %
(column_var[column_num - 1], Cell_Value))
isOk = False
return
for row_num in range(2, Max_Row + 1):
Cell_Value1 = Excel_WorkSheet.cell(row=row_num, column=2).value
Cell_Value2 = Excel_WorkSheet.cell(row=row_num, column=3).value
if Cell_Value1 != None and str(Cell_Value1).isdigit() and Cell_Value2 != None:
date_detail = str(Cell_Value2).split(" ")[0].split("-")
if len(date_detail) == 3:
if len(date_detail[0]) + len(date_detail[1]) + len(date_detail[2]) != 8:
tkinter.messagebox.showinfo(
"警告!", "C%d单元格日期格式有误!例如:2017-08-25" % row_num)
label.set("C%d单元格日期格式有误!例如:2017-08-25" % row_num)
isOk = False
return
else:
tkinter.messagebox.showinfo(
"警告!", "C%d单元格日期分割符号有误!分隔符为'-'" % row_num)
label.set("C%d单元格日期分割符号有误!分隔符为'-'" % row_num)
isOk = False
return
else:
tkinter.messagebox.showinfo(
"警告!", "%s行:<合同号>或者<录入时间>错误!" % str(row_num))
label.set("%s行:<合同号>或者<录入时间>错误!" % str(row_num))
isOk = False
return
get_excel_data(Excel_WorkSheet, Max_Column, Max_Row, label)
def get_excel_data(Excel_WorkSheet, Max_Column, Max_Row, label):
global data_array
title_array = []
contract_error = []
value_error_row = ""
con_error_row = ""
all_data_array = []
for i in range(2, Max_Column + 1):
title_array.append(Excel_WorkSheet.cell(row=1, column=i).value)
for r in range(2, Max_Row + 1):
row_array = []
for j in range(2, Max_Column + 1):
if j <= 3:
row_array.append((title_array[j - 2], str(Excel_WorkSheet.cell(
row=r, column=j).value).replace(".0", "")))
elif j > 3 and j % 2 == 0:
cell_value = str(Excel_WorkSheet.cell(row=r, column=j).value)
cell_value_without_char = cell_value.replace(
".", "", 1).replace("-", "", 1)
if cell_value not in ["None", "0", "0.0"] and cell_value_without_char.isdigit() and cell_value_without_char != "0":
if float(Excel_WorkSheet.cell(row=r, column=j).value) > 0:
if str(Excel_WorkSheet.cell(row=r, column=j + 1).value) in pay_mode:
row_array.append((title_array[j - 2],
str(Excel_WorkSheet.cell(
row=r, column=j).value),
str(Excel_WorkSheet.cell(row=r, column=j + 1).value).upper()))
else:
row_array.append((title_array[j - 2],
str(Excel_WorkSheet.cell(
row=r, column=j).value),
"Z"))
else:
if str(Excel_WorkSheet.cell(row=r, column=j + 1).value) in pay_mode:
row_array.append((title_array[j - 2][:3] + "2",
str(abs(float(Excel_WorkSheet.cell(
row=r, column=j).value))),
str(Excel_WorkSheet.cell(row=r, column=j + 1).value).upper()))
else:
row_array.append((title_array[j - 2][:3] + "2",
str(abs(float(Excel_WorkSheet.cell(
row=r, column=j).value))),
"Z"))
all_data_array.append(row_array)
data_array = list(filter(lambda data: len(data) > 2, all_data_array))
print(isOk)
label.set("导入数据检查通过!")
def start_input(second, pay_windows, rowinfo, pending):
global isOk, isStop, isRegistered, hllDll, VK_CAPITAL, VK_NUMLOCK, VK_SCROLL, VK_ESCAPE, data_array
if isRegistered == False:
pay_windows.deiconify()
if tkinter.messagebox.askyesno("警告!", "软件'未激活'或者'注册码已过期',请扫描左侧二维码!"):
pass
else:
pay_windows.withdraw()
else:
if len(data_array) != 0 and len(second.split("-")) == 2 and second.split("-")[0].isdigit() and second.split("-")[1].replace("0.", "").isdigit() and isOk:
wait_time = second.split("-")[0]
trim_time = second.split("-")[1]
if tkinter.messagebox.askyesno("提示!", "即将开始录入数据!请确认录入窗口已经打开!"):
try:
ensure_App_Foreground(u"富基融通商业连锁门店管理系统", 3)
except Exception as e:
tkinter.messagebox.showinfo("警告!", "尚未打开费用录入单界面!")
else:
if analyze_rowinfo(rowinfo)[0] == -1:
tkinter.messagebox.showinfo(
"警告!", analyze_rowinfo(rowinfo)[1])
else:
time.sleep(1)
check_window(u"富基融通商业连锁门店管理系统")
ensure_CapsLock()
program = Simulation_operation(
int(wait_time), analyze_rowinfo(rowinfo), pending)
do_and_check_pause(
program, float(trim_time), True, False)
else:
print("错误!")
else:
tkinter.messagebox.showinfo("警告!", "<延迟秒速>或<起始行>或<费用数据>有误,请检查!")
def do_and_check_pause(program, trim, iscontinue, isstop):
global isContinue, isStop
isContinue = iscontinue
isStop = isstop
wait_time = time.time()
while 1:
if isContinue == True and isStop == False:
try:
next(program)
time.sleep(trim)
except StopIteration:
tkinter.messagebox.showinfo("提示!", "录入已完成!")
break
elif isContinue == True and isStop == True:
break
elif isContinue == False and isStop == True:
break
else:
if time.time() - wait_time <= 60:
pass
else:
print("wait out!")
break
tkinter.messagebox.showinfo("警告!", "已经终止录入!")
def analyze_rowinfo(rowinfo):
global data_array
if "*" in rowinfo:
if int(rowinfo.replace("*", "")) >= 2 and int(rowinfo.replace("*", "")) <= len(data_array) + 1:
return [int(rowinfo.replace("*", "")) - 2, int(rowinfo.replace("*", "")) - 1]
else:
return [-1, "单行录入超出<起始>或<结束>位置!"]
elif "-" in rowinfo:
if int(rowinfo.split("-")[0]) >= 2 and int(rowinfo.split("-")[0]) <= int(rowinfo.split("-")[1]) and int(rowinfo.split("-")[1]) <= len(data_array) + 1:
return [int(rowinfo.split("-")[0]) - 2, int(rowinfo.split("-")[1]) - 1]
elif int(rowinfo.split("-")[0]) < 2 and int(rowinfo.split("-")[1]) <= len(data_array) + 1:
return [-1, "指定行录入<起始>位置超出范围!"]
        elif int(rowinfo.split("-")[0]) > 2 and int(rowinfo.split("-")[1]) > len(data_array) + 1:
            return [-1, "指定行录入<结束>位置超出范围!"]
        else:
            return [-1, "指定行录入<起始>或<结束>位置超出范围!"]
elif rowinfo.isdigit():
if int(rowinfo) >= 2 and int(rowinfo) <= len(data_array) + 1:
return [int(rowinfo) - 2, len(data_array)]
else:
return [-1, "指定行录入<起始>位置超出范围!"]
else:
return [-1, "<起始行>信息输入有误!"]
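# Examples (sketch, assuming 10 data rows are loaded):
#   analyze_rowinfo("*3")  -> [1, 2]    single spreadsheet row 3 only
#   analyze_rowinfo("2-5") -> [0, 4]    spreadsheet rows 2 through 5
#   analyze_rowinfo("4")   -> [2, 10]   row 4 through the last row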
def Add_thread(function):
thread = myThread(function)
thread.setDaemon(True)
thread.start()
return thread
def _async_raise(tid, exctype): # 用于退出子线程
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def Stop_thread(thread): # 用于退出子线程
_async_raise(thread.ident, SystemExit)
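# Usage sketch: threads started via Add_thread are torn down with
#   Stop_thread(keyboard_monitor_thread)
# by raising SystemExit inside the target thread (see Simulation_operation below).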
def Simulation_operation(stoptime, rowinfo, pending):
global data_array
    # start watching CapsLock (pause toggle)
keyboard_monitor_thread = Add_thread(
lambda: keyboard_monitor(VK_CAPITAL, lambda: Change_isContinue()))
    # start watching Esc (stop)
os_exit_byEnd_thread = Add_thread(
lambda: keyboard_monitor(VK_ESCAPE, lambda: Change_isStop()))
    # start watching End (force quit)
os_exit_thread = Add_thread(
lambda: keyboard_monitor(VK_END, lambda: os._exit(0)))
    # get the screen size
x_dim, y_dim = m.screen_size()
    # with 富基融通 maximized, the screen centre lands on blank space in the list
m.click(x_dim // 2, y_dim // 2, 1, 1)
for i in range(rowinfo[0], rowinfo[1]):
for j in range(0, len(data_array[i])):
if j < 2:
yield
operation_with_pause("pk", k.tab_key, num=3)
yield
operation_with_pause("pk", k.delete_key, num=10)
yield
operation_with_pause("ps", data_array[i][j][1], num=1)
else:
yield
m.click(x_dim // 2, y_dim // 2, 1, 1)
yield
operation_with_pause("pks", k.control_key, "A", num=1)
yield
operation_with_pause("ps", data_array[i][j][0], num=1)
yield
operation_with_pause("pk", k.tab_key, num=1)
yield
operation_with_pause("ps", data_array[i][j][1], num=1)
yield
operation_with_pause("pk", k.tab_key, num=2)
yield
operation_with_pause("pk", k.up_key, num=1)
yield
                # pick the payment mode by its position in the pay_mode list
operation_with_pause(
"pk", k.up_key, num=pay_mode.index(data_array[i][j][2]))
if pending == 0:
yield
operation_with_pause("pk", k.alt_key, num=1)
yield
operation_with_pause("pk", "2", num=1)
yield
operation_with_pause("pk", "Y", num=1)
yield
            # two confirmation dialogs follow
            time.sleep(stoptime / 2)  # delay sized to the machine's speed so the app keeps up
            # forms B200/B202 require extra data; a dialog pops up if it is missing, and Enter dismisses it
yield
operation_with_pause("pk", k.enter_key, num=1)
yield
            time.sleep(stoptime / 2)  # delay sized to the machine's speed so the app keeps up
yield
operation_with_pause("pk", k.enter_key, num=1)
else:
yield
operation_with_pause("pks", k.alt_key, "2", "A", num=1)
            # three confirmation dialogs follow
yield
            time.sleep(stoptime / 2)  # delay sized to the machine's speed so the app keeps up
            # forms B200/B202 require extra data; a dialog pops up if it is missing, and Enter dismisses it
yield
operation_with_pause("pk", k.enter_key, num=1)
yield
            time.sleep(stoptime / 2)  # delay sized to the machine's speed so the app keeps up
yield
operation_with_pause("pk", k.enter_key, num=1)
yield
            time.sleep(stoptime / 2)  # delay sized to the machine's speed so the app keeps up
yield
operation_with_pause("pk", k.enter_key, num=1)
        # create a new blank form
yield
operation_with_pause("pks", k.alt_key, "2", "N", num=1)
yield
time.sleep(stoptime / 2)
yield
check_window(u"富基融通商业连锁门店管理系统")
yield
ensure_App_Foreground(u"富基融通商业连锁门店管理系统")
yield
m.click(x_dim // 2, y_dim // 2, 1, 1)
Stop_thread(keyboard_monitor_thread)
Stop_thread(os_exit_byEnd_thread)
Stop_thread(os_exit_thread)
def Judge_system(str_info):
system_info = platform.platform()
if str_info in system_info:
return True
else:
return False
def DownLoad(dbnum, dbsize, size):
global download_ProgressValue
    '''Download progress callback.
    dbnum: number of data blocks downloaded so far
    dbsize: size of one data block
    size: total size of the remote file
    '''
percent = 100.0 * dbnum * dbsize / size
if percent > 100:
percent = 100
download_ProgressValue.set(percent)
def Check_System_Info(screen_width, screen_height):
system_info = platform.platform()
if "Windows-7" in system_info or "Windows-10" in system_info:
return {"geometry": '352x77+' + '%s+%s' % (screen_width, screen_height),
"maxsize-x": 352,
"maxsize-y": 101,
"textwidth": 50,
"buttonwidth": 43,
"height": 2,
"timedefalut": "1-0",
"rowdefalut": 2
}
else:
return {"geometry": '363x67+' + '%s+%s' % (screen_width, screen_height),
"maxsize-x": 363,
"maxsize-y": 99,
"textwidth": 50,
"buttonwidth": 45,
"height": 3,
"timedefalut": "2-0.1",
"rowdefalut": 2
}
def Refresh_Status_label(info):
global Status_label
Status_label.set(info)
def loadview():
global hllDll, VK_CAPITAL, Status_label, ProgressValue, download_ProgressValue
root = tkinter.Tk()
root.title('费用录入-v%s' % Version)
ico = os.getcwd() + r'\ae.ico'
root.iconbitmap(ico)
#root.attributes("-alpha", 0.1)
screen_width = root.winfo_screenwidth() // 2 - 187
screen_height = root.winfo_screenheight() // 2 - 260
    windows_params = Check_System_Info(screen_width, screen_height)
root.geometry(windows_params["geometry"])
root.maxsize(windows_params["maxsize-x"], windows_params["maxsize-y"])
root.minsize(windows_params["maxsize-x"], windows_params["maxsize-y"])
textwidth = windows_params["textwidth"]
buttonwidth = windows_params["buttonwidth"]
height = windows_params["height"]
    timedefault = windows_params["timedefault"]
    rowdefault = windows_params["rowdefault"]
    # payment QR-code window
pay_windows = Toplevel()
pay_windows.title("购买方式")
pay_windows.iconbitmap(ico)
path = os.getcwd() + r'\QR_Code.png'
tkimg = ImageTk.PhotoImage(file=path)
topLabel = Label(pay_windows, image=tkimg)
topLabel.pack()
pay_windows.withdraw()
    ################### registration-code input ######################
input_window = Toplevel()
input_window.title("输入验证码")
input_window.geometry("282x24+%s+%s" % (input_window.winfo_screenwidth() // 2 - 140, input_window.winfo_screenheight() // 2 - 200))
input_StrVar = StringVar()
Entry(input_window, font='微软雅黑 -10',
width=38,
textvariable=input_StrVar,
justify=LEFT).grid(column=1,
row=1,
sticky=N + S + E + W)
Button(input_window, text="验证",
width=7,
font='微软雅黑 -9 bold',
command = lambda:Check_registration_Status_label("http://130.130.200.49", "sm", input_StrVar.get(), input_window, "input-registrationcode.ini",b"0000000000000000")).grid(column=2,
row=1,
sticky=W)
input_window.withdraw()
######################################################
v1 = StringVar()
v1.set("等待导入数据......")
l1 = Label(root, text="费用数据:", justify=LEFT).grid(
column=1, row=1, sticky=W)
textbox1 = Entry(root, font='微软雅黑 -11', bg='darkgray', width=textwidth, state='readonly', textvariable=v1, justify=LEFT).grid(
column=2, row=1, sticky=N + S + E + W, columnspan=6)
button1 = Button(root, text="✚", width=6, height=height, command=lambda: get_path(
v1)).grid(column=7, row=1, sticky=W, rowspan=2)
v2 = StringVar()
    v2.set(timedefault)
l2 = Label(root, text="延迟秒速:", justify=LEFT).grid(
column=1, row=2, sticky=W)
textbox2 = Entry(root, font='微软雅黑 -13', width=5, textvariable=v2, justify=RIGHT).grid(
column=2, row=2, sticky=W)
l3 = Label(root, text=" 秒 |", justify=LEFT).grid(
column=3, row=2, sticky=W)
v3 = StringVar()
    v3.set(rowdefault)
l4 = Label(root, text="起始行:", justify=LEFT).grid(
column=4, row=2, sticky=E)
textbox3 = Entry(root, font='微软雅黑 -13', width=4, textvariable=v3, justify=RIGHT).grid(
column=5, row=2, sticky=W)
    chVarDis = IntVar()  # checkbox state via chVarDis.get(): 1 when ticked, 0 otherwise
del_check = Checkbutton(root, text="直接审核", font='微软雅黑 -11',
height=1, variable=chVarDis, state='normal')
del_check.deselect()
del_check.grid(column=6, row=2, sticky=N)
Button(root, text="开始录入", font='微软雅黑 -13 bold', width=buttonwidth, command=lambda: start_input(v2.get(), pay_windows, v3.get(), chVarDis.get())).grid(
column=1, row=5, sticky=W, columnspan=7)
Status_label = StringVar()
Status_label.set("检查注册信息")
l5 = Label(root, font='微软雅黑 -9',
bg='lightgray',
textvariable=Status_label,
justify=LEFT).grid(column=1,
row=6,
sticky=N + S + E + W,
columnspan=7)
Button(root, text="检查更新", font='微软雅黑 -8', width=6, command=lambda:Update_Info_Write()).grid(
column=7, row=6, sticky=N, columnspan=1)
Add_thread(lambda:Check_registration_Status_label("http://130.130.200.49", "ae", None, input_window, "input-registrationcode.ini",b"0000000000000000"))
root.mainloop()
def Update_Info_Write():
file_open = open(r'C:\UpdateInfo.ini','w+')
current_path = os.getcwd()
file_open.write(current_path + '-' + Software_Name + '-' + Version)
file_open.close()
try:
win32api.ShellExecute(0, 'open', os.getcwd() + r'\Update\CheckUpdate.exe', '','',1)
tkinter.messagebox.showinfo('提示!','正在打开更新程序......')
except Exception as e:
tkinter.messagebox.showinfo('警告!','未找到更新程序!' + str(e))
def Check_registration_Status_label(ip, appname, Md5, inputview, filename, keyvalue):
global isRegistered, UserName, Company, Department
Registration = ckr.registration_check(ip, appname, Md5, inputview, filename, keyvalue)
if Registration[0]:
isRegistered = True
UserName = Registration[1]["UserName"]
Company = Registration[1]["Company"]
Department = Registration[1]["Department"]
Refresh_Status_label("...已激活...")
else:
isRegistered = False
Refresh_Status_label("...未激活...")
if __name__ == '__main__':
loadview()
| [
"[email protected]"
] | |
df97e2cec6be03f872168b844e9078036280d682 | 0cfb5831a748ebd46e438e3ad7e7a09c1d196499 | /com/chapter_08/section_03/task_8.3.4_functionWhile.py | f303ebafb04927959b06504d64048c0a65946b9e | [] | no_license | StevenGeGe/pythonFromIntroductionToPractice01 | 7cfe8cdb4bc5c0ddbe25b44976231d72d9e10108 | 9d2ba499056b30ded14180e6c4719ee48edd9772 | refs/heads/master | 2023-02-15T04:08:59.878711 | 2020-12-28T13:27:55 | 2020-12-28T13:27:55 | 310,980,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2020/11/20 21:26
# @Author : Yong
# @Email : [email protected]
# @File : task_8.3.4_functionWhile.py
# @Software: PyCharm
def get_formatted_name(first_name, last_name):
"""返回整洁的姓名"""
full_name = first_name + ' ' + last_name
return full_name.title()
# 这是一个无限循环!
# while True:
# print("\nPlease tell me your name:")
# f_name = input("First name: ")
# l_name = input("Last name: ")
# formatted_name = get_formatted_name(f_name, l_name)
# print("\nHello, " + formatted_name + "!")
def get_formatted_name(first_name, last_name):
"""返回整洁的姓名"""
full_name = first_name + ' ' + last_name
return full_name.title()
while True:
print("\nPlease tell me your name:")
print("(enter 'q' at any time to quit)")
f_name = input("First name: ")
if f_name == 'q':
break
l_name = input("Last name: ")
if l_name == 'q':
break
formatted_name = get_formatted_name(f_name, l_name)
print("\nHello, " + formatted_name + "!")
| [
"[email protected]"
] | |
a4bfce16b09eb37e9cdb42148c47e285c832cacc | 98a834b6cd7f5cb5f596b3818eb84fca0855d62e | /data_gen.py | fa368b0ec07470c9a71b08e50965d5f818b8be3b | [] | no_license | lmb633/transformer | 2e6fe2200942a6e8eb9f24ebfd47eb15478004e8 | e60c743da2078430b764aa68e224e0046b91384e | refs/heads/master | 2020-09-06T11:05:33.875047 | 2019-11-12T03:47:07 | 2019-11-12T03:47:07 | 220,407,189 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,631 | py | import pickle
import time
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
data_file = 'data.pkl'
vocab_file = 'vocab.pkl'
IGNORE_ID = 0
pad_id = 0
print('loading samples...')
start = time.time()
with open(data_file, 'rb') as file:
data = pickle.load(file)
elapsed = time.time() - start
print('elapsed: {:.4f}'.format(elapsed))
def text_to_sequence(text, char2idx):
result = [char2idx[char] for char in text]
return result
def sequence_to_text(seq, idx2char):
result = [idx2char[idx] for idx in seq]
return result
def get_data(filename):
with open(filename, 'r') as file:
data = file.readlines()
data = [line.strip() for line in data]
return data
def pad_collate(batch):
max_input_len = float('-inf')
max_target_len = float('-inf')
for elem in batch:
src, tgt = elem
max_input_len = max_input_len if max_input_len > len(src) else len(src)
max_target_len = max_target_len if max_target_len > len(tgt) else len(tgt)
for i, elem in enumerate(batch):
src, tgt = elem
input_length = len(src)
padded_input = np.pad(src, (0, max_input_len - len(src)), 'constant', constant_values=pad_id)
padded_target = np.pad(tgt, (0, max_target_len - len(tgt)), 'constant', constant_values=IGNORE_ID)
batch[i] = (padded_input, padded_target, input_length)
# sort it by input lengths (long to short)
batch.sort(key=lambda x: x[2], reverse=True)
return default_collate(batch)
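# Usage sketch: pad_collate is intended as the collate_fn of a PyTorch
# DataLoader (the loader setup and 'train' split name below are illustrative):
#   from torch.utils.data import DataLoader
#   loader = DataLoader(AiChallenger2017Dataset('train'), batch_size=32,
#                       shuffle=True, collate_fn=pad_collate)
#   for padded_input, padded_target, input_lengths in loader:
#       ...  # each batch comes back sorted by input length, longest first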
class AiChallenger2017Dataset(Dataset):
def __init__(self, split):
self.samples = data[split]
def __getitem__(self, i):
sample = self.samples[i]
src_text = sample['in']
tgt_text = sample['out']
return np.array(src_text, dtype=np.long), np.array(tgt_text, np.long)
def __len__(self):
return len(self.samples)
def main():
valid_dataset = AiChallenger2017Dataset('valid')
print(valid_dataset[0])
with open(vocab_file, 'rb') as file:
data = pickle.load(file)
src_idx2char = data['dict']['src_idx2char']
tgt_idx2char = data['dict']['tgt_idx2char']
src_text, tgt_text = valid_dataset[0]
src_text = sequence_to_text(src_text, src_idx2char)
src_text = ' '.join(src_text)
print('src_text: ' + src_text)
tgt_text = sequence_to_text(tgt_text, tgt_idx2char)
tgt_text = ' '.join(tgt_text)
print('tgt_text: ' + tgt_text)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a8f7de53cd21c1dc7f0beac6dcb19aab8614a3b6 | 71acb7214efd91c0d327f6d8958e1798eadb4401 | /locations/spiders/croix_rouge_francaise_fr.py | d9186b484a1d64d25affb1fa82f39a5e9c319099 | [
"CC0-1.0",
"MIT"
] | permissive | alltheplaces/alltheplaces | 21b9f8b4ace1352e52ae7b8f8825a930d2cb033e | 1bcbb55cfcf06f2c714465570711f6e83f205c22 | refs/heads/master | 2023-08-30T19:45:35.098658 | 2023-08-30T17:51:54 | 2023-08-30T17:51:54 | 61,166,935 | 453 | 176 | NOASSERTION | 2023-09-14T17:16:40 | 2016-06-15T01:09:18 | Python | UTF-8 | Python | false | false | 2,581 | py | from scrapy import Spider
from scrapy.http import JsonRequest
from locations.dict_parser import DictParser
from locations.hours import DAYS_FR, OpeningHours, sanitise_day
class CroixRougeFrancaiseFRSpider(Spider):
name = "croix_rouge_francaise_fr"
item_attributes = {"brand": "Croix-Rouge française", "brand_wikidata": "Q3003244"}
allowed_domains = ["backend.structure.croix-rouge.fr"]
start_urls = ["https://backend.structure.croix-rouge.fr/graphql"]
def start_requests(self):
graphql_query = """query GET_SEARCH_STRUCTURE_ELASTICSEARCH_QUERY($actionIds: [ID], $activityIds: [ID], $from: Int, $lat: Float, $lon: Float, $search: String!, $size: Int) {
searchStructuresDocuments(
actionIds: $actionIds
activityIds: $activityIds
from: $from
lat: $lat
lon: $lon
search: $search
size: $size
) {
items {
actions
activities { activity }
address_complement
address_number
address_place
address_street
address_street_type
city
contentful_content_id
distance
id
latitude
longitude
name
slug
schedule
specialities
structure_type
zip_code
}
}
}"""
data = {
"operationName": "GET_SEARCH_STRUCTURE_ELASTICSEARCH_QUERY",
"query": graphql_query,
"variables": {
"actionIds": [],
"activityIds": [],
"from": 0,
"lat": 44.8624,
"lon": -0.5848,
"search": "",
"size": 10000,
},
}
for url in self.start_urls:
yield JsonRequest(url=url, method="POST", data=data)
def parse(self, response):
for location in response.json()["data"]["searchStructuresDocuments"]["items"]:
item = DictParser.parse(location)
if location.get("address_complement"):
item["street_address"] = location["address_complement"]
if location.get("slug"):
item["website"] = "https://www.croix-rouge.fr/" + location["slug"]
item["opening_hours"] = OpeningHours()
for day_hours in location["schedule"]:
item["opening_hours"].add_range(
sanitise_day(day_hours["day"], DAYS_FR), day_hours["open"], day_hours["closed"]
)
yield item
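# Run sketch (standard Scrapy CLI; the output filename is illustrative):
#   scrapy crawl croix_rouge_francaise_fr -O croix_rouge.geojson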
| [
"[email protected]"
] | |
311fac66bd5619c74f93b0a3b033e01376dc2ce5 | 5b3caf64b77161748d0929d244798a8fb914d9c5 | /Python Excel Examples/CellsApiDemo/row/setRowStyle.py | b7416ef885f311a5d401678b9b4094cb9616b63c | [] | no_license | EiceblueCloud/Spire.Cloud.Excel | 0d56864991eaf8d44c38f21af70db614b1d804b7 | d9845d5cefd15a3ab408b2c9f80828a4767e2b82 | refs/heads/master | 2021-07-20T23:44:39.068568 | 2021-07-15T03:04:49 | 2021-07-15T03:04:49 | 230,225,396 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | import spirecloudexcel
from spirecloudexcel.configuration import Configuration as ExcelConfiguration
from spirecloudexcel.api.cells_api import CellsApi
appId = "your id"
appKey = "your key"
baseUrl = "https://api.e-iceblue.cn"
configuration = ExcelConfiguration(appId, appKey, baseUrl)
api = CellsApi(configuration)
name = "SetRowStyle_1.xlsx"
sheetName = "Sheet2"
row_index = 3
style = spirecloudexcel.models.Style()
font = spirecloudexcel.models.Font()
font.underline = "Single"
font.size = 8
font.is_italic = True
font.is_bold = True
font.name = "Algerian"
style.font = font
borders = []
topBorder = spirecloudexcel.models.Border("Medium", spirecloudexcel.models.Color(255, 255, 0, 0), "EdgeTop")
rightBorder = spirecloudexcel.models.Border("DashDot", spirecloudexcel.models.Color(255, 0, 255, 0),
"EdgeRight")
borders.append(topBorder)
borders.append(rightBorder)
style.border_collection = borders
style.horizontal_alignment = "Center"
style.background_color = spirecloudexcel.models.Color(255, 0, 255, 0)
storage = ""
folder = "/Cells/Row/"
api.set_row_style(name, sheet_name=sheetName, row_index=row_index, style=style, folder=folder, storage=storage)
| [
"[email protected]"
] | |
c4ab0b017776466c61193e88cafc3391fe2ec6a6 | ae7ba9c83692cfcb39e95483d84610715930fe9e | /yubinbai/pcuva-problems/UVa 11349 symmetric matrix/main.py | 5b0dbb58cb2e77b0c306a72f8344e573ab95df48 | [] | no_license | xenron/sandbox-github-clone | 364721769ea0784fb82827b07196eaa32190126b | 5eccdd8631f8bad78eb88bb89144972dbabc109c | refs/heads/master | 2022-05-01T21:18:43.101664 | 2016-09-12T12:38:32 | 2016-09-12T12:38:32 | 65,951,766 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 635 | py | import math
import sys
def symM(matrix, n):
flag = True
    for x in range((n >> 1) + (n & 1)):  # top ceil(n/2) rows, each checked against its 180-degree partner
for y in range(n):
flag &= (matrix[x][y] == matrix[n - 1 - x][n - 1 - y])
if not flag:
break
if not flag:
break
return flag
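# Example (sketch): symM([[1, 2], [2, 1]], 2) is True (the matrix equals its
# 180-degree rotation), while symM([[1, 2], [3, 1]], 2) is False.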
sys.stdin = open('input.txt')
for case in range(1, 1 + int(input())):
n = int(raw_input().strip().rpartition('=')[2])
mat = []
for x in xrange(1, n + 1):
mat.append(map(int, raw_input().split()))
if symM(mat, n):
print 'Test #%d: Symmetric.' % case
else:
print 'Test #%d: Non-symmetric.' % case
| [
"[email protected]"
] | |
93ef50c85ce3b6ab9f8d2d735078a7f3d4f8fa8f | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /tests/sentry/integrations/jira_server/test_utils.py | cd17caeb9eacffca5d0e6932a2c9fe1ac566c6bf | [
"Apache-2.0",
"BUSL-1.1"
] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 525 | py | from fixtures.integrations.mock_service import StubService
from sentry.integrations.jira_server.utils import build_user_choice
from sentry.testutils import TestCase
from sentry.testutils.silo import control_silo_test
@control_silo_test
class BuildUserChoiceTest(TestCase):
def test_jira_server(self):
user_response = StubService.get_stub_data("jira", "jira_server_user.json")
assert build_user_choice(user_response, "name") == (
"bob",
"Bobby - [email protected] (bob)",
)
| [
"[email protected]"
] | |
9b7918d1d533e510ce6ef3c4650b2b29a92e1da3 | a6a78f59f442c18449befc89be2b193e37b695d6 | /ivi/dcpwr.py | 9c28dc0a695ccba5eaf52270ab1422ba87c08f1c | [
"MIT"
] | permissive | hohe/python-ivi | fa0b4b1232f4fca92bd046d2ae322e49959f8a83 | 0fe6d7d5aaf9ebc97085f73e25b0f3051ba996b6 | refs/heads/master | 2021-01-21T08:55:35.470107 | 2013-12-23T09:27:02 | 2013-12-23T09:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,975 | py | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import ivi
# Parameter Values
CurrentLimitBehavior = set(['regulate', 'trip'])
RangeType = set(['current', 'voltage'])
OutputState = set(['constant_voltage', 'constant_current', 'over_voltage',
'over_current', 'unregulated'])
MeasurementType = set(['current', 'voltage'])
def get_range(range_list, offset, val):
l = list()
for i in range(len(range_list)):
l.append((i, abs(range_list[i][offset])))
l.sort(key=lambda x: x[1], reverse=True)
k = -1
for i in range(len(l)):
if l[i][1] >= val:
k = i
#if k < 0:
# return None
#else:
# return range_list[l[k][0]]
if k < 0:
return -1
else:
return l[k][0]
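# Example (sketch): with ranges = [(8.0, 5.0), (20.0, 2.5)] as (voltage, current)
# pairs, get_range(ranges, 0, 10.0) returns 1 — the index of the smallest range
# whose voltage still covers 10 V — get_range(ranges, 0, 6.0) returns 0, and a
# value no range covers yields -1.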
class Base(object):
"Base IVI methods for all DC power supplies"
def __init__(self, *args, **kwargs):
# needed for _init_outputs calls from other __init__ methods
self._output_count = 1
super(Base, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Base'
ivi.add_group_capability(self, cls+grp)
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
self._output_name = list()
self._output_count = 1
self._output_range = [[(0, 0)]]
self._output_range_name = [['P0V']]
self._output_ovp_max = [0]
self._output_voltage_max = [0]
self._output_current_max = [0]
ivi.add_property(self, 'outputs[].current_limit',
self._get_output_current_limit,
self._set_output_current_limit,
None,
ivi.Doc("""
Specifies the output current limit. The units are Amps.
The value of the Current Limit Behavior attribute determines the behavior
of the power supply when the output current is equal to or greater than
the value of this attribute.
""", cls, grp, '4.2.1'))
ivi.add_property(self, 'outputs[].current_limit_behavior',
self._get_output_current_limit_behavior,
self._set_output_current_limit_behavior,
None,
ivi.Doc("""
Specifies the behavior of the power supply when the output current is
equal to or greater than the value of the Current Limit attribute.
Values
* 'trip' - The power supply disables the output when the output current is
equal to or greater than the value of the Current Limit attribute.
* 'regulate' - The power supply restricts the output voltage such that the
output current is not greater than the value of the Current Limit
attribute.
""", cls, grp, '4.2.2'))
ivi.add_property(self, 'outputs[].enabled',
self._get_output_enabled,
self._set_output_enabled,
None,
ivi.Doc("""
If true, the signal the power supply produces appears at the output
connector. If false, the signal the power supply produces does not appear
at the output connector.
""", cls, grp, '4.2.3'))
ivi.add_property(self, 'outputs[].ovp_enabled',
self._get_output_ovp_enabled,
self._set_output_ovp_enabled,
None,
ivi.Doc("""
Specifies whether the power supply provides over-voltage protection. If
this attribute is set to True, the power supply disables the output when
the output voltage is greater than or equal to the value of the OVP
Limit attribute.
""", cls, grp, '4.2.4'))
ivi.add_property(self, 'outputs[].ovp_limit',
self._get_output_ovp_limit,
self._set_output_ovp_limit,
None,
ivi.Doc("""
Specifies the voltage the power supply allows. The units are Volts.
If the OVP Enabled attribute is set to True, the power supply disables the
output when the output voltage is greater than or equal to the value of
this attribute.
If the OVP Enabled is set to False, this attribute does not affect the
behavior of the instrument.
""", cls, grp, '4.2.5'))
ivi.add_property(self, 'outputs[].voltage_level',
self._get_output_voltage_level,
self._set_output_voltage_level,
None,
ivi.Doc("""
Specifies the voltage level the DC power supply attempts to generate. The
units are Volts.
""", cls, grp, '4.2.6'))
ivi.add_property(self, 'outputs[].name',
self._get_output_name,
None,
None,
ivi.Doc("""
This attribute returns the repeated capability identifier defined by
specific driver for the output channel that corresponds to the index that
the user specifies. If the driver defines a qualified Output Channel name,
this property returns the qualified name.
If the value that the user passes for the Index parameter is less than
zero or greater than the value of the Output Channel Count, the attribute
returns an empty string for the value and returns an error.
""", cls, grp, '4.2.9'))
ivi.add_method(self, 'outputs[].configure_current_limit',
self._output_configure_current_limit,
ivi.Doc("""
This function configures the current limit. It specifies the output
current limit value and the behavior of the power supply when the output
current is greater than or equal to that value.
See the definition of the Current Limit Behavior attribute for defined
values for the behavior parameter.
""", cls, grp, '4.3.1'))
ivi.add_method(self, 'outputs[].configure_range',
self._output_configure_range,
ivi.Doc("""
Configures the power supply's output range on an output. One parameter
specifies whether to configure the voltage or current range, and the other
parameter is the value to which to set the range.
Setting a voltage range can invalidate a previously configured current
range. Setting a current range can invalidate a previously configured
voltage range.
""", cls, grp, '4.3.3'))
ivi.add_method(self, 'outputs[].configure_ovp',
self._output_configure_ovp,
ivi.Doc("""
Configures the over-voltage protection. It specifies the over-voltage
limit and the behavior of the power supply when the output voltage is
greater than or equal to that value.
When the Enabled parameter is False, the Limit parameter does not affect
the instrument's behavior, and the driver does not set the OVP Limit
attribute.
""", cls, grp, '4.3.4'))
ivi.add_method(self, 'outputs[].query_current_limit_max',
self._output_query_current_limit_max,
ivi.Doc("""
This function returns the maximum programmable current limit that the
power supply accepts for a particular voltage level on an output.
""", cls, grp, '4.3.7'))
ivi.add_method(self, 'outputs[].query_voltage_level_max',
self._output_query_voltage_level_max,
ivi.Doc("""
This function returns the maximum programmable voltage level that the
power supply accepts for a particular current limit on an output.
""", cls, grp, '4.3.8'))
ivi.add_method(self, 'outputs[].query_output_state',
self._output_query_output_state,
ivi.Doc("""
This function returns whether the power supply is in a particular output
state.
A constant voltage condition occurs when the output voltage is equal to
the value of the Voltage Level attribute and the current is less than or
equal to the value of the Current Limit attribute.
A constant current condition occurs when the output current is equal to
the value of the Current Limit attribute and the Current Limit Behavior
attribute is set to the Current Regulate defined value.
An unregulated condition occurs when the output voltage is less than the
value of the Voltage Level attribute and the current is less than the
value of the Current Limit attribute.
An over-voltage condition occurs when the output voltage is equal to or
greater than the value of the OVP Limit attribute and the OVP Enabled
attribute is set to True.
An over-current condition occurs when the output current is equal to or
greater than the value of the Current Limit attribute and the Current
Limit Behavior attribute is set to the Current Trip defined value.
When either an over-voltage condition or an over-current condition
occurs, the power supply's output protection disables the output. If the
power supply is in an over-voltage or over-current state, it does not
produce power until the output protection is reset. The Reset Output
Protection function resets the output protection. Once the output
protection is reset, the power supply resumes generating a power signal.
Values for output_state:
* 'constant_voltage'
* 'constant_current'
* 'over_voltage'
* 'over_current'
* 'unregulated'
""", cls, grp, '4.3.9'))
ivi.add_method(self, 'outputs[].reset_output_protection',
self._output_reset_output_protection,
ivi.Doc("""
This function resets the power supply output protection after an
over-voltage or over-current condition occurs.
An over-voltage condition occurs when the output voltage is equal to or
greater than the value of the OVP Limit attribute and the OVP Enabled
attribute is set to True.
An over-current condition occurs when the output current is equal to or
greater than the value of the Current Limit attribute and the Current
Limit Behavior attribute is set to Current Trip.
When either an over-voltage condition or an over-current condition
occurs, the output protection of the power supply disables the output.
Once the output protection is reset, the power supply resumes generating
a power signal.
Use the Query Output State function to determine if the power supply is in
an over-voltage or over-current state.
""", cls, grp, '4.3.10'))
self._init_outputs()
def _init_outputs(self):
try:
super(Base, self)._init_outputs()
except AttributeError:
pass
self._output_name = list()
self._output_current_limit = list()
self._output_current_limit_behavior = list()
self._output_enabled = list()
self._output_ovp_enabled = list()
self._output_ovp_limit = list()
self._output_voltage_level = list()
for i in range(self._output_count):
self._output_name.append("output%d" % (i+1))
self._output_current_limit.append(0)
self._output_current_limit_behavior.append('regulate')
self._output_enabled.append(False)
self._output_ovp_enabled.append(True)
self._output_ovp_limit.append(0)
self._output_voltage_level.append(0)
self.outputs._set_list(self._output_name)
def _get_output_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit[index]
def _set_output_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_current_max[index]:
raise ivi.OutOfRangeException()
self._output_current_limit[index] = value
def _get_output_current_limit_behavior(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_current_limit_behavior[index]
def _set_output_current_limit_behavior(self, index, value):
index = ivi.get_index(self._output_name, index)
if value not in CurrentLimitBehavior:
raise ivi.ValueNotSupportedException()
self._output_current_limit_behavior[index] = value
def _get_output_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_enabled[index]
def _set_output_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_enabled[index] = value
def _get_output_ovp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_enabled[index]
def _set_output_ovp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_ovp_enabled[index] = value
def _get_output_ovp_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ovp_limit[index]
def _set_output_ovp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_ovp_max[index]:
raise ivi.OutOfRangeException()
self._output_ovp_limit[index] = value
def _get_output_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_voltage_level[index]
def _set_output_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
if value < 0 or value > self._output_voltage_max[index]:
raise ivi.OutOfRangeException()
self._output_voltage_level[index] = value
def _get_output_name(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_name[index]
def _output_configure_current_limit(self, index, behavior, limit):
self._set_output_current_limit_behavior(index, behavior)
self._set_output_current_limit(index, limit)
def _output_configure_range(self, index, range_type, range_val):
index = ivi.get_index(self._output_name, index)
if range_type not in RangeType:
raise ivi.ValueNotSupportedException()
if range_type == 'voltage':
t = 0
elif range_type == 'current':
t = 1
        k = get_range(self._output_range[index], t, range_val)
        if k < 0:
            raise ivi.OutOfRangeException()
        self._output_voltage_max[index] = self._output_range[index][k][0]
        self._output_current_max[index] = self._output_range[index][k][1]
def _output_configure_ovp(self, index, enabled, limit):
self._set_output_ovp_enabled(index, enabled)
if enabled:
self._set_output_ovp_limit(index, limit)
def _output_query_current_limit_max(self, index, voltage_level):
index = ivi.get_index(self._output_name, index)
if voltage_level < 0 or voltage_level > self._output_voltage_max[index]:
raise ivi.OutOfRangeException()
return self._output_current_max[index]
def _output_query_voltage_level_max(self, index, current_limit):
index = ivi.get_index(self._output_name, index)
if current_limit < 0 or current_limit > self._output_current_max[index]:
raise ivi.OutOfRangeException()
return self._output_voltage_max[index]
def _output_query_output_state(self, index, state):
index = ivi.get_index(self._output_name, index)
if state not in OutputState:
raise ivi.ValueNotSupportedException()
return False
def _output_reset_output_protection(self, index):
pass
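# Usage sketch (illustrative, not a definitive driver session): a concrete
# driver built on Base might be exercised roughly like
#   psu.outputs[0].configure_range('voltage', 8.0)
#   psu.outputs[0].configure_current_limit('regulate', 1.5)
#   psu.outputs[0].voltage_level = 5.0
#   psu.outputs[0].enabled = True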
class Trigger(object):
"Extension IVI methods for power supplies supporting trigger based output changes"
def __init__(self, *args, **kwargs):
super(Trigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Trigger'
ivi.add_group_capability(self, cls+grp)
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
ivi.add_property(self, 'outputs[].trigger_source',
self._get_output_trigger_source,
self._set_output_trigger_source,
None,
ivi.Doc("""
Specifies the trigger source. After an Initiate call, the power supply
waits for a trigger event from the source specified with this attribute.
After a trigger event occurs, the power supply changes the voltage level
to the value of the Triggered Voltage Level attribute and the current
limit to the value of the Triggered Current Limit attribute.
""", cls, grp, '5.2.1'))
ivi.add_property(self, 'outputs[].triggered_current_limit',
self._get_output_triggered_current_limit,
self._set_output_triggered_current_limit,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the current limit after
a trigger event occurs. The units are Amps.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the current limit to the value of this
attribute.
After a trigger occurs, the value of the Current Limit attribute reflects
the new value to which the current limit has been set.
""", cls, grp, '5.2.2'))
ivi.add_property(self, 'outputs[].triggered_voltage_level',
self._get_output_triggered_voltage_level,
self._set_output_triggered_voltage_level,
None,
ivi.Doc("""
Specifies the value to which the power supply sets the voltage level
after a trigger event occurs. The units are Volts.
After an Initiate call, the power supply waits for a trigger event from
the source specified with the Trigger Source attribute. After a trigger
event occurs, the power supply sets the voltage level to the value of this
attribute.
After a trigger occurs, the value of the Voltage Level attribute reflects
the new value to which the voltage level has been set.
""", cls, grp, '5.2.3'))
self.__dict__.setdefault('trigger', ivi.PropertyCollection())
ivi.add_method(self, 'trigger.abort',
self._trigger_abort,
ivi.Doc("""
If the power supply is currently waiting for a trigger to change the
output signal, this function returns the power supply to the ignore
triggers state.
If the power supply is not waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.1'))
ivi.add_method(self, 'trigger.initiate',
self._trigger_initiate,
ivi.Doc("""
If the power supply is not currently waiting for a trigger, this function
causes the power supply to wait for a trigger.
If the power supply is already waiting for a trigger, this function does
nothing and returns Success.
""", cls, grp, '5.3.5'))
def _init_outputs(self):
try:
super(Trigger, self)._init_outputs()
except AttributeError:
pass
self._output_trigger_source = list()
self._output_triggered_current_limit = list()
self._output_triggered_voltage_level = list()
for i in range(self._output_count):
self._output_trigger_source.append('')
self._output_triggered_current_limit.append(0)
self._output_triggered_voltage_level.append(0)
def _get_output_trigger_source(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_trigger_source[index]
def _set_output_trigger_source(self, index, value):
index = ivi.get_index(self._output_name, index)
value = str(value)
self._output_trigger_source[index] = value
def _get_output_triggered_current_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_current_limit[index]
def _set_output_triggered_current_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_triggered_current_limit[index] = value
def _get_output_triggered_voltage_level(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_triggered_voltage_level[index]
def _set_output_triggered_voltage_level(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_triggered_voltage_level[index] = value
def _trigger_abort(self):
pass
def _trigger_initiate(self):
pass
class SoftwareTrigger(object):
"Extension IVI methods for power supplies supporting software triggering"
def __init__(self, *args, **kwargs):
super(SoftwareTrigger, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'SoftwareTrigger'
ivi.add_group_capability(self, cls+grp)
self.__dict__.setdefault('_docs', dict())
self._docs['send_software_trigger'] = ivi.Doc("""
This function sends a software-generated trigger to the instrument. It is
only applicable for instruments using interfaces or protocols which
support an explicit trigger function. For example, with GPIB this function
could send a group execute trigger to the instrument. Other
implementations might send a ``*TRG`` command.
Since instruments interpret a software-generated trigger in a wide variety
of ways, the precise response of the instrument to this trigger is not
defined. Note that SCPI details a possible implementation.
This function should not use resources which are potentially shared by
other devices (for example, the VXI trigger lines). Use of such shared
resources may have undesirable effects on other devices.
This function should not check the instrument status. Typically, the
end-user calls this function only in a sequence of calls to other
low-level driver functions. The sequence performs one operation. The
end-user uses the low-level functions to optimize one or more aspects of
interaction with the instrument. To check the instrument status, call the
appropriate error query function at the conclusion of the sequence.
The trigger source attribute must accept Software Trigger as a valid
setting for this function to work. If the trigger source is not set to
Software Trigger, this function does nothing and returns the error Trigger
Not Software.
""", cls, grp, '6.2.1', 'send_software_trigger')
def send_software_trigger(self):
pass
class Measurement(object):
"Extension IVI methods for power supplies supporting measurement of the output signal"
def __init__(self, *args, **kwargs):
super(Measurement, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'Measurement'
ivi.add_group_capability(self, cls+grp)
ivi.add_method(self, 'outputs[].measure',
self._output_measure,
ivi.Doc("""
Takes a measurement on the output signal and returns the measured value.
Values for measurement_type:
* 'voltage'
* 'current'
""", cls, grp, '7.2.1'))
def _output_measure(self, index, type):
index = ivi.get_index(self._output_name, index)
if type not in MeasurementType:
raise ivi.ValueNotSupportedException()
return 0
| [
"[email protected]"
] | |
d1230b257269e14de6d6a92780f184655cea298a | 38ba13df9ea6e53c7b924cad1f3bea2de59c7a6a | /nibbler/trading/collectors/testfiles/XMRpusher.py | 259b074e21ed320eb51482ce968fe4705c991153 | [] | no_license | JizzFactoryEmployee/nibblerppman | 0fbc1ce662cf8b4868b41a97291250fae29dc41d | 160e557578a3e8a614450354f6ade233d32b052f | refs/heads/master | 2022-11-14T01:10:31.743000 | 2020-07-04T01:21:52 | 2020-07-04T01:21:52 | 273,835,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import time
import subprocess
import inotify.adapters
def XMRrunner():
while 1 <2:
i = inotify.adapters.Inotify()
i.add_watch(r'/home/nibbler/nibblerppman/nibbler/trading/collectors/coins/XMR/1m')
events = i.event_gen(yield_nones=False, timeout_s=1)
a = list(events)
if a == []:
pass
if a != []:
b = str(a)
b.split(',')
if 'XMR' in b:
print('ACTIVATING XMRPUSHBOT')
XMR = subprocess.Popen(['python', '/home/nibbler/nibblerppman/nibbler/trading/collectors/testfiles/XMRMAGIC.py'], shell=False)
XMRrunner()
| [
"[email protected]"
] | |
f4c7922a4d689c89746373c980775b0a21ce13b7 | 500047f47a6b372fa7ff1e96b11315ee26acf5ef | /Chapter-05/badExample.py | c4cad1b499e3b50f0b20cd4f19c9a7030f4bff3b | [] | no_license | ra2003/Tkinter-In-Action | 9f3a80bb2cab8dccf78621915f234f80cf79c58d | 2a35ae029c2cfabb53adee8dae5fd0a7c6db817f | refs/heads/master | 2022-03-02T16:25:26.146299 | 2019-10-07T06:36:41 | 2019-10-07T06:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,524 | py | #!/usr/bin/python3
import tkinter as tk
class RefactorExample(tk.Frame):
def __init__(self,parent ):
super().__init__()
self.parent = parent
panel = tk.Frame(self.parent, bg="white")
prevButton = tk.Button(panel, text="<< PREV", command=self.OnPrev).place(x=50, y=0)
nextButton = tk.Button(panel, text="NEXT >>",command=self.OnNext ).place(x=130, y=0)
panel.pack(fill=tk.BOTH, expand=1)
m_main = tk.Menu(self.master, bd = 1)
m_file = tk.Menu(m_main, tearoff=0, bd = 1)
s_menu = tk.Menu(m_file)
m_edit = tk.Menu(m_main, tearoff=0, bd=1)
m_main.add_cascade(label="File", underline=0, menu=m_file)
m_main.add_cascade(label="Edit", underline=0, menu=m_edit)
m_file.add_command(label="Open", underline=0, command=self.OnOpen)
m_file.add_command(label="Quit", underline=0, command=self.OnCloseWindow)
m_edit.add_command(label="Copy",underline=0,command=self.OnCopy)
m_edit.add_command(label="Cut",underline=1,command=self.OnCut)
m_edit.add_command(label="Paste",underline=0,command=self.OnPaste)
self.master.config(menu=m_main)
static = tk.Label(panel, text = "First Name", bg="white").place(x=10, y=50)
#tk.Entry doesn't have property of "height"
text = tk.Entry(panel, width=10, bg="white").place(x=80, y=50)
static2 = tk.Label(panel, text = "Last Name", bg="white").place(x=10, y=80)
#tk.Entry doesn't have property of "height"
text2 = tk.Entry(panel, width=10, bg="white").place(x=80, y=80)
firstButton = tk.Button(panel, text="FIRST", command=self.OnFirst).place(x=0, y=0)
lastButton = tk.Button(panel, text="LAST", command=self.OnLast).place(x=210, y=0)
m_edit.add_separator()
m_edit.add_command(label="Options", underline=0, command=self.OnOptions)
    # tk Button/Menu command callbacks are invoked with no arguments,
    # so the handlers must not declare an event parameter
    def OnPrev(self): pass
    def OnNext(self): pass
    def OnLast(self): pass
    def OnFirst(self): pass
    def OnOpen(self): pass
    def OnCopy(self): pass
    def OnCut(self): pass
    def OnPaste(self): pass
    def OnOptions(self): pass
    def OnCloseWindow(self):
self.master.destroy()
def main():
app = tk.Tk()
app.geometry("340x200")
app.title('Refactor Example')
frame = RefactorExample(app)
app.mainloop()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2b980f81aa73b51a4fce9dccc6c3707de8b329d8 | 849364a9b65ac32feab67dd3bb86119a0102f048 | /tests/test25/t3.py | 9b7f290499290cdfb37499f319fb698ddf121072 | [] | no_license | zokis/Python--Faster-Way | 585b46e50cc70c6b4f3b026d3b82ba2705f6fc6b | 8f11e9246198c6bc3c0c58668674d75188c966ae | refs/heads/gh-pages | 2022-07-28T18:50:54.342599 | 2015-07-02T19:43:18 | 2015-07-02T19:43:18 | 12,438,963 | 200 | 31 | null | 2018-03-25T16:12:12 | 2013-08-28T16:35:19 | HTML | UTF-8 | Python | false | false | 80 | py | def a():
n = 1
if n:
return False
else:
return True
| [
"[email protected]"
] | |
b9d86f03448c8cc1a54d531a274bd2c6babd51c3 | 228ebc9fb20f25dd3ed2a6959aac41fd31314e64 | /tests/system/aiplatform/test_dataset.py | 0167cb8f20ae58a1c3e8b302c90a48c4bafcf178 | [
"Apache-2.0"
] | permissive | orionnye/python-aiplatform | 746e3df0c75025582af38223829faeb2656dc653 | e3ea683bf754832340853a15bdb0a0662500a70f | refs/heads/main | 2023-08-03T06:14:50.689185 | 2021-09-24T03:24:14 | 2021-09-24T03:24:14 | 410,091,957 | 1 | 0 | Apache-2.0 | 2021-09-24T20:21:01 | 2021-09-24T20:21:00 | null | UTF-8 | Python | false | false | 11,073 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import uuid
import pytest
import importlib
from google import auth as google_auth
from google.api_core import exceptions
from google.api_core import client_options
from google.cloud import aiplatform
from google.cloud import storage
from google.cloud.aiplatform import utils
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform_v1beta1.types import dataset as gca_dataset
from google.cloud.aiplatform_v1beta1.services import dataset_service
from test_utils.vpcsc_config import vpcsc_config
# TODO(vinnys): Replace with env var `BUILD_SPECIFIC_GCP_PROJECT` once supported
_, _TEST_PROJECT = google_auth.default()
TEST_BUCKET = os.environ.get(
"GCLOUD_TEST_SAMPLES_BUCKET", "cloud-samples-data-us-central1"
)
_TEST_LOCATION = "us-central1"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
_TEST_API_ENDPOINT = f"{_TEST_LOCATION}-aiplatform.googleapis.com"
_TEST_IMAGE_DATASET_ID = "1084241610289446912" # permanent_50_flowers_dataset
_TEST_TEXT_DATASET_ID = (
"6203215905493614592" # permanent_text_entity_extraction_dataset
)
_TEST_DATASET_DISPLAY_NAME = "permanent_50_flowers_dataset"
_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE = "gs://ucaip-sample-resources/iris_1000.csv"
_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE = f"gs://{TEST_BUCKET}/ai-platform-unified/sdk/datasets/text_entity_extraction_dataset.jsonl"
_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE = (
"gs://ucaip-test-us-central1/dataset/salads_oid_ml_use_public_unassigned.jsonl"
)
_TEST_TEXT_ENTITY_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/text_extraction_io_format_1.0.0.yaml"
_TEST_IMAGE_OBJ_DET_IMPORT_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_bounding_box_io_format_1.0.0.yaml"
class TestDataset:
def setup_method(self):
importlib.reload(initializer)
importlib.reload(aiplatform)
@pytest.fixture()
def shared_state(self):
shared_state = {}
yield shared_state
@pytest.fixture()
def create_staging_bucket(self, shared_state):
new_staging_bucket = f"temp-sdk-integration-{uuid.uuid4()}"
storage_client = storage.Client()
storage_client.create_bucket(new_staging_bucket)
shared_state["storage_client"] = storage_client
shared_state["staging_bucket"] = new_staging_bucket
yield
@pytest.fixture()
def delete_staging_bucket(self, shared_state):
yield
storage_client = shared_state["storage_client"]
# Delete temp staging bucket
bucket_to_delete = storage_client.get_bucket(shared_state["staging_bucket"])
bucket_to_delete.delete(force=True)
# Close Storage Client
storage_client._http._auth_request.session.close()
storage_client._http.close()
@pytest.fixture()
def dataset_gapic_client(self):
gapic_client = dataset_service.DatasetServiceClient(
client_options=client_options.ClientOptions(api_endpoint=_TEST_API_ENDPOINT)
)
yield gapic_client
@pytest.fixture()
def create_text_dataset(self, dataset_gapic_client, shared_state):
gapic_dataset = gca_dataset.Dataset(
display_name=f"temp_sdk_integration_test_create_text_dataset_{uuid.uuid4()}",
metadata_schema_uri=aiplatform.schema.dataset.metadata.text,
)
create_lro = dataset_gapic_client.create_dataset(
parent=_TEST_PARENT, dataset=gapic_dataset
)
new_dataset = create_lro.result()
shared_state["dataset_name"] = new_dataset.name
yield
@pytest.fixture()
def create_tabular_dataset(self, dataset_gapic_client, shared_state):
gapic_dataset = gca_dataset.Dataset(
display_name=f"temp_sdk_integration_test_create_tabular_dataset_{uuid.uuid4()}",
metadata_schema_uri=aiplatform.schema.dataset.metadata.tabular,
)
create_lro = dataset_gapic_client.create_dataset(
parent=_TEST_PARENT, dataset=gapic_dataset
)
new_dataset = create_lro.result()
shared_state["dataset_name"] = new_dataset.name
yield
@pytest.fixture()
def create_image_dataset(self, dataset_gapic_client, shared_state):
gapic_dataset = gca_dataset.Dataset(
display_name=f"temp_sdk_integration_test_create_image_dataset_{uuid.uuid4()}",
metadata_schema_uri=aiplatform.schema.dataset.metadata.image,
)
create_lro = dataset_gapic_client.create_dataset(
parent=_TEST_PARENT, dataset=gapic_dataset
)
new_dataset = create_lro.result()
shared_state["dataset_name"] = new_dataset.name
yield
@pytest.fixture()
def delete_new_dataset(self, dataset_gapic_client, shared_state):
yield
assert shared_state["dataset_name"]
deletion_lro = dataset_gapic_client.delete_dataset(
name=shared_state["dataset_name"]
)
deletion_lro.result()
shared_state["dataset_name"] = None
# TODO(vinnys): Remove pytest skip once persistent resources are accessible
@pytest.mark.skip(reason="System tests cannot access persistent test resources")
def test_get_existing_dataset(self):
"""Retrieve a known existing dataset, ensure SDK successfully gets the
dataset resource."""
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
flowers_dataset = aiplatform.ImageDataset(dataset_name=_TEST_IMAGE_DATASET_ID)
assert flowers_dataset.name == _TEST_IMAGE_DATASET_ID
assert flowers_dataset.display_name == _TEST_DATASET_DISPLAY_NAME
def test_get_nonexistent_dataset(self):
"""Ensure attempting to retrieve a dataset that doesn't exist raises
a Google API core 404 exception."""
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
# AI Platform service returns 404
with pytest.raises(exceptions.NotFound):
aiplatform.ImageDataset(dataset_name="0")
@pytest.mark.usefixtures("create_text_dataset", "delete_new_dataset")
def test_get_new_dataset_and_import(self, dataset_gapic_client, shared_state):
"""Retrieve new, empty dataset and import a text dataset using import().
Then verify data items were successfully imported."""
assert shared_state["dataset_name"]
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
my_dataset = aiplatform.TextDataset(dataset_name=shared_state["dataset_name"])
data_items_pre_import = dataset_gapic_client.list_data_items(
parent=my_dataset.resource_name
)
assert len(list(data_items_pre_import)) == 0
# Blocking call to import
my_dataset.import_data(
gcs_source=_TEST_TEXT_ENTITY_EXTRACTION_GCS_SOURCE,
import_schema_uri=_TEST_TEXT_ENTITY_IMPORT_SCHEMA,
)
data_items_post_import = dataset_gapic_client.list_data_items(
parent=my_dataset.resource_name
)
assert len(list(data_items_post_import)) == 469
@vpcsc_config.skip_if_inside_vpcsc
@pytest.mark.usefixtures("delete_new_dataset")
def test_create_and_import_image_dataset(self, dataset_gapic_client, shared_state):
"""Use the Dataset.create() method to create a new image obj detection
dataset and import images. Then confirm images were successfully imported."""
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
img_dataset = aiplatform.ImageDataset.create(
display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}",
gcs_source=_TEST_IMAGE_OBJECT_DETECTION_GCS_SOURCE,
import_schema_uri=_TEST_IMAGE_OBJ_DET_IMPORT_SCHEMA,
)
shared_state["dataset_name"] = img_dataset.resource_name
data_items_iterator = dataset_gapic_client.list_data_items(
parent=img_dataset.resource_name
)
assert len(list(data_items_iterator)) == 14
@pytest.mark.usefixtures("delete_new_dataset")
def test_create_tabular_dataset(self, dataset_gapic_client, shared_state):
"""Use the Dataset.create() method to create a new tabular dataset.
Then confirm the dataset was successfully created and references GCS source."""
aiplatform.init(project=_TEST_PROJECT, location=_TEST_LOCATION)
tabular_dataset = aiplatform.TabularDataset.create(
display_name=f"temp_sdk_integration_create_and_import_dataset_{uuid.uuid4()}",
gcs_source=[_TEST_TABULAR_CLASSIFICATION_GCS_SOURCE],
)
shared_state["dataset_name"] = tabular_dataset.resource_name
gapic_metadata = tabular_dataset.to_dict()["metadata"]
gcs_source_uris = gapic_metadata["inputConfig"]["gcsSource"]["uri"]
assert len(gcs_source_uris) == 1
assert _TEST_TABULAR_CLASSIFICATION_GCS_SOURCE == gcs_source_uris[0]
assert (
tabular_dataset.metadata_schema_uri
== aiplatform.schema.dataset.metadata.tabular
)
# TODO(vinnys): Remove pytest skip once persistent resources are accessible
@pytest.mark.skip(reason="System tests cannot access persistent test resources")
@pytest.mark.usefixtures("create_staging_bucket", "delete_staging_bucket")
def test_export_data(self, shared_state):
"""Get an existing dataset, export data to a newly created folder in
Google Cloud Storage, then verify data was successfully exported."""
assert shared_state["staging_bucket"]
assert shared_state["storage_client"]
aiplatform.init(
project=_TEST_PROJECT,
location=_TEST_LOCATION,
staging_bucket=f"gs://{shared_state['staging_bucket']}",
)
text_dataset = aiplatform.TextDataset(dataset_name=_TEST_TEXT_DATASET_ID)
exported_files = text_dataset.export_data(
output_dir=f"gs://{shared_state['staging_bucket']}"
)
assert len(exported_files) # Ensure at least one GCS path was returned
exported_file = exported_files[0]
bucket, prefix = utils.extract_bucket_and_prefix_from_gcs_path(exported_file)
storage_client = shared_state["storage_client"]
bucket = storage_client.get_bucket(bucket)
blob = bucket.get_blob(prefix)
assert blob # Verify the returned GCS export path exists
| [
"[email protected]"
] | |
b8addbaf31dce94cbf9e67adfeee954a02ca3942 | b3237e2600cfd2e84dbba3760a020c8434033e72 | /Assignments/Exam Preparation/Python Advanced Retake Exam - 08 April 2020/03. Find the Eggs.py | 893dfadef18cfd65e739782e2b18fa2140091f16 | [
"MIT"
] | permissive | KaloyankerR/python-advanced-repository | 94a22a5015bb66afa6c61b3fb8ad150dc7028d6a | 711672d0f033a5adffc3ca689d02d2a7a9a26bfb | refs/heads/master | 2023-04-11T18:48:40.538000 | 2021-04-21T10:55:30 | 2021-04-21T10:55:30 | 298,027,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,464 | py | def find_strongest_eggs(elements: list, sub_list):
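    # Distributes `elements` round-robin into `sub_list` groups; a group's
    # middle egg counts as "strongest" when it outweighs its right neighbour,
    # the right neighbour outweighs the left, and no left-side egg outweighs
    # its mirrored right-side egg.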
sub_listed_elements = [[] for x in range(sub_list)]
ind = 0
while elements:
if ind == sub_list:
ind = 0
element = elements.pop(0)
sub_listed_elements[ind].append(element)
ind += 1
valid = []
for sub_listed_el in sub_listed_elements:
pairs = len(sub_listed_el) // 2
is_valid = True
middle_position_egg = sub_listed_el[pairs]
if middle_position_egg > sub_listed_el[pairs + 1] > sub_listed_el[pairs - 1]:
for i in range(pairs):
first_el = sub_listed_el[i]
second_el = sub_listed_el[-(i + 1)]
if first_el > second_el:
is_valid = False
break
if is_valid:
valid.append(middle_position_egg)
return valid
test = ([-1, 7, 3, 15, 2, 12], 2)
print(find_strongest_eggs(*test))
test = ([-1, 0, 2, 5, 2, 3], 2)
print(find_strongest_eggs(*test))
test = ([51, 21, 83, 52, 55], 1)
print(find_strongest_eggs(*test))
test = ([1, 10, 2], 1)
print(find_strongest_eggs(*test))
| [
"[email protected]"
] | |
69114fbbf1b5dd496c9af5359ad301b2f1eeb8b4 | 26ca1e0906feece27896bd267a1f58882fcb0513 | /archive/Basics/multi_func.py | 9820caf039d6f0b84af655361ed8812fdcd57056 | [] | no_license | icecore2/python-training2019 | 092984c6dec1b05e70f9f899ee213d126c45ff63 | ee39f93adabab506c9eef68c5e686ddb59953de9 | refs/heads/master | 2020-09-02T21:19:27.959213 | 2020-04-23T20:06:08 | 2020-04-23T20:06:08 | 219,306,742 | 0 | 2 | null | 2020-01-17T15:07:06 | 2019-11-03T13:40:56 | Python | UTF-8 | Python | false | false | 385 | py | import sys
import os
# from hints.time_gists import timeFormat
firstNumber = 2
secondNumber = 3
def multiFunc(firstNumber, secondNumber):
multiResult = firstNumber * secondNumber
# time = timeFormat('%d-%m-%Y / %H:%M:%S')
# print(time)
return multiResult
print("The multiply result is: %d" + multiFunc(firstNumber, secondNumber))
| [
"[email protected]"
] | |
234e88f3b0a9d275b613902e63d48a31b12c0038 | 0a3bd0fc84263bd65559cf95b19a6752743f7f64 | /src/guis/make_svg_camm | ce5acafd13805ae35b9ba00de0042e3ae860ed81 | [] | no_license | shohei/fabmodules_jp | b38487e5e64599fe8f3de2c404c1f730e81c616c | 67dc16709bb9eff12b63532e83a13aa410f76311 | refs/heads/master | 2016-09-06T16:25:46.636267 | 2014-08-21T06:49:37 | 2014-08-21T06:49:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# make_svg_camm
# .svg to .camm GUI wrapper
#
# Neil Gershenfeld
# CBA MIT 7/13/11
#
# (c) Massachusetts Institute of Technology 2011
# Permission granted for experimental and personal use;
# license for commercial sale available from MIT.
#
# imports
#
import wx,sys
from fab_set import fab_frame
from panel_control import control_panel
from panel_svg import svg_panel
from panel_svg_path import svg_path_panel
from panel_path_camm import path_camm_panel
#
# command line
#
print "command line: make_svg_camm [input_file [size]]"
print " input_file = input .svg file (optional)"
print " size = image panel size (optional)"
#
# start wx
#
app = wx.App()
#
# add panels to frame
#
frame = fab_frame("make_svg_camm",sys.argv)
frame.control_panel = control_panel(frame)
frame.sizer.Add(frame.control_panel,(0,0),span=(1,4),flag=wx.ALIGN_CENTER_HORIZONTAL)
frame.svg_panel = svg_panel(frame)
frame.sizer.Add(frame.svg_panel,(1,0))
frame.path_panel = svg_path_panel(frame)
frame.sizer.Add(frame.path_panel,(1,1))
frame.camm_panel = path_camm_panel(frame)
frame.sizer.Add(frame.camm_panel,(1,2))
#
# defaults
#
frame.set_svg_camm()
#
# fit and show frame
#
frame.Fit()
frame.Show()
#
# start mainloop
#
app.MainLoop()
| [
"[email protected]"
] | ||
42847c974fe12ff0e8c68e79c9bf6085ad3133f3 | 1915a3f90059f4a125b81675d16a427c85428626 | /post.py | 6ab84f631b32119601408481bcca0279c07a68e8 | [] | no_license | AntLouiz/location_photo_bot | b09fd9939513d21b755c6204199c29c31284240c | 0fcf85553efb26f4eec238aa4b0b8f2b57d9f276 | refs/heads/master | 2020-03-31T15:30:04.605136 | 2018-10-20T01:44:38 | 2018-10-20T01:44:38 | 152,339,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | import json
import datetime
class Post(object):
def __init__(self):
self.location = None
self.photo = None
self.date = None
def clean(self):
self.location = None
self.photo = None
self.date = None
def save(self):
data = {
'location': {
'latitude': self.location.latitude,
'longitude': self.location.longitude
},
'photo': self.photo,
'date': datetime.datetime.now().strftime("%Y%m%d")
}
with open('data.json', 'r') as file:
file_data = json.load(file)
file_data['data'].append(data)
with open('data.json', 'w') as file:
file.write(json.dumps(file_data))
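

# Minimal usage sketch (illustrative, not part of the original module): it
# assumes data.json already exists containing {"data": []}, and uses a
# stand-in for the telegram-style location object the bot would receive.
#
#   class FakeLocation(object):
#       latitude = 51.5
#       longitude = -0.12
#
#   post = Post()
#   post.location = FakeLocation()
#   post.photo = 'photos/example.jpg'  # hypothetical photo path/file id
#   post.save()
#   post.clean()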
| [
"[email protected]"
] | |
bb505cb548b5416b10fedd876d900a3c7eb66011 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqpt/fanstatshist1h.py | faf10d31b9ed476de3fe3f4b2b2402b56adf2b40 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 17,009 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class FanStatsHist1h(Mo):
"""
A class that represents historical statistics for fan in a 1 hour sampling interval. This class updates every 15 minutes.
"""
meta = StatsClassMeta("cobra.model.eqpt.FanStatsHist1h", "fan")
counter = CounterMeta("pwm", CounterCategory.GAUGE, "pwm", "pulse width modulation")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pwmMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pwmMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pwmAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pwmSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pwmThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pwmTr"
meta._counters.append(counter)
counter = CounterMeta("speed", CounterCategory.GAUGE, "rpm", "speed")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "speedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "speedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "speedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "speedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "speedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "speedTr"
meta._counters.append(counter)
meta.moClassName = "eqptFanStatsHist1h"
meta.rnFormat = "HDeqptFanStats1h-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical fan stats in 1 hour"
meta.writeAccessMask = 0x80080000000001
meta.readAccessMask = 0x80080000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.eqpt.Fan")
meta.superClasses.add("cobra.model.eqpt.FanStatsHist")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.rnPrefixes = [
('HDeqptFanStats1h-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 6239, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pwmAvg", "pwmAvg", 8251, PropCategory.IMPLICIT_AVG)
prop.label = "pulse width modulation average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmAvg", prop)
prop = PropMeta("str", "pwmMax", "pwmMax", 8250, PropCategory.IMPLICIT_MAX)
prop.label = "pulse width modulation maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmMax", prop)
prop = PropMeta("str", "pwmMin", "pwmMin", 8249, PropCategory.IMPLICIT_MIN)
prop.label = "pulse width modulation minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmMin", prop)
prop = PropMeta("str", "pwmSpct", "pwmSpct", 8252, PropCategory.IMPLICIT_SUSPECT)
prop.label = "pulse width modulation suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmSpct", prop)
prop = PropMeta("str", "pwmThr", "pwmThr", 8253, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "pulse width modulation thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pwmThr", prop)
prop = PropMeta("str", "pwmTr", "pwmTr", 8254, PropCategory.IMPLICIT_TREND)
prop.label = "pulse width modulation trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pwmTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "speedAvg", "speedAvg", 8272, PropCategory.IMPLICIT_AVG)
prop.label = "speed average value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedAvg", prop)
prop = PropMeta("str", "speedMax", "speedMax", 8271, PropCategory.IMPLICIT_MAX)
prop.label = "speed maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedMax", prop)
prop = PropMeta("str", "speedMin", "speedMin", 8270, PropCategory.IMPLICIT_MIN)
prop.label = "speed minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("speedMin", prop)
prop = PropMeta("str", "speedSpct", "speedSpct", 8273, PropCategory.IMPLICIT_SUSPECT)
prop.label = "speed suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("speedSpct", prop)
prop = PropMeta("str", "speedThr", "speedThr", 8274, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "speed thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("speedThr", prop)
prop = PropMeta("str", "speedTr", "speedTr", 8275, PropCategory.IMPLICIT_TREND)
prop.label = "speed trend"
prop.isOper = True
prop.isStats = True
meta.props.add("speedTr", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptSlotToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
ca9afadd672ce387fd79fc8c543b9111e07090f5 | 0460b645ac0697433e4526ea9215ac25c97a64bb | /venv/bin/pyhtmlizer | 9c15bcee75f63931c24a55fe21eaa476ebb0d585 | [] | no_license | veujs/weibo | 0e20645d07196193537f523a677892d3da1abf88 | b33d0c41fc82608fd828e2790a2dcc2c9a246f36 | refs/heads/master | 2020-05-30T12:03:22.631450 | 2019-06-01T10:18:36 | 2019-06-01T10:18:36 | 189,718,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | #!/home/wangzhipeng/myproject/myspider/weibo/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'Twisted==19.2.0','console_scripts','pyhtmlizer'
__requires__ = 'Twisted==19.2.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('Twisted==19.2.0', 'console_scripts', 'pyhtmlizer')()
)
| [
"[email protected]"
] | ||
1b3017e4d9efb6b748173059fdf1e8e745873df8 | cda215558ad8448ed8e2cbb89719de312c382a95 | /enteletaor_lib/libs/hooks/__init__.py | 56572dc3372dc7674f8e8348206a7e4528940f44 | [
"BSD-3-Clause"
] | permissive | cr0hn/enteletaor | 63fc6a9f832ea7b6b08f3f786445a8235b9a4618 | a975b5cb06bc5f819b32e65d0cd2258a37370661 | refs/heads/master | 2023-05-11T13:38:25.213779 | 2023-05-08T08:41:31 | 2023-05-08T08:41:31 | 52,361,896 | 166 | 31 | NOASSERTION | 2023-05-08T08:41:36 | 2016-02-23T13:44:22 | Python | UTF-8 | Python | false | false | 4,021 | py | # -*- coding: utf-8 -*-
#
# Enteletaor - https://github.com/cr0hn/enteletaor
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
This file contains utilities for handling hook decorators.
"""
import logging
import functools
from collections import defaultdict
log = logging.getLogger()
# --------------------------------------------------------------------------
# Config decorators
# --------------------------------------------------------------------------
def on_config_loaded(func):
"""
    This decorator marks a function or method as a hook to run when the
    running config is loaded.
"""
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
return func(*args, **kwargs)
func_wrapper.hook_type = "config"
return func_wrapper
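# Minimal usage sketch (illustrative; `setup_logging` is a hypothetical hook --
# any function defined in a module under this package works the same way):
#
#   @on_config_loaded
#   def setup_logging(config):
#       config.log_level = "DEBUG"
#
# After decoration, find_hooks() (below) reports it under the "config" type.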
# --------------------------------------------------------------------------
# Find hooks
# --------------------------------------------------------------------------
def find_hooks():
"""
Find all hooks and return pointers to functions categorized by hook type.
:return: dict with hooks and type as format: dict(hook_type: function_pointer)
:rtype: dict(str: function)
"""
import os
import os.path
import inspect
base_dir = os.path.abspath(os.path.dirname(__file__))
# Modules found
results = defaultdict(list)
for root, dirs, files in os.walk(base_dir):
# Check if folder is a package
if "__init__.py" not in files:
continue
        # Skip any path that has a component starting with "_"
if any(True for x in root.split("/") if x.startswith("_")):
continue
for filename in files:
if filename.endswith(".py") and \
not filename.startswith("celery") and \
not filename.startswith("test_"):
if filename.startswith("_"):
if filename != "__init__.py":
continue
# loop_file = os.path.join(root, filename)
loop_file = os.path.join(root, filename) \
.replace(base_dir, '') \
.replace(os.path.sep, '.') \
.replace('.py', '')
loop_file = loop_file[1:] if loop_file.startswith(".") else loop_file
# Load module info
try:
classes = __import__("%s.%s" % (__package__, loop_file), globals=globals(), locals=locals(), level=loop_file.count("."))
except ImportError:
classes = __import__(loop_file, globals=globals(), locals=locals(), level=loop_file.count("."))
# Get Modules instances
for m in dir(classes):
_loaded_module = getattr(classes, m)
if inspect.isfunction(_loaded_module) and hasattr(_loaded_module, "hook_type"):
log.debug("Loading hook: %s" % _loaded_module.__name__)
results[_loaded_module.hook_type].append(_loaded_module)
return results
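# Illustrative call (hedged sketch; `running_config` stands in for whatever
# config object the caller holds):
#
#   hooks = find_hooks()
#   for hook in hooks.get("config", []):
#       hook(running_config)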
| [
"[email protected]"
] | |
de7091480f2f208de4c42fa97e6a868185619552 | 8746f4e3da5e230ec0ca4b924bb06a9951dd03da | /setup.py | 9082c72d9f540e3e1d1fc8b9b0e4754df699af35 | [
"MIT"
] | permissive | Apeopl/django-dj-plugin | 845a508dd9088ceb9d9e03de56f5c11d5d1d07a4 | 2711c1af0185eea0fe5d1aed2eca5cd0422b387d | refs/heads/master | 2021-04-19T06:04:23.658893 | 2020-03-26T06:59:50 | 2020-03-26T06:59:50 | 249,585,770 | 0 | 0 | MIT | 2020-03-26T01:49:01 | 2020-03-24T01:42:04 | Python | UTF-8 | Python | false | false | 2,239 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version(*file_paths):
"""Retrieves the version from django_dj_plugin/__init__.py"""
filename = os.path.join(os.path.dirname(__file__), *file_paths)
version_file = open(filename).read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
version = get_version("django_dj_plugin", "__init__.py")
if sys.argv[-1] == 'publish':
try:
import wheel
print("Wheel version: ", wheel.__version__)
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on git:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
setup(
name='django-dj-plugin',
version=version,
description="""Django plgooge practice modules.""",
long_description=readme + '\n\n' + history,
author='Jian Dai',
author_email='[email protected]',
url='https://github.com/daimon99/django-dj-plugin',
packages=[
'django_dj_plugin',
],
include_package_data=True,
install_requires=[],
license="MIT",
zip_safe=False,
keywords='django-dj-plugin',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django :: 2.1',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
| [
"[email protected]"
] | |
4e88beb03ff92ac889515bdb3172e288f961c20b | c5d87c7f25e3fe9b17c1e88993b0ed6831e52acb | /Django_HelpWord/mysite3/upload/templates/__init__.py | 7c35217eb0fb5679a9ba58649325514db58ce182 | [] | no_license | GIS90/python_base_use | e55d55f9df505dac45ddd332fb65dcd08e8e531f | 7166ca85975bb7c56a5fbb6b723fd8300c4dd5d1 | refs/heads/master | 2020-04-02T08:33:49.461307 | 2018-10-23T03:33:41 | 2018-10-23T03:33:41 | 154,249,857 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 270 | py | # -*- coding: utf-8 -*-
"""
------------------------------------------------
describe:
------------------------------------------------
"""
__version__ = "v.10"
__author__ = "PyGo"
__time__ = "2017/3/30"
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
3937e4a4be12d7fa734792ababc9590adee4697e | aeb40bfa5a685bb739e818e7ea906a748795ba59 | /data/management/commands/insert_dynamic_data.py | c6bd4bddbe9e2c54d30ac01bfb78a64a3ab95d03 | [] | no_license | jaebradley/nba_persistence | d1be548967266b4af09625fc140ce9fb4cd88a25 | 177129ad195c07dc0ff93a6c2c8f7b34770da116 | refs/heads/master | 2021-01-18T23:56:52.512661 | 2017-02-16T21:17:05 | 2017-02-16T21:17:05 | 48,635,050 | 10 | 2 | null | 2016-09-23T02:44:36 | 2015-12-27T06:14:25 | JavaScript | UTF-8 | Python | false | false | 474 | py | from django.core.management.base import BaseCommand
from data.inserters.dynamic import insert_players, insert_games, insert_box_scores
class Command(BaseCommand):
def __init__(self, stdout=None, stderr=None, no_color=False):
super(Command, self).__init__(stdout, stderr, no_color)
def handle(self, *args, **options):
Command.insert()
@staticmethod
def insert():
insert_players()
insert_games()
insert_box_scores() | [
"[email protected]"
] | |
7d0ac5540cd33b68e73c38be260d8192538f2a02 | 2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b | /1550.存在连续三个奇数的数组.py | 4b37608348efa8136e2a1c0fa66108f9a3ea9483 | [] | no_license | mqinbin/python_leetcode | 77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3 | 73e0c81867f38fdf4051d8f58d0d3dc245be081e | refs/heads/main | 2023-03-10T18:27:36.421262 | 2021-02-25T07:24:10 | 2021-02-25T07:24:10 | 314,410,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 452 | py | #
# @lc app=leetcode.cn id=1550 lang=python3
#
# [1550] Three Consecutive Odds (存在连续三个奇数的数组)
#
# @lc code=start
class Solution:
def threeConsecutiveOdds(self, arr: List[int]) -> bool:
odd_count = 0
for e in arr:
            if e % 2:
                odd_count += 1
                if odd_count == 3:
                    return True
            else:
                odd_count = 0
return False
# @lc code=end
| [
"[email protected]"
] | |
f68c84a68633225f5e770dd8f0c75f1770df1349 | bc07e2822dbd213e194f69962e2f76c5350f2dc6 | /Python/Introduction/divmod.py | d2195bf6886309c6b622b8a30facfef7e71eabb1 | [] | no_license | nocotan/HackerRank | ddd3168910590c9dcfe59f39551a6de0e63f5946 | 346592c96c5a00c5da9eab73e046b13cefc630d6 | refs/heads/master | 2021-01-15T15:37:52.630649 | 2016-10-30T16:05:17 | 2016-10-30T16:05:17 | 55,508,533 | 1 | 1 | null | 2018-01-19T05:56:20 | 2016-04-05T13:06:29 | Python | UTF-8 | Python | false | false | 93 | py | a = int(input())
b = int(input())
ans = divmod(a, b)
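# divmod(a, b) returns the (quotient, remainder) pair (a // b, a % b),
# e.g. divmod(177, 10) == (17, 7)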
print(ans[0])
print(ans[1])
print(ans)
| [
"[email protected]"
] | |
287e89cd4c348d4ddc8853c3630fe6bc44339f34 | 719e7b35f2b1c3196ff8084b5c5c46cbd2a22f5f | /setup.py | c11bcae02e8fed764919d18313cdb3ad23794389 | [
"MIT"
] | permissive | noobermin/pynoob3a | 72817b87dd7f96652487d139f42373adac820dca | fa8b6650ac286b52803b98b6b596f3cdc9db87cb | refs/heads/master | 2021-02-09T14:07:35.912782 | 2020-03-02T05:48:52 | 2020-03-02T05:48:52 | 244,291,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | from setuptools import setup
setup(name='pynoob3a',
version='0.0.1',
description='dumb binary format',
author='noobermin',
author_email='[email protected]',
license='MIT',
packages=['noob3a'],
      zip_safe=False)
| [
"[email protected]"
] | |
be85893dfa6895326d2afa7685f39f168fc17af4 | d596796c1488ea6dc67fb2030e2fd1fbf45e54cb | /free/index.py | 7bccfb1fd092f52382826ca46e34b1e4c277d164 | [] | no_license | zerc/draft | 83aad5b08fc1d7569295aa595821f117cb29decd | a972f1c341c711530c0894b5340a6639b206ec41 | refs/heads/master | 2021-01-15T23:02:10.509905 | 2010-06-22T16:15:56 | 2010-06-22T16:15:56 | 730,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
# zerc.ru
from os import popen
def get_free():
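    # Parses the second line of `free -m` ("Mem: <total> <used> <free> ...");
    # columns 1-3 give (total, used, free) in megabytes, as strings.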
s = tuple(popen('free -m').readlines()[1].split()[1:4])
return s
def index():
html = """
<html>
<head><title>web free</title></head>
<body>
<style type="text/css">
body {margin:0;padding:0;background:#aeaeae;color:#ffffff;}
.main {width:300px;margin:0 auto 0 auto;text-align:center;}
.main table {border:1px solid #444444;}
.main table td {padding:5px;width:100px;text-align:center;}
.main table .title {background-color:#cccccc;color:#000000;font-weight:bolder;}
.main h3 {text-transform:uppercase;font-size:16px;margin:5px 0 5px 0;}
.main .copy {width:300px;text-align:right;margin:5px 0 5px 0;}
</style>
<div class="main">
    <h3>RAM usage</h3>
<table border="1" cellpadding="0" cellspacing="0" >
<tr class="title">
<td>Total</td>
<td>Used</td>
<td>Free</td>
</tr>
<tr>
<td>%s MB</td>
<td>%s MB</td>
<td>%s MB</td>
</tr>
</table>
<div class="copy"><b>webFree</b> © zerc</div>
</div>
</body>
</html>""" % get_free()
return html | [
"[email protected]"
] | |
2be97047e2001fdc961703b5402a777176f20e26 | 9e4e7b9d3ad410ea84310d1a93122f6817f59b5e | /bin/edsig | d7d9c4fe903836407868143f4c31d604a3624a26 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | Sigterm-no/python-ed25519 | 42105735c53eba865c5b0430eee2487f40b73fea | 15237f3536b12022c30553a857524768a2d904c7 | refs/heads/master | 2021-01-11T05:08:06.617637 | 2014-03-19T23:21:30 | 2014-03-19T23:21:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | #! /usr/bin/python
import os, sys
import ed25519
from hashlib import sha256
def help():
print """\
Usage: (ed25519 version %s)
edsig generate [STEM]
creates keypair, writes to 'STEM.signing.key' and 'STEM.verifying.key'
default is to 'signing.key' and 'verifying.key'
edsig sign (signing.key|keyfile) message.file
prints signature to stdout
If message.file is "-", reads from stdin.
edsig verify (verifying.key|keyfile) message.file (signature|sigfile)
prints 'good signature!' or raises exception
If message.file is "-", reads from stdin.
Key-providing arguments can either be the key itself, or point to a file
containing the key.
""" % ed25519.__version__
def remove_prefix(prefix, s):
if not s.startswith(prefix):
raise ValueError("no prefix found")
return s[len(prefix):]
def data_from_arg(arg, prefix, keylen, readable):
if (readable
and arg.startswith(prefix)
and len(remove_prefix(prefix, arg))==keylen):
return arg
if os.path.isfile(arg):
return open(arg,"r").read()
raise ValueError("unable to get data from '%s'" % arg)
def message_rep(msg_arg):
if msg_arg == "-":
f = sys.stdin
else:
f = open(msg_arg, "rb")
h = sha256()
while True:
data = f.read(16*1024)
if not data:
break
h.update(data)
return h.digest()
if len(sys.argv) < 2:
help()
elif sys.argv[1] == "generate":
sk,vk = ed25519.create_keypair()
if len(sys.argv) > 2:
sk_outfile = sys.argv[2]+".signing.key"
vk_outfile = sys.argv[2]+".verifying.key"
else:
sk_outfile = "signing.key"
vk_outfile = "verifying.key"
sk_s = sk.to_seed(prefix="sign0-")
vk_s = vk.to_ascii("verf0-", "base32")
open(sk_outfile,"w").write(sk_s)
open(vk_outfile,"w").write(vk_s+"\n")
print "wrote private signing key to", sk_outfile
print "write public verifying key to", vk_outfile
elif sys.argv[1] == "sign":
sk_arg = sys.argv[2]
msg_arg = sys.argv[3]
sk = ed25519.SigningKey(data_from_arg(sk_arg, "sign0-", 52, False),
prefix="sign0-")
sig = sk.sign(message_rep(msg_arg), prefix="sig0-", encoding="base32")
print sig
elif sys.argv[1] == "verify":
vk_arg = sys.argv[2]
msg_arg = sys.argv[3]
sig_arg = sys.argv[4]
vk = ed25519.VerifyingKey(data_from_arg(vk_arg, "verf0-", 52, True),
prefix="verf0-", encoding="base32")
sig = data_from_arg(sig_arg, "sig0-", 103, True)
vk.verify(sig, message_rep(msg_arg),
prefix="sig0-", encoding="base32") # could raise BadSignature
print "good signature!"
else:
help()
| [
"[email protected]"
] | ||
cade8d6acd2d53c2ab00934deb72f9baef388b31 | 7f760365660de815db319d20bb05e1fbd5fc8df4 | /server/app/outputs/dmx.py | 150ff08a6742385ae301ebc0a725eae9f721682f | [
"MIT"
] | permissive | BasementCat/audio-reactive-led-strip | db5ac94eb3c43dfdb6a79501d6d8711579d41c51 | a98bac8e04c0fae3022de9f5086914dc1f1192d8 | refs/heads/master | 2022-07-21T12:39:06.257207 | 2022-07-14T01:12:08 | 2022-07-14T01:12:08 | 216,214,804 | 2 | 0 | MIT | 2019-10-19T13:58:07 | 2019-10-19T13:58:07 | null | UTF-8 | Python | false | false | 4,487 | py | import os
import glob
import logging
import threading
import time
import subprocess
import re
from dmxpy.DmxPy import DmxPy
from app import Task
from app.lib.misc import FPSCounter
logger = logging.getLogger(__name__)
hexint = lambda v: int(v, 16)
def find_device_file__linux(vendor, product):
if not os.path.exists('/sys') or not os.path.isdir('/sys'):
return None
for dev in glob.glob('/sys/bus/usb-serial/devices/*'):
devname = os.path.basename(dev)
with open(os.path.join(dev, '../uevent'), 'r') as fp:
for line in fp:
line = line.strip()
if line and '=' in line:
param, value = line.split('=')
if param == 'PRODUCT':
testvendor, testproduct = map(hexint, value.split('/')[:2])
if testvendor == vendor and testproduct == product:
return os.path.join('/dev', devname)
def find_device_file__macos(vendor, product):
devices = []
curdevice = {}
res = subprocess.check_output(['ioreg', '-p', 'IOUSB', '-l', '-b']).decode('utf-8')
for line in res.split('\n'):
line = line.strip()
if not line:
continue
match = re.match(u'^\+-o (.+)\s+<', line)
if match:
if curdevice:
devices.append(curdevice)
curdevice = {}
continue
match = re.match(u'^[\|\s]*"([\w\d\s]+)"\s+=\s+(.+)$', line)
if match:
k, v = match.groups()
if v.startswith('"'):
v = v[1:-1]
else:
try:
v = int(v)
except:
pass
curdevice[k] = v
if curdevice:
devices.append(curdevice)
for d in devices:
if d.get('idVendor') == vendor and d.get('idProduct') == product:
return '/dev/tty.usbserial-' + d['USB Serial Number']
def find_device_file(name):
# Name is either a path (/dev/ttyUSB0) which might change, or a device ID (0403:6001) which does not
if name.startswith('/') or ':' not in name:
        # Assume the name is already a device file path
return name
vendor, product = map(hexint, name.split(':'))
for fn in (find_device_file__linux, find_device_file__macos):
try:
file = fn(vendor, product)
if file:
return file
except:
logger.debug("Failure in find device file", exc_info=True)
raise RuntimeError(f"Can't find USB device {name}")
class DMX(Task):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.config.get('DMX_DEVICE'):
raise ValueError("No DMX_DEVICE in config")
self.dmx = None
self.dmx_lock = threading.Lock()
self.dmx_attempt = None
self.delay = 1.0 / float(self.config.get('FPS', 60))
self.last_send = 0
self.fps = FPSCounter('DMX')
self.get_dmx()
def get_dmx(self):
if not self.dmx and self.config.get('DMX_DEVICE') != 'sink':
if self.dmx_attempt is None or time.time() - self.dmx_attempt > 1:
self.dmx_attempt = time.time()
if not self.config.get('DMX_DEVICE'):
if self.config.get('DMX_DEVICE') is None:
logger.error("No DMX device configured")
self.config['DMX_DEVICE'] = False
return
with self.dmx_lock:
try:
self.dmx = DmxPy(find_device_file(self.config['DMX_DEVICE']))
except:
logger.error("Can't open DMX device %s", self.config['DMX_DEVICE'], exc_info=True)
return self.dmx
def run(self, data):
dmx = self.get_dmx()
if dmx:
if data.get('dmx_force'):
with self.fps:
for chan, val in data['dmx_force'].items():
dmx.setChannel(chan, val)
dmx.render()
if data.get('dmx'):
for chan, val in data['dmx'].items():
dmx.setChannel(chan, val)
if time.time() - self.last_send >= self.delay:
self.last_send = time.time()
with self.fps:
dmx.render()
| [
"[email protected]"
] | |
32e99b6d1e481856877756ea2cb6756722d16906 | 61ef327bd1d5ff6db7595221db6823c947dab42b | /FlatData/EquipmentStatExcelTable.py | e079ec5a2805f744c293cc9dd2edff8c4f954a6f | [] | no_license | Aikenfell/Blue-Archive---Asset-Downloader | 88e419686a80b20b57a10a3033c23c80f86d6bf9 | 92f93ffbdb81a47cef58c61ec82092234eae8eec | refs/heads/main | 2023-09-06T03:56:50.998141 | 2021-11-19T12:41:58 | 2021-11-19T12:41:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: FlatData
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class EquipmentStatExcelTable(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = EquipmentStatExcelTable()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsEquipmentStatExcelTable(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# EquipmentStatExcelTable
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# EquipmentStatExcelTable
def DataList(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from FlatData.EquipmentStatExcel import EquipmentStatExcel
obj = EquipmentStatExcel()
obj.Init(self._tab.Bytes, x)
return obj
return None
# EquipmentStatExcelTable
def DataListLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.VectorLen(o)
return 0
# EquipmentStatExcelTable
def DataListIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
return o == 0
def Start(builder): builder.StartObject(1)
def EquipmentStatExcelTableStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def AddDataList(builder, DataList): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(DataList), 0)
def EquipmentStatExcelTableAddDataList(builder, DataList):
"""This method is deprecated. Please switch to AddDataList."""
return AddDataList(builder, DataList)
def StartDataListVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def EquipmentStatExcelTableStartDataListVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartDataListVector(builder, numElems)
def End(builder): return builder.EndObject()
def EquipmentStatExcelTableEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder) | [
"[email protected]"
] | |
30d2d8bf4d1bd7d1f4a8095bfd4336d191319e46 | bbe5b336150c38f480a4c3a3a15e1d65a7dfc7d1 | /tests/app/api/business/validators/application_validator/test_validate_documents.py | 319034a3aff6b2c513ba2c418c2b4794d506282a | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | AusDTO/dto-digitalmarketplace-api | 9135785c205fe04bbb07782c561c5c5f8cf8417d | af1f0c8979406f80223ab7a68266563abd80b2f4 | refs/heads/master | 2022-07-31T04:12:36.364555 | 2022-07-07T04:31:41 | 2022-07-07T04:31:41 | 62,025,672 | 6 | 7 | MIT | 2022-05-23T23:32:37 | 2016-06-27T04:34:37 | Python | UTF-8 | Python | false | false | 1,755 | py | from app.api.business.validators import ApplicationValidator
from app.models import Application
def test_can_get_error_when_no_documents():
application = Application(
data={}
)
errors = ApplicationValidator(application).validate_documents()
assert len(errors) == 1
def test_can_get_error_for_expired_documents():
application = Application(
data={
'documents': {
'indemnity': {
'filename': 'test.pdf',
'expiry': '2018/01/01'
},
'liability': {
'filename': 'test.pdf',
'expiry': '2018/01/01'
},
'workers': {
'filename': 'test.pdf',
'expiry': '2018/01/01'
},
'financial': {
'filename': 'test.pdf'
}
}
}
)
errors = ApplicationValidator(application).validate_documents()
assert len(errors) == 3
def test_can_get_error_for_no_filename():
application = Application(
data={
'documents': {
'indemnity': {
'filename': '',
'expiry': '2018/01/01'
},
'liability': {
'filename': '',
'expiry': '2018/01/01'
},
'workers': {
'filename': '',
'expiry': '2018/01/01'
},
'financial': {
'filename': ''
}
}
}
)
errors = ApplicationValidator(application).validate_documents()
assert len(errors) == 7
| [
"[email protected]"
] | |
e24f4a4008a9f2edade00871369f275ca42462dd | 5b9bce9fdfc13848b6bacc73741f6e8fc5a4ae99 | /client/client.py | eb5be712516765751211389e5083d166429114f2 | [] | no_license | ContinuumBridge/bridge_admin | 4a5a036f4e0cb4e96366a85524aef0c33e82a7ff | efd4148a55221f74cb8a11139a8416d1af453408 | refs/heads/master | 2022-03-20T15:00:01.422221 | 2020-01-04T11:08:17 | 2020-01-04T11:08:17 | 17,435,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py |
import httplib
import json
import requests
import websocket
import time
import signal
from twisted.internet import threads
from twisted.internet import defer
from twisted.internet import reactor
#CID 64 on Production. Key: 6b8b20b7r2oPxGmlwjoFuJD/iLKCPTkBOW/ZR9F2vnyxGd1pHLVVime+srwxoOTP
#CB_ADDRESS = "portal.continuumbridge.com"
#KEY = "6b8b20b7r2oPxGmlwjoFuJD/iLKCPTkBOW/ZR9F2vnyxGd1pHLVVime+srwxoOTP"
# Staging service test
CB_ADDRESS = "staging.continuumbridge.com"
KEY = "649e038do23icDEnfrtxf0BRCbLw9exPIyTDKSxJtm8EGm10jG4vMjUFRZqLmbfE"
START_DELAY = 60
SWITCH_INTERVAL = 60
# Staging:
DESTINATION = "BID106/AID29"
# Production
#DESTINATION = "BID167/AID12"
class Connection(object):
def __init__(self):
self.boilerState = 0
reactor.callInThread(self.connect)
reactor.callLater(START_DELAY, self.switchBoiler)
reactor.run()
def connect(self) :
auth_url = "http://" + CB_ADDRESS + "/api/client/v1/client_auth/login/"
auth_data = '{"key": "' + KEY + '"}'
auth_headers = {'content-type': 'application/json'}
response = requests.post(auth_url, data=auth_data, headers=auth_headers)
self.cbid = json.loads(response.text)['cbid']
print "CBID: ", self.cbid
sessionID = response.cookies['sessionid']
ws_url = "ws://" + CB_ADDRESS + ":7522/"
websocket.enableTrace(True)
self.ws = websocket.WebSocketApp(
ws_url,
on_open = self._onopen,
header = ['sessionID: {0}'.format(sessionID)],
on_message = self._onmessage)
self.ws.run_forever()
def _onopen(self, ws):
print "on_open"
def _onmessage(self, ws, message):
msg = json.loads(message)
print "Message received:"
print(json.dumps(msg, indent=4))
def switchBoiler(self):
msg = {
"source": self.cbid,
"destination": DESTINATION,
"body": {
"n": 0,
"d":
[
{
"i": "Boiler",
"s": self.boilerState,
"at": int(time.time() + 20)
}
]
}
}
print "Sending: ", msg
self.ws.send(json.dumps(msg))
print "Message sent"
if self.boilerState == 0:
self.boilerState = 1
else:
self.boilerState = 0
reactor.callLater(SWITCH_INTERVAL, self.switchBoiler)
def signalHandler(self, signal, frame):
logging.debug("%s signalHandler received signal", ModuleName)
reactor.stop()
exit()
if __name__ == '__main__':
connection = Connection()
| [
"[email protected]"
] | |
41faa5af6577c42cbb30f57da6441bb4991e463c | e6f62843d8e7f580a8f2993988cde930a2f5daf2 | /final_project/work_classifiers.py | b665de4b121586dd97b0848c3555e197199060cd | [] | no_license | diegoami/ud120-projects | 614002d317425139948a254293c46a335c1b1e22 | 2a573ea095ac456843c203592d7175800d49c938 | refs/heads/master | 2021-01-20T00:27:48.939087 | 2017-06-15T19:07:59 | 2017-06-15T19:07:59 | 89,133,359 | 0 | 0 | null | 2017-04-23T10:17:18 | 2017-04-23T10:17:18 | null | UTF-8 | Python | false | false | 854 | py |
import numpy as np
def gaussian_classificator(features_train, labels_train):
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(features_train, np.ravel(labels_train))
return clf
def tree_classificator(features_train, labels_train, **kwargs):
from sklearn import tree
clf = tree.DecisionTreeClassifier(**kwargs)
clf.fit(features_train, np.ravel(labels_train))
return clf
def svc_classificator(features_train, labels_train):
from sklearn.svm import SVC
clf = SVC()
clf.fit(features_train, labels_train)
return clf
gauss_call = {"method": gaussian_classificator, "args": {}}
tree_call = {"method": tree_classificator, "args": {"min_samples_split": 2}}
svc_call = {"method": svc_classificator, "args": {}}
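# Minimal usage sketch (illustrative; assumes features/labels splits from the
# course's preprocessing step). Each *_call dict pairs a builder with its
# keyword arguments:
#
#   for entry in (tree_call, gauss_call, svc_call):
#       clf = entry["method"](features_train, labels_train, **entry["args"])
#       print(clf.score(features_test, labels_test))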
classifiers = [ tree_call, gauss_call] | [
"[email protected]"
] | |
15d84fd92ae6047db555a7aabf4fe7ff1c05d808 | e8d7e13eb4d26c0a0147f2d0208d70e61f865c2c | /untitled7/bin/2to3-3.6 | a7320f5bb9145f972ebb4114fd485764853a360d | [] | no_license | fazi4888/APCSP-FreshmanYear | 55c5b5717aadeb2d871582754174f88213a488fe | b4f0f797b2c469e148b0330ad9d309610f1f0668 | refs/heads/master | 2022-11-30T11:51:30.210342 | 2020-08-17T15:00:37 | 2020-08-17T15:00:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | 6 | #!/Users/rayaan_siddiqi23/untitled7/bin/python
import sys
from lib2to3.main import main
sys.exit(main("lib2to3.fixes"))
| [
"[email protected]"
] |