blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a41d7737fdb64767088b4153d8994a0422a6044c
|
ca2dbcfeac6ab571a19bd7d91b7234fd461d09e3
|
/contact/settings.py
|
f6b23ebea5443fb592009997c1e7ce9e73093d67
|
[] |
no_license
|
RahulSinghDhek/test-contact
|
51ebcc85e32a3d4fc86cb978824337b444f077be
|
ff14bb369e4caae6cd4db95388f7c87bf65c3227
|
refs/heads/master
| 2020-04-20T02:18:27.516767 | 2019-01-31T17:44:41 | 2019-01-31T17:44:41 | 168,568,336 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,655 |
py
|
"""
Django settings for contact project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+mg_ysn-@n6l*ltqbi59wn(b(9pt32ugy_l!ztko^ux0nl80@k'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1','https://contactlistdhek.herokuapp.com/']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'phonebook',
'rest_framework.authtoken'
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
'rest_framework.authentication.TokenAuthentication',
],
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'contact.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'contact.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test', # Or path to database file if using sqlite3.
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': '1234', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '5432', # Set to empty string for default. Not used with sqlite3.
}
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
# https://docs.djangoproject.com/en/1.11/howto/static-files/
PROJECT_ROOT = os.path.join(os.path.abspath(__file__))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra lookup directories for collectstatic to find static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Add configuration for static files storage using whitenoise
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
import dj_database_url
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
|
[
"[email protected]"
] | |
df0300b9ae066ae31618798f45525e2480426413
|
7a1b08c64b29522d4bbb913475275c1bc8ad61a4
|
/patient_agenda/events/doc_events/conver_value.py
|
afcdc7008de591882958f1eb59c25de32cecb141
|
[
"MIT"
] |
permissive
|
erichilarysmithsr/time-track
|
8f84d4cc92cebaedce550b3741982d204e734a6c
|
dc0a7b63c937d561309f9b1c84af65fb581a8e18
|
refs/heads/master
| 2023-03-27T08:07:46.717221 | 2021-03-30T16:45:50 | 2021-03-30T16:45:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,695 |
py
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""Read a patient appointment (day/month/year) from patient_rdv.json, format
it as a zero-padded DD/MM/YYYY string, and persist it to patient_value.json,
triggering extend_agenda.py when the output file is created for the first
time."""
import os
import json
import subprocess
import sys

try:
    with open('./patient_agenda/events/doc_events/patient_rdv.json') as file:
        data = json.load(file)
except FileNotFoundError as fileout:
    # BUG FIX: the original only printed the error and fell through, then
    # crashed with a NameError because `data` was never bound. Abort instead.
    print("File 1 patient_rdv.json not created", fileout)
    sys.exit(1)

for value in data:
    print(value)

# Each entry is assumed to be a (label, number) pair — TODO confirm against
# the producer of patient_rdv.json. Collect the numeric parts.
data_list1 = [value[1] for value in data]
data_day = data_list1[0]
data_month = data_list1[1]
data_year = data_list1[2]

# Zero-pad day and month to two digits. str.zfill replaces the original
# manual `< 10` branching (which was wrapped in a dead `except ValueError`:
# integer comparisons never raise ValueError).
extraday = str(data_day).zfill(2)
extramounth = str(data_month).zfill(2)

final_data = extraday + '/' + extramounth + '/' + str(data_year) + ' :'
print(final_data)

try:
    # os.path.getsize raises FileNotFoundError when the file is missing.
    if os.path.getsize('./patient_agenda/events/doc_events/fix_agenda/patient_value.json'):
        print("+ File 'value' exist !")
    with open('./patient_agenda/events/doc_events/fix_agenda/patient_value.json', 'w') as partytime:
        json.dump(final_data, partytime)
except FileNotFoundError:
    print("File doesn't exist, but it has been created !")
    with open('./patient_agenda/events/doc_events/fix_agenda/patient_value.json', 'w') as partyleft:
        json.dump(final_data, partyleft)
    # Only kicked off on first creation, as in the original script.
    subprocess.call('./patient_agenda/events/doc_events/fix_agenda/extend_agenda.py')
|
[
"[email protected]"
] | |
0048953dec39f492a91c8bdde7a9ddaca57537a1
|
5d4753b7e463827c9540e982108de22f62435c3f
|
/python/tink/daead/_deterministic_aead_wrapper_test.py
|
b59d11dca3c7331a23581b856195197dfeb49b72
|
[
"Apache-2.0"
] |
permissive
|
thaidn/tink
|
8c9b65e3f3914eb54d70847c9f56853afd051dd3
|
2a75c1c3e4ef6aa1b6e29700bf5946b725276c95
|
refs/heads/master
| 2021-07-25T02:02:59.839232 | 2021-02-10T17:21:31 | 2021-02-10T17:22:01 | 337,815,957 | 2 | 0 |
Apache-2.0
| 2021-02-10T18:28:20 | 2021-02-10T18:28:20 | null |
UTF-8
|
Python
| false | false | 6,074 |
py
|
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.aead_wrapper."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import tink
from tink import daead
from tink.testing import keyset_builder
# Key templates under test: the standard AES256-SIV template and a RAW
# (no output-prefix) variant derived from it via the keyset builder.
DAEAD_TEMPLATE = daead.deterministic_aead_key_templates.AES256_SIV
RAW_DAEAD_TEMPLATE = keyset_builder.raw_template(DAEAD_TEMPLATE)


def setUpModule():
  """Registers the deterministic-AEAD key managers once for all tests."""
  daead.register()
class AeadWrapperTest(parameterized.TestCase):
  """Tests for the deterministic-AEAD wrapper across keyset configurations."""

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_encrypt_decrypt(self, template):
    """Round trip: what a primitive encrypts, the same primitive decrypts."""
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    ciphertext = primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    self.assertEqual(
        primitive.decrypt_deterministically(ciphertext, b'associated_data'),
        b'plaintext')

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_decrypt_unknown_ciphertext_fails(self, template):
    """Ciphertext produced under an unrelated keyset must not decrypt."""
    unknown_handle = tink.new_keyset_handle(template)
    unknown_primitive = unknown_handle.primitive(daead.DeterministicAead)
    unknown_ciphertext = unknown_primitive.encrypt_deterministically(
        b'plaintext', b'associated_data')
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    with self.assertRaises(tink.TinkError):
      primitive.decrypt_deterministically(unknown_ciphertext,
                                          b'associated_data')

  @parameterized.parameters([DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE])
  def test_decrypt_wrong_associated_data_fails(self, template):
    """Decryption must be cryptographically bound to the associated data."""
    keyset_handle = tink.new_keyset_handle(template)
    primitive = keyset_handle.primitive(daead.DeterministicAead)
    ciphertext = primitive.encrypt_deterministically(b'plaintext',
                                                     b'associated_data')
    with self.assertRaises(tink.TinkError):
      primitive.decrypt_deterministically(ciphertext, b'wrong_associated_data')

  @parameterized.parameters([(DAEAD_TEMPLATE, DAEAD_TEMPLATE),
                             (RAW_DAEAD_TEMPLATE, DAEAD_TEMPLATE),
                             (DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE),
                             (RAW_DAEAD_TEMPLATE, RAW_DAEAD_TEMPLATE)])
  def test_encrypt_decrypt_with_key_rotation(self, template1, template2):
    """Key-rotation scenario.

    Snapshots p1..p4 of the keyset are taken at successive rotation stages:
    p1 = only the older key; p2 = both keys, older primary; p3 = both keys,
    newer primary; p4 = older key disabled. The order of builder mutations
    below is load-bearing — each primitive captures the keyset state at the
    moment its handle is taken.
    """
    builder = keyset_builder.new_keyset_builder()
    older_key_id = builder.add_new_key(template1)
    builder.set_primary_key(older_key_id)
    p1 = builder.keyset_handle().primitive(daead.DeterministicAead)
    newer_key_id = builder.add_new_key(template2)
    p2 = builder.keyset_handle().primitive(daead.DeterministicAead)
    builder.set_primary_key(newer_key_id)
    p3 = builder.keyset_handle().primitive(daead.DeterministicAead)
    builder.disable_key(older_key_id)
    p4 = builder.keyset_handle().primitive(daead.DeterministicAead)
    self.assertNotEqual(older_key_id, newer_key_id)
    # p1 encrypts with the older key. So p1, p2 and p3 can decrypt it,
    # but not p4.
    ciphertext1 = p1.encrypt_deterministically(b'plaintext', b'ad')
    self.assertEqual(p1.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    self.assertEqual(p2.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext1, b'ad'),
                     b'plaintext')
    with self.assertRaises(tink.TinkError):
      _ = p4.decrypt_deterministically(ciphertext1, b'ad')
    # p2 encrypts with the older key. So p1, p2 and p3 can decrypt it,
    # but not p4.
    ciphertext2 = p2.encrypt_deterministically(b'plaintext', b'ad')
    self.assertEqual(p1.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    self.assertEqual(p2.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext2, b'ad'),
                     b'plaintext')
    with self.assertRaises(tink.TinkError):
      _ = p4.decrypt_deterministically(ciphertext2, b'ad')
    # p3 encrypts with the newer key. So p2, p3 and p4 can decrypt it,
    # but not p1.
    ciphertext3 = p3.encrypt_deterministically(b'plaintext', b'ad')
    with self.assertRaises(tink.TinkError):
      _ = p1.decrypt_deterministically(ciphertext3, b'ad')
    self.assertEqual(p2.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')
    self.assertEqual(p4.decrypt_deterministically(ciphertext3, b'ad'),
                     b'plaintext')
    # p4 encrypts with the newer key. So p2, p3 and p4 can decrypt it,
    # but not p1.
    ciphertext4 = p4.encrypt_deterministically(b'plaintext', b'ad')
    with self.assertRaises(tink.TinkError):
      _ = p1.decrypt_deterministically(ciphertext4, b'ad')
    self.assertEqual(p2.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')
    self.assertEqual(p3.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')
    self.assertEqual(p4.decrypt_deterministically(ciphertext4, b'ad'),
                     b'plaintext')
if __name__ == '__main__':
absltest.main()
|
[
"[email protected]"
] | |
f487b32d187d8c46617f40bfa556df73ae0f4374
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/list_batch_jobs_response.py
|
c820f1d45d59dffa07e947b9b3b4e80b79e3084e
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 3,929 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListBatchJobsResponse(SdkResponse):
    """Auto-generated response model for the ListBatchJobs API.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    # Attribute names whose values must be masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'count': 'int',
        'jobs': 'list[Job]'
    }

    attribute_map = {
        'count': 'count',
        'jobs': 'jobs'
    }

    def __init__(self, count=None, jobs=None):
        """ListBatchJobsResponse

        The model defined in huaweicloud sdk

        :param count: Total number of scheduled batch jobs.
        :type count: int
        :param jobs:
        :type jobs: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        super(ListBatchJobsResponse, self).__init__()

        self._count = None
        self._jobs = None
        self.discriminator = None

        # Only route supplied values through the property setters; omitted
        # arguments leave the private fields as None.
        if count is not None:
            self.count = count
        if jobs is not None:
            self.jobs = jobs

    @property
    def count(self):
        """Gets the count of this ListBatchJobsResponse.

        Total number of scheduled batch jobs.

        :return: The count of this ListBatchJobsResponse.
        :rtype: int
        """
        return self._count

    @count.setter
    def count(self, count):
        """Sets the count of this ListBatchJobsResponse.

        Total number of scheduled batch jobs.

        :param count: The count of this ListBatchJobsResponse.
        :type count: int
        """
        self._count = count

    @property
    def jobs(self):
        """Gets the jobs of this ListBatchJobsResponse.

        :return: The jobs of this ListBatchJobsResponse.
        :rtype: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        return self._jobs

    @jobs.setter
    def jobs(self, jobs):
        """Sets the jobs of this ListBatchJobsResponse.

        :param jobs: The jobs of this ListBatchJobsResponse.
        :type jobs: list[:class:`huaweicloudsdkiotanalytics.v1.Job`]
        """
        self._jobs = jobs

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize every declared attribute; nested models are
        # flattened via their own to_dict(), sensitive fields are masked.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            # Legacy Python 2 hack so non-ASCII (e.g. Chinese) text can be
            # dumped without UnicodeDecodeError; a no-op on Python 3.
            reload(sys)
            sys.setdefaultencoding("utf-8")

        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ListBatchJobsResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"[email protected]"
] | |
30ca95084a650818ad76ed5e625a46506e6e8e39
|
60e27c8b1755c741dfd069393e8b65766a9647ae
|
/07_Natural_Language_Processing/C0702_bag_of_words.py
|
fb7af5b042f0820d93e1aaa9984960d0ba24a209
|
[
"MIT"
] |
permissive
|
xiejinwen113/tensorflow_cookbook
|
d0426991be2369d6480728c2af7a4dc93eccf621
|
57d7ee719385ddd249a67c3a85bd336e884a67e5
|
refs/heads/master
| 2022-03-24T08:30:43.089441 | 2019-12-09T09:55:39 | 2019-12-09T09:55:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,784 |
py
|
# -*- encoding: utf-8 -*-
"""
@Author     :   zYx.Tom
@Contact    :   [email protected]
@site       :   https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software   :   PyCharm
@Project    :   TensorFlow_Machine_Learning_Cookbook
@File       :   C0702_bag_of_words.py
@Version    :   v0.1
@Time       :   2019-11-07 17:11
@License    :   (C)Copyright 2018-2019, zYx.Tom
@Reference  :   "TensorFlow Machine Learning Cookbook, Nick McClure", Sec0702, P144
@Desc       :   Natural language processing: a "bag of words" model built with TensorFlow.
Notes (translated from the author's original remarks):
1. This model is fundamentally mismatched to the data: 87% of the dataset are
   normal (ham) messages, so always predicting "ham" already yields 87%
   accuracy, yet the trained model scores below that. Understanding the
   dataset properly is essential.
2. Tracing sess.run(x_col_sums, feed_dict={x_data: t}) shows the trained
   embedding effectively flags messages with many UNKNOWN tokens or many
   'to' tokens as spam — an artifact of the skewed data; the root cause is
   the model/data mismatch.
"""
# common imports
import os
import string
import sys
import matplotlib.pyplot as plt
import numpy as np  # pin numpy<1.17 — newer releases break this TF version
import sklearn
import tensorflow as tf
import winsound
from tensorflow.contrib import learn
from tensorflow.python.framework import ops

# Numpy print options (precision=8 despite the original comment saying 3)
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix random seeds so repeated runs produce identical results
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# numpy 1.16.4 is required
assert np.__version__ in ["1.16.5", "1.16.4"]
# Silence: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Reset the default computation graph
ops.reset_default_graph()
# Open graph session
sess = tf.Session()
# ----------------------------------------------------------------------
print("载入数据。。。")
# Read the dataset from disk (download it first if not present)
data_file_name = "../Data/SMS_SPam/SMSSpamCollection"
with open(data_file_name, encoding = 'utf-8') as temp_output_file:
    text_data = temp_output_file.read()
    pass
pass
# Format Data: each line is "<label>\t<message>"
text_data = text_data.encode('ascii', errors = 'ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x) >= 1]
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Integer-encode the labels: 'spam' (junk SMS) -> 1, 'ham' (normal SMS) -> 0
target = [1 if x == 'spam' else 0 for x in target]
# Text normalization
texts = [x.lower() for x in texts]  # lower-case everything
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]  # strip punctuation
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]  # strip digits
texts = [' '.join(x.split()) for x in texts]  # collapse repeated whitespace
# Histogram of message word counts, capped at 50 words
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins = 25)
plt.title("图7-1:文本数据中的单词长度的直方图")
sentence_size = 25  # each sentence padded/truncated to exactly 25 words
min_word_freq = 3  # words seen fewer than 3 times are excluded from the vocabulary
# TensorFlow's built-in tokenizer VocabularyProcessor()
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency = min_word_freq)
# Have to fit transform to get length of unique words.
vocab_processor.fit_transform(texts)  # fit on the corpus to build the vocabulary
embedding_size = len(vocab_processor.vocabulary_)  # embedding size = vocabulary size
# Split into training (80%) and test (20%) sets
train_indices = np.random.choice(len(texts), int(round(len(texts) * 0.8)), replace = False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Identity matrix used for one-hot encoding of word ids
identity_mat = tf.diag(tf.ones(shape = [embedding_size]))
# Variables for logistic regression
A = tf.Variable(tf.random_normal(shape = [embedding_size, 1]))
b = tf.Variable(tf.random_normal(shape = [1, 1]))
# Initialize placeholders
x_data = tf.placeholder(shape = [sentence_size], dtype = tf.int32)
y_target = tf.placeholder(shape = [1, 1], dtype = tf.float32)
# Text-vocab embedding lookup; the identity matrix maps each word id to its one-hot vector
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)  # TODO(original author): why sum over rows? — this collapses the one-hot rows into a bag-of-words count vector
# Model output
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = x_col_sums_2D @ A + b
# Cross-entropy loss
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = y_target, logits = model_output))
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Intitialize Variables
init = tf.global_variables_initializer()
sess.run(init)
# Start Logistic Regression
print('基于训练集中{}个句子开始训练。。。'.format(len(texts_train)))
loss_vec, train_acc_all, train_acc_avg = [], [], []
for ix, t in enumerate(vocab_processor.transform(texts_train)):  # transform only — the processor was already fitted above
    y_data = [[target_train[ix]]]
    sess.run(train_step, feed_dict = {x_data: t, y_target: y_data})
    temp_loss = sess.run(loss, feed_dict = {x_data: t, y_target: y_data})
    loss_vec.append(temp_loss)
    if ix % 100 == 0:
        print('训练集迭代次数: #' + str(ix + 1) + ': Loss = ' + str(temp_loss))
        pass
    [[temp_pred]] = sess.run(prediction, feed_dict = {x_data: t, y_target: y_data})
    # Record whether this single prediction was correct
    train_acc_temp = target_train[ix] == np.round(temp_pred)
    train_acc_all.append(train_acc_temp)
    if len(train_acc_all) >= 50:
        # Track the running mean of the last 50 training accuracies
        train_acc_avg.append(np.mean(train_acc_all[-50:]))
        pass
    pass
# Evaluate accuracy on the held-out test set
print('基于测试集中{}个句子开始评估。。。'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.transform(texts_test)):
    y_data = [[target_test[ix]]]
    if ix % 50 == 0:
        print("测试集迭代次数 #", ix + 1)
        pass
    [[temp_pred]] = sess.run(prediction, feed_dict = {x_data: t, y_target: y_data})
    test_acc_temp = target_test[ix] == np.round(temp_pred)
    test_acc_all.append(test_acc_temp)
    pass
print("\n测试集精度: {}".format(np.mean(test_acc_all)))
# Plot training accuracy over time
plt.figure()
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'b-', label = "训练集精度")
plt.title("统计最后50个训练集数据的平均训练集精度")
plt.xlabel('迭代次数')
plt.ylabel("训练集精度")
# -----------------------------------------------------------------
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
    plt.show()
    pass
|
[
"[email protected]"
] | |
d1e21770e28bf318bb9670ca416bde39191d4f7d
|
6e0108c11132e63c81adbfab4309011b1f9f6dda
|
/tests/python/extra/clear_leaves.py
|
4d2f1e3a58a3fcb2fd07655efd2646b28d0a5f5f
|
[
"Apache-2.0"
] |
permissive
|
scottdonaldau/ledger-qrl
|
c28a614ae52c44e53947e444abf078ec27041815
|
7a3b933b84065b9db2b775d50205efcdbed2399e
|
refs/heads/master
| 2020-04-12T07:12:25.687015 | 2018-12-19T02:55:43 | 2018-12-19T02:55:43 | 162,360,262 | 0 | 0 |
Apache-2.0
| 2018-12-19T00:15:27 | 2018-12-19T00:15:27 | null |
UTF-8
|
Python
| false | false | 269 |
py
|
from pyledgerqrl.ledgerqrl import *

# NOTE(review): the wildcard import above is assumed to provide LedgerQRL,
# `time`, and the INS_* APDU instruction constants — confirm in pyledgerqrl.
dev = LedgerQRL()
start = time.time()  # NOTE(review): recorded but never reported — presumably leftover timing code
# Overwrite all 256 leaves on the device with 32 zero bytes each.
for i in range(256):
    # APDU payload: one leaf-index byte followed by a 32-byte zeroed leaf.
    data = bytearray([i]) + bytearray.fromhex("00" * 32)
    answer = dev.send(INS_TEST_WRITE_LEAF, data)
    # A successful write returns an empty response.
    assert len(answer) == 0
# Kick off public-key generation (phase 1) once the leaves are cleared.
answer = dev.send(INS_TEST_PK_GEN_1)
|
[
"[email protected]"
] | |
55559c3ca1ad5ff7d80c5cf736dab7da2c5d72a7
|
dfff816642f4e1afeab268f441906a6d811d3fb4
|
/polling_stations/apps/data_collection/management/commands/import_newham.py
|
d1463f346e39dd465ff77e53dbf91e637072ccae
|
[] |
permissive
|
mtravis/UK-Polling-Stations
|
2c07e03d03959492c7312e5a4bfbb71e12320432
|
26e0331dc29253dc436a0462ffaa01e974c5dc52
|
refs/heads/master
| 2020-05-14T18:36:31.501346 | 2019-04-17T12:54:57 | 2019-04-17T12:54:57 | 181,912,382 | 0 | 0 |
BSD-3-Clause
| 2019-04-17T14:48:26 | 2019-04-17T14:48:26 | null |
UTF-8
|
Python
| false | false | 1,255 |
py
|
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
    """Importer for Newham (E09000025) polling station / address data."""

    council_id = "E09000025"
    addresses_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    stations_name = "local.2018-05-03/Version 2/LBNewham Democracy_Club__03May2018.TSV"
    elections = ["local.2018-05-03", "mayor.newham.2018-05-03"]
    csv_delimiter = "\t"

    def _record_with_postcode(self, record, postcode):
        # Build the normal address dict, then patch in the corrected postcode.
        rec = super().address_record_to_dict(record)
        rec["postcode"] = postcode
        return rec

    def address_record_to_dict(self, record):
        """Apply Newham-specific exclusions and postcode corrections.

        Returns None to drop a record entirely; the guard clauses are kept in
        the original source-data order.
        """
        urn = record.property_urn
        if record.addressline6 == "E16 1EF":
            return None
        if urn == "10090852604":
            return None
        if urn == "10034510101":
            return self._record_with_postcode(record, "E13 8NA")
        if record.addressline6 == "E16 1XF":
            return None
        if urn == "10090756946":
            return self._record_with_postcode(record, "E7 9AW")
        if urn == "10023994990":
            return self._record_with_postcode(record, "E7 9AW")
        return super().address_record_to_dict(record)
|
[
"[email protected]"
] | |
f7a133da42b483bbd6721ea185ae86310461ffcc
|
1eb2d7d2a6e945a9bc487afcbc51daefd9af02e6
|
/algorithm/zhang/baiduface.py
|
aae543c80ba05cfedc089fe690d2f4beb4954ca2
|
[] |
no_license
|
fengges/eds
|
11dc0fdc7a17b611af1f61894f497ad443439bfe
|
635bcf015e3ec12e96949632c546d29fc99aee31
|
refs/heads/master
| 2021-06-20T04:43:02.019309 | 2019-06-20T12:55:26 | 2019-06-20T12:55:26 | 133,342,023 | 0 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,255 |
py
|
"""Run Baidu AIP face detection over every image in ./pic and draw red
rectangles around detected faces onto the matching image in ./pic3."""
import os
import time

from aip import AipFace
from PIL import Image, ImageDraw

# Baidu AIP credentials (APP_ID / API key / secret key).
# NOTE(review): credentials are hard-coded in source — move them to
# environment variables or a config file.
APP_ID = '10909628'
API_KEY = 'sInxLcVbCLSg6rNXVDXR4sHD'
SECRET_KEY = 'e2zgNstc7GEhhvFOfCVKDW2itVf0iID4'

filepath = "pic"
client = AipFace(APP_ID, API_KEY, SECRET_KEY)


def get_file_content(filePath):
    """Return the raw bytes of *filePath* (the AIP client expects bytes)."""
    with open(filePath, 'rb') as fp:
        return fp.read()


pathDir = os.listdir(filepath)
for path in pathDir:
    pic = filepath + '/' + path
    pic3 = "pic3/" + path
    image = get_file_content(pic)

    # Face detection parameters: up to 10 faces, request the "age" field.
    options = {}
    options["max_face_num"] = 10
    options["face_fields"] = "age"
    # BUG FIX: the original issued an extra option-less client.detect(image)
    # call first and discarded its result — a wasted API request. One call
    # with options is sufficient.
    res = client.detect(image, options)

    try:
        result = res["result"]
    except KeyError:
        # BUG FIX: the original bare `except` printed the error response and
        # then fell through to use the undefined `result`, crashing with a
        # NameError. Log the raw response and skip this image instead.
        print(res)
        continue

    img = Image.open(pic3)
    img_d = ImageDraw.Draw(img)
    for f in result:
        face_rectangle = f["location"]
        img_d.rectangle((face_rectangle['left'], face_rectangle['top'],
                         face_rectangle['left'] + face_rectangle['width'],
                         face_rectangle['top'] + face_rectangle['height']), outline="red")
    img.save(pic3)
    img.close()
    print("sleep")
    time.sleep(2)  # throttle to stay under the API rate limit
|
[
"[email protected]"
] | |
1a4331aa03052d0136ac9424cf6c3d97e49dc9fc
|
4a2bd14eb54a5447b9b5c67df97d9237cd506bd7
|
/setup.py
|
61fde968a06933af9c27eabc838e71e919e782a8
|
[] |
no_license
|
GapData/bokehutils
|
85363af5d1575983fe980a7c5a269eab354d168d
|
deadedd7a8a2210beeb8cce226d7d566f84a6f11
|
refs/heads/master
| 2021-05-29T13:40:12.105135 | 2015-09-30T19:40:03 | 2015-09-30T19:40:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,608 |
py
|
# Copyright (C) 2015 by Per Unneberg
"""Setup script for bokehutils."""
import glob
import os

from setuptools import setup, find_packages

import versioneer

INSTALL_REQUIRES = [
    'sphinx>=1.3',
    'pytest',
    'pytest-cov>=1.8.1',
    'bokeh>=0.10.0',
]

# Hack for readthedocs builds: pull in the bootstrap theme there only.
# BUG FIX: the original wrapped this in try/except-pass around code that used
# `os` without ever importing it, so the NameError was silently swallowed and
# the theme was NEVER appended — even on readthedocs. `os` is now imported
# above and the masking try/except is gone.
if 'readthedocs' in os.path.dirname(os.path.realpath(__file__)):
    print("readthedocs in path name; assuming we're building docs @readthedocs")
    INSTALL_REQUIRES.append('sphinx-bootstrap-theme')

# Integrating pytest with setuptools: see
# https://pytest.org/latest/goodpractises.html#integrating-with-distutils-python-setup-py-test
# NOTE: deliberately shadows the setuptools `setup` imported above, as in the
# original script.
from distutils.core import setup, Command
# you can also import from setuptools


class PyTest(Command):
    """`python setup.py test` command that delegates to runtests.py."""

    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        # Run the test suite in a subprocess and propagate its exit status.
        import subprocess
        import sys
        errno = subprocess.call([sys.executable, 'runtests.py'])
        raise SystemExit(errno)


_version = versioneer.get_version()
_cmdclass = versioneer.get_cmdclass()

setup(name="bokehutils",
      version=_version,
      cmdclass=_cmdclass,
      author="Per Unneberg",
      author_email="[email protected]",
      description="Utility functions for working with bokeh plots",
      license="MIT",
      scripts=glob.glob('scripts/*.py'),
      install_requires=INSTALL_REQUIRES,
      packages=find_packages(exclude=['ez_setup', 'test*']),
      package_data={
          'bokehutils': [
              '_templates/*',
              'static/*',
          ],
      })
|
[
"[email protected]"
] | |
755ce3602c7d4642c4b0aca6891d7446594eb0b1
|
48fff0f472066dc6e5b5a15d16dcc33738e7a2c2
|
/train2/chatbot/broadcast.py
|
027badb7eddef0c0ba8411820cb20092bd9088f5
|
[] |
no_license
|
hasadna/OpenTrainCommunity
|
228a4f078829f6653e62db1294da01488be55b64
|
3c7a941b730160c40cc400ed94ed77ffa9189f0a
|
refs/heads/master
| 2023-01-23T14:39:10.462114 | 2020-06-08T11:36:27 | 2020-06-08T11:36:27 | 19,729,986 | 23 | 16 | null | 2023-01-13T22:57:43 | 2014-05-13T07:34:15 |
HTML
|
UTF-8
|
Python
| false | false | 1,036 |
py
|
import logging
from django.conf import settings
import telegram
from django.template.loader import render_to_string
from . import models
logger = logging.getLogger(__name__)
def broadcast_new_report_to_telegram_channel(report: models.ChatReport):
    """Render the 'new report' template for *report* and push it to the channel."""
    context = {'report': report}
    rendered = render_to_string('chatbot/new_report_message.html', context=context)
    _broadcast(rendered)
def broadcast_wrong_report_to_telegram_channel(report: models.ChatReport):
    """Render the 'wrong report' template for *report* and push it to the channel."""
    context = {'report': report}
    rendered = render_to_string('chatbot/wrong_report_message.html', context=context)
    _broadcast(rendered)
def _broadcast(message: str):
    """Send *message* (HTML) to the configured Telegram channel.

    Failures are logged, never raised — broadcasting is best-effort.
    """
    channel = '@{}'.format(settings.TELEGRAM_CHANNEL)
    try:
        bot = telegram.Bot(settings.TELEGRAM_TOKEN)
        bot.send_message(
            channel,
            message,
            parse_mode='html',
            disable_web_page_preview=True,
        )
        logger.info("Broadcasting to channel %s:\n%s", channel, message)
    except Exception:
        logger.exception('Failed to broadcast to channel')
|
[
"[email protected]"
] | |
8f51618dff92b6609f174e7d9e48161f653dd784
|
fdba533d128e5fcc237abacda12de9545ddce39c
|
/keras/optimizer_experimental/optimizer_lib.py
|
d180179dde8284a872030aa0a8c1237796c3da4d
|
[
"Apache-2.0"
] |
permissive
|
hhuang97/keras
|
5949d754dcaed47df011fb4218d6552251e265e2
|
f5fea878c271e38946c6681c1c2434e72d0ab977
|
refs/heads/master
| 2021-12-24T00:01:26.759181 | 2021-12-14T18:21:47 | 2021-12-14T18:22:26 | 90,206,289 | 0 | 1 | null | 2017-05-04T00:54:28 | 2017-05-04T00:54:28 | null |
UTF-8
|
Python
| false | false | 3,775 |
py
|
"""Library of helper classes of optimizer."""
class GradientsClipOption:
  """Gradients clip option for optimizer class.

  Holds the three gradient-clipping settings and validates them on
  construction.

  Attributes:
    clipnorm: float. If set, the gradient of each weight is individually
      clipped so that its norm is no higher than this value.
    clipvalue: float. If set, the gradient of each weight is clipped to be
      no higher than this value.
    global_clipnorm: float. If set, the gradient of all weights is clipped
      so that their global norm is no higher than this value.
  """

  def __init__(self, clipnorm=None, clipvalue=None, global_clipnorm=None):
    # Per-weight norm clipping and global norm clipping are mutually
    # exclusive.
    if not (clipnorm is None or global_clipnorm is None):
      raise ValueError(f"At most one of `clipnorm` and `global_clipnorm` can "
                       f"be set. Received: clipnorm={clipnorm}, "
                       f"global_clipnorm={global_clipnorm}.")
    # Falsy values (None or 0) skip the positivity checks, matching the
    # original validation semantics.
    if clipnorm and clipnorm <= 0:
      raise ValueError("Clipnorm should be a positive number, but received "
                       f"clipnorm={clipnorm}.")
    if global_clipnorm and global_clipnorm <= 0:
      raise ValueError("global_clipnorm should be a positive number, but "
                       f"received global_clipnorm={global_clipnorm}.")
    if clipvalue and clipvalue <= 0:
      raise ValueError("clipvalue should be a positive number, but received "
                       f"clipvalue={clipvalue}.")
    self.clipnorm = clipnorm
    self.global_clipnorm = global_clipnorm
    self.clipvalue = clipvalue

  def get_config(self):
    """Returns a serializable dict of the clip settings."""
    config = {}
    config["clipnorm"] = self.clipnorm
    config["global_clipnorm"] = self.global_clipnorm
    config["clipvalue"] = self.clipvalue
    return config
class EMAOption:
  # TODO(b/207532340): Add examples on how to use this EMAOption.
  """EMA option for optimizer class.

  Attributes:
    use_ema: boolean, default to False. If True, exponential moving average
      (EMA) is applied. EMA consists of computing an exponential moving
      average of the weights of the model (as the weight values change after
      each training batch), and periodically overwriting the weights with
      their moving average.
    ema_momentum: float, default to 0.99. Only used if `use_ema=True`. This
      is the momentum to use when computing the EMA of the model's weights:
      `new_average = ema_momentum * old_average + (1 - ema_momentum) *
      current_variable_value`.
    ema_overwrite_frequency: int or None, default to 100. Only used if
      `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations,
      we overwrite the model variable by its stored moving average. If None,
      we do not overwrite model variables in the middle of training, and
      users need to explicitly overwrite the model variable by calling
      `finalize_variable_update()`.
  """

  def __init__(self,
               use_ema=False,
               ema_momentum=0.99,
               ema_overwrite_frequency=100):
    self.use_ema = use_ema
    if use_ema:
      # EMA-related arguments are only validated when EMA is enabled.
      if ema_momentum > 1 or ema_momentum < 0:
        raise ValueError("`ema_momentum` must be in the range [0, 1]. "
                         f"Received: ema_momentum={ema_momentum}")
      freq = ema_overwrite_frequency
      if freq and (not isinstance(freq, int) or freq < 1):
        raise ValueError(
            "`ema_overwrite_frequency` must be an integer > 1 or None. "
            f"Received: ema_overwrite_frequency={freq}")
    self.ema_momentum = ema_momentum
    self.ema_overwrite_frequency = ema_overwrite_frequency

  def get_config(self):
    """Returns a serializable dict of the EMA settings."""
    return dict(
        use_ema=self.use_ema,
        ema_momentum=self.ema_momentum,
        ema_overwrite_frequency=self.ema_overwrite_frequency,
    )
|
[
"[email protected]"
] | |
f2744c340d84c765a7f38427e107dcf0e0339605
|
6ba72ce01fe8c08a10a7607536858cfd40b2dc16
|
/kirppuauth/migrations/0001_initial.py
|
a501f184d3eaff89282c7871370678d0bb60b7eb
|
[
"MIT"
] |
permissive
|
jlaunonen/kirppu
|
dcafc5537d325b2605daf98cdde4115a759dd2ce
|
fb694a0d1f827f4f4aae870589eb4e57ddf9bc97
|
refs/heads/master
| 2023-07-20T03:13:10.814349 | 2023-07-14T16:46:55 | 2023-07-14T16:46:55 | 18,244,187 | 0 | 6 |
MIT
| 2023-01-10T20:48:08 | 2014-03-29T15:36:30 |
Python
|
UTF-8
|
Python
| false | false | 2,830 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
    # Initial migration: creates the project's custom ``User`` model, which
    # mirrors Django's stock user fields and adds ``phone`` and
    # ``last_checked``.

    dependencies = [
        # Requires auth's Group/Permission tables for the M2M fields below.
        ('auth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Project-specific additions:
                ('phone', models.CharField(max_length=64)),
                ('last_checked', models.DateTimeField(auto_now_add=True)),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            bases=(models.Model,),
        ),
    ]
|
[
"[email protected]"
] | |
1a72065a811121f0dd9d16e8dd072b751fba6a6a
|
917a99fdf14097dd8001b5c98cc48c8716f8f969
|
/webElement/ass_service/syslogElement.py
|
8bffc624415e17423f022ce3b8b9a646794ed0be
|
[] |
no_license
|
isomper/testIsomptySecret
|
722eba4cbefe9495a3292d8d10e8ad9c4a34c8a7
|
968bbee05af730cfb7717f1531286f11a7f99cf3
|
refs/heads/master
| 2020-03-19T07:29:28.487913 | 2018-07-13T06:25:50 | 2018-07-13T06:25:50 | 136,118,583 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,408 |
py
|
#coding=utf-8
u'''
#文件名:
#被测软件版本号:V2.8.1
#作成人:李择优
#生成日期:2018/1/24
#模块描述:SYSLOG
#历史修改记录
#修改人:
#修改日期:
#修改内容:
'''
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
sys.path.append("/testIsompSecret/common/")
from _icommon import getElement,selectElement,frameElement,commonFun
from _cnEncode import cnEncode
from _log import log
sys.path.append("/testIsompSecret/webElement/ass_service/")
from ntpElement import NtpService
class Syslog:
    """Page object for the SYSLOG configuration screen (software V2.8.1).

    Comments translated to English from the original Chinese. The class
    attributes below are the DOM element ids used on the syslog page.
    """
    # Running-state toggle switch
    SWITCH = "btn_qh"
    # Server IP input
    HOST = "host"
    # Server port input
    PORT = "port"
    # Transport protocol <select>
    PROTOCOL = "protocol"
    # Ident string input
    IDENT = "ident"
    # Facility <select>
    FACILITY = "facility"
    # "Test" button
    TEST_BUTTON = "test_syslog"
    # "Save" button
    SAVE_BUTTON = "save_syslog"

    def __init__(self, driver):
        self.driver = driver
        self.log = log()
        self.getElem = getElement(driver)
        self.select = selectElement(driver)
        self.frameElem = frameElement(driver)
        self.cmf = commonFun(driver)
        self.ntp = NtpService(driver)
        self.cnEnde = cnEncode()

    def set_common_func(self, var_text, value):
        """Clear the input element with id ``value`` and type ``var_text``.

        parameters:
            var_text : text to enter
            value    : element id used to locate the input
        """
        try:
            revar_text = self.cnEnde.is_float(var_text)
            var_elem = self.getElem.find_element_with_wait_EC("id", value)
            var_elem.clear()
            var_elem.send_keys(revar_text)
        except Exception as e:
            # Bug fix: the original handler referenced `revar_text`, which is
            # unbound when is_float() itself raises, turning any error into a
            # NameError. Report the raw input instead.
            print("set user common text error: " + str(var_text) + str(e))

    def set_ip(self, setIp):
        """Enter the syslog server IP.

        parameters:
            setIp : IP address text
        """
        return self.set_common_func(setIp, self.HOST)

    def set_port(self, setPort):
        """Enter the syslog server port.

        parameters:
            setPort : port number text
        """
        return self.set_common_func(setPort, self.PORT)

    def set_protocol(self, value):
        """Select the transport protocol.

        Parameters:
            value: the <option> value attribute; "udp" for UDP, "tcp" for
                   TCP, "nix_syslog" for nix_syslog.
        """
        valu = self.cnEnde.is_float(value)
        self.frameElem.from_frame_to_otherFrame("rigthFrame")
        selem = self.getElem.find_element_with_wait_EC("id", self.PROTOCOL)
        self.select.select_element_by_value(selem, valu)

    def set_ident(self, setIdent):
        """Enter the ident string.

        parameters:
            setIdent : ident text
        """
        return self.set_common_func(setIdent, self.IDENT)

    def set_facility(self, value):
        """Select the facility.

        Parameters:
            value: the <option> value attribute, e.g. "32" for facility.
        """
        valu = self.cnEnde.is_float(value)
        self.frameElem.from_frame_to_otherFrame("rigthFrame")
        selem = self.getElem.find_element_with_wait_EC("id", self.FACILITY)
        self.select.select_element_by_value(selem, valu)

    def test_button(self):
        """Click the "Test" button."""
        self.frameElem.from_frame_to_otherFrame("rigthFrame")
        self.getElem.find_element_wait_and_click_EC("id", self.TEST_BUTTON)

    def save_button(self):
        """Click the "Save" button."""
        self.frameElem.from_frame_to_otherFrame("rigthFrame")
        self.getElem.find_element_wait_and_click_EC("id", self.SAVE_BUTTON)

    def change_switch_status(self):
        """Toggle the running-state switch.

        If the switch is currently on, the page is re-entered via the left
        menu and the switch is clicked twice; otherwise a single click turns
        it on.
        """
        self.frameElem.from_frame_to_otherFrame("rigthFrame")
        try:
            button_elem = self.getElem.find_element_with_wait_EC("id", self.SWITCH)
            class_attr = button_elem.get_attribute("class")
            on_status = "switch_on"
            if class_attr == on_status:
                self.ntp.click_left_moudle(1)
                self.frameElem.from_frame_to_otherFrame("rigthFrame")
                button_elem = self.getElem.find_element_with_wait_EC("id", self.SWITCH)
                time.sleep(1)
                # NOTE(review): two consecutive clicks toggle the switch off
                # and back on, as in the original flow — confirm intended.
                button_elem.click()
                button_elem.click()
            else:
                button_elem.click()
        except Exception as e:
            print("Change button status error: " + str(e))
|
[
"[email protected]"
] | |
c35de16dd47821fb8bd0c74d9ed293dc5ee70956
|
34ef83114e02b173bd2d55eb53ad399e738a8e3c
|
/django/code_test/teka/teka/settings.py
|
ca05f4f1f4426561e47f91918dae0a82be1e722d
|
[] |
no_license
|
vavilon/Python3
|
e976a18eb301e4953696d1e3f4730ed890da015a
|
8c79729747ce51d60ad685e6a2e58292954ed7eb
|
refs/heads/master
| 2023-01-09T13:44:37.408601 | 2018-01-25T22:41:14 | 2018-01-25T22:41:14 | 100,892,055 | 0 | 1 | null | 2022-12-26T20:29:27 | 2017-08-20T22:23:06 |
Python
|
UTF-8
|
Python
| false | false | 3,232 |
py
|
"""
Django settings for teka project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7&(=23s0&zbaks8=)r=a=5xb^mz61l1&m2&=to8_j*2h^c0ld9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'apps',
'bootstrap3',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'teka.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teka.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static")),
|
[
"[email protected]"
] | |
44a0a7737d19c1e6e47ddf5525d0d632188aabd2
|
10f397d1fe8c68dc5af033e0b88cb99be56bc4f2
|
/Statistics/models.py
|
28a3d7a4f4138e1e4c55db79c7ee134721781ded
|
[] |
no_license
|
wsqy/AccountRandmomAmount
|
db3905bd425c074935c256aab62f437fe7cb0ffc
|
b69bc1269a666c48fe954ac423a25d111e01176b
|
refs/heads/master
| 2023-06-29T12:49:35.947729 | 2020-06-30T03:27:05 | 2020-06-30T03:27:05 | 271,683,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,592 |
py
|
from django.db import models
# from django.conf import settings
from django.utils import timezone
from Account.models import Buyer, Seller, Company
class DayBuyer(models.Model):
    """Daily total transaction amount per buyer (买方日交易额总量表).

    Amounts are in units of 万元 (10,000 CNY), per the field verbose_name.
    """
    date = models.DateField(verbose_name='任务日期', default=timezone.now)
    buyer = models.ForeignKey(Buyer, on_delete=models.PROTECT, verbose_name='买方')
    amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '买方日交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.buyer, self.date, self.amount_total)
class DaySeller(models.Model):
    """Daily total transaction amount per seller (卖方日交易额总量表).

    Amounts are in units of 万元 (10,000 CNY), per the field verbose_name.
    """
    date = models.DateField(verbose_name='任务日期', default=timezone.now)
    seller = models.ForeignKey(Seller, on_delete=models.PROTECT, verbose_name='卖方')
    amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '卖方日交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.seller, self.date, self.amount_total)
class DayCompany(models.Model):
    """Daily total transaction amount per group subsidiary (集团子公司日交易额总量表).

    Amounts are in units of 万元 (10,000 CNY), per the field verbose_name.
    """
    date = models.DateField(verbose_name='任务日期', default=timezone.now)
    company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name='集团子公司')
    amount_total = models.IntegerField(verbose_name='日总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '集团子公司日交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.company, self.date, self.amount_total)
class MouthBuyer(models.Model):
    """Monthly total transaction amount per buyer (买方月交易额总量表).

    NOTE(review): "Mouth" is presumably a typo for "Month"; the class name is
    kept since renaming it would require a schema migration.
    """
    # Month identifier, e.g. 'YYYYMM' — TODO confirm format against writers.
    date = models.CharField(max_length=8, verbose_name='月份')
    buyer = models.ForeignKey(Buyer, on_delete=models.PROTECT, verbose_name='买方')
    amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '买方月交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.buyer, self.date, self.amount_total)
class MouthSeller(models.Model):
    """Monthly total transaction amount per seller (卖方月交易额总量表).

    NOTE(review): "Mouth" is presumably a typo for "Month"; the class name is
    kept since renaming it would require a schema migration.
    """
    # Month identifier, e.g. 'YYYYMM' — TODO confirm format against writers.
    date = models.CharField(max_length=8, verbose_name='月份')
    seller = models.ForeignKey(Seller, on_delete=models.PROTECT, verbose_name='卖方')
    amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '卖方月交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.seller, self.date, self.amount_total)
class MouthCompany(models.Model):
    """Monthly total transaction amount per group subsidiary (集团子公司月交易额总量表).

    NOTE(review): "Mouth" is presumably a typo for "Month"; the class name is
    kept since renaming it would require a schema migration.
    """
    # Month identifier, e.g. 'YYYYMM' — TODO confirm format against writers.
    date = models.CharField(max_length=8, verbose_name='月份')
    company = models.ForeignKey(Company, on_delete=models.PROTECT, verbose_name='集团子公司')
    amount_total = models.IntegerField(verbose_name='月总交易金额(万元)', default=0)

    class Meta:
        verbose_name = '集团子公司月交易额总量'
        verbose_name_plural = verbose_name

    def __str__(self):
        return '{}于{}总交易额{}万元'.format(self.company, self.date, self.amount_total)
|
[
"[email protected]"
] | |
624253db8803ba4e60460ddc4c11d392b0bac60d
|
297497957c531d81ba286bc91253fbbb78b4d8be
|
/third_party/libwebrtc/tools/grit/grit/tool/postprocess_unittest.py
|
21ca5e2f774610e4a7efa36f398ec1fb87b4cddc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
marco-c/gecko-dev-comments-removed
|
7a9dd34045b07e6b22f0c636c0a836b9e639f9d3
|
61942784fb157763e65608e5a29b3729b0aa66fa
|
refs/heads/master
| 2023-08-09T18:55:25.895853 | 2023-08-01T00:40:39 | 2023-08-01T00:40:39 | 211,297,481 | 0 | 0 |
NOASSERTION
| 2019-09-29T01:27:49 | 2019-09-27T10:44:24 |
C++
|
UTF-8
|
Python
| false | false | 1,705 |
py
|
'''Unit test that checks postprocessing of files.
Tests postprocessing by having the postprocessor
modify the grd data tree, changing the message name attributes.
'''
from __future__ import print_function
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import grit.tool.postprocess_interface
from grit.tool import rc2grd
class PostProcessingUnittest(unittest.TestCase):
  """Checks that a post processor can rewrite message names in the grd tree."""

  def testPostProcessing(self):
    # Minimal RC input with two messages whose names start with DUMMY.
    rctext = '''STRINGTABLE
BEGIN
  DUMMY_STRING_1         "String 1"
  // Some random description
  DUMMY_STRING_2        "This text was added during preprocessing"
END
    '''
    tool = rc2grd.Rc2Grd()
    class DummyOpts(object):
      verbose = False
      extra_verbose = False
    tool.o = DummyOpts()
    tool.post_process = 'grit.tool.postprocess_unittest.DummyPostProcessor'
    # Fix: the original '.\resource.rc' embedded a carriage return, because
    # \r is an escape sequence in a non-raw string.
    result = tool.Process(rctext, r'.\resource.rc')
    # failUnless is a deprecated alias (removed in Python 3.12); assertEqual
    # also reports the offending value on failure.
    self.assertEqual(
        result.children[2].children[2].children[0].attrs['name'],
        'SMART_STRING_1')
    self.assertEqual(
        result.children[2].children[2].children[1].attrs['name'],
        'SMART_STRING_2')
class DummyPostProcessor(grit.tool.postprocess_interface.PostProcessor):
  '''
  Post processing replaces all message name attributes containing "DUMMY" to
  "SMART".
  '''

  def Process(self, rctext, rcpath, grdnode):
    """Rewrites DUMMY* message names under grdnode to SMART* and returns it."""
    dummy_re = re.compile(r'(DUMMY)(.*)')
    # The messages live two levels down in the release node.
    message_nodes = grdnode.children[2].children[2].children
    for message_node in message_nodes:
      match = dummy_re.search(message_node.attrs['name'])
      if match:
        # As in the original, any prefix before DUMMY is dropped.
        message_node.attrs['name'] = 'SMART' + match.group(2)
    return grdnode
if __name__ == '__main__':
  # Allow running this test module directly.
  unittest.main()
|
[
"[email protected]"
] | |
f9520500f015b1f2f85946de085b6dfb1d169031
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/documented/python_modular/distance_sparseeuclidean_modular.py
|
2e8e098e832784cb39eb330eae2d7c48c0f9148f
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069 | 2015-10-27T21:43:20 | 2015-10-27T21:43:20 | 45,059,082 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
# In this example a sparse euclidean distance is computed for sparse toy data.
from tools.load import LoadMatrix
lm=LoadMatrix()

# Dense toy data loaded from disk; converted to sparse features below.
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')

# Two identical parameter sets, as expected by the shogun example harness.
parameter_list = [[traindat,testdat],[traindat,testdat]]
def distance_sparseeuclidean_modular (fm_train_real=traindat,fm_test_real=testdat):
    """Build sparse features from dense toy data and compute Euclidean
    distance matrices for train/train and train/test."""
    from shogun.Features import RealFeatures, SparseRealFeatures
    from shogun.Distance import SparseEuclidianDistance

    # Wrap the dense matrices, then convert each to sparse features.
    train_feats = SparseRealFeatures()
    train_feats.obtain_from_simple(RealFeatures(fm_train_real))
    test_feats = SparseRealFeatures()
    test_feats.obtain_from_simple(RealFeatures(fm_test_real))

    dist = SparseEuclidianDistance(train_feats, train_feats)
    dm_train = dist.get_distance_matrix()
    dist.init(train_feats, test_feats)
    dm_test = dist.get_distance_matrix()

    return dist, dm_train, dm_test
if __name__=='__main__':
    # Run the example with the first parameter set when invoked directly.
    print('SparseEuclidianDistance')
    distance_sparseeuclidean_modular(*parameter_list[0])
|
[
"prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305"
] |
prosen@305cdda6-5ce1-45b3-a98d-dfc68c8b3305
|
c02054b0e7144f761e863a5a249d40a75b1c8cc5
|
6a609bc67d6a271c1bd26885ce90b3332995143c
|
/exercises/exhaustive-search/combinations_ii.py
|
46561751342eaead6317019aa18b093dfc811644
|
[] |
no_license
|
nahgnaw/data-structure
|
1c38b3f7e4953462c5c46310b53912a6e3bced9b
|
18ed31a3edf20a3e5a0b7a0b56acca5b98939693
|
refs/heads/master
| 2020-04-05T18:33:46.321909 | 2016-07-29T21:14:12 | 2016-07-29T21:14:12 | 44,650,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 668 |
py
|
# -*- coding: utf-8 -*-
"""
Given a list of integer lists. Each time take an item from each list. Find all the combinations.
"""
class Solution(object):
    def combine(self, arr):
        """Return every combination made by taking one item from each list.

        :type arr: List[List[int]]   (original docstring said 'init' — typo)
        :rtype: List[List[int]]
        """
        def dfs(path, pos):
            # One item chosen from every list: record the combination.
            if len(path) == len(arr):
                results.append(path)
                return
            # Iterate items directly instead of Python-2-only xrange over
            # indices; this also works unchanged on Python 3.
            for item in arr[pos]:
                dfs(path + [item], pos + 1)

        results = []
        dfs([], 0)
        return results
if __name__ == '__main__':
    arr = [[1,2], [3,4], [5,6,7]]
    sol = Solution()
    # Python 2 print statement; under Python 3 this would be print(...).
    print sol.combine(arr)
|
[
"[email protected]"
] | |
75b8cf6fde95fbd9a46ab0c2c5277b706714856b
|
ce6538b5b7da162c1c690a346e7ec9ae0a6291f3
|
/wild_cat_zoo/project/cheetah.py
|
92f02831fe0904412100be1467c39f16c02a2ad7
|
[] |
no_license
|
DaniTodorowa/Softuni
|
391f13dd61a6d16cd48ee06e9b35b2fd931375df
|
f7c875fda4e13ec63152671509aaa6eca29d7f50
|
refs/heads/master
| 2022-11-25T23:34:49.744315 | 2020-08-02T08:23:44 | 2020-08-02T08:23:44 | 278,938,559 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 298 |
py
|
class Cheetah:
    """A cheetah zoo animal with a fixed daily food requirement."""

    def __init__(self, name, gender, age):
        self.name = name
        self.gender = gender
        self.age = age

    @staticmethod
    def get_needs():
        # Bug fix: the original declared @staticmethod yet took a `self`
        # parameter, so `instance.get_needs()` raised TypeError (missing
        # positional argument). A static method takes no `self`.
        return 60

    def __repr__(self):
        return f"Name: {self.name}, Age: {self.age}, Gender: {self.gender}"
|
[
"[email protected]"
] | |
e9b4572ab1f8e1c87a7d0030bcf82691a6a035e5
|
880103c6f9bdc9d5a7a8633c3e4d108c964e9b89
|
/devil/devil/android/tools/device_monitor.py
|
2b3503174c8a364463e242fa2f450a76e5b3047f
|
[
"BSD-3-Clause"
] |
permissive
|
ateleshev/catapult
|
c3645f0fb0d4e929b5baebea33307b765225cb2f
|
faf60eb37f8b9828eddb30c8397b333eb1d89204
|
refs/heads/master
| 2021-01-22T19:08:47.140355 | 2017-03-16T01:01:54 | 2017-03-16T01:01:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,936 |
py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Launches a daemon to monitor android device temperatures & status.
This script will repeatedly poll the given devices for their temperatures and
status every 60 seconds and dump the stats to file on the host.
"""
import argparse
import collections
import json
import logging
import logging.handlers
import os
import re
import socket
import sys
import time
if __name__ == '__main__':
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', '..')))
from devil import devil_env
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_utils
# Various names of sensors used to measure cpu temp
CPU_TEMP_SENSORS = [
    # most nexus devices
    'tsens_tz_sensor0',
    # android one
    'mtktscpu',
    # nexus 9
    'CPU-therm',
]

# Bump this when the JSON schema of the status file changes.
DEVICE_FILE_VERSION = 1
# TODO(bpastene): Remove the old file once sysmon has been updated to read the
# new status file.
DEVICE_FILES = [
    os.path.join(os.path.expanduser('~'), 'android_device_status.json'),
    os.path.join(
        os.path.expanduser('~'), '.android',
        '%s__android_device_status.json' % socket.gethostname().split('.')[0]
    ),
]

# Captures the numeric kB value of a /proc/meminfo line.
MEM_INFO_REGEX = re.compile(r'.*?\:\s*(\d+)\s*kB')  # ex: 'MemTotal:   185735 kB'
def get_device_status(device):
  """Polls the given device for various info.

  Returns: A dict of the following format:
    {
      'battery': {
        'level': 100,
        'temperature': 123
      },
      'build': {
        'build.id': 'ABC12D',
        'product.device': 'chickenofthesea'
      },
      'mem': {
        'avail': 1000000,
        'total': 1234567,
      },
      'processes': 123,
      'state': 'good',
      'temp': {
        'some_sensor': 30
      },
      'uptime': 1234.56,
    }
  """
  status = collections.defaultdict(dict)

  # Battery
  battery = battery_utils.BatteryUtils(device)
  battery_info = battery.GetBatteryInfo()
  try:
    level = int(battery_info.get('level'))
  except (KeyError, TypeError, ValueError):
    level = None
  # NOTE(review): a parsed level of 0 is falsy and is therefore dropped
  # along with out-of-range values — confirm that is intended.
  if level and level >= 0 and level <= 100:
    status['battery']['level'] = level
  try:
    temperature = int(battery_info.get('temperature'))
  except (KeyError, TypeError, ValueError):
    temperature = None
  if temperature:
    status['battery']['temperature'] = temperature

  # Build
  status['build']['build.id'] = device.build_id
  status['build']['product.device'] = device.build_product

  # Memory
  mem_info = ''
  try:
    mem_info = device.ReadFile('/proc/meminfo')
  except device_errors.AdbShellCommandFailedError:
    logging.exception('Unable to read /proc/meminfo')
  for line in mem_info.splitlines():
    match = MEM_INFO_REGEX.match(line)
    if match:
      try:
        value = int(match.group(1))
      except ValueError:
        continue
      key = line.split(':')[0].strip()
      if 'MemTotal' == key:
        status['mem']['total'] = value
      elif 'MemFree' == key:
        status['mem']['free'] = value

  # Process
  try:
    lines = device.RunShellCommand('ps', check_return=True)
    status['processes'] = len(lines) - 1  # Ignore the header row.
  except device_errors.AdbShellCommandFailedError:
    logging.exception('Unable to count process list.')

  # CPU Temps
  # Find a thermal sensor that matches one in CPU_TEMP_SENSORS and read its
  # temperature.
  files = []
  try:
    files = device.RunShellCommand(
        'grep -lE "%s" /sys/class/thermal/thermal_zone*/type' % '|'.join(
            CPU_TEMP_SENSORS), check_return=True)
  except device_errors.AdbShellCommandFailedError:
    logging.exception('Unable to list thermal sensors.')
  for f in files:
    try:
      sensor_name = device.ReadFile(f).strip()
      temp = float(device.ReadFile(f[:-4] + 'temp').strip())  # s/type^/temp
      status['temp'][sensor_name] = temp
    except (device_errors.AdbShellCommandFailedError, ValueError):
      logging.exception('Unable to read thermal sensor %s', f)

  # Uptime
  try:
    uptimes = device.ReadFile('/proc/uptime').split()
    status['uptime'] = float(uptimes[0])  # Take the first field (actual uptime)
  except (device_errors.AdbShellCommandFailedError, ValueError):
    logging.exception('Unable to read /proc/uptime')

  # Reaching this point means the device answered our queries.
  status['state'] = 'available'
  return status
def get_all_status(blacklist):
  """Builds the full status dict for all devices.

  Args:
    blacklist: A device_blacklist.Blacklist, or None.

  Returns:
    A dict with 'version', 'devices' (serial -> status dict) and 'timestamp'.
  """
  status_dict = {
      'version': DEVICE_FILE_VERSION,
      'devices': {},
  }
  healthy_devices = device_utils.DeviceUtils.HealthyDevices(blacklist)
  parallel_devices = device_utils.DeviceUtils.parallel(healthy_devices)
  # Poll all healthy devices concurrently; results align with
  # healthy_devices order, so zip pairs them correctly.
  results = parallel_devices.pMap(get_device_status).pGet(None)

  status_dict['devices'] = {
      device.serial: result for device, result in zip(healthy_devices, results)
  }

  if blacklist:
    # Blacklisted devices only get a 'state' entry carrying the reason.
    for device, reason in blacklist.Read().iteritems():  # Python 2 dict API
      status_dict['devices'][device] = {
          'state': reason.get('reason', 'blacklisted')}

  status_dict['timestamp'] = time.time()
  return status_dict
def main(argv):
  """Launches the device monitor.

  Polls the devices for their battery and cpu temperatures and scans the
  blacklist file every 60 seconds and dumps the data to DEVICE_FILE.
  """
  parser = argparse.ArgumentParser(
      description='Launches the device monitor.')
  parser.add_argument('--adb-path', help='Path to adb binary.')
  parser.add_argument('--blacklist-file', help='Path to device blacklist file.')
  args = parser.parse_args(argv)

  # Log to a bounded set of rotating files so the long-running daemon
  # cannot fill the disk.
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  handler = logging.handlers.RotatingFileHandler(
      '/tmp/device_monitor.log', maxBytes=10 * 1024 * 1024, backupCount=5)
  fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s',
                          datefmt='%y%m%d %H:%M:%S')
  handler.setFormatter(fmt)
  logger.addHandler(handler)

  devil_dynamic_config = devil_env.EmptyConfig()
  if args.adb_path:
    devil_dynamic_config['dependencies'].update(
        devil_env.LocalConfigItem(
            'adb', devil_env.GetPlatform(), args.adb_path))
  devil_env.config.Initialize(configs=[devil_dynamic_config])

  blacklist = (device_blacklist.Blacklist(args.blacklist_file)
               if args.blacklist_file else None)

  logging.info('Device monitor running with pid %d, adb: %s, blacklist: %s',
               os.getpid(), args.adb_path, args.blacklist_file)
  # Poll forever; one status snapshot per minute.
  while True:
    start = time.time()
    status_dict = get_all_status(blacklist)
    # NOTE: 'wb' with json.dump of str is Python 2 semantics (this file uses
    # iteritems above); under Python 3 this would need mode 'w'.
    for device_file in DEVICE_FILES:
      with open(device_file, 'wb') as f:
        json.dump(status_dict, f, indent=2, sort_keys=True)
    logging.info('Got status of all devices in %.2fs.', time.time() - start)
    time.sleep(60)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
|
[
"[email protected]"
] | |
4bfb6408fe3122c020282667a4a2da27d9bce309
|
ea2cf796332879d86561f80882da93b672966448
|
/configs/csl/rotated_retinanet_obb_csl_gaussian_r50_adamw_fpn_1x_dior_le90.py
|
06b08881da2ee813a6c9b31343d7fc13168ee2d2
|
[
"Apache-2.0"
] |
permissive
|
yangxue0827/h2rbox-mmrotate
|
968c34adf22eca073ab147b670226884ea80ac61
|
cfd7f1fef6ae4d4e17cb891d1ec144ece8b5d7f5
|
refs/heads/main
| 2023-05-23T10:02:58.344148 | 2023-02-14T05:28:38 | 2023-02-14T05:28:38 | 501,580,810 | 68 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
# Rotated RetinaNet (OBB) with CSL angle classification: gaussian window,
# backbone/schedule/dataset inherited from the base config, `le90` angle
# definition on DIOR.
_base_ = \
    ['../rotated_retinanet/rotated_retinanet_hbb_r50_adamw_fpn_1x_dior_oc.py']

angle_version = 'le90'

model = dict(
    bbox_head=dict(
        type='CSLRRetinaHead',
        # CSL encodes the rotation angle as a circular classification target.
        angle_coder=dict(
            type='CSLCoder',
            angle_version=angle_version,
            omega=4,
            window='gaussian',
            radius=3),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0),
        # Separate focal-style loss on the CSL angle bins.
        loss_angle=dict(
            type='SmoothFocalLoss', gamma=2.0, alpha=0.25, loss_weight=0.8)))
|
[
"[email protected]"
] | |
628776070784ddd0d523624b6c9462eea95ff6bf
|
0d8486c1d55c40bebea7c5428930f18165d2d0e9
|
/tests/asp/AllAnswerSets/aggregates/count.example4.test.py
|
76a4479b403d3b6ffc18810437b0f4bc40d563f8
|
[
"Apache-2.0"
] |
permissive
|
bernardocuteri/wasp
|
6f81bf6aa8fb273c91bbf68ecce4ecb195a55953
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
refs/heads/master
| 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 |
Apache-2.0
| 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null |
UTF-8
|
Python
| false | false | 625 |
py
|
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 2 1 7 8
1 7 2 1 6 8
1 8 0 0
1 9 2 1 10 11
1 10 2 1 9 11
1 11 0 0
1 12 2 1 13 14
1 13 2 1 12 14
1 14 0 0
1 15 2 1 16 17
1 16 2 1 15 17
1 17 0 0
2 18 2 0 2 12 6
1 1 1 0 18
2 19 2 0 2 15 9
1 1 1 0 19
1 20 1 0 15
1 20 1 0 12
1 21 1 0 9
1 21 1 0 6
1 1 1 1 21
1 1 1 1 20
0
20 ad(a)
21 ad(b)
6 a(b,2)
9 a(b,1)
12 a(a,2)
15 a(a,1)
4 c(a)
5 c(b)
7 na(b,2)
10 na(b,1)
13 na(a,2)
16 na(a,1)
2 b(1)
3 b(2)
0
B+
0
B-
1
0
1
"""
output = """
{b(1), b(2), c(a), c(b), a(b,2), na(b,1), na(a,2), a(a,1), ad(a), ad(b)}
{b(1), b(2), c(a), c(b), na(b,2), a(b,1), a(a,2), na(a,1), ad(a), ad(b)}
"""
|
[
"[email protected]"
] | |
f79c9ac3da69afb6f18aca5cfd8be28254cb7292
|
811b67fca9efd7b6a2b95500dfdfbd981a2be9a7
|
/Machine Learning For Finance/Lesson5_2.py
|
c7d259b4d24c65ca989844a257248ee28f058f98
|
[] |
no_license
|
inaheaven/Finance_Tool
|
a978ae534dca646088a12b58e00a8ce995b08920
|
adeaf05307dc1d4af002bea3d39c3273e93af347
|
refs/heads/master
| 2020-05-23T13:41:33.912276 | 2019-07-03T02:06:28 | 2019-07-03T02:06:28 | 186,781,682 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,658 |
py
|
import pandas as pd
import matplotlib.pyplot as plt
import os
def symbol_to_path(symbol, base_dir="Data/data"):
return os.path.join(base_dir, "{}.csv".format(str(symbol)))
def get_data(symbols, dates):
    """Load 'Adj Close' prices for *symbols* into one DataFrame indexed by *dates*.

    SPY is always loaded (it serves as the trading-day reference) and rows
    where SPY has no price (non-trading days) are dropped.

    :param symbols: iterable of ticker symbols; NOT mutated by this call.
    :param dates: DatetimeIndex used as the frame's index.
    :return: DataFrame with one column per symbol.
    """
    df = pd.DataFrame(index=dates)
    # Work on a copy: the original inserted 'SPY' into the caller's list,
    # mutating the argument as a side effect.
    symbols = list(symbols)
    if 'SPY' not in symbols:
        symbols.insert(0, 'SPY')
    for symbol in symbols:
        df_tmp = pd.read_csv(symbol_to_path(symbol),
                             usecols=['Date', 'Adj Close'],
                             index_col='Date', parse_dates=True,
                             na_values=['NaN'])
        df_tmp = df_tmp.rename(columns={'Adj Close': symbol})
        df = df.join(df_tmp)
    # Keep only trading days, i.e. rows where the SPY reference has a price.
    df = df.dropna(subset=['SPY'])
    print(df)
    return df
def normalize_data(df):
    """Normalize prices so every column starts at 1.0 on the first row."""
    # df.ix was deprecated in pandas 0.20 and removed in 1.0; .iloc is the
    # positional equivalent of the original df.ix[0, :].
    return df / df.iloc[0, :]
def plot_data(df, title="STOCK PRICE"):
    """Plot *df* with labeled axes and show the figure."""
    axes = df.plot(title=title, fontsize=12)
    axes.set_xlabel("Date")
    axes.set_ylabel("Price")
    plt.show()
def plot_selected(df, columns, start_index, end_index):
    """Plot *columns* of *df* over the label range [start_index, end_index]."""
    # df.ix was removed from pandas; .loc is the label-based equivalent.
    plot_data(df.loc[start_index: end_index, columns], title="STOCK PRICE")
def test_run():
    """Load 2012 SPY prices and plot them with a 20-day rolling mean."""
    dates = pd.date_range('2012-01-01', '2012-12-31')
    symbols = ['SPY']
    df = get_data(symbols, dates)
    print("df", df)
    # df = normalize_data(df)
    # plot_selected(df, ['GOOG', 'SPY', 'IBM', 'GLD'], '2010-01-01', '2010-05-01')
    # print("MEAN", df.mean())
    # print("MEDIAN", df.median())
    # print("STD", df.std())
    ax = df['SPY'].plot(title="SPY ROLLING MEAN", label='SPY')
    # 20-trading-day rolling mean overlaid on the raw price series.
    rm_SPY = df['SPY'].rolling(20).mean()
    rm_SPY.plot(label="Rolling mean", ax = ax)
    ax.set_xlabel("DATE")
    ax.set_ylabel("PRICE")
    ax.legend(loc="upper left")
    plt.show()
if __name__ == '__main__':
    test_run()
|
[
"[email protected]"
] | |
a93164796eaa571c517dc3a2993e7377c297e581
|
faa54203033398d264c75814b899d253edf71c9b
|
/pyflux/gas/__init__.py
|
dfb1ab71874ada0b2da30c57785c375b3036e9ae
|
[
"BSD-3-Clause"
] |
permissive
|
th3nolo/pyflux
|
4a9e646f9ee0e650676b82134d3810c0a98d8963
|
21bc18ddeabce277e4485e75962e5fa5ff3a46ea
|
refs/heads/master
| 2020-12-02T15:08:53.257900 | 2016-07-24T17:47:59 | 2016-07-24T17:47:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 258 |
py
|
from .gas import GAS
from .gasx import GASX
from .gasmodels import GASPoisson, GASNormal, GASLaplace, GASt, GASSkewt, GASExponential
from .gasllm import GASLLEV
from .gasllt import GASLLT
from .gasreg import GASReg
from .scores import BetatScore, SkewtScore
|
[
"[email protected]"
] | |
8dc0a87dd10e8d1d8503e312210b327d6098d695
|
6ab31b5f3a5f26d4d534abc4b197fe469a68e8e5
|
/katas/beta/only_readable_once_list.py
|
b075f9fe98d07c99e321b7906fe37c77f51fe6d7
|
[
"MIT"
] |
permissive
|
mveselov/CodeWars
|
e4259194bfa018299906f42cd02b8ef4e5ab6caa
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
refs/heads/master
| 2021-06-09T04:17:10.053324 | 2017-01-08T06:36:17 | 2017-01-08T06:36:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 377 |
py
|
class SecureList(object):
    """A list whose contents can only be observed once.

    Indexing removes and returns the element; rendering the list as a
    string (via repr or str) empties it entirely. Only len() is
    non-destructive.
    """

    def __init__(self, lst):
        self.lst = list(lst)

    def __getitem__(self, item):
        # Reading an element also removes it from the list.
        return self.lst.pop(item)

    def __len__(self):
        return len(self.lst)

    def __repr__(self):
        drained, self.lst = self.lst, []
        return repr(drained)

    def __str__(self):
        drained, self.lst = self.lst, []
        return str(drained)
|
[
"[email protected]"
] | |
905b497b63c36d0df8721fcbe09be8f5bcd07c97
|
7e409f6490957f96a1ea17161a3b791256a2ec4f
|
/31 - Form Field dan Option/mywebsite/forms.py
|
0019079b233c0b91fd26b93c04438c8e64622c04
|
[] |
no_license
|
CuteCatCrying/Django
|
9fb8fd06f4793ef754e6e3dfd63b9caad03317f8
|
563119a66c81bf57616f62855bc78f448204ba83
|
refs/heads/master
| 2022-04-02T14:13:23.700165 | 2020-02-05T09:34:46 | 2020-02-05T09:34:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,360 |
py
|
from django import forms
class FormField(forms.Form):
    """Demo form exercising one instance of every common Django form field."""
    # python data type
    integer_field = forms.IntegerField(required=False)
    decimal_field = forms.DecimalField(required=False)
    float_field = forms.FloatField(required=False)
    boolean_field = forms.BooleanField(required=False)
    char_field = forms.CharField(max_length=10, required=False)
    # string input
    email_field = forms.EmailField(required=False)
    # NOTE(review): r'(P?<test>)' compiles but matches an optional 'P'
    # followed by the literal text '<test>'; it looks like a typo for a
    # named group r'(?P<test>...)' -- confirm the intended pattern.
    regex_field = forms.RegexField(regex=r'(P?<test>)')
    slug_field = forms.SlugField()
    url_field = forms.URLField()
    ip_field = forms.GenericIPAddressField()
    # select input
    PILIHAN = (
        ('nilai1', 'Pilihan1'),
        ('nilai2', 'Pilihan2'),
        ('nilai3', 'Pilihan3'),
    )
    choice_field = forms.ChoiceField(choices=PILIHAN)
    multi_choice_field = forms.MultipleChoiceField(choices=PILIHAN)
    multi_typed_field = forms.TypedMultipleChoiceField(choices=PILIHAN)
    null_boolean_field = forms.NullBooleanField()
    # date time
    date_field = forms.DateField()
    datetime_field = forms.DateTimeField()
    duration_field = forms.DurationField()
    time_field = forms.TimeField()
    splidatetime_field = forms.SplitDateTimeField()
    # file input
    file_field = forms.FileField()
    image_field = forms.ImageField()
|
[
"[email protected]"
] | |
ebe79df1ad52ea7869c585362ad2a6af388c05ff
|
eb4877802021fa9f20962a7cfb176239dfb1e169
|
/py/testdir_single_jvm/test_GBMGrid_basic.py
|
bc789ffa818ccfead0bc492810e7f8731686b00e
|
[
"Apache-2.0"
] |
permissive
|
jinbochen/h2o
|
bd6f2b937884a6c51ccd5673310c64d6a9e1839b
|
48a5196cc790ed46f7c4a556258f8d2aeb7eb1c1
|
refs/heads/master
| 2021-01-17T23:33:42.765997 | 2013-11-14T20:06:23 | 2013-11-14T20:08:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,703 |
py
|
import unittest, time, sys
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_hosts, h2o_import as h2i, h2o_jobs
DO_CLASSIFICATION = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBMGrid_basic_benign(self):
csvFilename = "benign.csv"
print "\nStarting", csvFilename
csvPathname = 'logreg/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
# columns start at 0
# cols 0-13. 3 is output
# no member id in this one
# fails with n_folds
print "Not doing n_folds with benign. Fails with 'unable to solve?'"
# check the first in the models list. It should be the best
colNames = [ 'STR','OBS','AGMT','FNDX','HIGD','DEG','CHK', 'AGP1','AGMN','NLV','LIV','WT','AGLP','MST' ]
modelKey = 'GBMGrid_benign'
# 'cols', 'ignored_cols_by_name', and 'ignored_cols' have to be exclusive
params = {
'destination_key': modelKey,
'ignored_cols_by_name': 'STR',
'learn_rate': .1,
'ntrees': 2,
'max_depth': 8,
'min_rows': 1,
'response': 'FNDX',
'classification': 1 if DO_CLASSIFICATION else 0,
}
kwargs = params.copy()
timeoutSecs = 1800
start = time.time()
GBMFirstResult = h2o_cmd.runGBM(parseResult=parseResult, noPoll=True,**kwargs)
print "\nGBMFirstResult:", h2o.dump_json(GBMFirstResult)
# no pattern waits for all
h2o_jobs.pollWaitJobs(pattern=None, timeoutSecs=300, pollTimeoutSecs=10, retryDelaySecs=5)
elapsed = time.time() - start
print "GBM training completed in", elapsed, "seconds."
gbmTrainView = h2o_cmd.runGBMView(model_key=modelKey)
# errrs from end of list? is that the last tree?
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "GBM 'errsLast'", errsLast
if DO_CLASSIFICATION:
cm = gbmTrainView['gbm_model']['cm']
pctWrongTrain = h2o_gbm.pp_cm_summary(cm);
print "Last line of this cm might be NAs, not CM"
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
else:
print "GBMTrainView:", h2o.dump_json(gbmTrainView['gbm_model']['errs'])
def test_GBMGrid_basic_prostate(self):
csvFilename = "prostate.csv"
print "\nStarting", csvFilename
# columns start at 0
csvPathname = 'logreg/' + csvFilename
parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
colNames = ['ID','CAPSULE','AGE','RACE','DPROS','DCAPS','PSA','VOL','GLEASON']
modelKey = 'GBMGrid_prostate'
# 'cols', 'ignored_cols_by_name', and 'ignored_cols' have to be exclusive
params = {
'destination_key': modelKey,
'ignored_cols_by_name': 'ID',
'learn_rate': .1,
'ntrees': 2,
'max_depth': 8,
'min_rows': 1,
'response': 'CAPSULE',
'classification': 1 if DO_CLASSIFICATION else 0,
}
kwargs = params.copy()
timeoutSecs = 1800
start = time.time()
GBMFirstResult = h2o_cmd.runGBM(parseResult=parseResult, noPoll=True,**kwargs)
print "\nGBMFirstResult:", h2o.dump_json(GBMFirstResult)
# no pattern waits for all
h2o_jobs.pollWaitJobs(pattern=None, timeoutSecs=300, pollTimeoutSecs=10, retryDelaySecs=5)
elapsed = time.time() - start
print "GBM training completed in", elapsed, "seconds."
gbmTrainView = h2o_cmd.runGBMView(model_key=modelKey)
# errrs from end of list? is that the last tree?
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "GBM 'errsLast'", errsLast
if DO_CLASSIFICATION:
cm = gbmTrainView['gbm_model']['cm']
pctWrongTrain = h2o_gbm.pp_cm_summary(cm);
print "Last line of this cm might be NAs, not CM"
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
else:
print "GBMTrainView:", h2o.dump_json(gbmTrainView['gbm_model']['errs'])
if __name__ == '__main__':
h2o.unit_main()
|
[
"[email protected]"
] | |
077fa8b5db26b02abb818582046ec268a8d0215b
|
9b9a02657812ea0cb47db0ae411196f0e81c5152
|
/repoData/danfolkes-Magnet2Torrent/allPythonContent.py
|
0beca59a21a4fb5422f36851ab3bc50601572d49
|
[] |
no_license
|
aCoffeeYin/pyreco
|
cb42db94a3a5fc134356c9a2a738a063d0898572
|
0ac6653219c2701c13c508c5c4fc9bc3437eea06
|
refs/heads/master
| 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,287 |
py
|
__FILENAME__ = Magnet_To_Torrent2
#!/usr/bin/env python
'''
Created on Apr 19, 2012
@author: dan, Faless
GNU GENERAL PUBLIC LICENSE - Version 3
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
http://www.gnu.org/licenses/gpl-3.0.txt
'''
import shutil
import tempfile
import os.path as pt
import sys
import libtorrent as lt
from time import sleep
def magnet2torrent(magnet, output_name=None):
    """Resolve *magnet*'s metadata via libtorrent and write a .torrent file.

    :param magnet: magnet URI to resolve.
    :param output_name: optional output file name or target directory;
        defaults to <torrent name>.torrent in the current directory.
    :return: absolute path of the written .torrent file.
    """
    if output_name and \
       not pt.isdir(output_name) and \
       not pt.isdir(pt.dirname(pt.abspath(output_name))):
        print("Invalid output folder: " + pt.dirname(pt.abspath(output_name)))
        print("")
        sys.exit(0)
    tempdir = tempfile.mkdtemp()
    ses = lt.session()
    params = {
        'save_path': tempdir,
        # BUG FIX: 'duplicate_is_error' appeared twice in this literal;
        # duplicate dict keys silently overwrite each other.
        'duplicate_is_error': True,
        'storage_mode': lt.storage_mode_t(2),
        'paused': False,
        'auto_managed': True,
    }
    handle = lt.add_magnet_uri(ses, magnet, params)
    print("Downloading Metadata (this may take a while)")
    while (not handle.has_metadata()):
        try:
            sleep(1)
        except KeyboardInterrupt:
            print("Aborting...")
            ses.pause()
            print("Cleanup dir " + tempdir)
            shutil.rmtree(tempdir)
            sys.exit(0)
    ses.pause()
    print("Done")
    torinfo = handle.get_torrent_info()
    torfile = lt.create_torrent(torinfo)
    output = pt.abspath(torinfo.name() + ".torrent")
    if output_name:
        if pt.isdir(output_name):
            output = pt.abspath(pt.join(
                output_name, torinfo.name() + ".torrent"))
        elif pt.isdir(pt.dirname(pt.abspath(output_name))):
            output = pt.abspath(output_name)
    print("Saving torrent file here : " + output + " ...")
    # Bencode once and reuse: the original encoded the torrent twice and
    # left the first result unused. A context manager guarantees the file
    # is closed even if the write fails.
    torcontent = lt.bencode(torfile.generate())
    with open(output, "wb") as f:
        f.write(torcontent)
    print("Saved! Cleaning up dir: " + tempdir)
    ses.remove_torrent(handle)
    shutil.rmtree(tempdir)
    return output
def showHelp():
    """Print command-line usage for this script to stdout."""
    usage = [
        "",
        "USAGE: " + pt.basename(sys.argv[0]) + " MAGNET [OUTPUT]",
        " MAGNET\t- the magnet url",
        " OUTPUT\t- the output torrent file name",
        "",
    ]
    for line in usage:
        print(line)
def main():
    """Entry point: validate argv, then convert the given magnet link."""
    args = sys.argv[1:]
    if not args:
        # No magnet URI supplied: show usage and bail out.
        showHelp()
        sys.exit(0)
    magnet = args[0]
    output_name = args[1] if len(args) >= 2 else None
    magnet2torrent(magnet, output_name)

if __name__ == "__main__":
    main()
########NEW FILE########
|
[
"[email protected]"
] | |
dd47b3c3b12d0394c13cd989e4409d25f90ad2cc
|
3e3506f8a9c18744b5e9c1bda2f66315d2ebe753
|
/snippets/serializers.py
|
4089503add5e5b2f37f19c8d9fb456de701cad2f
|
[] |
no_license
|
didoogan/drf
|
63ad069540124ab057d4f271aa76be650486981a
|
2a0446b6d38ef8ce67c031b2ac5bff62c519cf40
|
refs/heads/master
| 2020-07-31T00:24:19.904525 | 2016-08-24T20:55:11 | 2016-08-24T20:55:11 | 66,281,423 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 804 |
py
|
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class SnippetSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize Snippet objects with hyperlinked relations."""
    # Read-only: the owner comes from the snippet's user, never from input.
    owner = serializers.ReadOnlyField(source='owner.username')
    # Link to the HTML-highlighted rendering of the snippet.
    highlight = serializers.HyperlinkedIdentityField(view_name='snippet-highlight', format='html')
    class Meta:
        model = Snippet
        fields = ('url', 'pk', 'highlight', 'owner',
                  'title', 'code', 'linenos', 'language', 'style')
class UserSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize User objects together with links to their snippets."""
    # Reverse relation: hyperlinks to every snippet owned by this user.
    snippets = serializers.HyperlinkedRelatedField(many=True, view_name='snippet-detail', read_only=True)
    class Meta:
        model = User
        fields = ('url', 'pk', 'username', 'snippets')
|
[
"[email protected]"
] | |
2c3f9bf1eb7af4abf52697eb26f2d9fc8262ce2d
|
23631af0987b3f1d30b0bf8bfcea1bd63159eeba
|
/gate_api/configuration.py
|
4b19c91ec5f25ad45dffcb2880c0358f4580b286
|
[] |
no_license
|
xuvw/gateapi-python
|
08c3c72ff0e2c4713bf3a2ffe0b15d05e57491ca
|
1a3f3551cba4a756f76f17b070c3e0c5ff2e88ea
|
refs/heads/master
| 2020-05-25T14:33:35.592775 | 2019-04-02T08:50:25 | 2019-04-02T08:50:25 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,144 |
py
|
# coding: utf-8
"""
Gate API v4
APIv4 provides spot, margin and futures trading operations. There are public APIs to retrieve the real-time market statistics, and private APIs which needs authentication to trade on user's behalf. # noqa: E501
OpenAPI spec version: 4.6.1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import copy
import logging
import multiprocessing
import sys
import urllib3
import six
from six.moves import http_client as httplib
class TypeWithDefault(type):
    """Metaclass giving each class a mutable "default instance" template.

    Calling the class returns a shallow copy of a lazily created default
    instance; ``set_default`` replaces that template, so later calls copy
    the new one.
    """

    def __init__(cls, name, bases, dct):
        super(TypeWithDefault, cls).__init__(name, bases, dct)
        cls._default = None

    def __call__(cls):
        if cls._default is None:
            # Build the template lazily on the first instantiation.
            cls._default = super(TypeWithDefault, cls).__call__()
        return copy.copy(cls._default)

    def set_default(cls, default):
        cls._default = copy.copy(default)
class Configuration(six.with_metaclass(TypeWithDefault, object)):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self):
        """Constructor"""
        # Default Base url
        self.host = "https://api.gateio.ws/api/v4"
        # Temp file folder for downloading files
        self.temp_folder_path = None
        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""
        # Logging Settings
        self.logger = {}
        self.logger["package_logger"] = logging.getLogger("gate_api")
        self.logger["urllib3_logger"] = logging.getLogger("urllib3")
        # Log format (assignment goes through the logger_format property
        # setter below, which also builds self.logger_formatter)
        self.logger_format = '%(asctime)s %(levelname)s %(message)s'
        # Log stream handler
        self.logger_stream_handler = None
        # Log file handler
        self.logger_file_handler = None
        # Debug file location (routes through the logger_file property setter)
        self.logger_file = None
        # Debug switch (routes through the debug property setter)
        self.debug = False
        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API
        # from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None
        # Set this to True/False to enable/disable SSL hostname verification.
        self.assert_hostname = None
        # urllib3 connection pool's maximum number of connections saved
        # per pool. urllib3 uses 1 connection as default value, but this is
        # not the best value when you are making a lot of possibly parallel
        # requests to the same host, which is often the case here.
        # cpu_count * 5 is used as default value to increase performance.
        self.connection_pool_maxsize = multiprocessing.cpu_count() * 5
        # Proxy URL
        self.proxy = None
        # Safe chars for path_param
        self.safe_chars_for_path_param = ''
        # API key and secret
        self.key = ""
        self.secret = ""
    @property
    def logger_file(self):
        """The logger file.
        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.
        :param value: The logger_file path.
        :type: str
        """
        return self.__logger_file
    @logger_file.setter
    def logger_file(self, value):
        """The logger file.
        If the logger_file is None, then add stream handler and remove file
        handler. Otherwise, add file handler and remove stream handler.
        :param value: The logger_file path.
        :type: str
        """
        self.__logger_file = value
        if self.__logger_file:
            # If set logging file,
            # then add file handler and remove stream handler.
            self.logger_file_handler = logging.FileHandler(self.__logger_file)
            self.logger_file_handler.setFormatter(self.logger_formatter)
            for _, logger in six.iteritems(self.logger):
                logger.addHandler(self.logger_file_handler)
    @property
    def debug(self):
        """Debug status
        :param value: The debug status, True or False.
        :type: bool
        """
        return self.__debug
    @debug.setter
    def debug(self, value):
        """Debug status
        :param value: The debug status, True or False.
        :type: bool
        """
        self.__debug = value
        if self.__debug:
            # if debug status is True, turn on debug logging
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.DEBUG)
            # turn on httplib debug
            httplib.HTTPConnection.debuglevel = 1
        else:
            # if debug status is False, turn off debug logging,
            # setting log level to default `logging.WARNING`
            for _, logger in six.iteritems(self.logger):
                logger.setLevel(logging.WARNING)
            # turn off httplib debug
            httplib.HTTPConnection.debuglevel = 0
    @property
    def logger_format(self):
        """The logger format.
        The logger_formatter will be updated when sets logger_format.
        :param value: The format string.
        :type: str
        """
        return self.__logger_format
    @logger_format.setter
    def logger_format(self, value):
        """The logger format.
        The logger_formatter will be updated when sets logger_format.
        :param value: The format string.
        :type: str
        """
        self.__logger_format = value
        self.logger_formatter = logging.Formatter(self.__logger_format)
    def get_api_key_with_prefix(self, identifier):
        """Gets API key (with prefix if set).
        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        if (self.api_key.get(identifier) and
                self.api_key_prefix.get(identifier)):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier]  # noqa: E501
        elif self.api_key.get(identifier):
            return self.api_key[identifier]
    def get_basic_auth_token(self):
        """Gets HTTP basic authentication header (string).
        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(
            basic_auth=self.username + ':' + self.password
        ).get('authorization')
    def auth_settings(self):
        """Gets Auth Settings dict for api client.
        :return: The Auth Settings information dict.
        """
        # Gate API v4 signs requests with KEY / SIGN / Timestamp headers.
        return {
            'api_key':
                {
                    'type': 'api_key',
                    'in': 'header',
                    'key': 'KEY',
                    'value': self.get_api_key_with_prefix('KEY')
                },
            'api_sign':
                {
                    'type': 'api_key',
                    'in': 'header',
                    'key': 'SIGN',
                    'value': self.get_api_key_with_prefix('SIGN')
                },
            'api_timestamp':
                {
                    'type': 'api_key',
                    'in': 'header',
                    'key': 'Timestamp',
                    'value': self.get_api_key_with_prefix('Timestamp')
                },
        }
    def to_debug_report(self):
        """Gets the essential information for debugging.
        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: 4.6.1\n"\
               "SDK Package Version: 4.6.1".\
               format(env=sys.platform, pyversion=sys.version)
|
[
"[email protected]"
] | |
a4cfe939cf946016e8aa26c131d44218af521253
|
d5f53599338a30a9d6c7de7d5c574db59545ed3d
|
/Gse/generated/Ref/channels/SG1_SignalGen_Output.py
|
dd5c8e60cf31bbd105160631ee8ff53b9fd5a55e
|
[
"Apache-2.0"
] |
permissive
|
dstockhouse/eaglesat-fprime
|
c39a01cc5648dcd8b351f47684923fe481c720be
|
e640b3faea0000e1ca8acab4d6ff66150196c32b
|
refs/heads/master
| 2020-05-07T15:31:09.289797 | 2019-11-20T00:33:15 | 2019-11-20T00:33:15 | 180,639,007 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,405 |
py
|
'''
Created on Wednesday, 10 April 2019
@author: David
THIS FILE IS AUTOMATICALLY GENERATED - DO NOT EDIT!!!
XML Source: /cygdrive/c/Users/David/Documents/eaglesat/eaglesat-fprime/Ref/SignalGen/SignalGenComponentAi.xml
'''
# Import the types this way so they do not need prefixing for execution.
from models.serialize.type_exceptions import *
from models.serialize.type_base import *
from models.serialize.bool_type import *
from models.serialize.enum_type import *
from models.serialize.f32_type import *
from models.serialize.f64_type import *
from models.serialize.u8_type import *
from models.serialize.u16_type import *
from models.serialize.u32_type import *
from models.serialize.u64_type import *
from models.serialize.i8_type import *
from models.serialize.i16_type import *
from models.serialize.i32_type import *
from models.serialize.i64_type import *
from models.serialize.string_type import *
from models.serialize.serializable_type import *
from models.common import channel_telemetry
# Each file represents the information for a single event
# These module variables are used to instance the channel object within the Gse
COMPONENT = "Ref::SignalGen"
NAME = "SG1_SignalGen_Output"
ID = 0xb5
CHANNEL_DESCRIPTION = "SignalGen Output"
# Telemetry value type: 32-bit float.
TYPE = F32Type()
FORMAT_STRING = None
# Alarm thresholds (None = threshold unused for this channel).
LOW_RED = None
LOW_ORANGE = None
LOW_YELLOW = None
HIGH_YELLOW = None
HIGH_ORANGE = None
HIGH_RED = None
|
[
"[email protected]"
] | |
6a4b9b33ccb4907b8e2d6194e8a505fcd0aaaeb0
|
523f8f5febbbfeb6d42183f2bbeebc36f98eadb5
|
/539.py
|
3f49db2b3056bb0ea18a9558663dea67f0a5b806
|
[] |
no_license
|
saleed/LeetCode
|
655f82fdfcc3000400f49388e97fc0560f356af0
|
48b43999fb7e2ed82d922e1f64ac76f8fabe4baa
|
refs/heads/master
| 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 580 |
py
|
class Solution(object):
    def findMinDifference(self, timePoints):
        """Return the minimum circular difference in minutes between any two
        "HH:MM" clock times.

        :type timePoints: List[str]
        :rtype: int

        BUG FIX: the original compared consecutive sorted times only and
        never the wrap-around pair (latest time back to earliest across
        midnight), so e.g. ["23:59", "00:00"] returned 1439 instead of 1.
        """
        # Convert each time to minutes-since-midnight and sort.
        minutes = sorted(int(t[:2]) * 60 + int(t[3:]) for t in timePoints)
        # Start with the wrap-around gap between latest and earliest.
        best = minutes[0] + 24 * 60 - minutes[-1]
        for earlier, later in zip(minutes, minutes[1:]):
            best = min(best, later - earlier)
        return best
|
[
"[email protected]"
] | |
7cec84d82d2dcb14c1cbdaf99b64ffc73e1ae94e
|
11771f5dd90a74d5c76765f27f0d9a9cb044f57b
|
/route/bbs_make.py
|
2185cef4c1bdcfd50b641b610aa030eb0d012695
|
[
"BSD-3-Clause"
] |
permissive
|
openNAMU/openNAMU
|
cc031ea848ac6d829ad243fcf59da26adf0f0814
|
868107e4ef53e4e78af15c590673b78ee385baa5
|
refs/heads/beta
| 2023-08-24T10:20:00.245680 | 2023-08-23T14:09:53 | 2023-08-23T14:09:53 | 78,184,261 | 86 | 75 |
BSD-3-Clause
| 2023-09-13T21:36:03 | 2017-01-06T07:22:10 |
Python
|
UTF-8
|
Python
| false | false | 2,024 |
py
|
from .tool.func import *
def bbs_make():
    """Admin-only view that creates a new BBS (board) record.

    GET renders the creation form; POST inserts two bbs_set rows (name and
    type) keyed by the next free board id, then redirects to /bbs/main.
    """
    with get_db_connect() as conn:
        curs = conn.cursor()
        if admin_check() != 1:
            # Only administrators may create boards.
            return re_error('/error/3')
        if flask.request.method == 'POST':
            # Next board id = highest existing id + 1 (numeric ordering).
            curs.execute(db_change('select set_id from bbs_set where set_name = "bbs_name" order by set_id + 0 desc'))
            db_data = curs.fetchall()
            bbs_num = str(int(db_data[0][0]) + 1) if db_data else '1'
            bbs_name = flask.request.form.get('bbs_name', 'test')
            bbs_type = flask.request.form.get('bbs_type', 'comment')
            # Whitelist the board type; anything else falls back to 'comment'.
            bbs_type = bbs_type if bbs_type in ['comment', 'thread'] else 'comment'
            curs.execute(db_change("insert into bbs_set (set_name, set_code, set_id, set_data) values ('bbs_name', '', ?, ?)"), [bbs_num, bbs_name])
            curs.execute(db_change("insert into bbs_set (set_name, set_code, set_id, set_data) values ('bbs_type', '', ?, ?)"), [bbs_num, bbs_type])
            conn.commit()
            return redirect('/bbs/main')
        else:
            return easy_minify(flask.render_template(skin_check(),
                imp = [load_lang('bbs_make'), wiki_set(), wiki_custom(), wiki_css([0, 0])],
                data = '''
                    <form method="post">
                        <input placeholder="''' + load_lang('bbs_name') + '''" name="bbs_name">
                        <hr class="main_hr">
                        <select name="bbs_type">
                            <option value="comment">''' + load_lang('comment_base') + '''</option>
                            <option value="thread">''' + load_lang('thread_base') + '''</option>
                        </select>
                        <hr class="main_hr">
                        <button type="submit">''' + load_lang('save') + '''</button>
                    </form>
                ''',
                menu = [['bbs/main', load_lang('return')]]
            ))
|
[
"[email protected]"
] | |
376183f1fd02abc26c81e2af35be1774eebe4052
|
1eab574606dffb14a63195de994ee7c2355989b1
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/bfdrouter/bfdrouter.py
|
a4c8d026e8eb6e4b33d202bfd079514c11616f61
|
[
"MIT"
] |
permissive
|
steiler/ixnetwork_restpy
|
56b3f08726301e9938aaea26f6dcd20ebf53c806
|
dd7ec0d311b74cefb1fe310d57b5c8a65d6d4ff9
|
refs/heads/master
| 2020-09-04T12:10:18.387184 | 2019-11-05T11:29:43 | 2019-11-05T11:29:43 | 219,728,796 | 0 | 0 | null | 2019-11-05T11:28:29 | 2019-11-05T11:28:26 | null |
UTF-8
|
Python
| false | false | 2,602 |
py
|
# MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BfdRouter(Base):
    """Bfd Port Specific Data
    The BfdRouter class encapsulates a required bfdRouter resource which will be retrieved from the server every time the property is accessed.
    """
    # Auto-generated REST wrapper: each property is a thin read of the
    # server-side attribute of the same name.
    __slots__ = ()
    _SDM_NAME = 'bfdRouter'
    def __init__(self, parent):
        super(BfdRouter, self).__init__(parent)
    @property
    def Count(self):
        """Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        Returns:
            number
        """
        return self._get_attribute('count')
    @property
    def DescriptiveName(self):
        """Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offers more context
        Returns:
            str
        """
        return self._get_attribute('descriptiveName')
    @property
    def Name(self):
        """Name of NGPF element, guaranteed to be unique in Scenario
        Returns:
            str
        """
        return self._get_attribute('name')
    @Name.setter
    def Name(self, value):
        # Name is the only writable attribute on this resource.
        self._set_attribute('name', value)
    @property
    def RowNames(self):
        """Name of rows
        Returns:
            list(str)
        """
        return self._get_attribute('rowNames')
    def update(self, Name=None):
        """Updates a child instance of bfdRouter on the server.
        Args:
            Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        self._update(locals())
|
[
"[email protected]"
] | |
6e5b281dfdbc8eb03b095b591ce654289d789360
|
e0c8e66af3a72a1cc534d7a90fead48754d266b3
|
/vb_suite/miscellaneous.py
|
8295d275f2dd615f626d02981203b406f233a1ea
|
[
"BSD-3-Clause"
] |
permissive
|
gwtaylor/pandas
|
e12b0682347b9f03a24d6bff3e14f563cb7a3758
|
7b0349f0545011a6cac2422b8d8d0f409ffd1e15
|
refs/heads/master
| 2021-01-15T17:51:47.147334 | 2012-01-13T17:53:56 | 2012-01-13T17:53:56 | 3,174,111 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
from vbench.benchmark import Benchmark
from datetime import datetime
common_setup = """from pandas_vb_common import *
"""
#----------------------------------------------------------------------
# cache_readonly
setup = common_setup + """
from pandas.util.decorators import cache_readonly
class Foo:
@cache_readonly
def prop(self):
return 5
obj = Foo()
"""
misc_cache_readonly = Benchmark("obj.prop", setup, name="misc_cache_readonly",
ncalls=2000000)
|
[
"[email protected]"
] | |
7bcff9fa804b622d48c7a6bb33873bdeede52060
|
b61dedf12868e2bc511b6693af1985911a13f336
|
/src/logpipe/formats/json.py
|
5747c863e36910b285e51dc63598357f2e147fee
|
[
"ISC"
] |
permissive
|
vitorcarmovieira/django-logpipe
|
f9eebb6674b9ba180a63448c9d71ce2e87929f7c
|
89d0543e341518f9ae49124c354e6a6c2e3f4150
|
refs/heads/main
| 2023-03-03T13:18:22.456270 | 2021-02-13T17:29:32 | 2021-02-13T17:29:32 | 326,679,534 | 1 | 1 |
ISC
| 2021-02-13T17:29:32 | 2021-01-04T12:39:30 |
Python
|
UTF-8
|
Python
| false | false | 139 |
py
|
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
# Re-export DRF's JSON renderer/parser as this format module's public API.
__all__ = ['JSONRenderer', 'JSONParser']
|
[
"[email protected]"
] | |
53cd30d03207556424257a7a49ed432f13b6260a
|
9aa7b52847a161507eae57c222f6f3b3473fbf67
|
/Project/Main/bin/pyhtmlizer
|
cb636c852d7d27ecd58cab7c59fd4e7b1fc7541b
|
[] |
no_license
|
azatnt/Project_Aza_Madi
|
13a41bcc7bc822503136046dd5905a0884ffccb5
|
d2804cd2b1e9b97d44e85d6a24c45d3f41458db3
|
refs/heads/master
| 2023-01-22T18:17:17.512344 | 2020-11-16T15:56:00 | 2020-11-16T15:56:00 | 261,734,097 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 276 |
#!/Users/sulpak/Documents/GitHub/Project_Aza_Madi/Project/Main/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from twisted.scripts.htmlizer import run
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so usage
    # messages show the bare command name, then run twisted's htmlizer.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(run())
|
[
"[email protected]"
] | ||
f012952b29876b396eeff208f656b11ad3d1d3d2
|
54f352a242a8ad6ff5516703e91da61e08d9a9e6
|
/Source Codes/AtCoder/agc025/C/2658196.py
|
38086edb4f9e8897ebee6bd4d4c545b41c0b5eb2
|
[] |
no_license
|
Kawser-nerd/CLCDSA
|
5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
|
aee32551795763b54acb26856ab239370cac4e75
|
refs/heads/master
| 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null |
UTF-8
|
Python
| false | false | 333 |
py
|
from itertools import accumulate
# Competitive-programming solution (appears to be AtCoder AGC025 C).
# Answer = max over k of 2 * ((sum of the k largest L) - (sum of the k
# smallest R)) -- TODO confirm against the problem statement.
N = int(input())
L, R = [0], [0]
for i in range(N):
    li, ri = map(int, input().split())
    L.append(li)
    R.append(ri)
L.sort(reverse=True)
R.sort()
# Prefix sums: L[k] = sum of the k largest lefts, R[k] = sum of the k
# smallest rights (index 0 is the padded zero).
L = list(accumulate(L))
R = list(accumulate(R))
ans = 0
for k in range(N+1):
    ans = max(ans, 2*(L[k]-R[k]))
print(ans)
|
[
"[email protected]"
] | |
26571dafb7f2105ef31a78259155c44e4a01ad01
|
22dcd52b6a07e82e8db9bf8b7ad38711d12f69a8
|
/venv/Lib/site-packages/sklearn/neighbors/base.py
|
9be7b1ffc90f2c7d40252bf586916429df32fafb
|
[] |
no_license
|
MrGreenPepper/music_cluster
|
9060d44db68ae5e085a4f2c78d36868645432d43
|
af5383a7b9c68d04c16c1086cac6d2d54c3e580c
|
refs/heads/main
| 2023-08-15T09:14:50.630105 | 2021-10-01T09:45:47 | 2021-10-01T09:45:47 | 412,407,002 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 489 |
py
|
# THIS FILE WAS AUTOMATICALLY GENERATED BY deprecated_modules.py
import sys
from . import _base
from ..externals._pep562 import Pep562
from ..utils.deprecation import _raise_dep_warning_if_not_pytest
deprecated_path = 'sklearn.neighbors.base'
correct_import_path = 'sklearn.neighbors'
_raise_dep_warning_if_not_pytest(deprecated_path, correct_import_path)
def __getattr__(name):
    # Delegate every attribute lookup on this deprecated module to its
    # new home, sklearn.neighbors._base.
    return getattr(_base, name)
if not sys.version_info >= (3, 7):
    # Module-level __getattr__ (PEP 562) is native only on Python 3.7+;
    # Pep562 emulates it on older interpreters.
    Pep562(__name__)
|
[
"[email protected]"
] | |
eac8e55efa2b9ab7f320a562c98fa8c884b5e994
|
60ce73bf2f86940438e5b7fecaaccad086888dc5
|
/working_scrapers/Virginia_amherst.py
|
dd21db5c4557762ea61e5ec1f6730d25b2bd6a00
|
[] |
no_license
|
matthewgomies/jailcrawl
|
22baf5f0e6dc66fec1b1b362c26c8cd2469dcb0d
|
9a9ca7e1328ae549860ebeea9b149a785f152f39
|
refs/heads/master
| 2023-02-16T06:39:42.107493 | 2021-01-15T16:37:57 | 2021-01-15T16:37:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,248 |
py
|
#!/usr/bin/python
'''
This is an template script
'''
from urllib.request import urlopen, Request
import pandas as pd
import os
import time
import numpy as np
from datetime import datetime
import datetime as dt
import sys
from io import StringIO
from joblib import Parallel, delayed
import requests
from jailscrape.common import save_to_s3, get_browser, get_logger, record_error, save_pages_array
from jailscrape import crawlers
# jailscrape.common is a file that is part of the project which keeps
# most common boilerplate code out of this file
from selenium.webdriver.common.keys import Keys
import watchtower
from bs4 import BeautifulSoup
import re
import math
# NOTE: These are imports. They ideally don't change very often. It's OK
# to have a large, maximal set here and to bulk-edit files to add to
# these.
ROW_INDEX = 1015 # Change this for each scraper. This references the row
# of the main jailcrawl spreadsheet. This index will be used to look up
# the URL as well as state/county info
THIS_STATE = 'virginia' # Change the current state/county information.
THIS_COUNTY = 'amherst'
def main(roster_row):
    """Scrape the Amherst, VA jail roster described by *roster_row*.

    Validates that the row matches this scraper's state/county, then
    delegates the actual crawl to crawlers.basic_multipage. On any
    failure the error is recorded to S3 and the process exits non-zero.
    """
    try:
        logger = get_logger(roster_row) # Get a standard logger
        # Here are standard variable values/how to initialize them.
        # These aren't initialized here since in the save_single_page
        # case, they can be done in the called function
        #browser = get_browser() # Get a standard browser
        #urlAddress = roster_row['Working Link'] # Set the main URL from the spreadsheet
        #page_index = 0 # Set an initial value of "page_index", which we will use to separate output pages
        #logger.info('Set working link to _%s_', urlAddress) # Log the chosen URL
        ##########
        # Begin core specific scraping code
        if roster_row['State'].lower() != THIS_STATE or roster_row['County'].lower() != THIS_COUNTY:
            raise Exception("Expected county definition info from _%s, %s_, but found info: _%s_" % (THIS_COUNTY, THIS_STATE, roster_row))
        #crawlers.save_single_page(roster_row) # try to call a known crawler if possible
        crawlers.basic_multipage(roster_row, next_type='text', next_string='>') # try to call a known crawler if possible
        ## Code to save a page and log appropriately
        #save_to_s3(store_source, page_index, roster_row)
        #logger.info('Saved page _%s_', page_index)
        # End core specific scraping code
        ##########
        #Close the browser
        logger.info('complete!')
    except Exception as errorMessage:
        try:
            # NOTE(review): `browser` is never assigned in this function (the
            # get_browser() call above is commented out), so browser.close()
            # always raises NameError and control falls into the bare except
            # below — confirm this fallback is the intended behaviour.
            browser.close()
            record_error(message=str(errorMessage), roster_row=roster_row, browser=browser)
        except:
            record_error(message=str(errorMessage), roster_row=roster_row)
        # Record error in S3 for a general error
        logger.error('Error: %s', errorMessage)
        # Log error
        sys.exit(1)
if __name__ == "__main__":
    #This will load in the current jail roster list
    #Select the index of the roster this script is for:
    #Write the name of the county and state
    roster = pd.read_csv('/opt/jail_roster_final_rmDuplicates.csv',encoding = "utf-8")
    main(roster[roster['index'] == ROW_INDEX].iloc[0])
|
[
"[email protected]"
] | |
93431b4260ae9bcc50dc2babafb602fe5f3a56f8
|
f3598888ce889075d006de9559aa67499ca0d708
|
/Common/CenterToLeft.py
|
d1cf789f60c0bb67849262f0612c7c308bf8032d
|
[] |
no_license
|
JinYanming/jym_cmot_semi_mask
|
6f1ceafa344d2831cdc91e1af0515b417b3939d6
|
be5fc9694f802ab0fb2eaeb11c7eca10ee0e72b3
|
refs/heads/master
| 2022-02-20T05:56:36.418283 | 2019-09-18T18:23:40 | 2019-09-18T18:23:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 722 |
py
|
# Generated with SMOP 0.41
# Machine-translated from MATLAB by SMOP; depends on libsmop's @function
# decorator (which provides .varargin/.nargin) and its round().
from libsmop import *
# /workspace/MOT/cmot-v1/Common/CenterToLeft.m
@function
def CenterToLeft(x=None,y=None,height=None,width=None,*args,**kwargs):
    # Convert a box given by its centre (x, y) and size (height, width)
    # into the coordinates (L_x, L_y) of its top-left corner.
    varargin = CenterToLeft.varargin
    nargin = CenterToLeft.nargin
    ## Copyright (C) 2014 Seung-Hwan Bae
    ## All rights reserved.
    # (x,y): Center position
    h_height=height / 2
    # /workspace/MOT/cmot-v1/Common/CenterToLeft.m:7
    h_width=width / 2
    # /workspace/MOT/cmot-v1/Common/CenterToLeft.m:8
    L_x=x - round(h_width)
    # /workspace/MOT/cmot-v1/Common/CenterToLeft.m:10
    L_y=y - round(h_height)
    # /workspace/MOT/cmot-v1/Common/CenterToLeft.m:11
    return L_x,L_y
if __name__ == '__main__':
    pass
|
[
"[email protected]"
] | |
a2f9e589693f4eda5cea8869d53759b116acfc76
|
b0e299f6ab0139b831d0ed86cc6da0c3eb80b50d
|
/hello/public/deploy/chal.py
|
3cdcfdcec0ecf9a88f3f75665084382c0d2855d2
|
[] |
no_license
|
kendricktan/paradigm-ctf
|
96768eb6a3ee76867b873e96e2f623796803361c
|
21ba8273f858d1af24d0abdb841bb019e8fa0965
|
refs/heads/main
| 2023-06-26T07:50:39.179665 | 2021-07-31T06:27:11 | 2021-07-31T06:27:11 | 387,947,845 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 191 |
py
|
# CTF challenge launcher: inside the eth_sandbox harness, deploy the
# challenge contract with 0 ether attached and register the action that
# hands out the flag once the win condition is met.
import eth_sandbox
from web3 import Web3
eth_sandbox.run_launcher([
    eth_sandbox.new_launch_instance_action(deploy_value=Web3.toWei(0, 'ether')),
    eth_sandbox.new_get_flag_action()
])
|
[
"[email protected]"
] | |
781ffa6094e1e065a1662ff414e97c2d8e72f5f6
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_clambake.py
|
a60e6c1d2e25f9b27e08fb830685e405c05f668b
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 359 |
py
|
#calss header
class _CLAMBAKE():
def __init__(self,):
self.name = "CLAMBAKE"
self.definitions = [u'an event in which seafood is cooked and eaten outside, usually near the sea']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
c9a9d2dda80846c0c2c7b067316cfabaf6aed24b
|
321b4ed83b6874eeb512027eaa0b17b0daf3c289
|
/1266/1266.minimum-time-visiting-all-points.289659185.Accepted.leetcode.python3.py
|
76eb95f002965918c8ee1bffff4858d8a5a97364
|
[] |
no_license
|
huangyingw/submissions
|
7a610613bdb03f1223cdec5f6ccc4391149ca618
|
bfac1238ecef8b03e54842b852f6fec111abedfa
|
refs/heads/master
| 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 277 |
py
|
class Solution(object):
    def minTimeToVisitAllPoints(self, points):
        """Minimum seconds to visit every point in the given order.

        A step moves one unit horizontally, vertically or diagonally, so
        each leg costs the Chebyshev distance max(|dx|, |dy|).
        """
        x_prev, y_prev = points[0]
        total = 0
        for x_cur, y_cur in points[1:]:
            total += max(abs(x_cur - x_prev), abs(y_cur - y_prev))
            x_prev, y_prev = x_cur, y_cur
        return total
|
[
"[email protected]"
] | |
f03251f2e2c93487fb9538d28c53e60da6493523
|
772f8f0a197b736cba22627485ccbdb65ed45e4b
|
/day09/mygui3.py
|
85903ce1a15afbb06fa75763d482edd7e38d2f79
|
[] |
no_license
|
zhpg/python1805
|
ddc69cd1b3bda8bef1cb0c2913d456ea2c29a391
|
3d98c8ebc106fd0aab633a4c99ae6591013e4438
|
refs/heads/master
| 2020-03-26T11:26:59.378511 | 2018-08-05T09:25:21 | 2018-08-05T09:25:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""Tiny tkinter demo: two greeting buttons plus a quit button."""
import tkinter
from functools import partial
def say_hi(world):
    """Return a click callback that greets *world* in the shared label."""
    def welcome():
        label.config(text='Hello %s' % world)
    return welcome
root = tkinter.Tk()
label = tkinter.Label(text='Hello world', font="15px")
# Fix: the widget option is spelled "command"; "comand" raised TclError.
b1 = tkinter.Button(root, bg='red', fg='white', text='button1', command=say_hi('sss'))
# Fix: partial must wrap the Button *class* plus its preset arguments —
# the original wrapped an already-constructed widget instance, which is
# not callable.  (partial function)
MyButton = partial(tkinter.Button, root, bg='red', fg='white')
b2 = MyButton(text='button2', command=say_hi('chine'))
# Fix: pass root.quit itself; calling root.quit() here would run it
# immediately during construction instead of on click.
b3 = MyButton(text='quit', command=root.quit)
label.pack()
b1.pack()
b2.pack()
b3.pack()
root.mainloop()
|
[
"[email protected]"
] | |
62c0360071a15ade3e6a6b3f38a577416759847b
|
7160e632d88bf49492616f8152c91cb9f1d40d8d
|
/testcases/statistical_form2/test_case_166_statistical_form_alarm_detail.py
|
53c29adc336cc3d9a149c60941a9e7a5f1d2954e
|
[] |
no_license
|
huangqiming123/tuqiangol_test1
|
ad5ddf22ce61b5b6daad55f684be5da160a64e59
|
75722812260590480320910c4ad6f6c1251a2def
|
refs/heads/master
| 2021-03-30T23:29:08.478494 | 2018-03-12T03:45:11 | 2018-03-12T03:45:11 | 124,832,890 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,802 |
py
|
import unittest
from time import sleep
from automate_driver.automate_driver import AutomateDriver
from pages.alarm_info.alarm_info_page import AlarmInfoPage
from pages.base.base_page import BasePage
from pages.base.lon_in_base import LogInBase
from pages.statistical_form.statistical_form_page import StatisticalFormPage
from pages.statistical_form.statistical_form_page_read_csv import StatisticalFormPageReadCsv
class TestCase166StatisticalFormAlarmDetail(unittest.TestCase):
    """UI test 166: the alarm-detail statistical form shows consistent
    per-group device counts for each of the first five customers."""
    def setUp(self):
        # Preconditions
        # Instantiate the driver and page objects
        self.driver = AutomateDriver()
        self.base_url = self.driver.base_url
        self.base_page = BasePage(self.driver, self.base_url)
        self.alarm_info_page = AlarmInfoPage(self.driver, self.base_url)
        self.statistical_form_page_read_csv = StatisticalFormPageReadCsv()
        self.log_in_base = LogInBase(self.driver, self.base_url)
        self.statistical_form_page = StatisticalFormPage(self.driver, self.base_url)
        # Open the page, fill in user name and password, click "log in"
        self.base_page.open_page()
        self.driver.set_window_max()
        self.driver.implicitly_wait(5)
        self.log_in_base.log_in_jimitest()
        # After logging in, open the console and then the statistical form
        self.statistical_form_page.click_control_after_click_statistical_form_page()
        sleep(3)
    def tearDown(self):
        # Always release the browser, whether the test passed or failed
        self.driver.quit_browser()
    def test_case_statistical_form_alarm_detail(self):
        # Assert the page URL
        expect_url = self.base_url + '/deviceReport/statisticalReport'
        self.assertEqual(expect_url, self.alarm_info_page.actual_url_click_alarm())
        # Open the alarm-detail list
        self.alarm_info_page.click_alarm_detail_list()
        for n in range(5):
            self.statistical_form_page.click_customer_in_alarm_detail_form(n)
            # Click the "search device" button
            self.statistical_form_page.click_search_dev_button_in_alarm_detail()
            # Get the number of device groups
            number = self.statistical_form_page.get_group_number_in_alarm_detail_form()
            if number == 0:
                pass
            else:
                for m in range(number):
                    # Collapse the default group
                    self.statistical_form_page.click_defalut_group_in_alarm_detail_form()
                    # Read the displayed device count of this group
                    dev_number = self.statistical_form_page.get_dev_number_in_alarm_detail_form(m)
                    # Expand this group
                    self.statistical_form_page.click_per_group_in_alarm_detail_form(m)
                    dev_number_list = self.statistical_form_page.get_dev_number_list_in_alarm_detail_form(m)
                    # The listed devices must match the group's advertised count
                    self.assertEqual(str(dev_number_list), dev_number)
|
[
"[email protected]"
] | |
d8c6eb7e638620f0db30fcee4607c3f27da7d23c
|
501e9924cb19e95c32e2d168e73ea44e7c9c440c
|
/readfiles.py
|
9c0300bb83848b8231570bcef6216b1d447617f6
|
[] |
no_license
|
Miguelmargar/file-io
|
cc2790b109187dbeec87788c662aaf52d8e96c02
|
f1c6f6ccfefbc572cac83a6ddc21ba2e902ac0c1
|
refs/heads/master
| 2020-03-17T17:23:49.493484 | 2018-05-21T12:22:23 | 2018-05-21T12:22:23 | 133,786,461 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,607 |
py
|
"""Demonstrate two ways to read a file, then count frequent long words."""
import re
import collections
# ONE WAY: readlines() keeps the trailing newline on each element.
with open("data.txt", "r") as f:  # 'with' guarantees the file is closed
    lines = f.readlines()
print(lines)
# OTHER WAY: read the whole file and split on newlines (no trailing '\n';
# without the .split the content would be one single string).
with open("data.txt", "r") as f:
    lines = f.read().split("\n")
print(lines)
# Find the ten most common words longer than five characters.
with open("1155-0.txt") as f:  # fix: the original never closed this handle
    text = f.read().lower()
words = re.findall(r"\w+", text)  # raw string so the regex escape is explicit
long_words = [word for word in words if len(word) > 5]
most_common = collections.Counter(long_words).most_common(10)
print(most_common)
|
[
"[email protected]"
] | |
73782f3ba66ecf7f99c21522cdbbf9118fadd0e6
|
32e2ba212d39e022bea40f12cdd6b3c138a62ac0
|
/mizani/tests/test_breaks.py
|
b1e61d60f512ce503f985284c50ce6a24b8c473b
|
[
"BSD-3-Clause"
] |
permissive
|
vals/mizani
|
148dd985d25796c25346a3fac106c1c5c7f40d05
|
6b288fe6061e36add001cc5f8ffb147154e7ca62
|
refs/heads/master
| 2020-09-11T09:59:07.672839 | 2017-06-16T08:03:30 | 2017-06-16T08:03:30 | 94,454,967 | 1 | 0 | null | 2017-06-15T15:47:21 | 2017-06-15T15:47:21 | null |
UTF-8
|
Python
| false | false | 5,432 |
py
|
from __future__ import division
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
import numpy.testing as npt
import pytest
from mizani.breaks import (mpl_breaks, log_breaks, minor_breaks,
trans_minor_breaks, date_breaks,
timedelta_breaks, extended_breaks)
from mizani.transforms import trans
def test_mpl_breaks():
    """mpl_breaks honours the bin count and degenerate/infinite limits."""
    x = np.arange(100)
    limits = min(x), max(x)
    for nbins in (5, 7, 10, 13, 31):
        breaks = mpl_breaks(nbins=nbins)
        assert len(breaks(limits)) <= nbins+1
    limits = float('-inf'), float('inf')
    breaks = mpl_breaks(n=5)
    assert len(breaks(limits)) == 0
    # Zero range discrete
    limits = [1, 1]
    assert len(breaks(limits)) == 1
    assert breaks(limits)[0] == limits[1]
    # Zero range continuous
    limits = [np.pi, np.pi]
    assert len(breaks(limits)) == 1
    assert breaks(limits)[0] == limits[1]
def test_log_breaks():
    """log_breaks produces powers of ten and handles degenerate limits."""
    x = [2, 20, 2000]
    limits = min(x), max(x)
    breaks = log_breaks()(limits)
    npt.assert_array_equal(breaks, [1, 10, 100, 1000, 10000])
    breaks = log_breaks(3)(limits)
    npt.assert_array_equal(breaks, [1, 100, 10000])
    breaks = log_breaks()((10000, 10000))
    npt.assert_array_equal(breaks, [10000])
    breaks = log_breaks()((float('-inf'), float('inf')))
    assert len(breaks) == 0
def test_minor_breaks():
    """minor_breaks bisects between majors, clipped to the limits."""
    # equidistant breaks
    major = [1, 2, 3, 4]
    limits = [0, 5]
    breaks = minor_breaks()(major, limits)
    npt.assert_array_equal(breaks, [.5, 1.5, 2.5, 3.5, 4.5])
    minor = minor_breaks(3)(major, [2, 3])
    npt.assert_array_equal(minor, [2.25, 2.5, 2.75])
    # non-equidistant breaks
    major = [1, 2, 4, 8]
    limits = [0, 10]
    minor = minor_breaks()(major, limits)
    npt.assert_array_equal(minor, [1.5, 3, 6])
    # single major break
    minor = minor_breaks()([2], limits)
    assert len(minor) == 0
def test_trans_minor_breaks():
    """trans_minor_breaks round-trips through a transform's coordinates."""
    class identity_trans(trans):
        minor_breaks = trans_minor_breaks()
    class square_trans(trans):
        transform = staticmethod(np.square)
        inverse = staticmethod(np.sqrt)
        minor_breaks = trans_minor_breaks()
    class weird_trans(trans):
        dataspace_is_numerical = False
        minor_breaks = trans_minor_breaks()
    major = [1, 2, 3, 4]
    limits = [0, 5]
    regular_minors = trans.minor_breaks(major, limits)
    npt.assert_allclose(
        regular_minors,
        identity_trans.minor_breaks(major, limits))
    # Transform the input major breaks and check against
    # the inverse of the output minor breaks
    squared_input_minors = square_trans.minor_breaks(
        np.square(major), np.square(limits))
    npt.assert_allclose(regular_minors,
                        np.sqrt(squared_input_minors))
    t = weird_trans()
    with pytest.raises(TypeError):
        t.minor_breaks(major)
def test_date_breaks():
    """date_breaks spans year intervals; numpy datetimes / NaT are edge cases."""
    # cpython
    x = [datetime(year, 1, 1) for year in [2010, 2026, 2015]]
    limits = min(x), max(x)
    breaks = date_breaks('5 Years')
    years = [d.year for d in breaks(limits)]
    npt.assert_array_equal(
        years, [2010, 2015, 2020, 2025, 2030])
    breaks = date_breaks('10 Years')
    years = [d.year for d in breaks(limits)]
    npt.assert_array_equal(years, [2010, 2020, 2030])
    # numpy
    x = [np.datetime64(i*10, 'D') for i in range(1, 10)]
    breaks = date_breaks('10 Years')
    limits = min(x), max(x)
    with pytest.raises(AttributeError):
        breaks(limits)
    # NaT
    limits = np.datetime64('NaT'), datetime(2017, 1, 1)
    breaks = date_breaks('10 Years')
    assert len(breaks(limits)) == 0
def test_timedelta_breaks():
    """timedelta_breaks scales across units; numpy timedeltas / NaT fail/empty."""
    breaks = timedelta_breaks()
    # cpython
    x = [timedelta(days=i*365) for i in range(25)]
    limits = min(x), max(x)
    major = breaks(limits)
    years = [val.total_seconds()/(365*24*60*60)for val in major]
    npt.assert_array_equal(
        years, [0, 5, 10, 15, 20, 25])
    x = [timedelta(microseconds=i) for i in range(25)]
    limits = min(x), max(x)
    major = breaks(limits)
    mseconds = [val.total_seconds()*10**6 for val in major]
    npt.assert_array_equal(
        mseconds, [0, 5, 10, 15, 20, 25])
    # pandas
    x = [pd.Timedelta(seconds=i*60) for i in range(10)]
    limits = min(x), max(x)
    major = breaks(limits)
    minutes = [val.total_seconds()/60 for val in major]
    npt.assert_allclose(
        minutes, [0, 2, 4, 6, 8])
    # numpy
    x = [np.timedelta64(i*10, unit='D') for i in range(1, 10)]
    limits = min(x), max(x)
    with pytest.raises(ValueError):
        breaks(limits)
    # NaT
    limits = pd.NaT, pd.Timedelta(seconds=9*60)
    assert len(breaks(limits)) == 0
def test_extended_breaks():
    """extended_breaks honours n, symmetric limits and degenerate ranges."""
    x = np.arange(100)
    limits = min(x), max(x)
    for n in (5, 7, 10, 13, 31):
        breaks = extended_breaks(n=n)
        assert len(breaks(limits)) <= n+1
    # Reverse limits
    breaks = extended_breaks(n=7)
    npt.assert_array_equal(breaks((0, 6)), breaks((6, 0)))
    # Infinite limits
    limits = float('-inf'), float('inf')
    breaks = extended_breaks(n=5)
    assert len(breaks(limits)) == 0
    # Zero range discrete
    limits = [1, 1]
    assert len(breaks(limits)) == 1
    assert breaks(limits)[0] == limits[1]
    # Zero range continuous
    limits = [np.pi, np.pi]
    assert len(breaks(limits)) == 1
    assert breaks(limits)[0] == limits[1]
|
[
"[email protected]"
] | |
09c5c0f500049f682537e17e758566cd5a346d59
|
bc01e1d158e7d8f28451a7e108afb8ec4cb7d5d4
|
/sage/src/sage/combinat/species/functorial_composition_species.py
|
6c84368ba4dfa192538a5c7946a0850b4b801bd3
|
[] |
no_license
|
bopopescu/geosci
|
28792bda1ec1f06e23ba8dcb313769b98f793dad
|
0d9eacbf74e2acffefde93e39f8bcbec745cdaba
|
refs/heads/master
| 2021-09-22T17:47:20.194233 | 2018-09-12T22:19:36 | 2018-09-12T22:19:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,158 |
py
|
"""
Functorial composition species
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2008 Mike Hansen <[email protected]>,
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
from .species import GenericCombinatorialSpecies
from .structure import GenericSpeciesStructure
from sage.misc.cachefunc import cached_function
from sage.structure.unique_representation import UniqueRepresentation
class FunctorialCompositionStructure(GenericSpeciesStructure):
    # Structures of a functorial composition carry no data beyond what the
    # generic species structure already provides, hence the empty body.
    pass
class FunctorialCompositionSpecies(GenericCombinatorialSpecies):
    def __init__(self, F, G, min=None, max=None, weight=None):
        """
        Returns the functorial composition of two species.
        EXAMPLES::
            sage: E = species.SetSpecies()
            sage: E2 = species.SetSpecies(size=2)
            sage: WP = species.SubsetSpecies()
            sage: P2 = E2*E
            sage: G = WP.functorial_composition(P2)
            sage: G.isotype_generating_series().coefficients(5)
            [1, 1, 2, 4, 11]
            sage: G = species.SimpleGraphSpecies()
            sage: c = G.generating_series().coefficients(2)
            sage: type(G)
            <class 'sage.combinat.species.functorial_composition_species.FunctorialCompositionSpecies'>
            sage: G == loads(dumps(G))
            True
            sage: G._check() #False due to isomorphism types not being implemented
            False
        """
        self._F = F
        self._G = G
        self._state_info = [F, G]
        self._name = "Functorial composition of (%s) and (%s)"%(F, G)
        # NOTE(review): the min/max/weight parameters are accepted but not
        # forwarded — literal Nones are passed to the superclass.  Confirm
        # whether discarding them is intentional before relying on them.
        GenericCombinatorialSpecies.__init__(self, min=None, max=None, weight=None)
    # Structures of this species are plain FunctorialCompositionStructure
    # wrappers (no extra data).
    _default_structure_class = FunctorialCompositionStructure
    def _structures(self, structure_class, s):
        """
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.structures([1,2,3]).list()
            [{},
            {{1, 2}*{3}},
            {{1, 3}*{2}},
            {{2, 3}*{1}},
            {{1, 2}*{3}, {1, 3}*{2}},
            {{1, 2}*{3}, {2, 3}*{1}},
            {{1, 3}*{2}, {2, 3}*{1}},
            {{1, 2}*{3}, {1, 3}*{2}, {2, 3}*{1}}]
        """
        # F[G]-structures on s are F-structures built on the *set* of all
        # G-structures on s (this is what makes the composition functorial).
        gs = self._G.structures(s).list()
        for f in self._F.structures(gs):
            yield f
    def _isotypes(self, structure_class, s):
        """
        There is no known algorithm for efficiently generating the
        isomorphism types of the functorial composition of two species.
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.isotypes([1,2,3]).list()
            Traceback (most recent call last):
            ...
            NotImplementedError
        """
        raise NotImplementedError
    def _gs(self, series_ring, base_ring):
        """
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.generating_series().coefficients(5)
            [1, 1, 1, 4/3, 8/3]
        """
        return self._F.generating_series(base_ring).functorial_composition(self._G.generating_series(base_ring))
    def _itgs(self, series_ring, base_ring):
        """
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.isotype_generating_series().coefficients(5)
            [1, 1, 2, 4, 11]
        """
        # Derived from the cycle index series rather than computed directly.
        return self.cycle_index_series(base_ring).isotype_generating_series()
    def _cis(self, series_ring, base_ring):
        """
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.cycle_index_series().coefficients(5)
            [p[],
            p[1],
            p[1, 1] + p[2],
            4/3*p[1, 1, 1] + 2*p[2, 1] + 2/3*p[3],
            8/3*p[1, 1, 1, 1] + 4*p[2, 1, 1] + 2*p[2, 2] + 4/3*p[3, 1] + p[4]]
        """
        return self._F.cycle_index_series(base_ring).functorial_composition(self._G.cycle_index_series(base_ring))
    def weight_ring(self):
        """
        Returns the weight ring for this species. This is determined by
        asking Sage's coercion model what the result is when you multiply
        (and add) elements of the weight rings for each of the operands.
        EXAMPLES::
            sage: G = species.SimpleGraphSpecies()
            sage: G.weight_ring()
            Rational Field
        """
        from sage.structure.element import get_coercion_model
        cm = get_coercion_model()
        f_weights = self._F.weight_ring()
        g_weights = self._G.weight_ring()
        # NOTE(review): relies on explain(..., verbosity=0) *returning* the
        # discovered common parent rather than printing it — confirm against
        # the coercion-model API of the Sage version in use.
        return cm.explain(f_weights, g_weights, verbosity=0)
#Backward compatibility alias for the old class name.
FunctorialCompositionSpecies_class = FunctorialCompositionSpecies
|
[
"valber@HPC"
] |
valber@HPC
|
163265522ac5b1d53899d5d114cb4432cf72522d
|
1548ce77537dcd50ab04b0eaee050b5d30553e23
|
/tests/test_pipeline/components/classification/test_lda.py
|
f78f133407c5e5dff1614b0807339f117fb6d6e8
|
[
"Apache-2.0"
] |
permissive
|
Shamoo100/AutoTabular
|
4a20e349104246bf825ebceae33dca0a79928f2e
|
7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2
|
refs/heads/main
| 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 768 |
py
|
import sklearn.discriminant_analysis
from autotabular.pipeline.components.classification.lda import LDA
from .test_base import BaseClassificationComponentTest
class LDAComponentTest(BaseClassificationComponentTest):
    """Regression scores for the LDA classification component.

    The ``res`` values pin the component's expected scores on the shared
    benchmark datasets driven by BaseClassificationComponentTest; the
    base class compares the component against plain sklearn LDA.
    """
    __test__ = True
    res = dict()
    res['default_iris'] = 1.0
    # -1 appears to mark configurations the component does not support
    # (e.g. iterative fitting / sparse input) — TODO confirm in base class.
    res['default_iris_iterative'] = -1
    res['default_iris_proba'] = 0.5614481896257509
    res['default_iris_sparse'] = -1
    res['default_digits'] = 0.88585306618093507
    res['default_digits_iterative'] = -1
    res['default_digits_binary'] = 0.9811778992106861
    res['default_digits_multilabel'] = 0.82204896441795205
    res['default_digits_multilabel_proba'] = 0.9833070018235553
    sk_mod = sklearn.discriminant_analysis.LinearDiscriminantAnalysis
    module = LDA
|
[
"[email protected]"
] | |
9c94a6ae985e0ffbcc4884ebef338fa1f8d357d0
|
b7a2a80843fa5141ffb9c7b4439f1d2ac713af30
|
/Version2/U7.2_Threads_Alt.py
|
6e674dc4ae02171ef537759fd638fb0b727f2a73
|
[] |
no_license
|
wunnox/python_grundlagen
|
df1bc2b9b1b561bd6733ccc25305e799a48e714e
|
fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0
|
refs/heads/master
| 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 |
Python
|
UTF-8
|
Python
| false | false | 1,241 |
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
####################################################
#
# Uebung:
# Erstellen Sie ein Programm, welches drei Threads startet
# Der erste Thread läuft 8 Sekunden, der zweite 4 Sekunden und der dritte 6 Sekunden
# Nehmen Sie als Vorlage , die vorhergehenden Folie.
#
####################################################
#### Lösung: ####
import time
import _thread
t = []
def show(c, s):
t.append(c)
print("Starte Thread", c, "mit", s, "Sek.")
time.sleep(s)
t.remove(c)
_thread.start_new_thread(show, (1, 12,))
time.sleep(0.5)
_thread.start_new_thread(show, (2, 22,))
time.sleep(0.5)
_thread.start_new_thread(show, (3, 18,))
time.sleep(0.5)
_thread.start_new_thread(show, (4, 14,))
time.sleep(0.5)
_thread.start_new_thread(show, (5, 21,))
time.sleep(0.5)
_thread.start_new_thread(show, (6, 19,))
time.sleep(0.5)
_thread.start_new_thread(show, (7, 15,))
time.sleep(0.5)
_thread.start_new_thread(show, (8, 18,))
time.sleep(0.5)
_thread.start_new_thread(show, (9, 13,))
time.sleep(0.5)
_thread.start_new_thread(show, (10, 14,))
time.sleep(0.5)
while t:
print("Warte auf Ende der Threads", t)
time.sleep(1)
print("Ende der Threads")
|
[
"[email protected]"
] | |
9a4a66b73d5ac59e838f0aa82bbb615cf4efa43f
|
6c58da2c54a3d35273e7984313d181f1da9981fc
|
/Multiple_Apps/djangoEnv/bin/django-admin.py
|
78fd42c83301322a9da7ef20392fed2b3158a0b1
|
[
"MIT-0"
] |
permissive
|
py1-10-2017/rgero215_PY1-10-2017
|
e582cb12cc63f84b1c0c14d09a922cb6cb228016
|
f455b335ec9c8c850571f3a75dcd95759b4cfdad
|
refs/heads/master
| 2021-09-04T03:23:48.062326 | 2018-01-14T21:07:26 | 2018-01-14T21:07:26 | 105,612,652 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 187 |
py
|
#!/Users/RGero13/Desktop/rgero215_PY1-10-2017/Multiple_Apps/djangoEnv/bin/python
# Virtualenv console-script shim: forwards the command line to Django's
# management entry point (equivalent to running "django-admin").
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
[
"[email protected]"
] | |
30d76d5a0ff3e6f690abdabd2e750c027eb4391d
|
c05f9fb686ef49c093e618a4078ffe723231f346
|
/config/conf.py
|
aa2247f1303787036d7ea5fca1e3fa1d81a42f4c
|
[] |
no_license
|
wmm0165/PytestAuto
|
d1bb40dcc5760439658c15af653953646119af44
|
42846b12ed7aefaa4e5890529ec71a76d27f245d
|
refs/heads/master
| 2020-07-16T16:35:19.962864 | 2019-09-16T10:08:37 | 2019-09-16T10:08:37 | 205,825,635 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 881 |
py
|
from datetime import datetime
import os
# Project root directory
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Report output directory
REPORT_DIR = os.path.join(ROOT_DIR, 'report')
# Path of the UI object repository config.ini file
CONF_PATH = os.path.join(ROOT_DIR, 'config', 'config.ini')
# Path of the test-data workbook
DATA_Path = os.path.join(ROOT_DIR, 'data', 'tcData.xlsx')
# Current time (HH_MM_SS), used to make report file names unique
CURRENT_TIME = datetime.now().strftime('%H_%M_%S')
# Mail configuration
# Mail server
SMTP_SERVER = 'smtp.qq.com'
# Sender address
FROM_USER = '[email protected]'
# Sender password (SMTP authorisation code)
FROM_PASSWORD = 'mhxvqpewblldbjhf'
# Recipients; append to the list to send to several people at once
TO_USER = ['账号@qq.com']
# Mail subject
SUBJECT = 'xx项目自动化测试报告'
# Mail body
CONTENTS = '测试报告正文'
# Report file name
HTML_NAME = 'testReport{}.html'.format(CURRENT_TIME)
print(HTML_NAME)  # NOTE(review): debug print runs at import time — confirm it is wanted
|
[
"[email protected]"
] | |
592ac9a1613e8c2b0e733f3b1ebe6ebb4046e7ca
|
cb12e3eff7bbb5fe2f4d0e2be9ca165a5577dc93
|
/plt-and-algo/webrtc-p2pframe/serve.py
|
a43133af73a4329d22e725d2ebc34e112a0c7968
|
[] |
no_license
|
overminder/kitchen-sink
|
6b1227ff00f8804d4d0a632e613ee903d51ab753
|
2e61b9041ceed536d42b42b75a5c50dae080c0ba
|
refs/heads/master
| 2023-06-09T05:48:47.291336 | 2023-05-29T17:38:34 | 2023-05-29T17:38:34 | 50,777,705 | 18 | 3 | null | 2020-02-09T19:22:18 | 2016-01-31T14:13:16 |
Scala
|
UTF-8
|
Python
| false | false | 3,346 |
py
|
#!/usr/bin/env python3.5
import logging
import os
import sys
import random
import json
HERE = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
STATIC_PATH = os.path.join(HERE, 'static')
from tornado import gen
from tornado.websocket import WebSocketHandler
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.ioloop import IOLoop
class Offer:
    """A pending WebRTC offer plus the ICE candidates gathered for it."""
    # Set once the offering side starts waiting for an answer (wait_resp).
    resp_cb = None
    def __init__(self, key=None, offer_payload=None):
        self.key = key
        self.offer_payload = offer_payload
        self.candidates = []
    def __repr__(self):
        candidate_count = len(self.candidates)
        return '<Offer candidates=%d %r>' % (candidate_count, self.offer_payload)
    def wait_resp(self, callback=None):
        """Store *callback*; it is invoked later with the answer (gen.Task style)."""
        self.resp_cb = callback
class Offers:
    """In-memory registry mapping signalling keys to pending Offer objects."""
    def __init__(self):
        self.offers = {}
    @classmethod
    def mkkey(cls):
        """Allocate a random 5-digit key (collisions are not checked)."""
        return str(random.randint(10000, 99999))  # Just to be simple.
    def add(self, offer):
        """Register *offer* under its own key and return it."""
        self.offers[offer.key] = offer
        return offer
    def find(self, key):
        """Look up the pending offer stored under *key* (KeyError if absent)."""
        return self.offers[key]
    def pop(self, key):
        """Remove and return the offer stored under *key*."""
        return self.offers.pop(key)
# Module-level singleton shared by all handlers.
offers = Offers()
class OfferListingHandler(RequestHandler):
    """Debug endpoint: JSON dump of every pending offer in the registry."""
    def get(self):
        # NOTE(review): the loop variable is named resp_cb but iterates over
        # the registry's *values*, which are Offer objects — the repr shown
        # is therefore the Offer repr, not the callback's.
        self.write({
            'offers': [{'key': key, 'resp_cb': repr(resp_cb)}
                       for (key, resp_cb) in offers.offers.items()],
        })
class OfferHandler(WebSocketHandler):
    """WebSocket signalling endpoint for WebRTC offer/answer exchange.

    Message protocol (JSON, dispatched on msg['type']):
      - allocKey:   reply with this connection's allocated key
      - offer:      register the offer, then block until an answer arrives
      - take-offer: send a stored offer plus its ICE candidates to a peer
      - answer:     deliver the answer to the waiting offering side
      - candidate:  accumulate an ICE candidate on this side's offer
    """
    offer = None
    key = None
    def open(self):
        # Every connection gets its own random signalling key.
        self.key = Offers.mkkey()
    def _ensure_offer(self):
        # Lazily create this connection's Offer record.
        if self.offer is None:
            self.offer = Offer(key=self.key)
        return self.offer
    @gen.coroutine
    def on_message(self, s):
        msg = json.loads(s)
        print('msg', type(msg), repr(msg))
        if msg['type'] == 'allocKey':
            self.write_message({
                'type': 'allocKeyResp',
                'key': self.key,
            })
        elif msg['type'] == 'offer':
            offer = offers.add(self._ensure_offer())
            offer.offer_payload = msg
            self.write_message(json.dumps({
                'type': 'offer-created',
            }))
            # Suspend until the answering peer calls offer.resp_cb(...)
            # (gen.Task wires the callback into wait_resp).
            resp = yield gen.Task(offer.wait_resp)
            self.write_message(json.dumps({
                'type': 'offer-accepted',
                'resp': resp,
            }))
        elif msg['type'] == 'take-offer':
            offer = offers.find(msg['key'])
            self.write_message(offer.offer_payload)
            # Replay every candidate gathered so far for this offer.
            for c in offer.candidates:
                self.write_message(c)
        elif msg['type'] == 'answer':
            key = msg.pop('forKey')
            # One-shot: the offer is removed once answered.
            offer = offers.pop(key)
            offer.resp_cb(msg)
        elif msg['type'] == 'candidate':
            self._ensure_offer().candidates.append(msg)
class NoCacheStaticFileHandler(StaticFileHandler):
    """Static file handler that forbids any client or proxy caching."""
    def set_extra_headers(self, path):
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def mkapp():
    """Build the Tornado app: signalling socket, debug listing, and a
    no-cache static-file fallback serving the ./static directory."""
    return Application([
        (r'/offer', OfferHandler),
        (r'/offers/list', OfferListingHandler),
        (r'/(.*)', NoCacheStaticFileHandler, {
            'path': STATIC_PATH,
        }),
    ], gzip=True)
def main():
    """Serve the signalling app on port 17080 (blocks in the IOLoop)."""
    port = 17080
    mkapp().listen(port)
    print('Listening on :%d' % port)
    IOLoop.current().start()
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
8704bbaf901d8a35e0ee5512cc626afd639f0d60
|
bed0d23d35b42b7316dee35f9fa06d4d2cc9de26
|
/src/custom_admin/__init__.py
|
5ec402089bc364c75d9685df1a8d89ebdb5cca66
|
[] |
no_license
|
Bloodlettinger/meandre
|
b55911c93faf6c279f496394137def21ec181e6a
|
f9a8c5dc709fcdda808fc1329264724c7b8d951e
|
refs/heads/master
| 2020-05-17T23:01:15.326103 | 2012-10-11T17:22:48 | 2012-10-11T17:22:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 199 |
py
|
# -*- coding: utf-8 -*-
from datetime import date, datetime
def ddmmyy(value):
    """Format *value* as ``DD.MM.YY``; any non-date input yields ``'--'``.

    Accepts ``date`` and ``datetime`` instances (datetime subclasses
    date, but both are listed for clarity).
    """
    if not isinstance(value, (date, datetime)):
        return u'--'
    return value.strftime('%d.%m.%y')
|
[
"[email protected]"
] | |
8b92035f4b34e0556c903e155ba9a8618bf17529
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/test/test_bad_request_error_code.py
|
670128cb3664339498ad0e2fe8a03b0977a7c7ff
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 913 |
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.bad_request_error_code import BadRequestErrorCode # noqa: E501
from swagger_client.rest import ApiException
class TestBadRequestErrorCode(unittest.TestCase):
"""BadRequestErrorCode unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBadRequestErrorCode(self):
"""Test BadRequestErrorCode"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.bad_request_error_code.BadRequestErrorCode() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
4fd9c70db157736bfaf3aab4bd859f51b90b8f41
|
82aee3211216f55392d5a757eb57f02c859e9a28
|
/Easy/599_minimumIndexSumOfTwoLists.py
|
d92b6e8b5c30aa160a4ed09faac635a69b9d9ca6
|
[] |
no_license
|
Yucheng7713/CodingPracticeByYuch
|
505d18095d4b9a35c1f3b23632a90a76d811b64a
|
1461b10b8910fa90a311939c6df9082a8526f9b1
|
refs/heads/master
| 2022-05-01T11:51:00.612603 | 2022-04-18T09:46:55 | 2022-04-18T09:46:55 | 198,961,132 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 605 |
py
|
class Solution:
def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:
r_set = set(list1 + list2)
map_1 = {res : i for i, res in enumerate(list1)}
map_2 = {res : i for i, res in enumerate(list2)}
common_res = []
min_sum = float('inf')
for r in r_set:
if r in map_1 and r in map_2:
k = map_1[r] + map_2[r]
if min_sum > k:
common_res = [r]
min_sum = k
elif min_sum == k:
common_resI += [r]
return common_res
|
[
"[email protected]"
] | |
be5e0b741dc6ad841c668031edaee115bfe5314e
|
36059411cedfeec7478fd725c43f2120ab5ad38d
|
/boulder_ftp.py
|
a055a456e089b72f19569b633bd3059a4fd66cd2
|
[] |
no_license
|
tytechortz/Arctic-Ice
|
a8345746bdd2d73559941ea71efe06601212a7f1
|
83aac39c00027cca6bd85fd2709fcfe86cf3ef31
|
refs/heads/master
| 2022-07-09T13:38:41.735808 | 2020-01-02T16:22:52 | 2020-01-02T16:22:52 | 167,096,158 | 1 | 0 | null | 2022-06-21T23:10:16 | 2019-01-23T01:35:54 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 541 |
py
|
from ftplib import FTP
import os
import pandas as pd
# Log into ftp site.
ftp = FTP('sidads.colorado.edu')
ftp.login(user='anonymous', passwd='ICE_PSWD')
ftp.login()
# Read file.
ftp.cwd('/pub/DATASETS/NOAA/G02135/north/daily/data/')
ftp.retrbinary('RETR N_seaice_extent_daily_v3.0.csv', open('N_seaice_extent_daily_v3.0.csv', 'wb').write)
ftp.quit()
# Read data.
df = pd.read_csv('N_seaice_extent_daily_v3.0.csv',skiprows=[i for i in range(1,2436)])
# df.columns = []
pd.options.display.float_format = '{:,}'.format
print(df.head())
|
[
"[email protected]"
] | |
076bf3038294b33eac256cefdfcdc846835b8fba
|
9905901a2beae3ff4885fbc29842b3c34546ffd7
|
/nitro-python/build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/filter/filterglobal_filterpolicy_binding.py
|
5aac2b8dd0a9540023a40b6dd3d6be45a9da1f39
|
[
"Python-2.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
culbertm/NSttyPython
|
f354ebb3dbf445884dbddb474b34eb9246261c19
|
ff9f6aedae3fb8495342cd0fc4247c819cf47397
|
refs/heads/master
| 2020-04-22T17:07:39.654614 | 2019-02-13T19:07:23 | 2019-02-13T19:07:23 | 170,530,223 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,576 |
py
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filterglobal_filterpolicy_binding(base_resource) :
""" Binding class showing the filterpolicy that can be bound to filterglobal.
"""
def __init__(self) :
self._policyname = None
self._priority = None
self._state = None
self.___count = None
@property
def priority(self) :
r"""The priority of the policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
r"""The priority of the policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def state(self) :
r"""State of the binding.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@state.setter
def state(self, state) :
r"""State of the binding.<br/>Possible values = ENABLED, DISABLED
"""
try :
self._state = state
except Exception as e:
raise e
@property
def policyname(self) :
r"""The name of the filter policy.
"""
try :
return self._policyname
except Exception as e:
raise e
@policyname.setter
def policyname(self, policyname) :
r"""The name of the filter policy.
"""
try :
self._policyname = policyname
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(filterglobal_filterpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.filterglobal_filterpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = filterglobal_filterpolicy_binding()
updateresource.policyname = resource.policyname
updateresource.priority = resource.priority
updateresource.state = resource.state
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [filterglobal_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].policyname = resource[i].policyname
updateresources[i].priority = resource[i].priority
updateresources[i].state = resource[i].state
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = filterglobal_filterpolicy_binding()
deleteresource.policyname = resource.policyname
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [filterglobal_filterpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].policyname = resource[i].policyname
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service) :
r""" Use this API to fetch a filterglobal_filterpolicy_binding resources.
"""
try :
obj = filterglobal_filterpolicy_binding()
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, filter_) :
r""" Use this API to fetch filtered set of filterglobal_filterpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service) :
r""" Use this API to count filterglobal_filterpolicy_binding resources configued on NetScaler.
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, filter_) :
r""" Use this API to count the filtered set of filterglobal_filterpolicy_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = filterglobal_filterpolicy_binding()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class filterglobal_filterpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.filterglobal_filterpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.filterglobal_filterpolicy_binding = [filterglobal_filterpolicy_binding() for _ in range(length)]
|
[
"[email protected]"
] | |
f4291ee93deaf810818660a525dacb66b3a2eb7c
|
6227d63532f2d657ef66d90709a3a1f484e9784b
|
/oviqpr/wsgi.py
|
02a76e64db03ba8feffdd1981fb6c14b65e4ad1b
|
[] |
no_license
|
vassily-la/oviq
|
2edff4e61e5ac8cb94b462e2ed5c5bec2c5b014a
|
65a86ca5cddd0180e8309a7659eaab6a35a5c785
|
refs/heads/master
| 2021-04-28T03:05:02.908010 | 2018-02-22T17:14:32 | 2018-02-22T17:14:32 | 122,131,126 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 390 |
py
|
"""
WSGI config for oviqpr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oviqpr.settings")
application = get_wsgi_application()
|
[
"[email protected]"
] | |
d5046c3c0b613b372fc250a121575cad4d03bc38
|
89d230ad44d17b18897da507725b0a10c32960d8
|
/Gen2_0_PP/Contest/weaponsProblem.py
|
4262d5dcd74a8bb81dd8ecc8b8b5afcbc6146ab4
|
[] |
no_license
|
KB-perByte/CodePedia
|
aeeae87b56cf0ff6e02200cfd6b34da42a007338
|
287e7a3ce981bbf594436cdc06dde23a02b53bb0
|
refs/heads/master
| 2021-06-19T07:32:53.849871 | 2021-01-23T16:17:27 | 2021-01-23T16:17:27 | 163,250,017 | 0 | 1 | null | 2020-03-21T14:39:36 | 2018-12-27T05:13:55 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,014 |
py
|
'''
Daenerys has N types of weapons. There are Ai number of weapons of type i (1 <= i <= N). She wants to distribute these weapons among K soldiers. She wants to distribute them in such a way that:
All soldier get equal number of weapons.
All the weapons which a soldier gets must be of same type.
As she wants to make all of them more powerful so she wants to give as many weapons as possible. Help Daenerys in finding out what is the maximum number of weapons which a soldier can get.
Input Format
The first line consists two space seperated integer N and K.
The second line consists of N space seperated integers A1, A2, A3.... An, as described above.
Constraints
1 <= N <= 100000
1 <= Ai, K <= 1,000,000,000
Output Format
Output a single integer denoting the maximum weapons a soldier can get .
Sample Input 0
3 2
3 1 4
Sample Output 0
3
Explanation 0
She can give 3 weapons of type 1 to first soldier and 3 weapons of type 3 to second soldier.
'''
def binarySearch(array, l, r, toSearch): #not so needed
while l <= r:
mid = l + (r - l)//2
if array[mid] == toSearch:
return mid
elif array[mid] < toSearch:
l = mid + 1
else:
r = mid - 1
return -1
def checkDistribution(lst, mid , k):
s = 0
for i in range(len(lst)):
s+=lst[i]//mid
print('val of s',s)
print('val of k',k)
return s>=k
def makimumWeapons(lst,k):
l = min(lst)
h = max(lst)
while h >= l:
mid = l+(h-l)//2
print("value of l and h", l ,h)
if checkDistribution(lst, mid, k):
if not checkDistribution(lst, mid+1, k):
return mid
else:
l = mid + 1
else:
h = mid - 1
return 0
import sys
def get_ints(): return list(map(int, sys.stdin.readline().strip().split()))
input1 = list(map(int,input().split()))
#input2 = list(map(int,input().split()))
input2 = get_ints()
print(makimumWeapons(input2, input1[1]))
|
[
"[email protected]"
] | |
bf5c6fee091e8426d7dd6d71ed755b2e3e1eaeed
|
7e4425342a4d7e0f40978af17091f32d2712c79c
|
/Cb_DeepLearning_lec/Day_04_02_collection.py
|
280b5a625f46315f5d0d9b50bfda8439d9fccca1
|
[] |
no_license
|
yunhui21/CB_Ai_NLP
|
eca3da00c6c9615c8737b50d2c5ebe8dd1e3ba8a
|
b66ecc24abfd988fc9e7f19fa1941826b1bf38a4
|
refs/heads/master
| 2023-01-07T14:21:26.758030 | 2020-11-16T05:57:30 | 2020-11-16T05:57:30 | 291,835,156 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,389 |
py
|
# Day_04_02_collection.py
a = [1, 3, 7, 9]
for i in a:
print(i, end=' ')
print()
# 문제
# 리스트를 거꾸로 출력하세요
for i in reversed(a):
print(i, end=' ')
print()
# 튜플 : 상수 버전의 리스트 (읽기 전용 리스트)
b = (1, 3, 7, 9)
print(b)
print(b[0], b[1])
for i in b:
print(i, end=' ')
print()
# b[0] = -1 # error
# b.append(99) # error
c1 = (1, 4)
c2 = 1, 4 # packing
print(c1)
print(c2)
c3, c4 = 1, 4
c3, c4 = c2 # unpacking
# c3, c4, c5 = c2 # error
print(c3, c4)
def multi(d1, d2):
return d1 + d2, d1 * d2
e = multi(3, 5)
print(e, e[0], e[1])
e1, e2 = multi(3, 5)
print(e1, e2)
print('-' * 30)
# set (집합)
g = {1, 3, 5, 1, 3, 5, 1, 3, 5, } # 순서 없음
print(g)
h = [1, 3, 5, 1, 3, 5, 1, 3, 5, ] # 순서 보장
print(h)
print(set(h))
print(list(set(h)))
for i in g:
print(i)
print('-' * 30)
# 딕셔너리 (사전)
# 영한사전 : 영어 단어를 찾으면 한글 설명 나옴
# 영어단어 : key
# 한글설명 : value
info = {'age': 21, 'addr': 'ochang', 'hobby': 'minton', 12: 34}
print(type(info), type(set()), type((3,)))
print(info[12])
# <class 'dict'> <class 'set'> <class 'tuple'>
info = dict(age=21, addr='ochang')
print(info)
print(info['age'], info['addr'], info['hobby'])
info.pop('hobby')
info.pop('addr')
info['blood'] = 'AB' # insert
info['blood'] = 'O' # update
# info.popitem() # 마지막에 추가한 항목 삭제
print('-' * 30)
print(info.keys())
print(info.values())
print(info.items())
for k in info.keys():
print(k, info[k])
# 문제
# items()를 반복문에 적용하세요
p = list(info.items())
print(p)
for i in info.items():
print(i, i[0], i[1])
for k, v in info.items():
print(k, v)
# range, reversed, enumerate
a = ['A', 'B', 'C']
for i in a:
print(i)
# 문제
# 아래 코드를 파이썬답게 바꾸세요
for i in enumerate(a):
print(i)
for i, letter in enumerate(a):
print(i, letter)
# 문제
# items()에 enumerate를 연결하세요
for i in enumerate(info.items()):
# print(i, i[0], i[1], i[1][0], i[1][1])
print(i[0], i[1][0], i[1][1])
for i, kv in enumerate(info.items()):
# print(i, kv, kv[0], kv[1])
print(i, kv[0], kv[1])
for i, (k, v) in enumerate(info.items()):
print(i, k, v)
for k in info:
print(k, info[k])
print('\n\n\n')
|
[
"[email protected]"
] | |
ae9c23f5fdb98de82ae8cbf6a8e4ee62419a45d6
|
493a36f1f8606c7ddce8fc7fe49ce4409faf80be
|
/.history/B073040023/client_20210614185044.py
|
dc83e9c3495b85ade6ac751b06199f40df2ea143
|
[] |
no_license
|
ZhangRRz/computer_network
|
f7c3b82e62920bc0881dff923895da8ae60fa653
|
077848a2191fdfe2516798829644c32eaeded11e
|
refs/heads/main
| 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null |
UTF-8
|
Python
| false | false | 4,772 |
py
|
import socket
import threading
import tcppacket
import struct
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "calc 2 ^ 10"
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = "video 1".encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address,
"with header", raw)
if(raw[2] == ack_seq and raw[7] == 0):
recvdata += data[s:]
if(raw[5] % 2):
# fin_falg
fin_flag = 1
else:
fin_flag = 0
ack_seq += 1
counter += 1
else:
print("Receive ERROR packet from ", address)
fin_flag = 1
counter = 3
# --------------------------------------------
# send ACK
if(counter == 3):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,
"with ack seq: ", ack_seq, " and seq: ", seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
print(fin_flag)
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
# ----------------------
# def init_new
threads = []
for i in range(1):
print("Demo calculation function")
threads.append(threading.Thread(target = init_new_calc_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
threads[-1].start()
|
[
"[email protected]"
] | |
21108a445d65be0ac0386514b2c345649b88fd66
|
32fb781cb6718ef90c4fdfba0469f9278380a256
|
/AntShares/Core/Transaction.py
|
83091faf4d380c5c037c959eb17dbfab2778feca
|
[
"MIT"
] |
permissive
|
OTCGO/sync_antshares
|
1dcd6f809518ff2f9f89047d7da5efedb14e8625
|
5724a5a820ec5f59e0c886a3c1646db2d07b4d78
|
refs/heads/master
| 2021-01-22T22:49:26.094742 | 2017-12-14T17:15:33 | 2017-12-14T17:15:33 | 85,582,206 | 10 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,899 |
py
|
# -*- coding:utf-8 -*-
"""
Description:
Transaction Basic Class
Usage:
from AntShares.Core.Transaction import Transaction
"""
from AntShares.Core.AssetType import AssetType
from AntShares.Core.TransactionType import TransactionType
from AntShares.Helper import *
from AntShares.Fixed8 import Fixed8
from AntShares.Network.Inventory import Inventory
class Transaction(Inventory):
"""docstring for Transaction"""
def __init__(self, inputs, outputs, attributes):
super(Transaction, self).__init__()
self.inputs = inputs
self.outputs = outputs
self.attributes = attributes
self.scripts = []
self.TransactionType = TransactionType.ContractTransaction
self.InventoryType = 0x01 # InventoryType TX 0x01
self.systemFee = self.getSystemFee()
def getAllInputs(self):
return self.inputs
def getReference(self):
inputs = self.getAllInputs()
# TODO
# Blockchain.getTransaction
txs = [Blockchain.getTransaction(_input.prevHash) for _input in inputs]
if inputs == []:
raise Exception, 'No Inputs.'
else:
res = {}
for _input in inputs:
i = inputs.index(_input)
res.update({_input.toString(): txs[i].outputs[_input.prevIndex]})
return res
def getSystemFee(self):
return Fixed8(0)
def getScriptHashesForVerifying(self):
"""Get ScriptHash From SignatureContract"""
hashes = {}
result = self.getReference()
if result == None:
raise Exception, 'getReference None.'
for _input in self.inputs:
_hash = result.get(_input.toString()).scriptHash
hashes.update({_hash.toString(), _hash})
# TODO
# Blockchain.getTransaction
txs = [Blockchain.getTransaction(output.AssetId) for output in self.outputs]
for output in self.outputs:
tx = txs[self.outputs.index(output)]
if tx == None:
raise Exception, "Tx == None"
else:
if tx.AssetType & AssetType.DutyFlag:
hashes.update(output.ScriptHash.toString(), output.ScriptHash)
array = sorted(hashes.keys())
return array
def serialize(self, writer):
self.serializeUnsigned(writer)
writer.writeSerializableArray(self.scripts)
def serializeUnsigned(self, writer):
writer.writeByte(self.TransactionType)
writer.writeByte(0) #Version
self.serializeExclusiveData(writer)
writer.writeSerializableArray(self.attributes)
writer.writeSerializableArray(self.inputs)
writer.writeSerializableArray(self.outputs)
def serializeExclusiveData(self, writer):
# ReWrite in RegisterTransaction and IssueTransaction#
pass
|
[
"[email protected]"
] | |
22b06f917a2e60d9e5443d0a32cf7b4cb27e71c3
|
50f42e142c7b989afc9bc9d9fd53515923aceb56
|
/ML_practice/test_field.py
|
824433785cb92c1abe62e59a015e4140ff9a6c0c
|
[] |
no_license
|
shincling/MyCommon
|
7d02da4408f1ab0acf883845cbb8b8e54e364076
|
ae362fdef8d51c808645f7827a86e43d07db6e0f
|
refs/heads/master
| 2021-01-17T04:10:57.546936 | 2018-11-06T13:17:27 | 2018-11-06T13:17:27 | 45,384,609 | 2 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
def f():
print "Before first yield"
yield 1
print "Before second yield"
yield 2
print "After second yield"
g = f()
# g.next()
# g.next()
print "Before first next"
g.next()
print "Before second next"
g.next()
print "Before third yield"
g.next()
|
[
"[email protected]"
] | |
bb7711a1d9a0542bf0147818f036a11eb8eb630f
|
5f65e12a62b59aea9263f35240c960b7e6009aa5
|
/cb_scripts/game_on.py
|
5cd86411560201702863ecffeab27460f20cfee6
|
[
"MIT"
] |
permissive
|
christopher-burke/python-scripts
|
23e80b8e7f26a74ab68dc7d0ad1a8093d900cf8b
|
f5dceca0bdbe9de6197b26858600b792f6adff8a
|
refs/heads/main
| 2022-05-20T01:36:04.668447 | 2022-04-25T20:31:33 | 2022-04-25T20:31:33 | 6,054,247 | 1 | 1 |
MIT
| 2022-03-16T02:24:45 | 2012-10-03T01:49:53 |
Python
|
UTF-8
|
Python
| false | false | 1,233 |
py
|
#!/usr/bin/env python3
"""Game on.
Games won tracker.
"""
from dataclasses import dataclass, asdict
import json
import sys
# from datetime import date
@dataclass
class Player:
"""Player dataclass."""
name: str
@dataclass
class Match:
"""Match dataclass."""
game: str
date: date = date.today().__str__()
@dataclass
class Results:
"""Results dataclass."""
match: Match
player: Player
wins: int = 0
losses: int = 0
def load():
"""Load data from json file."""
with open('game_on.json') as json_file:
data = json.load(json_file)
return data
def write(data, *args, **kwargs):
"""Write data to the json file."""
with open('game_on.json', 'w') as json_file:
json.dump(data, json_file)
return True
def main():
"""Game on main funtion."""
pass
if __name__ == "__main__":
if not len(sys.argv) < 1:
exit(0)
match = Match('Name') # -g "Name"
p1 = Player('Player 1') # -p1 "Name"
p2 = Player('Player 2') # -p1 "Name"
r1 = Results(match, p1, 2) # -r1 2
r2 = Results(match, p2, 12) # -r2 2
r1.losses = r2.wins
r2.losses = r1.wins
data = {}
data['result'] = [asdict(r1), asdict(r2)]
|
[
"[email protected]"
] | |
9f1faec8e0731fbad823f5000c61ae7553ec1af1
|
9083d620ec89d3c85f4270fd724010c20799368e
|
/app/admin.py
|
a6361094fdf44cebc131a84ddfb668ce2f22b52a
|
[] |
no_license
|
riyadhswe/CovidHelp
|
e122aa1fefacb985c862e758a3021af4af08712e
|
5e004739ec3facebbccdf0e9e46f96d3c01b2bb6
|
refs/heads/master
| 2023-08-14T04:14:37.458150 | 2021-10-10T05:02:23 | 2021-10-10T05:02:23 | 370,762,838 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 203 |
py
|
from django.contrib import admin
from app.models import *
# Register your models here.
admin.site.register(Division)
admin.site.register(City)
admin.site.register(Hospital)
admin.site.register(Service)
|
[
"[email protected]"
] | |
f0da7aa51ef368c2762cf0033e027208273b4603
|
41188a72facc51c65d0d58efe127f5e8c8811f5e
|
/0046. Permutations/Solution.py
|
76886c436d83c51c92b29bc0f627d71268d88c1c
|
[
"MIT"
] |
permissive
|
furutuki/LeetCodeSolution
|
74ccebc8335125bbc4cbf1a76eb8d4281802f5b9
|
089d27af04bf81149251787409d1866c7c4390fb
|
refs/heads/master
| 2022-10-31T08:46:15.124759 | 2022-10-25T02:57:54 | 2022-10-25T02:57:54 | 168,449,346 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 425 |
py
|
from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def dfs(num: List[int], cur_res: List[int]):
if not num:
ret.append(cur_res)
return
else:
for i in range(len(num)):
dfs(num[:i] + num[i + 1:], cur_res + [num[i]])
ret = []
dfs(nums, [])
return ret
|
[
"[email protected]"
] | |
32d2b673b4421719313ac17c64560921dade7d60
|
2b8d4e22d10ca118fba0100cc87af04f3939448f
|
/ioud10/ioud_sale_order/__manifest__.py
|
22982bed7d88a58ac835d123e58c4e47090afaf9
|
[] |
no_license
|
ahmed-amine-ellouze/personal
|
f10c0a161da709f689a3254ec20486411102a92d
|
4fe19ca76523cf274a3a85c8bcad653100ff556f
|
refs/heads/master
| 2023-03-28T23:17:05.402578 | 2021-03-25T13:33:18 | 2021-03-25T13:33:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,047 |
py
|
# -*- coding: utf-8 -*-
{
'name': "ioud_sale_order",
'summary': """
This module is for customization of sale for iOud """,
'description': """
This module is for customization of sale for iOud
""",
'author': "SolutionFounder",
'website': "http://www.solutionfounder.com",
# for the full list
'category': 'sale',
'version': '10.4.18',
# any module necessary for this one to work correctly
'depends': ['base','sale','mail','ioud_email_alerts','account_reports','delivery'],
# always loaded
'data': [
'data/partner_sequnce.xml',
'security/user_groups.xml',
'security/ir.model.access.csv',
'views/branches.xml',
'views/account_invoice_view.xml',
'views/sale_order_view.xml',
'views/res_partner_view.xml',
'views/region_config_view.xml',
'views/config.xml',
'views/stcok.xml',
#Backend View Load - JS
'views/assets.xml'
],
# only loaded in demonstration mode
}
|
[
"[email protected]"
] | |
193122adf0ef9170907c47e035ebe8434d378807
|
e3910a25ca4456a35112d41f184fe2a919214ac0
|
/reservation/migrations/0003_auto_20160310_2101.py
|
4bd2beed2f08e5987ae67f1bc5dbe13adea43864
|
[] |
no_license
|
RobertPastor/studio_reservation
|
a498f1ae2077bb21199651d245f22cb59ef13370
|
63a47de856cc1d5aedbd4024d8696b39470d11f2
|
refs/heads/master
| 2021-01-10T16:13:32.935529 | 2018-01-28T14:19:28 | 2018-01-28T14:19:28 | 54,514,678 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 658 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-10 20:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reservation', '0002_reservation_made_when'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='made_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Guest',
),
]
|
[
"[email protected]"
] | |
8b346eaacf62c7cde882fe6c60be97b4649c2519
|
1620e0af4a522db2bac16ef9c02ac5b5a4569d70
|
/Ekeopara_Praise/Phase 2/DICTIONARY/Day48 Tasks/Task2.py
|
81b89aa1ff7a364846dcb6ab7608ea8ed5a16508
|
[
"MIT"
] |
permissive
|
Ekeopara-Praise/python-challenge-solutions
|
cda07902c9ffc09ba770ae7776e5e01026406a05
|
068b67c05524b5c5a0d6084315eca3424c768421
|
refs/heads/master
| 2022-12-15T15:29:03.031583 | 2020-09-25T06:46:27 | 2020-09-25T06:46:27 | 263,758,530 | 2 | 0 | null | 2020-05-13T22:37:33 | 2020-05-13T22:37:32 | null |
UTF-8
|
Python
| false | false | 211 |
py
|
'''2. Write a Python script to add a key to a dictionary.
Sample Dictionary : {0: 10, 1: 20}
Expected Result : {0: 10, 1: 20, 2: 30} '''
original_dict = {0: 10, 1: 20}
original_dict[2] = 30
print(original_dict)
|
[
"[email protected]"
] | |
8753a921a15f6a43bf864b793500b8df7df5a232
|
bc437dc74647765b51996f64b35fda3d047daf93
|
/2_Intermediate/day18_The_Hirst_Painting_Project/main.py
|
4c03978fb420c12c9f275227d28b734e5c0a907b
|
[] |
no_license
|
macosta-42/100_days_of_code
|
e06720d57b6ed870a3dd4fa4e6d019296206a08f
|
5b527dc18bae2ef556c26f653ef3c4badf94bb82
|
refs/heads/main
| 2023-05-22T03:26:02.422275 | 2021-06-10T10:31:26 | 2021-06-10T10:31:26 | 328,963,362 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,187 |
py
|
# import colorgram
#
# Extract 30 colors from an image.
# colors = colorgram.extract('image.jpg', 30)
#
# rgb_colors = []
#
# for color in colors:
# r = color.rgb.r
# g = color.rgb.g
# b = color.rgb.b
# new_color = (r, g, b)
# rgb_colors.append(new_color)
#
#
# print(rgb_colors)
import turtle as t
import random
color_list = [
(8, 16, 67),
(63, 8, 28),
(192, 70, 22),
(144, 11, 35),
(248, 237, 242),
(13, 45, 142),
(30, 103, 175),
(123, 162, 201),
(249, 216, 64),
(170, 16, 5),
(204, 71, 124),
(62, 34, 12),
(224, 135, 86),
(12, 45, 32),
(200, 174, 38),
(143, 194, 173),
(213, 74, 55),
(174, 50, 76),
(59, 161, 118),
(252, 206, 0),
(215, 134, 145),
(78, 111, 80),
(82, 111, 199),
(12, 100, 4),
(177, 185, 218),
(231, 166, 180),
(237, 171, 160)
]
tim = t.Turtle()
tim.hideturtle()
tim.speed(0)
t.colormode(255)
tim.penup()
pos_x = -250
pos_y = -250
for pos in range(10):
tim.setpos(pos_x, pos_y)
for dot in range(10):
tim.dot(20, random.choice(color_list))
tim.forward(50)
pos_y += 50
screen = t.Screen()
screen.exitonclick()
|
[
"[email protected]"
] | |
39449e677ee1bf94f14738a476fbaeffef554460
|
11e484590b27585facf758f0432eeebe66bf790a
|
/fal_default_discount/__openerp__.py
|
ebb37f69d114ff8b401e6a98002ffca961d71f5d
|
[] |
no_license
|
jeanabreu/falinwa_branch
|
51b38ee5a3373d42417b84a0431bad9f7295f373
|
be96a209479259cd5b47dec73694938848a2db6c
|
refs/heads/master
| 2021-01-18T10:25:49.866747 | 2015-08-25T10:05:05 | 2015-08-25T10:05:05 | 41,369,368 | 0 | 1 | null | 2015-08-25T14:51:50 | 2015-08-25T14:51:50 | null |
UTF-8
|
Python
| false | false | 569 |
py
|
# -*- coding: utf-8 -*-
{
"name": "GEN-39_Default Discount",
"version": "1.0",
'author': 'Falinwa Hans',
"description": """
Module to give default discount
""",
"depends" : ['base','account','sale','purchase'],
'init_xml': [],
'data': [
],
'update_xml': [
'res_partner_view.xml',
'sale_view.xml',
'account_view.xml',
],
'css': [],
'installable': True,
'active': False,
'application' : False,
'js': [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"[email protected]"
] | |
63a3859655163da8d1b68d6100318174b51087b3
|
fa69eadde7b449647ebd976214d2f99886b6db18
|
/FireHydrant/common/enum/task/type.py
|
da3d9f0df70ac9947f6d9e55b12919b3b7ed67be
|
[] |
no_license
|
shoogoome/FireHydrant
|
0da1d6e06aa9e853837f6435a30ac4ef73118764
|
7467cd66e1fc91f0b3a264f8fc9b93f22f09fe7b
|
refs/heads/master
| 2020-06-21T01:29:25.711595 | 2019-12-18T00:31:01 | 2019-12-18T00:31:01 | 197,309,304 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
# -*- coding: utf-8 -*-
# coding:utf-8
from common.core.dao.enumBase import EnumBase
class TaskTypeEnum(EnumBase):
    """Task ownership type: a task is either personal or owned by a team."""

    PERSONAL = 0  # task owned by a single user
    TEAM = 1      # task owned by a team

    # Value used when no explicit type is supplied.
    __default__ = PERSONAL
    # Human-readable labels keyed by member name. These are user-facing
    # runtime strings (Chinese UI text), so they are deliberately untranslated.
    __desc__ = {
        'PERSONAL': '个人任务',
        'TEAM': '团队任务',
    }
|
[
"[email protected]"
] | |
7db647d25a21499083092c001e5dbe7f68539f5a
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/506ae8f067379afa4417a57db5814487ea198a23-<_ldflags>-fix.py
|
e74dc5c62be559f47e3819254ac49089008a296f
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,311 |
py
|
@utils.memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
    """Extract a list of compilation flags from a string.

    Depending on the options, different types of flags will be kept.

    Parameters
    ----------
    ldflags_str : string
        The string to process. Typically, this will be the content of
        `theano.config.blas.ldflags`.
    libs : bool
        Extract flags starting with "-l".
    flags : bool
        Extract all the other flags.
    libs_dir : bool
        Extract flags starting with "-L".
    include_dir : bool
        Extract flags starting with "-I".

    Returns
    -------
    list of strings
        Extracted flags.
    """
    rval = []
    if libs_dir:
        found_dyn = False
        # Directories named by -L flags in the same string.
        dirs = [x[2:] for x in ldflags_str.split() if x.startswith('-L')]
        # Library names (-l flags); recursive call hits the memoized cache.
        l = _ldflags(ldflags_str=ldflags_str, libs=True, flags=False, libs_dir=False, include_dir=False)
        for d in dirs:
            for f in os.listdir(d.strip('"')):
                if (f.endswith('.so') or f.endswith('.dylib') or f.endswith('.dll')):
                    if any([(f.find(ll) >= 0) for ll in l]):
                        found_dyn = True
        if ((not found_dyn) and dirs):
            _logger.warning('We did not found a dynamic library into the library_dir of the library we use for blas. If you use ATLAS, make sure to compile it with dynamics library.')
    for t in ldflags_str.split():
        # Strip matching single or double quotes surrounding the token.
        if ((t.startswith("'") and t.endswith("'")) or (t.startswith('"') and t.endswith('"'))):
            t = t[1:(- 1)]
        try:
            # Raises ValueError (too few values) for tokens shorter than 3 chars;
            # both that and the assert are converted to the uniform error below.
            (t0, t1, t2) = t[0:3]
            assert (t0 == '-')
        except Exception:
            raise ValueError(('invalid token "%s" in ldflags_str: "%s"' % (t, ldflags_str)))
        if (libs_dir and (t1 == 'L')):
            rval.append(t[2:])
        elif (include_dir and (t1 == 'I')):
            raise ValueError('Include dirs are not used for blas. We disable this as this can hide other headers and this is not wanted.', t)
            # NOTE(review): unreachable after the raise above — dead code kept as-is.
            rval.append(t[2:])
        elif (libs and (t1 == 'l')):
            rval.append(t[2:])
        elif (flags and (t1 not in ['L', 'I', 'l'])):
            rval.append(t)
        elif (flags and (t1 == 'L')):
            # Also embed the runtime search path for -L directories.
            rval.append(('-Wl,-rpath,' + t[2:]))
    return rval
|
[
"[email protected]"
] | |
3020613b94d8ab6d48331de09fbcc650efe92b54
|
1978a9455159b7c2f3286e0ad602652bc5277ffa
|
/exercises/15_module_re/task_15_2a.py
|
ff8cb7e603b04c43d8bed5f08c6262dda11c4009
|
[] |
no_license
|
fortredux/py_net_eng
|
338fd7a80debbeda55b5915dbfba4f5577279ef0
|
61cf0b2a355d519c58bc9f2b59d7e5d224922890
|
refs/heads/master
| 2020-12-03T17:32:53.598813 | 2020-04-08T20:55:45 | 2020-04-08T20:55:45 | 231,409,656 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,102 |
py
|
# -*- coding: utf-8 -*-
'''
Задание 15.2a
Создать функцию convert_to_dict, которая ожидает два аргумента:
* список с названиями полей
* список кортежей со значениями
Функция возвращает результат в виде списка словарей, где ключи - взяты из первого списка,
а значения подставлены из второго.
Например, если функции передать как аргументы список headers и список
[('FastEthernet0/0', 'up', 'up', '10.0.1.1'),
'FastEthernet0/1', 'up', 'up', '10.0.2.1')]
Функция должна вернуть такой список со словарями (порядок полей может быть другой):
[{'interface': 'FastEthernet0/0', 'status': 'up', 'protocol': 'up', 'address': '10.0.1.1'},
{'interface': 'FastEthernet0/1', 'status': 'up', 'protocol': 'up', 'address': '10.0.2.1'}]
Проверить работу функции:
* первый аргумент - список headers
* второй аргумент - результат, который возвращает функция parse_sh_ip_int_br из задания 15.2, если ей как аргумент передать sh_ip_int_br.txt.
Функцию parse_sh_ip_int_br не нужно копировать.
Ограничение: Все задания надо выполнять используя только пройденные темы.
'''
import re
from task_15_2 import parse_sh_ip_int_br

# Parse the sample "show ip interface brief" output into a list of tuples
# (one tuple per interface), then name the fields for convert_to_dict below.
parsed_sh_ip_int_br = parse_sh_ip_int_br('/home/vagrant/GitHub/pynet_rep/exercises/15_module_re/sh_ip_int_br.txt')
headers = ['interface', 'address', 'status', 'protocol']
def convert_to_dict(list_headers, list_values):
    """Pair each tuple of values with the given header names.

    Returns a list with one dict per tuple in ``list_values``; keys come
    from ``list_headers``, values from the corresponding tuple positions.
    """
    return [dict(zip(list_headers, row)) for row in list_values]
if __name__ == '__main__':
    from pprint import pprint
    # Demo: print the parsed interface table as a list of dicts.
    pprint(convert_to_dict(headers, parsed_sh_ip_int_br))
|
[
"[email protected]"
] | |
042afc513c24332f122836a2cec49692b2f77a28
|
7a63ce94e1806a959c9c445c2e0bae95afb760c8
|
/tests/incident/test_resolve.py
|
8ccf653a5dbc4b46fd96837ef309be097512d6e1
|
[
"MIT"
] |
permissive
|
pklauke/pycamunda
|
20b54ceb4a40e836148e84912afd04d78d6ba0ec
|
3faac4037212df139d415ee1a54a6594ae5e9ac5
|
refs/heads/master
| 2023-08-18T10:23:30.503737 | 2022-04-17T18:34:40 | 2022-04-17T18:34:40 | 240,333,835 | 40 | 16 |
MIT
| 2023-09-12T13:29:08 | 2020-02-13T18:37:25 |
Python
|
UTF-8
|
Python
| false | false | 1,602 |
py
|
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_resolve_params(engine_url):
    """URL and query/body parameters are built correctly for a Resolve request."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    assert resolve_incident.url == engine_url + '/incident/anId'
    assert resolve_incident.query_parameters() == {}
    assert resolve_incident.body_parameters() == {}
@unittest.mock.patch('requests.Session.request')
def test_resolve_calls_requests(mock, engine_url):
    """Calling Resolve issues an HTTP DELETE through requests."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    resolve_incident()
    assert mock.called
    assert mock.call_args[1]['method'].upper() == 'DELETE'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_resolve_raises_pycamunda_exception(engine_url):
    """A requests-level failure is re-raised as PyCamundaException."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    with pytest.raises(pycamunda.PyCamundaException):
        resolve_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_resolve_raises_for_status(mock, engine_url):
    """A non-OK response is routed through pycamunda.base._raise_for_status."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    resolve_incident()
    assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_resolve_returns_none(engine_url):
    """A successful Resolve call returns None."""
    resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
    result = resolve_incident()
    assert result is None
|
[
"[email protected]"
] | |
b1363d2eeea65f67da9c4da23778667e39565849
|
ee4152e9b5eafa7afafe05de04391a9a3606eea3
|
/client/API/AddRecord.py
|
431bc9058aefc1020df12034d650ed008e3998a5
|
[] |
no_license
|
adibl/password_saver
|
3a06c8c04905d82f01fc14b41b646a6578af2b70
|
2ea73781db92ce750f91039251f2c06e929da7bb
|
refs/heads/master
| 2020-04-09T23:51:34.804870 | 2019-06-16T10:13:42 | 2019-06-16T10:13:42 | 160,665,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,675 |
py
|
"""
name:
date:
description
"""
import base64
import json
import requests
import os
from .connection import Request
class Passwords(object):
    """Client-side helper for the server's /passwords endpoint (Python 2 code).

    Requests are authenticated with a JWT bearer token cached in a local
    file; a missing token is reported like an unauthorized response.
    """

    FILE_NAME = 'token.txt'  # where the JWT is cached between runs

    @classmethod
    def handle(cls, url, username, password):
        # Thin dispatcher; currently only the POST (save) operation is exposed.
        return cls.POST(url, username, password)

    @classmethod
    def GET(cls):
        """Fetch all saved passwords; return parsed JSON or an error dict."""
        auto = cls.read_jwt()
        if auto is None:
            # No token on disk -> behave like a 401 from the server.
            return {'general': 401}
        responce = conn = Request().get_conn().get(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)})
        if responce.status_code == 200:
            return json.loads(responce.text)
        else:
            return {'general': responce.status_code}

    @classmethod
    def POST(cls, url, username, password):
        """Save a credential record; True on success, error dict otherwise."""
        auto = cls.read_jwt()
        if auto is None:
            return {'general': 401}
        # Python 2 print statement; the URL is base64-encoded as the record id.
        print base64.urlsafe_b64encode(url)
        encode_url = base64.urlsafe_b64encode(url)
        responce = conn = Request().get_conn().post(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)}
                                 , json={'username': username, 'password': password,
                                         'program_id': encode_url})
        if responce.status_code == 200:
            return True
        elif responce.status_code == 442:
            # NOTE(review): 442 is non-standard — presumably the server's
            # validation-error status; body carries field errors. Confirm.
            return json.loads(responce.text)
        else:
            return {'general': 'general error'}

    @classmethod
    def read_jwt(cls):
        """Return the cached JWT from disk, or None if no token file exists."""
        if os.path.isfile(cls.FILE_NAME):
            with open(cls.FILE_NAME, 'rb')as handel:
                jwt = handel.read()
            return jwt
        else:
            return None
|
[
"[email protected]"
] | |
881b5c0fc9bea295c8d51dcae0942461610bb9c2
|
8c5f1e07333edfd14a58677ea90ea9a8ec24daa7
|
/examples/simple_pendulum/custom_simple_pendulum.py
|
423dcab619d69ba966d9a866ae2b925a8862fb9f
|
[
"MIT"
] |
permissive
|
echoix/pyro
|
52c37b3c14fb3b52977be510545fdc43922dd8f9
|
787920cb14e3669bc65c530fd8f91d4277a24279
|
refs/heads/master
| 2020-09-07T09:08:21.114064 | 2019-11-10T05:59:50 | 2019-11-10T05:59:50 | 220,733,155 | 0 | 0 |
MIT
| 2019-11-10T02:52:39 | 2019-11-10T02:52:38 | null |
UTF-8
|
Python
| false | false | 1,412 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 12:19:01 2018
@author: nvidia
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
###############################################################################
###############################################################################
class MyCustomPendulum( pendulum.SinglePendulum ):
    """Single pendulum with custom kinematic and dynamic parameters."""
    ###########################################################################
    # Only overload functions that are different from base version
    ###########################################################################
    def setparams(self):
        """ Set model parameters here """
        # kinematic
        self.l1 = 3        # rod length
        self.lc1 = 2       # distance from pivot to centre of mass
        # dynamic
        self.m1 = 10       # mass
        self.I1 = 10       # rotational inertia
        self.gravity = 9.81
        self.d1 = 50       # damping coefficient — presumably viscous; confirm in pyro docs
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
""" MAIN TEST """
sys = MyCustomPendulum()
x0 = np.array([0.8,0])
sys.plot_animation( x0 )
|
[
"[email protected]"
] | |
2b8edfa347b5b9d6a6b2c2d912242611e9907980
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/nonfatal_code/hospital/Formatting/001_pre_format_UK_UTLA_fit_models.py
|
a401509726a0ff362b8b717c593c63c90020b098
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986 | 2020-10-28T19:51:51 | 2020-10-28T19:51:51 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,688 |
py
|
# -*- coding: utf-8 -*-
"""
formatting UK UTLA data
"""
import pandas as pd
import numpy as np
import platform
import sys
import statsmodels.formula.api as smf
import statsmodels.api as sm
import time
sys.path.append("FILEPATH")
from hosp_prep import *
# Environment:
if platform.system() == "Linux":
root = "FILEPATH"
else:
root = "FILEPATH"
print("need to incorporate injuries data which are stored in separate files")
################################################
# Use data prepped on the cluster
###############################################
# was too big to merge locally so merged on the cluster and written to FILEPATH
# just read in the merged data from drive
both = pd.read_csv("FILEPATH", compression='gzip')
# both = pd.read_csv("FILEPATH", compression='gzip')
#both = pd.read_csv("FILEPATH", compression='gzip')
# back = both.copy()
# the regional level data needs to be split to include age start 90
# it's breaking the models so I'm gonna subset that age group out
# both = both[both.age_start < 80]
# also drop 2011, 2012
# both = both[both.fiscal_year < 2011]
# drop the rows that don't match (only 2 rows before 2011)
both = both[~both.log_rate.isnull()]
##################################
# FIT THE LINEAR MODELS
###################################
causes = both.cause_code.unique()
# both = both[both.cause_code.isin(causes)]
both['preds'] = np.nan # initialize pred col
# loop over causes and sexes
start = time.time()
counter = 0
counter_denom = causes.size
for cause in causes:
for s in [1, 2]:
# create the mask
mask = (both['cause_code'] == cause) & (both['sex_id'] == s)
if both[mask].log_rate.isnull().sum() == both[mask].shape[0]:
print("there's no data")
continue
# our formula for predictions
formula = "log_rate ~ C(age_start) + C(location_id)"
# fit the model
fit = smf.ols(formula, data=both[mask]).fit()
# exponentiate the predicted values
both.loc[mask, 'preds'] = np.exp(fit.predict(both[mask]))
if s == 1:
counter += 1
if counter % 125 == 0:
print(round((counter / counter_denom) * 100, 1), "% Done")
print("Run time: ", (time.time()-start)/60, " minutes")
print("Done in ", (time.time()-start) / 60, " minutes")
# both.to_csv("FILEPATH")
###################################################
# both = back.copy()
# subtract off the existing cases that we have at utla level
# use a groupby transform to leave the data in same format but create sums of
# known values at the regional level
reg_groups = ['cause_code', 'location_parent_id', 'age_start', 'age_end',
'sex_id', 'fiscal_year']
# fill missing utla level data with zeroes instead of NA so rows will be
# included in groupby
both['value'].fillna(value=0, inplace=True)
# sum the existing utla values up to the regional level
both['utla_val_to_reg'] = both.groupby(reg_groups)['value'].transform('sum')
# split the data
# subset the data to get only rows where utla value was suppressed
pred_df = both[both.utla_log_rate.isnull()].copy()
# drop the rows where utla value was suppressed
both = both[both.utla_log_rate.notnull()]
# subtract the known utla values from the regional values to get
# residual (unknown) values
pred_df['reg_resid_value'] = pred_df['reg_value'] - pred_df['utla_val_to_reg']
# new method
# get into count space
pred_df['pred_counts'] = pred_df['preds'] * pred_df['utla_population']
# sum utla predicted counts to region level
pred_df['utla_pred_to_reg'] = pred_df.groupby(reg_groups)['pred_counts'].\
transform('sum')
# make the weights
pred_df['weight'] = pred_df['reg_resid_value'] / pred_df['utla_pred_to_reg']
# apply weights to predicted values
pred_df['weighted_counts'] = pred_df['pred_counts'] * pred_df['weight']
# now test
reg_compare = pred_df.copy()
# get the sum of values at the regional level
reg_compare = reg_compare[['cause_code', 'location_parent_id', 'age_start',
'age_end', 'sex_id', 'fiscal_year',
'reg_resid_value']]
reg_compare.drop_duplicates(inplace=True)
reg_sum = reg_compare.reg_resid_value.sum()
# get the sum of desuppressed values
pred_df_sum = pred_df.weighted_counts.sum()
# pretty dang close to zero
assert round(reg_sum - pred_df_sum, 5) == 0
# assert residual vals are smaller than regional vals
assert (pred_df.reg_value >= pred_df.reg_resid_value).all()
# concat de-suppressed and un-suppressed data back together
both = pd.concat([both, pred_df])
# merge data that needed to be de-suppressed and data that didn't into same col
# fill value with desuppressed val where value = 0 and desuppressed isn't null
condition = (both['value'] == 0) & (both['weighted_counts'].notnull())
both.loc[condition, 'value'] = both.loc[condition, 'weighted_counts']
# write to a csv for use with a Shiny app
both['rates'] = both['value'] / both['utla_population']
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value',
'weight', 'rates', 'utla_population']].\
to_csv("FILEPATH", index=False)
# write to FILEPATH intermediate data
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value', 'weight']].\
to_csv("FILEPATH", index=False)
|
[
"[email protected]"
] | |
ae3c07417196b04210dbed26d9b1fba5aac5f9ec
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
39a92211635a6dcc5cd242241cf5f18f0e08b70e
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null |
UTF-8
|
Python
| false | false | 130 |
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2d5a65e7c1da1e87651cabd3481c0012ad15f784275aad1259a1312faf19cfc2
size 81211
|
[
"[email protected]"
] | |
bce22db2adda5234a705ff0d1fb719565b3bddd8
|
9692a20a1e7a224a72785e4495f31421639b9f3b
|
/frex/pipeline_stages/filters/candidate_filterer.py
|
2d79e3b31e1ec3776b5978e1f52488af2826dfdb
|
[] |
no_license
|
solashirai/FREx
|
6b0cb040930761a0e269f4591d7dde36e3f636d1
|
36ad09a0cb0020661ee990c7800bafd110e2ec04
|
refs/heads/master
| 2023-08-14T08:49:49.270281 | 2021-09-29T14:58:23 | 2021-09-29T14:58:23 | 291,760,109 | 0 | 0 | null | 2021-09-24T22:41:19 | 2020-08-31T15:57:47 |
Python
|
UTF-8
|
Python
| false | false | 2,535 |
py
|
from abc import abstractmethod
from typing import Generator, Optional, Any
from frex.models import Explanation, Candidate
from frex.pipeline_stages import PipelineStage
class CandidateFilterer(PipelineStage):
    """Pipeline stage that drops unsuitable candidates from the stream.

    Subclasses only need to implement :meth:`filter`, which decides per
    candidate whether it should be removed. Candidates that survive have
    this stage's explanation and score appended before being yielded on
    to the next stage.
    """

    def __init__(
        self, *, filter_explanation: Explanation, filter_score: float = 0, **kwargs
    ):
        """
        :param filter_explanation: Explanation attached to candidates that
            pass the filter.
        :param filter_score: Score attached to candidates that pass the
            filter. Defaults to 0.
        """
        self.filter_explanation = filter_explanation
        self.filter_score = filter_score

    @abstractmethod
    def filter(self, *, candidate: Candidate) -> bool:
        """Decide whether *candidate* should be dropped from the pipeline.

        :param candidate: A domain-specific candidate to inspect.
        :return: True to remove the candidate, False to keep it and pass it
            on to later stages.
        """
        pass

    def __call__(
        self, *, candidates: Generator[Candidate, None, None], context: Any
    ) -> Generator[Candidate, None, None]:
        """Yield only the candidates that survive :meth:`filter`.

        :param candidates: Generator of candidates — typically the previous
            PipelineStage in a FREx Pipeline.
        :param context: The current context being used to execute the Pipeline.
        :return: Generator of surviving candidates, each annotated with this
            stage's explanation and score.
        """
        for cand in candidates:
            if self.filter(candidate=cand):
                continue
            cand.applied_explanations.append(self.filter_explanation)
            cand.applied_scores.append(self.filter_score)
            yield cand
|
[
"[email protected]"
] | |
113af3e207e4b01797c11ec0d406ac5a136b56c2
|
801418efbd049078c8aad4cd17297f3ece571412
|
/temp/toy/python/238. Product of Array Except Self.py
|
d7da2b067439b8c2b107a462617c0fb4b8eac579
|
[] |
no_license
|
xixihaha1995/CS61B_SP19_SP20
|
2b654f0c864a80a0462fdd4b1561bdc697a8c1e2
|
7d6599596f7f49b38f1c256ece006b94555c1900
|
refs/heads/master
| 2023-01-01T18:41:48.027058 | 2020-10-29T04:50:01 | 2020-10-29T04:50:01 | 240,976,072 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 311 |
py
|
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return a list whose i-th entry is the product of all nums except nums[i].

        Two passes, no division: a forward pass accumulates prefix products,
        a backward pass multiplies in suffix products.
        """
        answer = [1]
        left = 1
        for value in nums[:-1]:          # prefix products of nums[0..i-1]
            left *= value
            answer.append(left)
        right = 1
        for idx in range(len(nums) - 1, 0, -1):  # fold in suffix products
            right *= nums[idx]
            answer[idx - 1] *= right
        return answer
|
[
"[email protected]"
] | |
75d2f93063a4feaf6b869a50b0e5a88d40500e00
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/httpy/tags/0.7/tests/TestCaseHttpy.py
|
08a1fc6dd3fb6eb41284fefc3f7dc8c1602cb96c
|
[] |
no_license
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,199 |
py
|
"""The idea and code for running a test._server in another thread are from the
standard library's test/test_socke._server.py.
TODO: This is out of date now that we are using asyncore (via httpy._zope._server).
"""
import asyncore
import os
import select
import socket
import threading
import time
import unittest
from httpy._zope.server.taskthreads import ThreadedTaskDispatcher
from httpy._zope.server.tests.asyncerror import AsyncoreErrorHook
from httpy.Config import Config
from httpy.Request import Request, ZopeRequest
from httpy.Server import Server
# Shared task dispatcher shut down by TestCaseHttpy.stopServer.
td = ThreadedTaskDispatcher()
# Command-line style options used to build the test server's Config.
opts = [ '--mode', 'development'
       , '--sockfam', 'AF_INET'
       , '--root', 'root'
       , '--address', ':65370'
       , '--verbosity', '99'
       #, '--apps', '/' discover automatically
        ]
class TestCaseHttpy(unittest.TestCase, AsyncoreErrorHook):
    """Base TestCase that builds a scratch website tree for each test and,
    when ``server`` is True, runs an httpy server in a background asyncore
    loop for the test's duration. (Python 2 code: uses ``basestring``,
    ``file()``, backtick repr, and ``dict.has_key``.)
    """

    # unittest.TestCase hooks
    # =======================

    want_config = False  # subclasses set True to get a default Config in setUp

    def setUp(self):
        self.scrubenv()
        # [re]build a temporary website tree in ./root
        self.removeTestSite()
        self.buildTestSite()
        if self.server:
            self.startServer()
        if self.want_config:
            self.config = Config()

    def tearDown(self):
        if self.server:
            self.stopServer()
        self.removeTestSite()
        self.restoreenv()

    # server support
    # ==============

    server = False # Override to True if your subclass needs a server

    def startServer(self):
        """Start the httpy server and its asyncore loop in a daemon thread."""
        if len(asyncore.socket_map) != 1:
            # Let sockets die off.
            # TODO tests should be more careful to clear the socket map.
            asyncore.poll(0.1)
        # Remember the socket count so stopServer can detect leaked sockets.
        self.orig_map_size = len(asyncore.socket_map)
        #self.hook_asyncore_error()
        config = Config(opts)
        self._server = Server(config, threads=4)
        self._server.accept_connections()
        self.port = self._server.socket.getsockname()[1]
        self.run_loop = 1  # flag polled by loop(); cleared by stopServer
        self.counter = 0
        self.thread_started = threading.Event()
        self.thread = threading.Thread(target=self.loop)
        self.thread.setDaemon(True)
        self.thread.start()
        # Wait (up to 10s) for the loop thread to signal it is running.
        self.thread_started.wait(10.0)
        self.assert_(self.thread_started.isSet())

    def stopServer(self):
        """Stop the loop thread, shut down the server, and check for leaks."""
        self.run_loop = 0
        self.thread.join()
        td.shutdown()
        self._server.close()
        # Make sure all sockets get closed by asyncore normally.
        timeout = time.time() + 5
        while 1:
            if len(asyncore.socket_map) == self.orig_map_size:
                # Clean!
                break
            if time.time() >= timeout:
                self.fail('Leaked a socket: %s' % `asyncore.socket_map`)
            asyncore.poll(0.1)
        #self.unhook_asyncore_error()

    def loop(self):
        """Body of the background thread: poll asyncore until told to stop."""
        self.thread_started.set()
        while self.run_loop:
            self.counter = self.counter + 1
            asyncore.poll(0.1)

    # environment
    # ===========

    def scrubenv(self):
        """Remove all HTTPY_* variables from the environment, saving them."""
        save = {}
        for opt in Config.options:
            envvar = 'HTTPY_%s' % opt.upper()
            if os.environ.has_key(envvar):
                save[envvar] = os.environ[envvar]
                del os.environ[envvar]
        self.env = save

    def restoreenv(self):
        """Put back the HTTPY_* variables saved by scrubenv."""
        for k, v in self.env.items():
            os.environ[k] = v
        self.env = {}

    # test site
    # =========
    # testsite is a list of strings and tuples. If a string, it is interpreted
    # as a path to a directory that should be created. If a tuple, the first
    # element is a path to a file, the second is the contents of the file.
    # We do it this way to ease cross-platform testing.
    #
    # siteroot is the filesystem path under which to create the test site.

    siteroot = 'root'
    testsite = []

    def buildTestSite(self):
        """Build the site described in self.testsite
        """
        os.mkdir(self.siteroot)
        for item in self.testsite:
            if isinstance(item, basestring):
                path = self.convert_path(item.lstrip('/'))
                path = os.sep.join([self.siteroot, path])
                os.mkdir(path)
            elif isinstance(item, tuple):
                filepath, contents = item
                path = self.convert_path(filepath.lstrip('/'))
                path = os.sep.join([self.siteroot, path])
                file(path, 'w').write(contents)

    def removeTestSite(self):
        """Delete the scratch site tree (and any leftover httpy.conf)."""
        if os.path.isfile('httpy.conf'):
            os.remove('httpy.conf')
        if not os.path.isdir(self.siteroot):
            return
        # Bottom-up walk so directories are empty before they are removed.
        for root, dirs, files in os.walk(self.siteroot, topdown=False):
            for name in dirs:
                os.rmdir(os.path.join(root, name))
            for name in files:
                os.remove(os.path.join(root, name))
        os.rmdir(self.siteroot)

    def convert_path(self, path):
        """Given a Unix path, convert it for the current platform.
        """
        return os.sep.join(path.split('/'))

    def convert_paths(self, paths):
        """Given a tuple of Unix paths, convert them for the current platform.
        """
        return tuple([self.convert_path(p) for p in paths])

    # utils
    # =====

    @staticmethod
    def neuter_traceback(tb):
        """Given a traceback, return just the system-independent lines.
        """
        tb_list = tb.split(os.linesep)
        if not tb_list[-1]:
            tb_list = tb_list[:-1]
        neutered = []
        # Keep every other line (skipping the file/line-number lines),
        # then the final exception line.
        for i in range(0,len(tb_list),2):
            neutered.append(tb_list[i])
        neutered.append(tb_list[-1])
        return os.linesep.join(neutered)

    @staticmethod
    def dict2tuple(d):
        # Deterministic, hashable representation of a dict for comparisons.
        return tuple(sorted(d.iteritems()))

    @staticmethod
    def make_request(uri, headers=None, Zope=False):
        """Build a GET request for *uri*; raw ZopeRequest if Zope is True."""
        if headers is None:
            headers = {}
        request = ZopeRequest()
        request.received("GET %s HTTP/1.1\r\n" % uri)
        for header in headers.items():
            request.received("%s: %s\r\n" % header)
        request.received('\r\n')
        if Zope:
            return request
        else:
            return Request(request)
|
[
"[email protected]"
] | |
e827ef9de12fa0211e6677aa82084594cd16d444
|
6b76819d395bb76591fc12e9de83161b37d61672
|
/woot/apps/expt/management/commands/step02_zmod.py
|
f30ef4f4d650e4b9e4688253eed2cfb7feb067a9
|
[] |
no_license
|
NicholasPiano/img
|
8426530512ee80a4ed746874c4219b1de56acbfd
|
3a91c65c3c9680ba7ed7c94308a721dd0cff9ad5
|
refs/heads/master
| 2020-05-18T15:48:50.566974 | 2015-07-16T00:01:17 | 2015-07-16T00:01:17 | 38,632,176 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,707 |
py
|
# expt.command: step03_zmod
# django
from django.core.management.base import BaseCommand, CommandError
# local
from apps.img.models import Composite
from apps.expt.util import *
# util
from optparse import make_option
spacer = ' ' * 20
### Command
class Command(BaseCommand):
    """Django management command: run the mod_zmod processing step on the
    composite identified by --expt and --series."""

    option_list = BaseCommand.option_list + (
        make_option('--expt', # option that will appear in cmd
            action='store', # store the value as-is
            dest='expt', # key under which the value appears in options
            default='050714-test', # default experiment name
            help='Name of the experiment to import' # shown in --help
        ),
        make_option('--series', # option that will appear in cmd
            action='store', # store the value as-is
            dest='series', # key under which the value appears in options
            default='13', # default series name
            help='Name of the series' # shown in --help
        ),
    )
    args = ''
    help = ''

    def handle(self, *args, **options):
        '''
        1. What does this script do?
        > Make images that can be recognized by CellProfiler by multiplying smoothed GFP with the flattened Brightfield
        2. What data structures are input?
        > Channel
        3. What data structures are output?
        > Channel
        4. Is this stage repeated/one-time?
        > One-time
        Steps:
        1. Select composite
        2. Call pmod mod on composite
        3. Run
        '''
        # 1. select composite
        composite = Composite.objects.get(experiment__name=options['expt'], series__name=options['series'])
        # 2. Call pmod mod
        mod = composite.mods.create(id_token=generate_id_token('img', 'Mod'), algorithm='mod_zmod')
        # 3. Run mod
        # end='\r' keeps the status on one line; the final print pads it out.
        print('step02 | processing mod_zmod...', end='\r')
        mod.run()
        print('step02 | processing mod_zmod... done.{}'.format(spacer))
|
[
"[email protected]"
] | |
16bf0ef9ec53acb6b4376b1146bb236b50565626
|
fddad101c7be2fcbc05131081e708f31948c002f
|
/329. Longest Increasing Path in a Matrix/answer_bfs.py
|
a9141a61f5be8c4c3d3ff273a059e79b03652077
|
[] |
no_license
|
LennyDuan/AlgorithmPython
|
a10c9278c676829ab5a284a618f6352414888061
|
523c11e8a5728168c4978c5a332e7e9bc4533ef7
|
refs/heads/master
| 2021-07-16T12:31:08.284846 | 2021-03-28T20:31:28 | 2021-03-28T20:31:28 | 244,040,362 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 935 |
py
|
def longestIncreasingPath(self, matrix) -> int:
    """Return the length of the longest strictly increasing path in *matrix*.

    Moves are up/down/left/right only. The original plain DFS re-explored
    cells exponentially (its ``visited`` set was checked but never populated,
    so it had no effect); this version memoizes the best path length starting
    at each cell, giving O(rows * cols) time.

    :param matrix: rectangular list of lists of comparable values.
    :return: longest increasing path length; 0 for an empty matrix.
    """
    if not matrix or not matrix[0]:
        return 0
    rows, cols = len(matrix), len(matrix[0])
    directions = ((0, 1), (0, -1), (1, 0), (-1, 0))
    memo = {}  # (i, j) -> longest increasing path starting at (i, j)

    def traverse(i, j):
        # Longest increasing path that starts at cell (i, j).
        if (i, j) in memo:
            return memo[(i, j)]
        best = 1
        for di, dj in directions:
            ni, nj = i + di, j + dj
            if 0 <= ni < rows and 0 <= nj < cols and matrix[ni][nj] > matrix[i][j]:
                best = max(best, 1 + traverse(ni, nj))
        memo[(i, j)] = best
        return best

    return max(traverse(r, c) for r in range(rows) for c in range(cols))
# Demo: longest increasing path is 2 -> 4 -> 5 -> 6, so this prints 4.
nums = [
    [3, 4, 5],
    [3, 2, 6],
    [2, 2, 1]
]
print(longestIncreasingPath(None, nums))
|
[
"[email protected]"
] | |
8679eb15e7abddc2ffc51114e648c08423ab7ebd
|
2aec9c5e8c72b731d3abf22f2a407fe09c1cde09
|
/QDS_Test/case/dbwytest.py
|
22710e1c97b825043ebe5514995dd8e8038a0300
|
[] |
no_license
|
jiangyg/ZWFproject
|
8b24cc34970ae0a9c2a2b0039dc527c83a5862b5
|
aa35bc59566d92721f23d2dd00b0febd268ac2dd
|
refs/heads/master
| 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null |
UTF-8
|
Python
| false | false | 3,860 |
py
|
# coding=utf-8
import time
import logging
from selenium.webdriver import ActionChains
from utils.mytestcase import MyTestCase
from utils.logincookie import DengLuPage
from utils.random import unicode
from utils.screenshort import get_screenshort
class DbWyTest(MyTestCase):
    """Test suite for the "Danbao Wuyou" (worry-free trademark guarantee) product."""
    def test_dbwy(self):
        """End-to-end purchase flow for a guaranteed trademark-registration order.

        Steps: log in, hover the navigation menu to open the product page in a
        new window, select a service item, fill in the order form, and verify
        the displayed price stays consistent through submission and payment.
        """
        # logging.basicConfig(filename='../LOG/' + __name__ + '.log',
        # format='[%(asctime)s-%(filename)s-%(levelname)s: %(message)s]', level=logging.DEBUG,
        # filemode='a', datefmt='%Y-%m-%d%I:%M:%S %p')
        dl = DengLuPage(self.driver)
        # find_element(By.*(".."))  is the officially recommended replacement
        # for the deprecated find_element_by_*("") helpers.
        # self.driver.find_element_by_id()
        # self.driver.find_element()
        dl.login()
        time.sleep(2)
        # Hover over the top navigation entry to reveal the product dropdown.
        ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
            "body > div.section-banner > div.public-navbar > div > div > h3 > span")).perform()
        time.sleep(2)
        ActionChains(self.driver).move_to_element(self.driver.find_element_by_css_selector(
            "body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > h3 > a")).perform()
        ActionChains(self.driver).release()
        self.driver.find_element_by_css_selector(
            "body > div.section-banner > div.public-navbar > div > div > div > ul:nth-child(1) > li:nth-child(1) > div > dl:nth-child(3) > dd > a:nth-child(2)").click()
        # Collect the handles of all open windows...
        windows = self.driver.window_handles
        # ...and switch to the most recently opened one.
        self.driver.switch_to.window(windows[-1])
        time.sleep(2)
        self.driver.set_window_size(1920, 1080)
        time.sleep(3)
        self.assertIn("商标担保注册|商标注册费用|商标申请流程-权大师", self.driver.title)
        print(self.driver.title)
        # Guaranteed-registration purchase: pick the second service item.
        self.driver.find_element_by_css_selector(
            "body > div.section-product.width1200 > dl > dd > div.cont-serviceItems > table > tbody > tr > td.td-cont > ul > li:nth-child(2)").click()
        # Remember the quoted total so it can be compared with the order page.
        for a in self.driver.find_elements_by_css_selector("#total-price"):
            print("费用总计:"+a.text)
            aa=a.text
        self.driver.find_element_by_css_selector(
            "body > div.section-product.width1200 > dl > dd > div.cont-btnBuy > a.btn.btn-next.buynow").click()
        self.driver.find_element_by_name("ownerContactPerson").send_keys("{}".format(unicode()))
        self.driver.find_element_by_name("ownerContactPhone").send_keys("15624992498")
        self.driver.find_element_by_name("contactMail").send_keys("[email protected]")
        self.driver.find_element_by_css_selector("#remark").send_keys(time.strftime("%Y-%m-%d_%H-%M-%S") + "测试订单")
        get_screenshort(self.driver, "test_dbwy.png")
        for i in self.driver.find_elements_by_css_selector("body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.last-pay.personal-last-pay > ul > li.row-sense > em > i"):
            print("总价:"+i.text)
            ii=i.text
        # Price shown on the product page must match the order total.
        self.assertIn(aa,ii)
        print("价格一致")
        self.driver.find_element_by_css_selector(
            "body > div.myOrder-wrap > div.section-myorder.orderinfo-wrap.width1200 > div:nth-child(6) > div.btns > a.btn-next.submitOrder").click()
        # After submitting, the payable amount must match the order total too.
        for o in self.driver.find_elements_by_class_name("payable"):
            print("订单提交成功,应付金额:"+o.text)
            oo=o.text
        self.assertIn(oo,ii)
        print("测试通过")
        self.driver.find_element_by_css_selector("#alisubmit").click()
|
[
"[email protected]"
] | |
2ac108f270cf5ffa0bfbca7755b958d446b3a030
|
facb8b9155a569b09ba66aefc22564a5bf9cd319
|
/wp2/merra_scripts/01_netCDF_extraction/merra902Combine/21-tideGauge.py
|
784ddb0d0f655471f76357e1f1df6c7540900599
|
[] |
no_license
|
moinabyssinia/modeling-global-storm-surges
|
13e69faa8f45a1244a964c5de4e2a5a6c95b2128
|
6e385b2a5f0867df8ceabd155e17ba876779c1bd
|
refs/heads/master
| 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,374 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
# define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"


def combine(start=21, end=22):
    """Merge the slp, wnd_u and wnd_v predictor CSVs for selected tide gauges.

    For each tide-gauge folder whose index in ``os.listdir(dir_in)`` lies in
    ``range(start, end)``, the three predictor files are read, sorted by date
    (raw MERRA files are scrambled), their value columns prefixed with the
    predictor name, merged on ``date`` and written to ``dir_out`` as
    ``"<index>_<tg_name>.csv"``.

    Args:
        start: index of the first tide-gauge folder to process (defaults to
            the originally hard-coded 21).
        end: one past the index of the last folder to process (default 22).
    """
    os.chdir(dir_in)
    # NOTE(review): os.listdir order is platform-dependent, so the numeric
    # indices are only stable on a given filesystem -- confirm downstream use.
    tg_list_name = os.listdir()

    for tg in range(start, end):
        os.chdir(dir_in)
        tg_name = tg_list_name[tg]
        print(tg_name, '\n')

        # step into this TG folder
        os.chdir(tg_name)

        # skip tide gauges with no extracted predictor files
        if not os.listdir():
            continue

        # path of each predictor file inside the folder
        where = os.getcwd()
        csv_path = {'slp': os.path.join(where, 'slp.csv'),
                    'wnd_u': os.path.join(where, 'wnd_u.csv'),
                    'wnd_v': os.path.join(where, 'wnd_v.csv')}

        pred_combined = None
        for pr in csv_path.keys():
            print(tg_name, ' ', pr)
            # read predictor
            pred = pd.read_csv(csv_path[pr])
            # drop the stray index column written by the extraction step
            pred.drop(['Unnamed: 0'], axis=1, inplace=True)
            # sort based on date as merra files are scrambled
            pred.sort_values(by='date', inplace=True)
            # prefix value columns with the predictor name; keep 'date' as-is
            pred.columns = [col if col == 'date' else pr + str(col)
                            for col in pred.columns]
            # merge all predictors on the date column
            if pred_combined is None:
                pred_combined = pred
            else:
                pred_combined = pd.merge(pred_combined, pred, on='date')

        # save the combined predictors
        os.chdir(dir_out)
        pred_combined.to_csv('.'.join([str(tg) + "_" + tg_name, 'csv']))
        os.chdir(dir_in)
        print('\n')


# run script
combine()
|
[
"[email protected]"
] | |
72228f507a4ac8d98397a992ca802e652f3d5c8f
|
2207cf4fb992b0cb106e2daf5fc912f23d538d0d
|
/src/catalog/serializers.py
|
1e85a0316ce6f1e7fa4b866254126cb6dd9a095a
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
litedesk/litedesk-webserver-provision
|
95bc75f61532c5f1c7cb21fb5372ff288999689e
|
1576b9d3e5e2e64d1136d276767c2710cfb1938f
|
refs/heads/master
| 2021-05-15T01:35:31.984067 | 2020-08-18T10:55:20 | 2020-08-18T10:55:20 | 25,595,412 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,021 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework import serializers
import models
class OfferSerializer(serializers.ModelSerializer):
    """Read-mostly DRF serializer for Offer with a hyperlinked detail URL."""
    # Resolved against the 'offer-detail' route for each instance.
    url = serializers.HyperlinkedIdentityField(view_name='offer-detail')
    class Meta:
        model = models.Offer
        fields = ('url', 'name', 'currency', 'price', 'setup_price', 'status')
        # name/asset/currency are fixed at creation; only price-related and
        # status fields remain writable through this serializer.
        read_only_fields = ('name', 'asset', 'currency', )
|
[
"[email protected]"
] | |
5edaa1b154eb40102fe6ec6a4a37b893c4eab07f
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/hv572GaPtbqwhJpTb_2.py
|
8e0bb6a39e996aa650ed4adf5f67abcc31d4539a
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,831 |
py
|
"""
In this challenge, you must think about words as elastics. What happens when
you stretch an elastic by applying a constant traction force at both ends?
Every part (or letter, in this case) of the elastic will expand, with the
minimum expansion at the ends and the maximum expansion in the center.
If the word has an odd length, the effective central character of the word
will be the pivot that splits the word into two halves.
"ABC" -> Left = "A" | Center = "B" | Right = "C"
If the word has an even length, you will consider two parts of equal length,
with the last character of the left half and the first character of the right
half being the center.
"ABCD" -> Left = "AB" | Right = "CD"
You will represent the expansion of a letter repeating it as many times as its
numeric position (so counting the indexes from/to 1, and not from 0 as usual)
in its half, with a crescent order in the left half and a decrescent order in
the right half.
Word = "ANNA"
Left = "AN"
Right = "NA"
Left = "A" * 1 + "N" * 2 = "ANN"
Right = "N" * 2 + "A" * 1 = "NNA"
Word = Left + Right = "ANNNNA"
If the word has an odd length, the pivot (the central character) will be the
peak (as to say, the highest value) that delimits the two halves of the word.
Word = "KAYAK"
Left = "K" * 1 + "A" * 2 = "KAA"
Pivot = "Y" * 3 = "YYY"
Right = "A" * 2 + "K" * 1 = "AAK"
Word = Left + Pivot + Right = "KAAYYYAAK"
Given a `word`, implement a function that returns the elasticized version of
the word as a string.
### Examples
elasticize("ANNA") ➞ "ANNNNA"
elasticize("KAYAK") ➞ "KAAYYYAAK"
elasticize("X") ➞ "X"
### Notes
* For words with fewer than three characters, the function must return the same word (no traction applicable).
* Remember, in the left half characters are counted from 1 up to the middle, and in reverse order back down to 1 in the right half.
"""
def elasticize(word):
    """Stretch *word* like an elastic band.

    Each character is repeated according to its 1-based position within its
    half: repetition counts rise 1, 2, ... toward the centre and fall back
    down symmetrically on the right half.  For odd lengths the central
    character is the peak; words shorter than three characters are returned
    unchanged.
    """
    size = len(word)
    if size < 3:
        return word
    half = size // 2
    # Left half: positions 1..half, counts increasing toward the centre.
    left = ''.join(word[i] * (i + 1) for i in range(half))
    # Odd length: the middle character is the peak, repeated half + 1 times.
    pivot = word[half] * (half + 1) if size % 2 else ''
    # Right half mirrors the left: counts decrease toward the end.
    right = ''.join(word[size - 1 - i] * (i + 1) for i in reversed(range(half)))
    return left + pivot + right
|
[
"[email protected]"
] | |
327169a1cb6be4099ccb7f13fab70dfa92f4742e
|
7deda84f7a280f5a0ee69b98c6a6e7a2225dab24
|
/Receptionist/migrations/0027_package_manage_reception.py
|
45248c462110a952feffbb09a7008787a2c97129
|
[] |
no_license
|
Cornex-Inc/Coffee
|
476e30f29412373fb847b2d518331e6c6b9fdbbf
|
fcd86f20152e2b0905f223ff0e40b1881db634cf
|
refs/heads/master
| 2023-01-13T01:56:52.755527 | 2020-06-08T02:59:18 | 2020-06-08T02:59:18 | 240,187,025 | 0 | 0 | null | 2023-01-05T23:58:52 | 2020-02-13T05:47:41 |
Python
|
UTF-8
|
Python
| false | false | 549 |
py
|
# Generated by Django 2.1.15 on 2020-05-19 15:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: add a required `reception` FK to package_manage."""
    dependencies = [
        ('Receptionist', '0026_package_manage_grouping'),
    ]
    operations = [
        migrations.AddField(
            model_name='package_manage',
            name='reception',
            # default=0 only backfills existing rows during this migration;
            # preserve_default=False removes the default from the model state.
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.DO_NOTHING, to='Receptionist.Reception'),
            preserve_default=False,
        ),
    ]
|
[
"[email protected]"
] | |
845f77bc8d39737647f4a55d183df4f8f7afdbf3
|
43aeee48c1f6fc468a43f9bb0d4edae8ee0dbee1
|
/LPTW-SRC/例3_21.py
|
8430bd36f542e524ac1f1798a936dc9eba351ed6
|
[] |
no_license
|
wiky2/mytestproject
|
f694cf71dd3031e4597086f3bc90d246c4b26298
|
e7b79df6304476d76e87f9e8a262f304b30ca312
|
refs/heads/master
| 2021-09-07T20:54:19.569970 | 2018-02-28T23:39:00 | 2018-02-28T23:39:00 | 100,296,844 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,333 |
py
|
# Keep prompting until the user supplies an integer judge count greater than 2.
while True:
    try:
        n = int(input('请输入评委人数:'))
    except ValueError:
        # Non-numeric input: prompt again.  (The original `Pass` here was a
        # NameError bug -- any non-integer input crashed the script.)
        continue
    if n <= 2:
        print('评委人数太少,必须多于2个人。')
    else:
        break

# Scores collected from every judge.
scores = []
for i in range(n):
    # Keep prompting until a number between 0 and 100 is entered.
    while True:
        try:
            # Convert the string to a float; raises ValueError on bad input.
            score = float(input('请输入第{0}个评委的分数:'.format(i+1)))
            # Explicit range check instead of `assert`, which is stripped
            # when Python runs with the -O flag.
            if not 0 <= score <= 100:
                raise ValueError
        except ValueError:
            print('分数错误')
        else:
            # Valid score: record it and move on to the next judge.
            scores.append(score)
            break

# Drop one highest and one lowest score.
highest = max(scores)
lowest = min(scores)
scores.remove(highest)
scores.remove(lowest)
# Average of the remaining scores, rounded to 2 decimal places.
finalScore = round(sum(scores)/len(scores), 2)
formatter = '去掉一个最高分{0}\n去掉一个最低分{1}\n最后得分{2}'
print(formatter.format(highest, lowest, finalScore))
|
[
"[email protected]"
] | |
afa9a1d0944e4af29df98932dd9113870175e138
|
3ac0a169aa2a123e164f7434281bc9dd6373d341
|
/singleNumber.py
|
4a7b92101b0350685936c92368994f2cf80679bc
|
[] |
no_license
|
sfeng77/myleetcode
|
02a028b5ca5a0354e99b8fb758883902a768f410
|
a2841fdb624548fdc6ef430e23ca46f3300e0558
|
refs/heads/master
| 2021-01-23T02:06:37.569936 | 2017-04-21T20:31:06 | 2017-04-21T20:31:06 | 85,967,955 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 208 |
py
|
class Solution(object):
    def singleNumber(self, nums):
        """Return the element of *nums* that appears exactly once.

        XOR-folding the whole list cancels every value that occurs an even
        number of times, leaving only the singleton.

        :type nums: List[int]
        :rtype: int
        """
        acc = 0
        for value in nums:
            acc ^= value
        return acc
|
[
"[email protected]"
] | |
d8756586064d46abf0b01f2f255a4408170c98ca
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j19485-4225/sdB_GALEX_J19485-4225_lc.py
|
ad5e79f01dd4bec1f067eebd2a8c3dee9507a2f5
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 359 |
py
|
from gPhoton.gAperture import gAperture
def main():
    # Single gAperture photometry extraction for sdB_GALEX_J19485-4225:
    # NUV band, 30 s time bins, 0.00556 deg aperture with a background
    # annulus, 1000 s max gap, output written to CSV.
    # NOTE(review): the csvfile path contains a space before
    # "/sdB_GALEX_J19485-4225_lc.csv" -- looks like a generator typo;
    # confirm against the intended output directory layout.
    gAperture(band="NUV", skypos=[297.137792,-42.429325], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_GALEX_J19485-4225 /sdB_GALEX_J19485-4225_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)

if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
be016283897b8b97fcd923c3c66271b85639e383
|
10d98fecb882d4c84595364f715f4e8b8309a66f
|
/rl_metrics_aaai2021/utils.py
|
fdb1f66a5371b5960ba1746220fe5dec986ad621
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
afcarl/google-research
|
51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42
|
320a49f768cea27200044c0d12f394aa6c795feb
|
refs/heads/master
| 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 |
Apache-2.0
| 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null |
UTF-8
|
Python
| false | false | 7,577 |
py
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import collections
from absl import logging
import numpy as np
from rl_metrics_aaai2021 import bisimulation
from rl_metrics_aaai2021 import d_delta
from rl_metrics_aaai2021 import d_delta_star
from rl_metrics_aaai2021 import discrete_bisimulation
from rl_metrics_aaai2021 import discrete_lax_bisimulation
from rl_metrics_aaai2021 import lax_bisimulation
# (constructor, LaTeX label) pair describing one metric implementation.
MetricData = collections.namedtuple('metric_data', ['constructor', 'label'])
# Timing/convergence statistics gathered while computing a metric on an MDP.
MDPStats = collections.namedtuple(
    'MDPStats', ['time', 'num_iterations', 'min_gap', 'avg_gap', 'max_gap'])
# Dictionary mapping metric name to constructor and LaTeX label.
# The d_delta_<k> variants differ only in their sampling parameter k.
METRICS = {
    'bisimulation':
        MetricData(bisimulation.Bisimulation, r'$d^{\sim}$'),
    'discrete_bisimulation':
        MetricData(discrete_bisimulation.DiscreteBisimulation, r'$e^{\sim}$'),
    'lax_bisimulation':
        MetricData(lax_bisimulation.LaxBisimulation, r'$d^{\sim_{lax}}$'),
    'discrete_lax_bisimulation':
        MetricData(discrete_lax_bisimulation.DiscreteLaxBisimulation,
                   r'$e^{\sim_{lax}}$'),
    'd_delta_1':
        MetricData(d_delta.DDelta1, r'$d_{\Delta1}$'),
    'd_delta_5':
        MetricData(d_delta.DDelta5, r'$d_{\Delta5}$'),
    'd_delta_10':
        MetricData(d_delta.DDelta10, r'$d_{\Delta10}$'),
    'd_delta_15':
        MetricData(d_delta.DDelta15, r'$d_{\Delta15}$'),
    'd_delta_20':
        MetricData(d_delta.DDelta20, r'$d_{\Delta20}$'),
    'd_delta_50':
        MetricData(d_delta.DDelta50, r'$d_{\Delta50}$'),
    'd_delta_100':
        MetricData(d_delta.DDelta100, r'$d_{\Delta100}$'),
    'd_delta_500':
        MetricData(d_delta.DDelta500, r'$d_{\Delta500}$'),
    'd_delta_1000':
        MetricData(d_delta.DDelta1000, r'$d_{\Delta1000}$'),
    'd_delta_5000':
        MetricData(d_delta.DDelta5000, r'$d_{\Delta5000}$'),
    'd_Delta_star':
        MetricData(d_delta_star.DDeltaStar, r'$d_{\Delta^*}$'),
}
def value_iteration(env, tolerance, verbose=False):
  """Compute V* and Q* for env via synchronous value iteration.

  Args:
    env: a MiniGrid environment, including the MDPWrapper (must expose
      num_states, num_actions, gamma, rewards and transition_probs).
    tolerance: float, stop once the largest value change drops below this.
    verbose: bool, whether to print verbose messages.

  Returns:
    Tuple of numpy arrays (V*, Q*).
  """
  values = np.zeros(env.num_states)
  q_values = np.zeros((env.num_states, env.num_actions))
  error = 2 * tolerance
  sweeps = 0
  while error > tolerance:
    # Synchronous update: every state's backup reads the previous sweep's V.
    updated = values.copy()
    for s in range(env.num_states):
      for a in range(env.num_actions):
        expected_next = np.matmul(env.transition_probs[s, a, :], values)
        q_values[s, a] = env.rewards[s, a] + env.gamma * expected_next
      updated[s] = q_values[s, :].max()
    error = np.abs(updated - values).max()
    values = updated
    sweeps += 1
    if verbose and sweeps % 1000 == 0:
      logging.info('Error after %d iterations: %f', sweeps, error)
  if verbose:
    logging.info('Found V* in %d iterations', sweeps)
    logging.info(values)
  return values, q_values
def q_value_iteration(env, tolerance):
  """Run Q-value iteration on env.

  Fixes a convergence bug in the original implementation: `error` was
  overwritten inside the state/action loops, so the stopping test only
  examined the very last (s, a) pair of each sweep.  The maximum change
  across a full sweep is now used instead.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, error tolerance used to exit loop.

  Returns:
    Numpy array with Q*.
  """
  q_values = np.zeros((env.num_states, env.num_actions))
  error = tolerance * 2
  while error > tolerance:
    # Track the largest update across the whole sweep.
    error = 0.0
    for s in range(env.num_states):
      for a in range(env.num_actions):
        old_value = q_values[s, a]
        q_values[s, a] = (
            env.rewards[s, a] + env.gamma *
            np.matmul(env.transition_probs[s, a, :], np.max(q_values, axis=1)))
        error = max(error, abs(old_value - q_values[s, a]))
  return q_values
def policy_iteration(env, tolerance, verbose=False):
  """Run policy iteration on env.

  Alternates iterative policy evaluation (in-place, Gauss-Seidel style) with
  greedy policy improvement until the policy stops changing.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, evaluation stops when the value function change is less
      than the tolerance.
    verbose: bool, whether to print verbose messages.

  Returns:
    Numpy array with V*
  """
  values = np.zeros(env.num_states)
  # Random policy
  policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
  policy_stable = False
  i = 0
  while not policy_stable:
    # Policy evaluation
    while True:
      delta = 0.
      for s in range(env.num_states):
        # Expected one-step return of the current stochastic policy; updates
        # are in-place, so later states already see this sweep's new values.
        v = np.sum(env.rewards[s, :] * policy[s, :] + env.gamma * policy[s, :] *
                   np.matmul(env.transition_probs[s, :, :], values))
        delta = max(delta, abs(v - values[s]))
        values[s] = v
      if delta < tolerance:
        break
    # Policy improvement
    policy_stable = True
    for s in range(env.num_states):
      old = policy[s].copy()
      g = np.zeros(env.num_actions, dtype=float)
      for a in range(env.num_actions):
        g[a] = (
            env.rewards[s, a] +
            env.gamma * np.matmul(env.transition_probs[s, a, :], values))
      # Split probability uniformly across all actions tied for the maximum.
      greed_actions = np.argwhere(g == np.amax(g))
      for a in range(env.num_actions):
        if a in greed_actions:
          policy[s, a] = 1 / len(greed_actions)
        else:
          policy[s, a] = 0
      if not np.array_equal(policy[s], old):
        policy_stable = False
    i += 1
    if i % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', i, delta)
  if verbose:
    logging.info('Found V* in %d iterations', i)
    logging.info(values)
  return values
def q_policy_iteration(env, tolerance, verbose=False):
  """Run policy iteration on env, tracking Q-values instead of V.

  Args:
    env: a MiniGrid environment, including the MDPWrapper.
    tolerance: float, evaluation stops when the value function change is less
      than the tolerance.
    verbose: bool, whether to print verbose messages.

  Returns:
    Numpy array with Q* (note: the Q table is returned, not V*).
  """
  q_values = np.zeros((env.num_states, env.num_actions))
  # Random policy
  policy = np.ones((env.num_states, env.num_actions)) / env.num_actions
  policy_stable = False
  i = 0
  while not policy_stable:
    # Policy evaluation
    while True:
      delta = 0.
      for s in range(env.num_states):
        # One-step backup of Q under the current policy; in-place updates, so
        # later states already see this sweep's new Q-values.
        v = env.rewards[s, :] + env.gamma * np.matmul(
            env.transition_probs[s, :, :], np.sum(q_values * policy, axis=1))
        delta = max(delta, np.max(abs(v- q_values[s])))
        q_values[s] = v
      if delta < tolerance:
        break
    # Policy improvement
    policy_stable = True
    for s in range(env.num_states):
      old = policy[s].copy()
      # Split probability uniformly across all actions tied for the maximum.
      greedy_actions = np.argwhere(q_values[s] == np.amax(q_values[s]))
      for a in range(env.num_actions):
        if a in greedy_actions:
          policy[s, a] = 1 / len(greedy_actions)
        else:
          policy[s, a] = 0
      if not np.array_equal(policy[s], old):
        policy_stable = False
    i += 1
    if i % 1000 == 0 and verbose:
      logging.info('Error after %d iterations: %f', i, delta)
  if verbose:
    logging.info('Found V* in %d iterations', i)
    logging.info(q_values)
  return q_values
|
[
"[email protected]"
] | |
6f87b92696de2420ba9b14956ac1d08db4e16a86
|
bc6c0cda914c23e80921793eb0ce71c45202ada4
|
/src/endoexport/export.py
|
66f3970d48311c18dc3f984c553dd2e423f77298
|
[
"MIT"
] |
permissive
|
karlicoss/endoexport
|
a2221799113a12b400e298dea8d95559926de138
|
98c8805cbcc00187822737ef32c2e0434c4f450e
|
refs/heads/master
| 2023-04-04T09:56:57.716411 | 2023-03-15T02:19:15 | 2023-03-15T02:22:45 | 230,617,833 | 3 | 0 |
MIT
| 2023-03-15T02:22:46 | 2019-12-28T14:05:23 |
Python
|
UTF-8
|
Python
| false | false | 1,512 |
py
|
#!/usr/bin/env python3
import argparse
import json
from .exporthelpers.export_helper import Json
import endoapi
def get_json(**params) -> Json:
    """Download every workout (raw JSON) for the authenticated account."""
    client = endoapi.endomondo.Endomondo(**params)
    # None = no limit: fetch all workouts
    return client.get_workouts_raw(None)
Token = str
def login(email: str) -> Token:
    """Interactively log in as *email*, print the API token and return it."""
    print(f"Logging in as {email}")
    password = input('Your password: ')
    client = endoapi.endomondo.Endomondo(email=email, password=password)
    token = client.token
    print('Your token:')
    print(token)
    return token
def make_parser():
    """Build the CLI argument parser: email/token params plus a --login flag."""
    from .exporthelpers.export_helper import setup_parser, Parser
    parser = Parser("Tool to export your personal Endomondo data")
    setup_parser(parser=parser, params=['email', 'token']) # TODO exports -- need help for each param?
    parser.add_argument('--login', action='store_true', help='''
This will log you in and give you the token (you'll need your password).
You only need to do it once, after that just store the token and use it.
''')
    return parser
def main() -> None:
    """CLI entry point: log in (--login) or dump all workouts as JSON."""
    args = make_parser().parse_args()
    params = args.params
    if args.login:
        login(email=params['email'])
        return
    workouts = get_json(**params)
    serialized = json.dumps(workouts, indent=1, ensure_ascii=False)
    args.dumper(serialized)


if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
2acbc2e004d4d067218de078794ec2dd281455fd
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/cosmos/azure-mgmt-cosmosdb/generated_samples/cosmos_db_sql_container_create_update.py
|
4eb9b7c581d3ad5045f9f14afe3e0ab5a7f5f6c1
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 3,434 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.cosmosdb import CosmosDBManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-cosmosdb
# USAGE
python cosmos_db_sql_container_create_update.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Create/update a SQL container on Cosmos DB account ddb1 and print it.

    Auto-generated SDK sample.  The request payload demonstrates a client-side
    encryption policy, last-writer-wins conflict resolution, a default TTL,
    a consistent indexing policy, a hash partition key and a unique-key
    constraint.
    """
    client = CosmosDBManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    response = client.sql_resources.begin_create_update_sql_container(
        resource_group_name="rg1",
        account_name="ddb1",
        database_name="databaseName",
        container_name="containerName",
        create_update_sql_container_parameters={
            "location": "West US",
            "properties": {
                "options": {},
                "resource": {
                    "clientEncryptionPolicy": {
                        "includedPaths": [
                            {
                                "clientEncryptionKeyId": "keyId",
                                "encryptionAlgorithm": "AEAD_AES_256_CBC_HMAC_SHA256",
                                "encryptionType": "Deterministic",
                                "path": "/path",
                            }
                        ],
                        "policyFormatVersion": 2,
                    },
                    "conflictResolutionPolicy": {"conflictResolutionPath": "/path", "mode": "LastWriterWins"},
                    "defaultTtl": 100,
                    "id": "containerName",
                    "indexingPolicy": {
                        "automatic": True,
                        "excludedPaths": [],
                        "includedPaths": [
                            {
                                "indexes": [
                                    {"dataType": "String", "kind": "Range", "precision": -1},
                                    {"dataType": "Number", "kind": "Range", "precision": -1},
                                ],
                                "path": "/*",
                            }
                        ],
                        "indexingMode": "consistent",
                    },
                    "partitionKey": {"kind": "Hash", "paths": ["/AccountNumber"]},
                    "uniqueKeyPolicy": {"uniqueKeys": [{"paths": ["/testPath"]}]},
                },
            },
            "tags": {},
        },
    ).result()
    print(response)


# x-ms-original-file: specification/cosmos-db/resource-manager/Microsoft.DocumentDB/stable/2023-04-15/examples/CosmosDBSqlContainerCreateUpdate.json
if __name__ == "__main__":
    main()
|
[
"[email protected]"
] | |
d10c3fb59eb602e7a438fe8b8b7ccca52fcc45d2
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_syphon.py
|
1ef3547d3d666728720ba4bfc26206b8a9d76bc4
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 295 |
py
|
#calss header
class _SYPHON():
def __init__(self,):
self.name = "SYPHON"
self.definitions = [u'a siphon noun ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"[email protected]"
] | |
ee27313bde085575df70e1d42550c376748fe931
|
08a9dc04e6defa9dc9378bfbfbe0b6185af6a86a
|
/manager/views.py
|
78b92fee93ead9c43d6d958d58f90642c7277c7f
|
[] |
no_license
|
Felicity-jt/50.008-Project-1
|
8ecc63d2302b2eaa4060f4c900d7fed2e958927c
|
960b5e57a39bfda1c31653798c23ddc051a2ff19
|
refs/heads/master
| 2021-08-24T00:40:27.886634 | 2017-12-07T09:14:12 | 2017-12-07T09:14:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,191 |
py
|
from json import loads
from django.http import Http404
from django.shortcuts import render
from django.core.exceptions import PermissionDenied
from django.views.decorators.http import require_POST
from common.db import sql, page
from common.utils import pagination
from common.messages import NOT_STAFF
from common.decorators import json_response
@require_POST
@json_response
def new(request):
    """Add item or entity into inventory (staff only).

    Expects a JSON body with a "name" key.  Returns {} on success, or None
    (serialized by @json_response) when the body is missing or malformed.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    # Parameterized insert -- the name value is never interpolated directly.
    s = """INSERT INTO item (id, name)
    VALUES (DEFAULT, %s)"""
    try:
        rq = loads(request.body)
        # sanitize before inserting
        values = (rq['name'],)
    except (ValueError, KeyError):
        return None
    sql(s, *values)
    return {}
@json_response
def stock(request, item_id):
    """Get (GET) or update (POST) the stock record for *item_id* (staff only).

    POST expects a JSON body with an integer "quantity".  Both methods return
    the current id/price/quantity of the item, or 404 if it does not exist.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    q = 'SELECT id, price, quantity FROM item WHERE id = %s'
    if request.method == 'POST':
        # update price and/or quantity from post data
        s = """UPDATE item SET
        quantity = %s
        WHERE id = %s"""
        try:
            rq = loads(request.body)
            # sanitize before inserting
            values = (int(rq['quantity']),)
        except (ValueError, KeyError):
            return None
        sql(s, *values, item_id)
    # Re-read after a possible update so the response reflects current state.
    try:
        r = sql(q, item_id)[0]
    except IndexError:
        raise Http404
    return {
        'id': r[0],
        'price': r[1],
        'quantity': r[2],
    }
@json_response
def stats(request, entity, year, month):
    """Get monthly purchase totals for *entity*, staff only.

    NOTE(review): `entity` is validated against ('item', 'company', 'creator')
    but the query always aggregates by item_id -- per-entity aggregation looks
    unfinished; confirm intended behavior.
    """
    if not request.user.is_staff:
        raise PermissionDenied(NOT_STAFF)
    if entity not in ('item', 'company', 'creator'):
        raise Http404
    # Sum purchased quantities per item for the requested year/month.
    q = """SELECT item_id, SUM(quantity) AS total FROM purchase_item
    INNER JOIN purchase p ON p.id = purchase_item.purchase_id
    WHERE YEAR(p.made_on) = %s AND MONTH(p.made_on) = %s
    GROUP BY item_id"""
    pg = pagination(request)
    # Default ordering: highest totals first.
    pg['sort'].append('-total')
    return sql(q + page(**pg), year, month)
|
[
"[email protected]"
] | |
39ce07857213f8a281675528cad52ce7943c5bf1
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/zpt/trunk/site-packages/zpt/_pytz/zoneinfo/US/Indiana_minus_Starke.py
|
f06a4f85e29494d5c49f070ed6153788987fe72d
|
[
"MIT",
"ZPL-2.1"
] |
permissive
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,743 |
py
|
'''tzinfo timezone information for US/Indiana_minus_Starke.'''
from zpt._pytz.tzinfo import DstTzInfo
from zpt._pytz.tzinfo import memorized_datetime as d
from zpt._pytz.tzinfo import memorized_ttinfo as i
class Indiana_minus_Starke(DstTzInfo):
    '''US/Indiana_minus_Starke timezone definition. See datetime.tzinfo for details'''

    # Olson/IANA zone name this class represents.
    zone = 'US/Indiana_minus_Starke'

    # Generated transition table: each entry is the UTC instant at which the
    # zone's offset/DST state changes.  d(...) is memorized_datetime.  The
    # list is positionally paired with _transition_info below — do not edit
    # either list by hand; this data is machine-generated from the tz database.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1918,3,31,8,0,0),
        d(1918,10,27,7,0,0),
        d(1919,3,30,8,0,0),
        d(1919,10,26,7,0,0),
        d(1942,2,9,8,0,0),
        d(1945,8,14,23,0,0),
        d(1945,9,30,7,0,0),
        d(1947,4,27,8,0,0),
        d(1947,9,28,7,0,0),
        d(1948,4,25,8,0,0),
        d(1948,9,26,7,0,0),
        d(1949,4,24,8,0,0),
        d(1949,9,25,7,0,0),
        d(1950,4,30,8,0,0),
        d(1950,9,24,7,0,0),
        d(1951,4,29,8,0,0),
        d(1951,9,30,7,0,0),
        d(1952,4,27,8,0,0),
        d(1952,9,28,7,0,0),
        d(1953,4,26,8,0,0),
        d(1953,9,27,7,0,0),
        d(1954,4,25,8,0,0),
        d(1954,9,26,7,0,0),
        d(1955,4,24,8,0,0),
        d(1955,10,30,7,0,0),
        d(1956,4,29,8,0,0),
        d(1956,10,28,7,0,0),
        d(1957,4,28,8,0,0),
        d(1957,9,29,7,0,0),
        d(1958,4,27,8,0,0),
        d(1958,9,28,7,0,0),
        d(1959,4,26,8,0,0),
        d(1959,10,25,7,0,0),
        d(1960,4,24,8,0,0),
        d(1960,10,30,7,0,0),
        d(1961,4,30,8,0,0),
        d(1961,10,29,7,0,0),
        d(1962,4,29,8,0,0),
        d(1963,10,27,7,0,0),
        d(1967,4,30,8,0,0),
        d(1967,10,29,7,0,0),
        d(1968,4,28,8,0,0),
        d(1968,10,27,7,0,0),
        d(1969,4,27,8,0,0),
        d(1969,10,26,7,0,0),
        d(1970,4,26,8,0,0),
        d(1970,10,25,7,0,0),
        d(1971,4,25,8,0,0),
        d(1971,10,31,7,0,0),
        d(1972,4,30,8,0,0),
        d(1972,10,29,7,0,0),
        d(1973,4,29,8,0,0),
        d(1973,10,28,7,0,0),
        d(1974,1,6,8,0,0),
        d(1974,10,27,7,0,0),
        d(1975,2,23,8,0,0),
        d(1975,10,26,7,0,0),
        d(1976,4,25,8,0,0),
        d(1976,10,31,7,0,0),
        d(1977,4,24,8,0,0),
        d(1977,10,30,7,0,0),
        d(1978,4,30,8,0,0),
        d(1978,10,29,7,0,0),
        d(1979,4,29,8,0,0),
        d(1979,10,28,7,0,0),
        d(1980,4,27,8,0,0),
        d(1980,10,26,7,0,0),
        d(1981,4,26,8,0,0),
        d(1981,10,25,7,0,0),
        d(1982,4,25,8,0,0),
        d(1982,10,31,7,0,0),
        d(1983,4,24,8,0,0),
        d(1983,10,30,7,0,0),
        d(1984,4,29,8,0,0),
        d(1984,10,28,7,0,0),
        d(1985,4,28,8,0,0),
        d(1985,10,27,7,0,0),
        d(1986,4,27,8,0,0),
        d(1986,10,26,7,0,0),
        d(1987,4,5,8,0,0),
        d(1987,10,25,7,0,0),
        d(1988,4,3,8,0,0),
        d(1988,10,30,7,0,0),
        d(1989,4,2,8,0,0),
        d(1989,10,29,7,0,0),
        d(1990,4,1,8,0,0),
        d(1990,10,28,7,0,0),
        d(1991,4,7,8,0,0),
        d(1991,10,27,7,0,0),
        d(2006,4,2,7,0,0),
        d(2006,10,29,6,0,0),
        d(2007,3,11,7,0,0),
        d(2007,11,4,6,0,0),
        d(2008,3,9,7,0,0),
        d(2008,11,2,6,0,0),
        d(2009,3,8,7,0,0),
        d(2009,11,1,6,0,0),
        d(2010,3,14,7,0,0),
        d(2010,11,7,6,0,0),
        d(2011,3,13,7,0,0),
        d(2011,11,6,6,0,0),
        d(2012,3,11,7,0,0),
        d(2012,11,4,6,0,0),
        d(2013,3,10,7,0,0),
        d(2013,11,3,6,0,0),
        d(2014,3,9,7,0,0),
        d(2014,11,2,6,0,0),
        d(2015,3,8,7,0,0),
        d(2015,11,1,6,0,0),
        d(2016,3,13,7,0,0),
        d(2016,11,6,6,0,0),
        d(2017,3,12,7,0,0),
        d(2017,11,5,6,0,0),
        d(2018,3,11,7,0,0),
        d(2018,11,4,6,0,0),
        d(2019,3,10,7,0,0),
        d(2019,11,3,6,0,0),
        d(2020,3,8,7,0,0),
        d(2020,11,1,6,0,0),
        d(2021,3,14,7,0,0),
        d(2021,11,7,6,0,0),
        d(2022,3,13,7,0,0),
        d(2022,11,6,6,0,0),
        d(2023,3,12,7,0,0),
        d(2023,11,5,6,0,0),
        d(2024,3,10,7,0,0),
        d(2024,11,3,6,0,0),
        d(2025,3,9,7,0,0),
        d(2025,11,2,6,0,0),
        d(2026,3,8,7,0,0),
        d(2026,11,1,6,0,0),
        d(2027,3,14,7,0,0),
        d(2027,11,7,6,0,0),
        d(2028,3,12,7,0,0),
        d(2028,11,5,6,0,0),
        d(2029,3,11,7,0,0),
        d(2029,11,4,6,0,0),
        d(2030,3,10,7,0,0),
        d(2030,11,3,6,0,0),
        d(2031,3,9,7,0,0),
        d(2031,11,2,6,0,0),
        d(2032,3,14,7,0,0),
        d(2032,11,7,6,0,0),
        d(2033,3,13,7,0,0),
        d(2033,11,6,6,0,0),
        d(2034,3,12,7,0,0),
        d(2034,11,5,6,0,0),
        d(2035,3,11,7,0,0),
        d(2035,11,4,6,0,0),
        d(2036,3,9,7,0,0),
        d(2036,11,2,6,0,0),
        d(2037,3,8,7,0,0),
        d(2037,11,1,6,0,0),
    ]

    # Offset info in effect from the corresponding transition time onward:
    # i(utcoffset_seconds, dst_seconds, tzname) via memorized_ttinfo.
    # The data shows the zone on Central time (CST/CDT, with wartime
    # CWT/CPT), then EST entries, then EST/EDT pairs from 2006 on.
    _transition_info = [
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CWT'),
        i(-18000,3600,'CPT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,0,'EST'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-21600,0,'CST'),
        i(-18000,3600,'CDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
        i(-14400,3600,'EDT'),
        i(-18000,0,'EST'),
    ]

# Rebind the name to a singleton instance of the class — the convention
# this generated module follows for exposing the tzinfo object.
Indiana_minus_Starke = Indiana_minus_Starke()
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.