blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (sequence) | license_type (class) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (class) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, nullable) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (class) | gha_event_created_at (timestamp[us], nullable) | gha_created_at (timestamp[us], nullable) | gha_language (class) | src_encoding (class) | language (class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (class) | content (string) | authors (sequence) | author_id (string) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09d31f3cfb420681b24d39ce5b6d98b82b443b5b | c3082eb2adc43b311dd3c9ff16fd3ed9df85f266 | /python/examples/advanced/slow.py | f2e6b6f91e806518083d6f82e06462b77c31f239 | [] | no_license | szabgab/slides | 78818c7138331b3ba9e221c81da3678a46efe9b3 | 63bba06678554db737602f2fbcd6510c36037e8a | refs/heads/main | 2023-08-31T07:13:51.536711 | 2023-08-29T13:17:59 | 2023-08-29T13:17:59 | 122,212,527 | 87 | 69 | null | 2023-05-19T06:55:11 | 2018-02-20T14:57:03 | Python | UTF-8 | Python | false | false | 734 | py | import random
def f():
n = 0
for i in range(30):
n += random.random()
return n
def g():
return random.random() * 30
def main(n):
text = get_str(n)
    # print(text)
text_sorted = sort(text)
return text_sorted
def sort(s):
chars = list(s)
for i in reversed(range(len(chars))):
        a = f()  # return value unused; the call just burns time
        b = g()  # return value unused; the call just burns time
for j in range(i, len(chars)-1):
swap(chars, j)
return ''.join(chars)
def get_str(n):
text = ''
for i in range(1, n):
text += chr(65 + random.randrange(0, 26))
return text
def swap(lst, loc):
if lst[loc] > lst[loc + 1]:
lst[loc], lst[loc + 1] = lst[loc + 1], lst[loc]
if __name__ == '__main__':
print(main(1000))
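
# A minimal way to profile this script (standard-library cProfile; assumes
# the file is saved as slow.py):
#
#   python -m cProfile -s cumulative slow.py
#
# or, from inside Python:
#
#   import cProfile
#   cProfile.run('main(1000)', sort='cumulative')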
| [
"[email protected]"
] | |
d4fc9bc42d28a4ae8358c51ca58861c999d614ed | c42672aeac984ab3f57d840710e145f4e918ba01 | /nasws/cnn/policy/darts_policy/train.py | f9bbbe1987129dcdc49a1d3205830cfc4856a047 | [
"MIT"
] | permissive | kcyu2014/nas-landmarkreg | 00212b6015d1fef3e7198bfa596fa69a898167c2 | a00c3619bf4042e446e1919087f0b09fe9fa3a65 | refs/heads/main | 2023-07-21T19:52:19.392719 | 2021-08-24T09:37:24 | 2021-08-24T09:37:24 | 350,368,390 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,295 | py | import os
import sys
import time
import glob
import numpy as np
import torch
# import nasws.cnn.utils
import utils
import logging
import argparse
import torch.nn as nn
import genotypes
import torch.utils
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from model import NetworkCIFAR as Network
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=96, help='batch size')
parser.add_argument('--learning_rate', type=float, default=0.025, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--epochs', type=int, default=600, help='num of training epochs')
parser.add_argument('--init_channels', type=int, default=36, help='num of init channels')
parser.add_argument('--layers', type=int, default=20, help='total number of layers')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--auxiliary', action='store_true', default=False, help='use auxiliary tower')
parser.add_argument('--auxiliary_weight', type=float, default=0.4, help='weight for auxiliary loss')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--arch', type=str, default='DARTS', help='which architecture to use')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
args = parser.parse_args()
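# Example invocation (the data path is a placeholder):
#   python train.py --arch DARTS --auxiliary --cutout --data ../data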
args.save = 'iclr-resubmission/eval-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
CIFAR_CLASSES = 10
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled=True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
genotype = eval("genotypes.%s" % args.arch)
model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype)
model = model.cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay
)
train_transform, valid_transform = utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
valid_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=valid_transform)
train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=2)
valid_queue = torch.utils.data.DataLoader(
valid_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, float(args.epochs))
    for epoch in range(args.epochs):
        # get_last_lr requires PyTorch >= 1.4; logs the LR used this epoch
        logging.info('epoch %d lr %e', epoch, scheduler.get_last_lr()[0])
        model.drop_path_prob = args.drop_path_prob * epoch / args.epochs
        train_acc, train_obj = train(train_queue, model, criterion, optimizer)
        logging.info('train_acc %f', train_acc)
        valid_acc, valid_obj = infer(valid_queue, model, criterion)
        logging.info('valid_acc %f', valid_acc)
        # step the scheduler once per epoch, after the optimizer updates
        scheduler.step()
        utils.save(model, os.path.join(args.save, 'weights.pt'))
def train(train_queue, model, criterion, optimizer):
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
model.train()
    for step, (input, target) in enumerate(train_queue):
        # Variable is deprecated; move tensors to the GPU directly
        # (async= is a syntax error on Python 3.7+, use non_blocking=)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
optimizer.zero_grad()
logits, logits_aux = model(input)
loss = criterion(logits, target)
if args.auxiliary:
loss_aux = criterion(logits_aux, target)
loss += args.auxiliary_weight*loss_aux
loss.backward()
        nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('train %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def infer(valid_queue, model, criterion):
objs = utils.AverageMeter()
top1 = utils.AverageMeter()
top5 = utils.AverageMeter()
model.eval()
    for step, (input, target) in enumerate(valid_queue):
        # volatile was removed from autograd; disable gradients explicitly
        with torch.no_grad():
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            logits, _ = model(input)
            loss = criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1ce1feb6bf75a790c18144cc6885d7f674924843 | 2c3da6e0bddf55d64d650040bbf286c47b31811a | /c语言中文网python教程/Python itertools模块:生成迭代器(实例分析).py | 24c70c2923eb597f7aa59b8fc68122a78a8ba190 | [
"MIT"
] | permissive | Bngzifei/PythonNotes | 76bd53db3033a9c51ab4bdd727842cd89607b584 | 01590e1b6c1bc0f04aa2d355fa2553c04cce27f2 | refs/heads/master | 2023-02-04T06:49:00.725463 | 2020-12-15T09:26:40 | 2020-12-15T09:26:40 | 155,154,662 | 1 | 2 | MIT | 2020-09-08T01:30:19 | 2018-10-29T05:02:48 | Python | UTF-8 | Python | false | false | 7,147 | py | # Python itertools模块:生成迭代器(实例分析)
"""
itertools模块主要包含了一些用于生成迭代器的函数.在在 Python 的交互式解释器中先导入
itertools 模块,然后输入 [e for e in dir(itertools) if not e.startswith('_')] 命令,
即可看到该模块所包含的全部属性和函数:
>>> [e for e in dir(itertools) if not e.startswith('_')]
['accumulate', 'chain', 'combinations', 'combinations_with_replacement',
'compress', 'count', 'cycle', 'dropwhile', 'filterfalse', 'groupby', 'islice',
'permutations', 'product', 'repeat', 'starmap', 'takewhile', 'tee', 'zip_longest']
从上面的输出结果可以看出,itertools模块中的不少函数都可以用于生成迭代器.
先看itertools模块中三个生成无限迭代器的函数:
1.count(start,[step]):生成 start, start+step, start+2*step,... 的迭代器,其中 step 默认为 1。
比如使用 count(10) 生成的迭代器包含:10, 11 , 12 , 13, 14,...
2.cycle(p):对序列 p 生成无限循环 p0, p1,..., p0, p1,... 的迭代器。比如使用 cycle('ABCD')
生成的迭代器包含:A,B,C,D,A,B,C,D,...
3.repeat(elem [,n]):生成无限个 elem 元素重复的迭代器,如果指定了参数 n,则只生成 n 个 elem 元素。
比如使用 repeat(10, 3) 生成的迭代器包含:10, 10, 10。
"""
import itertools as it
# count(10,3)生成10,13,16...迭代器
for e in it.count(10, 3):
print(e)
# 用于跳出无限循环
if e > 20:
break
print("---------")
my_counter = 0
# cycle用途对序列生成无限循环的迭代器
for e in it.cycle(["python","ruby","swift"]):
print(e)
# 用于跳出无限循环
my_counter += 1
if my_counter > 7:
break
print("-------")
# repeat用于生成n个元素重复的迭代器
for e in it.repeat("python",3):
print(e)
"""
在 itertools 模块中还有一些常用的迭代器函数,如下所示:
accumulate(p[,func]):默认生成根据序列 p 元素累加的迭代器,p0, p0+p1, p0+p1+p2,...序列,如果指定了 func 函数,则用 func 函数来计算下一个元素的值。
chain(p, q, ...):将多个序列里的元素“链”在一起生成新的序列。
compress(data, selectors):根据 selectors 序列的值对 data 序列的元素进行过滤。如果 selector[0] 为真,则保留 data[0];如果 selector[1] 为真,则保留 data[1]......依此类推。
dropwhile(pred, seq):使用 pred 函数对 seq 序列进行过滤,从 seq 中第一个使用 pred 函数计算为 False 的元素开始,保留从该元素到序列结束的全部元素。
takewhile(pred, seq):该函数和上一个函数恰好相反。使用 pred 函数对 seq 序列进行过滤,从 seq 中第一个使用 pred 函数计算为 False 的元素开始,去掉从该元素到序列结束的全部元素。
filterfalse(pred, seq):使用 pred 函数对 seq 序列进行过滤,保留 seq 中使用 pred 计算为 True 的元素。比如 filterfalse(lambda x:x%2, range(10)),得到 0, 2, 4, 6, 8。
islice(seq, [start,] stop [, step]):其功能类似于序列的 slice 方法,实际上就是返回 seq[start:stop:step] 的结果。
starmap(func, seq):使用 func 对 seq 序列的每个元素进行计算,将计算结果作为新的序列元素。当使用 func 计算序列元素时,支持序列解包。比如 seq 序列的元素长度为 3,那么 func 可以是一个接收三个参数的函数,该函数将会根据这三个参数来计算新序列的元素。
zip_longest(p,q,...):将 p、q 等序列中的元素按索引合并成元组,这些元组将作为新序列的元素。
上面这些函数的测试程序如下:
"""
print("----------")
import itertools as it
# 默认使用累加的方式计算下一个元素的值
for e in it.accumulate(range(6)):
print(e,end=",")
print("\n---------------")
# 使用x*y的方式来计算迭代器下一个元素的值
for e in it.accumulate(range(1,6),lambda x,y:x*y):
print(e,end=", ")
print("\n-----------------")
# 将两个序列"链接"在一起,生成新的迭代器
for e in it.chain(["a","b"],["kotlin","swift"]):
print(e,end=", ")
print("\n------------------")
# 根据第二个序列来筛选第一个序列的元素.
# 由于第二个序列只有中间两个元素为1,因此前一个序列只保留中间两个元素
for e in it.compress(["a","b","kotlin","swift"],[0,1,1,0]):
print(e,end=", ") # b, kotlin
print("\n-----------------------")
# 获取序列中从长度不小于4的元素开始,到结束的所有元素(即保留长度大于4位置开始到结束的所有元素)
for e in it.dropwhile(lambda x:len(x)<4,["a","b","kotlin","x","y"]):
print(e,end=", ") # 只有: 'Kotlin', 'x', 'y'
print("\n----------------")
# 去掉序列中从长度不小于4的元素开始,到结束的所有元素
for e in it.takewhile(lambda x:len(x)<4,["a","b","kotlin","x","y"]):
print(e,end=", ") # 只有: 'a', 'b'
print("\n----------------")
# 只保留序列中从长度不小于4的元素
for e in it.filterfalse(lambda x:len(x)<4,["a","b","kotlin","x","y"]):
print(e,end=", ") # 只有: 'Kotlin'
print("\n----------------------")
# 使用pow函数对原序列的元素进行计算,将计算结果作为新序列的元素
for e in it.starmap(pow, [(2,5),(3,2),(10,3)]):
print(e,end=", ")
print("\n------------------------")
# 将"ABCD","xy"的元素按索引合并成元组,这些元组作为新序列的元素
# 长度不够的序列元素使用"-"字符代替
for e in it.zip_longest("ABCD","xy",fillvalue="-"):
print(e,end=", ") # # ('A', 'x'), ('B', 'y'), ('C', '-'), ('D', '-')
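print("\n------------------------")
# islice (described above) works like sequence slicing; this short sketch is
# equivalent to iterating list(range(10))[2:8:2]
for e in it.islice(range(10), 2, 8, 2):
    print(e,end=", ") # 2, 4, 6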
"""
在 itertools 模块中还有一些用于生成排列组合的工具函数:
product(p, q, ...[repeat= 1)]:用序列 p 、q 、... 中的元素进行排列组合,就相当于使用嵌套循环组合。
permutations(p[, r]):从序列 p 中取出 r 个元素组成全排列,将排列得到的元组作为新迭代器的元素。
combinations(p, r):从序列 p 中取出 r 个元素组成全组合,元素不允许重复,将组合得到的元组作为新迭代器的元素。
combinations with_replacement(p, r),从序列 p 中取出 r 个元素组成全组合,元素允许重复,
将组合得到的元组作为新迭代器的元素。
如下程序示范了上面4个函数的用法:
"""
import itertools as it
print("\n-------")
# arrange and combine the elements of two sequences
for e in it.product("AB","CD"):
    print("".join(e),end=",") # AC,AD,BC,BD
print("\n------------")
# full permutations of a single sequence repeated twice
for e in it.product("AB",repeat=2):
    print("".join(e),end=", ")
print("\n-------------------")
# take 2 elements from the sequence to form permutations
for e in it.permutations("ABCD",2):
    print("".join(e),end=", ")
print("\n------------------")
# take 2 elements from the sequence to form combinations, no repeats allowed
for e in it.combinations("ABCD", 2):
    print("".join(e),end=", ")
print("\n------------------------")
"""
上面程序用到了一个字符串的join()方法,该方法用于将元组的所有元素连接成一个字符串.
"""
| [
"[email protected]"
] | |
52920abfc6f261e127be3ca99e74ed82138ce10e | 5761eca23af5ad071a9b15e2052958f2c9de60c0 | /generated-stubs/allauth/socialaccount/providers/bitbucket_oauth2/provider.pyi | 100feb2b8afd2da0c647bb49cce0b3e58fab5d68 | [] | no_license | d-kimuson/drf-iframe-token-example | 3ed68aa4463531f0bc416fa66d22ee2aaf72b199 | dd4a1ce8e38de9e2bf90455e3d0842a6760ce05b | refs/heads/master | 2023-03-16T13:52:45.596818 | 2021-03-09T22:09:49 | 2021-03-09T22:09:49 | 346,156,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | pyi | from allauth.socialaccount.providers.base import ProviderAccount as ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider as OAuth2Provider
from typing import Any
class BitbucketOAuth2Account(ProviderAccount):
def get_profile_url(self): ...
def get_avatar_url(self): ...
def to_str(self): ...
class BitbucketOAuth2Provider(OAuth2Provider):
id: str = ...
name: str = ...
account_class: Any = ...
def extract_uid(self, data: Any): ...
def extract_common_fields(self, data: Any): ...
provider_classes: Any
| [
"[email protected]"
] | |
3cd8b6972a27fcb26f2dea995651f24fa971773a | e2d23d749779ed79472a961d2ab529eeffa0b5b0 | /gcloud/contrib/function/permissions.py | 7c2a108a18154b28e3751cac04a29072152ab26b | [
"MIT",
"BSD-3-Clause",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | manlucas/atom | 9fa026b3f914e53cd2d34aecdae580bda09adda7 | 94963fc6fdfd0568473ee68e9d1631f421265359 | refs/heads/master | 2022-09-30T06:19:53.828308 | 2020-01-21T14:08:36 | 2020-01-21T14:08:36 | 235,356,376 | 0 | 0 | NOASSERTION | 2022-09-16T18:17:08 | 2020-01-21T14:04:51 | Python | UTF-8 | Python | false | false | 1,234 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from auth_backend.resources.base import Action, NeverInitiateResource
from auth_backend.backends.bkiam import BKIAMBackend
function_center_resource = NeverInitiateResource(
rtype='function_center',
name=_(u"职能化中心"),
scope_type='system',
scope_id='bk_sops',
scope_name=_(u"标准运维"),
actions=[Action(id='view', name=_(u"查看"), is_instance_related=False)],
backend=BKIAMBackend())
| [
"[email protected]"
] | |
3fd1171ad91a14ad977ace1eb611c02b31a71019 | ddb185b0cf581d85a1dd733a6d1e5d027ba3e0ca | /phase2/387.py | 4ab3425dbedcd713562ef34c92f76e379fea962a | [] | no_license | GavinPHR/code | 8a319e1223a307e755211b7e9b34c5abb00b556b | b1d8d49633db362bbab246c0cd4bd28305964b57 | refs/heads/master | 2020-05-16T04:09:19.026207 | 2020-04-30T10:00:06 | 2020-04-30T10:00:06 | 182,766,600 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # First Unique Character in a String
import collections
class Solution:
def firstUniqChar(self, s: str) -> int:
if not s: return -1
d = collections.Counter(s)
i = 0
for c in s:
if d[c] == 1:
return i
else:
i += 1
return -1
s = Solution()
print(s.firstUniqChar("loveleetcode"))
| [
"[email protected]"
] | |
2f10e9f21d71eb1536fb53c92e38822aa53b0480 | a8750439f200e4efc11715df797489f30e9828c6 | /csAcademy/binary_array.py | 40c9693298764e851e76be4a163f8a7a254efb4e | [] | no_license | rajlath/rkl_codes | f657174305dc85c3fa07a6fff1c7c31cfe6e2f89 | d4bcee3df2f501349feed7a26ef9828573aff873 | refs/heads/master | 2023-02-21T10:16:35.800612 | 2021-01-27T11:43:34 | 2021-01-27T11:43:34 | 110,989,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | '''
Find Binary Array
Time limit: 1000 ms
Memory limit: 256 MB

You have a binary array of length N. For each index i (1 <= i <= N) you know
the number of zeroes among the positions on the left side of i and on the
right side of i, respectively. Find the array!

Standard input
The first line contains an integer N, the length of the binary array.
The second line contains N integer values, where the i-th value represents
the number of zeroes among the positions on the left side of the i-th index
of the array.
The third line contains N integer values, where the i-th value represents
the number of zeroes among the positions on the right side of the i-th index
of the array.

Standard output
The first line will contain N bits (0 or 1), representing the binary array.

Constraints and notes
2 <= N <= 10^5
It is guaranteed that there is always at least one solution.

Input:
5
0 1 1 1 2
1 1 1 0 0

Output:
01101
'''
lens = int(input())
arrL = [int(x) for x in input().split()]
arrR = [int(x) for x in input().split()]
# Element i-1 is a 0 exactly when the left-zero count increases from
# position i-1 to position i.
for i in range(1, lens):
    if arrL[i] != arrL[i-1]:
        print(0, end="")
    else:
        print(1, end="")
#arr = [0 if b != a else 1 for a, b in zip(arrL, arrL[1:])]
# The last element is a 0 exactly when the right-zero counts of the last
# two positions differ.
print(0 if arrR[-1] != arrR[-2] else 1, end="")
| [
"[email protected]"
] | |
a561f92c4f4333b5258bc33ef7c64c6f1d3acb28 | 4cdd5813d20f40d525b4d418df2788fa72a394bf | /Leetcode/easy/invert-a-binary-tree.py | a00b8eb4764600bf5c8bc9a376fe2119c0737828 | [
"MIT"
] | permissive | das-jishu/data-structures-basics-leetcode | 9baa78b49cfc1b0f5c48ef961b85b4fa9ffaf0dd | 9f877ba8ed1968c21c39ebeae611ba3c448a083a | refs/heads/master | 2023-08-21T06:54:57.380306 | 2021-10-01T02:59:51 | 2021-10-01T02:59:51 | 298,940,500 | 23 | 21 | MIT | 2021-10-01T02:59:52 | 2020-09-27T02:52:50 | Python | UTF-8 | Python | false | false | 916 | py | """
# INVERT A BINARY TREE
Invert a binary tree.
Example:
Input:
     4
   /   \
  2     7
 / \   / \
1   3 6   9

Output:

     4
   /   \
  7     2
 / \   / \
9   6 3   1
Trivia:
This problem was inspired by this original tweet by Max Howell:
Google: 90% of our engineers use the software you wrote (Homebrew), but you can’t invert a binary tree on a whiteboard so f*** off.
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def invertTree(self, root: TreeNode) -> TreeNode:
if not root:
return None
if not root.left and not root.right:
return root
temp = self.invertTree(root.right)
root.right = self.invertTree(root.left)
root.left = temp
        return root
| [
"[email protected]"
] | |
ae36ea6b3ea5163238fb1b53cb7a789fac529f36 | ebd24e400986c57b4bb1b9578ebd8807a6db62e8 | /InstaGrade-FormBuilder/xlsxwriter/test/sharedstrings/test_initialisation.py | 053c1388226206274fb0df70e2a7a9877a33525b | [] | no_license | nate-parrott/ig | 6abed952bf32119a536a524422037ede9b431926 | 6e0b6ac0fb4b59846680567150ce69a620e7f15d | refs/heads/master | 2021-01-12T10:15:15.825004 | 2016-12-13T21:23:17 | 2016-12-13T21:23:17 | 76,399,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 835 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2014, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...sharedstrings import SharedStrings
class TestInitialisation(unittest.TestCase):
"""
Test initialisation of the SharedStrings class and call a method.
"""
def setUp(self):
self.fh = StringIO()
self.sharedstrings = SharedStrings()
self.sharedstrings._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test Sharedstrings xml_declaration()"""
self.sharedstrings._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
e0b11893d0de06d4b3be2aaa5725db913e41379e | 6ddcb131e5f2806acde46a525ff8d46bfbe0990e | /enaml/backends/qt/noncomponents/toolkit_items.py | a72749e2ac1d15a5b2158138eef276a442329617 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | agrawalprash/enaml | 5ce1823188eb51e5b83117ebee6c3655f53e5157 | 96828b254ac9fdfa2e5b6b31eff93a4933cbc0aa | refs/heads/master | 2021-01-15T23:35:21.351626 | 2012-09-05T03:40:07 | 2012-09-05T03:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | #------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from .qt_dock_manager import QtDockManager
from .qt_icon import QtIcon
from .qt_image import QtImage
TOOLKIT_ITEMS = {
'DockManager': QtDockManager,
'Image': QtImage,
'Icon': QtIcon,
}
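
# Illustrative lookup against the mapping above:
#   image_cls = TOOLKIT_ITEMS['Image']  # -> QtImage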
| [
"[email protected]"
] | |
a02cdca9c3a54d2bfa72eade971c66436b2737b7 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_068/ch40_2020_04_06_13_40_18_164592.py | adb63cc4378dfd175fa19617df94442df378c4ad | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | def soma_valores(a):
b = 0
for i in range(a[0], a[-1]+1):
b += i
    return b
| [
"[email protected]"
] | |
8be1d1310a0658b39cd9f615911d05de1415385e | 0444c96c6c428f75c72696159fb89633ec3ea34f | /backend/driver/migrations/0001_initial.py | c44b338ce03abd20124f390fc8f16f011c02b766 | [] | no_license | crowdbotics-apps/juice-and-eatery-21638 | 03a690239baf56065ee44986dc845f8768f1d999 | 136357629b22a48ad06731a5a5f14f648052a93f | refs/heads/master | 2022-12-27T14:03:23.949577 | 2020-10-17T21:57:34 | 2020-10-17T21:57:34 | 304,975,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,540 | py | # Generated by Django 2.2.16 on 2020-10-17 21:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("delivery_order", "0001_initial"),
]
operations = [
migrations.CreateModel(
name="DriverProfile",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("photo", models.URLField()),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
("last_updated", models.DateTimeField(auto_now=True)),
("details", models.TextField(blank=True, null=True)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="driverprofile_user",
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="DriverOrder",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("timestamp_created", models.DateTimeField(auto_now_add=True)),
(
"driver",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="driverorder_driver",
to="driver.DriverProfile",
),
),
(
"order",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="driverorder_order",
to="delivery_order.Order",
),
),
],
),
]
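
# A sketch of applying this generated migration in a standard Django project:
#   python manage.py migrate driver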
| [
"[email protected]"
] | |
cf96272e956bd4c7ef7b7af41cb4f6eb727a951a | d83fde3c891f44014f5339572dc72ebf62c38663 | /_bin/google-cloud-sdk/.install/.backup/lib/surface/kms/keys/update.py | c6c8ee94d6505b0f672450ba629b25382af2f005 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | gyaresu/dotfiles | 047cc3ca70f4b405ba272856c69ee491a79d2ebe | e5e533b3a081b42e9492b228f308f6833b670cfe | refs/heads/master | 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 | Python | UTF-8 | Python | false | false | 8,498 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Update rotation schedule and/or labels on a key."""
from __future__ import absolute_import
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.kms import flags
from googlecloudsdk.command_lib.util.args import labels_util
class Update(base.UpdateCommand):
r"""Update a key.
1. Update the rotation schedule for the given key.
Updates the rotation schedule for the given key. The schedule
automatically creates a new primary version for the key
according to the `--next-rotation-time` and `--rotation-period` flags.
The flag `--next-rotation-time` must be in ISO or RFC3339 format,
and `--rotation-period` must be in the form INTEGER[UNIT], where units
can be one of seconds (s), minutes (m), hours (h) or days (d).
Key rotations performed manually via `update-primary-version` and the
version `create` do not affect the stored `--next-rotation-time`.
2. Remove the rotation schedule for the given key with
--remove-rotation-schedule.
3. Update/Remove the labels for the given key with --update-labels and/or
--remove-labels.
4. Update the primary version for the given key with --primary-version.
## EXAMPLES
The following command sets a 30 day rotation period for the key
named `frodo` within the keyring `fellowship` and location `global`
starting at the specified time:
$ {command} frodo \
--location global \
--keyring fellowship \
--rotation-period 30d \
--next-rotation-time 2017-10-12T12:34:56.1234Z
The following command removes the rotation schedule for the key
named `frodo` within the keyring `fellowship` and location `global`:
$ {command} frodo \
--location global \
--keyring fellowship \
--remove-rotation-schedule
The following command updates the labels value for the key
named `frodo` within the keyring `fellowship` and location `global`. If the
label key does not exist at the time, it will be added:
$ {command} frodo \
--location global \
--keyring fellowship \
--update-labels k1=v1
The following command removes labels k1 and k2 from the key
named `frodo` within the keyring `fellowship` and location `global`:
$ {command} frodo \
--location global \
--keyring fellowship \
--remove-labels k1,k2
The following command updates the primary version for the key
named `frodo` within the keyring `fellowship` and location `global`:
$ {command} frodo \
--location global \
--keyring fellowship \
--primary-version 1
"""
@staticmethod
def Args(parser):
flags.AddKeyResourceArgument(parser, 'to update')
flags.AddRotationPeriodFlag(parser)
flags.AddNextRotationTimeFlag(parser)
flags.AddRemoveRotationScheduleFlag(parser)
flags.AddCryptoKeyPrimaryVersionFlag(parser, 'to make primary')
labels_util.AddUpdateLabelsFlags(parser)
def ProcessFlags(self, args):
fields_to_update = []
labels_diff = labels_util.Diff.FromUpdateArgs(args)
if labels_diff.MayHaveUpdates():
fields_to_update.append('labels')
if args.remove_rotation_schedule:
if args.rotation_period or args.next_rotation_time:
raise exceptions.ToolException(
'You cannot set and remove rotation schedule at the same time.')
fields_to_update.append('rotationPeriod')
fields_to_update.append('nextRotationTime')
if args.rotation_period:
fields_to_update.append('rotationPeriod')
if args.next_rotation_time:
fields_to_update.append('nextRotationTime')
# Raise an exception when no update field is specified.
if not args.primary_version and not fields_to_update:
raise exceptions.ToolException(
'At least one of --primary-version or --update-labels or --remove-'
'labels or --clear-labels or --rotation-period or --next-rotation-'
'time or --remove-rotation-schedule must be specified.')
return fields_to_update
def UpdatePrimaryVersion(self, args):
# pylint: disable=line-too-long
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = flags.ParseCryptoKeyName(args)
req = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysUpdatePrimaryVersionRequest(
name=crypto_key_ref.RelativeName(),
updateCryptoKeyPrimaryVersionRequest=(
messages.UpdateCryptoKeyPrimaryVersionRequest(
cryptoKeyVersionId=args.primary_version)))
try:
response = client.projects_locations_keyRings_cryptoKeys.UpdatePrimaryVersion(req)
except apitools_exceptions.HttpError:
return None
return response
def UpdateOthers(self, args, crypto_key, fields_to_update):
# pylint: disable=line-too-long
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = flags.ParseCryptoKeyName(args)
req = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysPatchRequest(
name=crypto_key_ref.RelativeName(),
cryptoKey=messages.CryptoKey(
labels=labels_util.Diff.FromUpdateArgs(args).Apply(
messages.CryptoKey.LabelsValue, crypto_key.labels).GetOrNone()))
req.updateMask = ','.join(fields_to_update)
flags.SetNextRotationTime(args, req.cryptoKey)
flags.SetRotationPeriod(args, req.cryptoKey)
try:
response = client.projects_locations_keyRings_cryptoKeys.Patch(req)
except apitools_exceptions.HttpError:
return None
return response
def HandleErrors(self, args,
set_primary_version_succeeds,
other_updates_succeed,
fields_to_update):
err = 'An Error occurred:'
if not set_primary_version_succeeds:
err += ' Failed to update field \'primaryVersion\'.'
elif args.primary_version:
err += ' Field \'primaryVersion\' was updated.'
if not other_updates_succeed:
err += ' Failed to update field(s) \'{}\'.'.format(
'\', \''.join(fields_to_update))
elif fields_to_update:
err += ' Field(s) \'{}\' were updated.'.format(
'\', \''.join(fields_to_update))
raise exceptions.ToolException(err)
def Run(self, args):
# Check the flags and raise an exception if any check fails.
fields_to_update = self.ProcessFlags(args)
# Try to get the cryptoKey and raise an exception if the key doesn't exist.
client = cloudkms_base.GetClientInstance()
messages = cloudkms_base.GetMessagesModule()
crypto_key_ref = flags.ParseCryptoKeyName(args)
crypto_key = client.projects_locations_keyRings_cryptoKeys.Get(
messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysGetRequest(
name=crypto_key_ref.RelativeName()))
# Try to update the key's primary version.
set_primary_version_succeeds = True
if args.primary_version:
response = self.UpdatePrimaryVersion(args)
if response:
crypto_key = response # If call succeeds, update the crypto_key.
else:
set_primary_version_succeeds = False
# Try other updates.
other_updates_succeed = True
if fields_to_update:
response = self.UpdateOthers(args, crypto_key, fields_to_update)
if response:
crypto_key = response # If call succeeds, update the crypto_key.
else:
other_updates_succeed = False
if (not set_primary_version_succeeds) or (not other_updates_succeed):
self.HandleErrors(args,
set_primary_version_succeeds,
other_updates_succeed,
fields_to_update)
else:
return crypto_key
| [
"[email protected]"
] | |
c05cee85368f5bec48ac6aa5ced76d9bf8558474 | d18d7f86a1e701caada063d09ee00fe08a95e353 | /test/kapp/func/ch/data_stmt/kapp_func_ch_data_stmt_test.py | 55eda51adeca269c0d1de4830ea59af09f6cc3ab | [
"BSD-3-Clause"
] | permissive | E3SM-Project/KGen | 2e097b2ef979b42b094089f337d49240838aa13b | c0035c93d21286da6519a74ff527b6a009781de4 | refs/heads/master | 2021-02-14T00:01:10.939108 | 2020-06-15T18:49:58 | 2020-06-15T18:49:58 | 244,747,822 | 3 | 0 | NOASSERTION | 2020-03-03T21:43:57 | 2020-03-03T21:43:56 | null | UTF-8 | Python | false | false | 97 | py |
from kapp_func_ch_test import KAppFuncCHTest
class KAppFuncCHDSTTest(KAppFuncCHTest):
pass
| [
"[email protected]"
] | |
73ac9438958d2800fff63c4333e4bceb2498904c | afe04cd22c2a839668dd0f29c314cc4b35bc7345 | /mayidaili_tool.py | 9aecca25ec13ff9c9429250376ae07b45bfdebb6 | [
"Apache-2.0"
] | permissive | 343829084/base_function | 1bded81816dcc9e18cbd36d1484be782931bec55 | 296aaf84bbfc084f9d27d84d4b300823a356e484 | refs/heads/master | 2020-03-23T16:00:48.610252 | 2018-07-21T03:06:15 | 2018-07-21T03:06:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | # coding: utf-8
import hashlib
import time
# replace appkey and secret with your own values
import requests
def useproxy(url,headers,postdata=None,post=False):
appkey = ""
secret = "c978952ede1661bd5342b34ca0bf561e"
    paramMap = {
        "app_key": appkey,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")  # if your program runs outside China, adjust for the time zone
    }
    # sort the keys
    keys = sorted(paramMap.keys())
    codes = "%s%s%s" % (secret, str().join('%s%s' % (key, paramMap[key]) for key in keys), secret)
    # compute the signature (md5 needs bytes under Python 3)
    sign = hashlib.md5(codes.encode('utf-8')).hexdigest().upper()
    paramMap["sign"] = sign
    # assemble the value of the Proxy-Authorization request header
    keys = paramMap.keys()
    authHeader = "MYH-AUTH-MD5 " + str('&').join('%s=%s' % (key, paramMap[key]) for key in keys)
proxy='http://s5.proxy.mayidaili.com:8123'
    # next, access the target through the Mayi dynamic proxy
    # target = 'http://members.3322.org/dyndns/getip'
headers['Proxy-Authorization'] = authHeader
if post:
try:
r = requests.post(url=url, headers=headers, proxies={'http': proxy},data=postdata)
#print('in post')
#print(r.text)
except Exception as e:
return None
else:
try:
r=requests.get(url=url,headers=headers,proxies={'http':proxy})
        except Exception as e:
return None
return r
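
# Example call (URL and headers are placeholders):
#   r = useproxy('http://example.com/', {'User-Agent': 'Mozilla/5.0'})
#   if r is not None:
#       print(r.text)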
| [
"[email protected]"
] | |
8b11a20af2cab0011ea4d1e067cbf96c6d2c6c41 | 71e70eb343584d249b6f7d0e366ad5ac91f90723 | /common/utils.py | 20f12cc78e03a9bc3f6881ff306966a4b146e17e | [] | no_license | ljarufe/giant | bf1d081aec8afbddfc0162f79d38f53da1624255 | 8f3db1365a8b862b07fae8920a249bf1a532980f | refs/heads/master | 2021-01-23T16:30:37.654917 | 2013-10-21T15:58:12 | 2013-10-21T15:58:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,299 | py | # -*- coding: utf-8 -*-
import codecs
from django.conf import settings
from django.core.mail import EmailMessage, BadHeaderError
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson
# models
from proyectos.models import Nivel
def direct_response(request, *args, **kwargs):
"""
    Shorthand for render_to_response that sends context_instance to the template
"""
kwargs['context_instance'] = RequestContext(request)
return render_to_response(*args, **kwargs)
def json_response(data):
"""
    Returns a JSON response carrying the information in data
"""
return HttpResponse(simplejson.dumps(data), mimetype = 'application/json')
def send_html_mail(subject, html_file, data, from_email, to_emails, files=None):
    """
    Sends an e-mail whose HTML content is read from a UTF-8 encoded file
    located under /media, filling in the given data, which must be a
    sequence; as an optional parameter, an attachment can be passed via
    the files argument
    """
    content = ""
    try:
        html = codecs.open('%shtml/%s' % (settings.MEDIA_ROOT, html_file), "r",
                           "utf-8")
        content = html.read() % data
        html.close()
    except Exception:
        print("could not read the html file")
    try:
        msg = EmailMessage(subject, content, from_email, to_emails)
        msg.content_subtype = "html"
        if files is not None:
            msg.attach_file(files)
        msg.send()
    except BadHeaderError:
        return HttpResponse('An invalid e-mail header was found')
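
# Illustrative call (template name and recipients are placeholders):
#   send_html_mail(u"Welcome", "welcome.html", (user.first_name,),
#                  "[email protected]", [user.email])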
def get_detalles_construccion(id_proyecto):
"""
    Creates a data structure that stores the details of the related tables
    nivel/ambiente/acabados (level/room/finishes); the structure has the
    form:

    [{'nivel': 'Primer piso',
      'rowspan': 14,
      'ambientes': [{'acabados': [['Parket', 'XXX', '12x23m', 'parket'],
                                  ['nombre', 'marca', 'medidas', 'descripcion']],
                     'ambiente': 'Sala'}]}]
"""
niveles_objeto = Nivel.objects.filter(construccion__proyecto = id_proyecto).distinct()
detalles_construccion = []
for nivel in niveles_objeto:
ambientes_objeto = nivel.ambientes.all()
detalles_nivel = {'nivel': nivel.nombre,
'ambientes': []}
rowspan = 0
for ambiente in ambientes_objeto:
acabados_objeto = ambiente.acabados.all()
detalles_ambiente = {'ambiente': ambiente.nombre,
'acabados': []}
rowspan += len(acabados_objeto)
for acabado in acabados_objeto:
detalles_acabado = [acabado.nombre, acabado.marca, \
acabado.medidas, acabado.descripcion]
detalles_ambiente['acabados'].append(detalles_acabado)
detalles_nivel['ambientes'].append(detalles_ambiente)
detalles_nivel['rowspan'] = rowspan
detalles_construccion.append(detalles_nivel)
return detalles_construccion
| [
"[email protected]"
] | |
18c382fddeb7fa8c62d83f435d9993cefc3c8fb6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_looniest.py | 7e8f7a11b5de3a81edeb8fc566e9cba6c7fc74ea | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | py |
from xai.brain.wordbase.nouns._loony import _LOONY
#calss header
class _LOONIEST(_LOONY, ):
def __init__(self,):
_LOONY.__init__(self)
self.name = "LOONIEST"
self.specie = 'nouns'
self.basic = "loony"
self.jsondata = {}
| [
"[email protected]"
] | |
7849b3485cdb443da03ffcc1456b6ced0e6ac0a0 | 01a9e501d60ee5a5d660886a622618bf95eb46d8 | /user/forms/__init__.py | 52859f22d79b30285d8f8c07c9a2b372ff875791 | [] | no_license | tuantran37/UniRanking | 1938252a034b3ce3b8ee7f1a797b3d42c2eb9ecb | be8778fa004207bb0fea6e83e8e108347a3693c6 | refs/heads/master | 2023-08-05T06:31:52.868483 | 2018-08-27T18:26:04 | 2018-08-27T18:26:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from .sector_form import AddSectorForm, UpdateSectorForm, RemoveSectorForm
from .favoutite_university_form import AddFavouriteUniversityForm, RemoveFavouriteUniversityForm | [
"[email protected]"
] | |
38d4d461845d0243da88843ecba563340d11b19b | 75c1ac9405911ff3490d0de854650ade12b95e63 | /random_exercises/prework_2-5_consecutive.py | 817d7f583ceaa15f75391025c592d7edc2f96701 | [] | no_license | PropeReferio/practice-exercises | df6063b859da7f5966fc972ad44e74288054bc16 | 571856e452ef60d5bac7adebb49f6a6654e96733 | refs/heads/master | 2020-09-23T13:28:55.020969 | 2020-04-28T16:51:16 | 2020-04-28T16:51:16 | 225,511,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #Write a function to check to see if all numbers in list are consecutive
#numbers. For example, [2,3,4,5,6,7] are consecutive numbers, but [1,2,4,5]
#are not consecutive numbers. The return value should be boolean type.
check = [1,2,4,6]
check2 = [2,3,4,5,6,7]
def is_consecutive(a_list):
"""Checks to see if the numbers in a list are consecutive"""
    # Compare each element with its successor; lists with fewer than two
    # elements are trivially consecutive, and the input list is not modified.
    return all(b == a + 1 for a, b in zip(a_list, a_list[1:]))
works = is_consecutive(check)
print(works)
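# check2 (defined above) covers the consecutive case:
also_works = is_consecutive(check2)
print(also_works)  # True: 2 through 7 are consecutive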
| [
"[email protected]"
] | |
82488481c998675c7f0e1ce4c13fbfc98fb02f5e | 90ae1f7729920be498f04faef7efb2195cfc5ab7 | /engine/_gevent.py | 5cbcd6b17a1f7664ede7170045eae2067053d843 | [] | no_license | jrecuero/pyengine | 297a65dab16bf2a24d8b9f8a3385db21e5725502 | e5de88a0053b2690230c04d7c9183d619ece32cf | refs/heads/master | 2022-12-27T01:15:09.302018 | 2020-10-16T02:53:03 | 2020-10-16T02:53:03 | 304,499,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,749 | py | import pygame
from ._loggar import Log
class GEvent:
"""GEvent implements all codes related with user events used in the
application via pygame events.
"""
NONE = 0
# GEvent type. Used by pygame events.
# pygame.USEREVENT = 24
USER = pygame.USEREVENT
ENGINE = pygame.USEREVENT + 1
TIMER = pygame.USEREVENT + 2
CALLBACK = pygame.USEREVENT + 3
APP_DEFINED = pygame.USEREVENT + 4
USER_DEFINED = pygame.USEREVENT + 5
    # GEvent subtype. Used internally
MOVE_TO = 1
DELETE = 2
CREATE = 3
LOGGER = 4
SUBTYPE_USER_DEFINED = 1000
_gevent_subtypes = {
"MOVE_TO": 1,
"DELETE": 2,
"CREATE": 3,
"LOGGER": 4,
"USER_DEFINED": 1000, }
_gevent_subtypes_user_defined_last = 1000
# Event Source/Destination
HANDLER = 1
SCENE = 2
BOARD = 3
OBJECT = 4
OTHER = 5
SRC_DST_USER_DEFINED = 1000
@classmethod
def register_subtype_event(cls, name):
"""register_subtype_event registers a new user defined subtype event.
"""
if name in cls._gevent_subtypes:
return None
cls._gevent_subtypes_user_defined_last += 1
cls._gevent_subtypes[name] = cls._gevent_subtypes_user_defined_last
return cls._gevent_subtypes[name]
@classmethod
def get_subtype_event(cls, name):
"""get_subtype_event returns the subtype for a given user defined
subtype event.
"""
return cls._gevent_subtypes.get(name, None)
@staticmethod
def check_destination(event, dest):
"""check_destination checked if the given destination is in the event
dest attribute.
"""
if isinstance(event.destination, list):
return dest in event.destination
else:
return dest == event.destination
@staticmethod
def post_event(etype, esubtype, source, destination, payload, **kwargs):
"""post_event creates and post a new event.
"""
the_event = pygame.event.Event(etype, subtype=esubtype, source=source, destination=destination, payload=payload, **kwargs)
pygame.event.post(the_event)
Log.Post().Event(etype).Subtype(esubtype).Source(source).Destination(destination).Payload(str(payload)).Kwargs(kwargs).call()
@staticmethod
def new_event(etype, esubtype, source, destination, payload, **kwargs):
"""new_event creates a new event.
"""
the_event = pygame.event.Event(etype, subtype=esubtype, source=source, destination=destination, payload=payload, **kwargs)
Log.New().Event(etype).Subtype(esubtype).Source(source).Destination(destination).Payload(str(payload)).Kwargs(kwargs).call()
return the_event
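
# Minimal usage sketch (event name and payload are illustrative):
#   code = GEvent.register_subtype_event("EXPLOSION")
#   GEvent.post_event(GEvent.ENGINE, code, GEvent.OBJECT, GEvent.SCENE,
#                     payload={"x": 10, "y": 20})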
| [
"[email protected]"
] | |
4f8c9e11ffc1a73a137a6a5c0323df0ecc30e1b6 | e7451193592aaee2536924ef03846eee920bcf94 | /ucscentralsdk/mometa/compute/ComputeBoardController.py | 4bb83485c59beb7794d79694940732d6a8920efd | [
"Apache-2.0"
] | permissive | vinayravish/ucscentralsdk | eb33191f3c7675561298af8cef9b30f6e220b7b2 | 809a3782d26c69f50cf7237700e107f1a9857870 | refs/heads/master | 2021-01-18T01:51:57.275207 | 2016-07-20T05:37:26 | 2016-07-20T05:37:26 | 62,137,219 | 0 | 0 | null | 2016-06-28T12:00:34 | 2016-06-28T12:00:34 | null | UTF-8 | Python | false | false | 12,098 | py | """This module contains the general information for ComputeBoardController ManagedObject."""
from ...ucscentralmo import ManagedObject
from ...ucscentralcoremeta import UcsCentralVersion, MoPropertyMeta, MoMeta
from ...ucscentralmeta import VersionMeta
class ComputeBoardControllerConsts():
OPER_STATE_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPER_STATE_AUTO_UPGRADE = "auto-upgrade"
OPER_STATE_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPER_STATE_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPER_STATE_CONFIG = "config"
OPER_STATE_DECOMISSIONING = "decomissioning"
OPER_STATE_DEGRADED = "degraded"
OPER_STATE_DISABLED = "disabled"
OPER_STATE_DISCOVERY = "discovery"
OPER_STATE_DISCOVERY_FAILED = "discovery-failed"
OPER_STATE_EQUIPMENT_PROBLEM = "equipment-problem"
OPER_STATE_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPER_STATE_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPER_STATE_IDENTIFY = "identify"
OPER_STATE_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPER_STATE_INOPERABLE = "inoperable"
OPER_STATE_MALFORMED_FRU = "malformed-fru"
OPER_STATE_NOT_SUPPORTED = "not-supported"
OPER_STATE_OPERABLE = "operable"
OPER_STATE_PEER_COMM_PROBLEM = "peer-comm-problem"
OPER_STATE_PERFORMANCE_PROBLEM = "performance-problem"
OPER_STATE_POST_FAILURE = "post-failure"
OPER_STATE_POWER_PROBLEM = "power-problem"
OPER_STATE_POWERED_OFF = "powered-off"
OPER_STATE_REMOVED = "removed"
OPER_STATE_THERMAL_PROBLEM = "thermal-problem"
OPER_STATE_UNKNOWN = "unknown"
OPER_STATE_UPGRADE_PROBLEM = "upgrade-problem"
OPER_STATE_VOLTAGE_PROBLEM = "voltage-problem"
OPERABILITY_ACCESSIBILITY_PROBLEM = "accessibility-problem"
OPERABILITY_AUTO_UPGRADE = "auto-upgrade"
OPERABILITY_BIOS_POST_TIMEOUT = "bios-post-timeout"
OPERABILITY_CHASSIS_LIMIT_EXCEEDED = "chassis-limit-exceeded"
OPERABILITY_CONFIG = "config"
OPERABILITY_DECOMISSIONING = "decomissioning"
OPERABILITY_DEGRADED = "degraded"
OPERABILITY_DISABLED = "disabled"
OPERABILITY_DISCOVERY = "discovery"
OPERABILITY_DISCOVERY_FAILED = "discovery-failed"
OPERABILITY_EQUIPMENT_PROBLEM = "equipment-problem"
OPERABILITY_FABRIC_CONN_PROBLEM = "fabric-conn-problem"
OPERABILITY_FABRIC_UNSUPPORTED_CONN = "fabric-unsupported-conn"
OPERABILITY_IDENTIFY = "identify"
OPERABILITY_IDENTITY_UNESTABLISHABLE = "identity-unestablishable"
OPERABILITY_INOPERABLE = "inoperable"
OPERABILITY_MALFORMED_FRU = "malformed-fru"
OPERABILITY_NOT_SUPPORTED = "not-supported"
OPERABILITY_OPERABLE = "operable"
OPERABILITY_PEER_COMM_PROBLEM = "peer-comm-problem"
OPERABILITY_PERFORMANCE_PROBLEM = "performance-problem"
OPERABILITY_POST_FAILURE = "post-failure"
OPERABILITY_POWER_PROBLEM = "power-problem"
OPERABILITY_POWERED_OFF = "powered-off"
OPERABILITY_REMOVED = "removed"
OPERABILITY_THERMAL_PROBLEM = "thermal-problem"
OPERABILITY_UNKNOWN = "unknown"
OPERABILITY_UPGRADE_PROBLEM = "upgrade-problem"
OPERABILITY_VOLTAGE_PROBLEM = "voltage-problem"
PERF_LOWER_CRITICAL = "lower-critical"
PERF_LOWER_NON_CRITICAL = "lower-non-critical"
PERF_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
PERF_NOT_SUPPORTED = "not-supported"
PERF_OK = "ok"
PERF_UNKNOWN = "unknown"
PERF_UPPER_CRITICAL = "upper-critical"
PERF_UPPER_NON_CRITICAL = "upper-non-critical"
PERF_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
POWER_DEGRADED = "degraded"
POWER_ERROR = "error"
POWER_FAILED = "failed"
POWER_NOT_SUPPORTED = "not-supported"
POWER_OFF = "off"
POWER_OFFDUTY = "offduty"
POWER_OFFLINE = "offline"
POWER_OK = "ok"
POWER_ON = "on"
POWER_ONLINE = "online"
POWER_POWER_SAVE = "power-save"
POWER_TEST = "test"
POWER_UNKNOWN = "unknown"
PRESENCE_EMPTY = "empty"
PRESENCE_EQUIPPED = "equipped"
PRESENCE_EQUIPPED_IDENTITY_UNESTABLISHABLE = "equipped-identity-unestablishable"
PRESENCE_EQUIPPED_NOT_PRIMARY = "equipped-not-primary"
PRESENCE_EQUIPPED_SLAVE = "equipped-slave"
PRESENCE_EQUIPPED_UNSUPPORTED = "equipped-unsupported"
PRESENCE_EQUIPPED_WITH_MALFORMED_FRU = "equipped-with-malformed-fru"
PRESENCE_INACCESSIBLE = "inaccessible"
PRESENCE_MISMATCH = "mismatch"
PRESENCE_MISMATCH_IDENTITY_UNESTABLISHABLE = "mismatch-identity-unestablishable"
PRESENCE_MISMATCH_SLAVE = "mismatch-slave"
PRESENCE_MISSING = "missing"
PRESENCE_MISSING_SLAVE = "missing-slave"
PRESENCE_NOT_SUPPORTED = "not-supported"
PRESENCE_UNAUTHORIZED = "unauthorized"
PRESENCE_UNKNOWN = "unknown"
THERMAL_LOWER_CRITICAL = "lower-critical"
THERMAL_LOWER_NON_CRITICAL = "lower-non-critical"
THERMAL_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
THERMAL_NOT_SUPPORTED = "not-supported"
THERMAL_OK = "ok"
THERMAL_UNKNOWN = "unknown"
THERMAL_UPPER_CRITICAL = "upper-critical"
THERMAL_UPPER_NON_CRITICAL = "upper-non-critical"
THERMAL_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
VOLTAGE_LOWER_CRITICAL = "lower-critical"
VOLTAGE_LOWER_NON_CRITICAL = "lower-non-critical"
VOLTAGE_LOWER_NON_RECOVERABLE = "lower-non-recoverable"
VOLTAGE_NOT_SUPPORTED = "not-supported"
VOLTAGE_OK = "ok"
VOLTAGE_UNKNOWN = "unknown"
VOLTAGE_UPPER_CRITICAL = "upper-critical"
VOLTAGE_UPPER_NON_CRITICAL = "upper-non-critical"
VOLTAGE_UPPER_NON_RECOVERABLE = "upper-non-recoverable"
class ComputeBoardController(ManagedObject):
"""This is ComputeBoardController class."""
consts = ComputeBoardControllerConsts()
naming_props = set([])
mo_meta = MoMeta("ComputeBoardController", "computeBoardController", "boardController", VersionMeta.Version141a, "InputOutput", 0x1f, [], ["read-only"], [u'computeBlade', u'computeExtBoard', u'computeRackUnit', u'computeServerUnit'], [u'mgmtController'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version141a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, [], []),
"location_dn": MoPropertyMeta("location_dn", "locationDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"model": MoPropertyMeta("model", "model", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"oper_qualifier_reason": MoPropertyMeta("oper_qualifier_reason", "operQualifierReason", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "bios-post-timeout", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "malformed-fru", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "upgrade-problem", "voltage-problem"], []),
"operability": MoPropertyMeta("operability", "operability", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["accessibility-problem", "auto-upgrade", "bios-post-timeout", "chassis-limit-exceeded", "config", "decomissioning", "degraded", "disabled", "discovery", "discovery-failed", "equipment-problem", "fabric-conn-problem", "fabric-unsupported-conn", "identify", "identity-unestablishable", "inoperable", "malformed-fru", "not-supported", "operable", "peer-comm-problem", "performance-problem", "post-failure", "power-problem", "powered-off", "removed", "thermal-problem", "unknown", "upgrade-problem", "voltage-problem"], []),
"perf": MoPropertyMeta("perf", "perf", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
"power": MoPropertyMeta("power", "power", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["degraded", "error", "failed", "not-supported", "off", "offduty", "offline", "ok", "on", "online", "power-save", "test", "unknown"], []),
"presence": MoPropertyMeta("presence", "presence", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["empty", "equipped", "equipped-identity-unestablishable", "equipped-not-primary", "equipped-slave", "equipped-unsupported", "equipped-with-malformed-fru", "inaccessible", "mismatch", "mismatch-identity-unestablishable", "mismatch-slave", "missing", "missing-slave", "not-supported", "unauthorized", "unknown"], []),
"revision": MoPropertyMeta("revision", "revision", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"serial": MoPropertyMeta("serial", "serial", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version141a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"thermal": MoPropertyMeta("thermal", "thermal", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"voltage": MoPropertyMeta("voltage", "voltage", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["lower-critical", "lower-non-critical", "lower-non-recoverable", "not-supported", "ok", "unknown", "upper-critical", "upper-non-critical", "upper-non-recoverable"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"locationDn": "location_dn",
"model": "model",
"operQualifierReason": "oper_qualifier_reason",
"operState": "oper_state",
"operability": "operability",
"perf": "perf",
"power": "power",
"presence": "presence",
"revision": "revision",
"rn": "rn",
"serial": "serial",
"status": "status",
"thermal": "thermal",
"vendor": "vendor",
"voltage": "voltage",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.id = None
self.location_dn = None
self.model = None
self.oper_qualifier_reason = None
self.oper_state = None
self.operability = None
self.perf = None
self.power = None
self.presence = None
self.revision = None
self.serial = None
self.status = None
self.thermal = None
self.vendor = None
self.voltage = None
ManagedObject.__init__(self, "ComputeBoardController", parent_mo_or_dn, **kwargs)
| [
"[email protected]"
] | |
6dd36fb253ed73f381095bd750e21c9931ab3833 | 5fdcf15f818eb2d0c7b5dd39443064d5bc42aff9 | /lc_valid_parantheses.py | 18bb3778b9edc3c6721301f71f978ca4a87a32cb | [] | no_license | vincentt117/coding_challenge | acf3664034a71ffd70c5f1ac0f6a66768e097a6e | 5deff070bb9f6b19a1cfc0a6086ac155496fbb78 | refs/heads/master | 2021-07-02T05:43:08.007851 | 2020-08-27T02:16:19 | 2020-08-27T02:16:19 | 146,027,883 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # https://leetcode.com/problems/valid-parentheses/description/
class Solution:
    def isValid(self, s):
        """
        :type s: str
        :rtype: bool
        """
        if not s:
            return True
        checkStack = []
        for i in s:
            # push the expected closing bracket for every opener
            if i == "(":
                checkStack.append(")")
            elif i == "[":
                checkStack.append("]")
            elif i == "{":
                checkStack.append("}")
            else:
                # a closer must match the most recently expected one
                if not checkStack or checkStack.pop() != i:
                    return False
        return not checkStack
# Faster than 99% of accepted submissions
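# Minimal usage sketch (added for illustration; not part of the original submission):
if __name__ == "__main__":
    sol = Solution()
    assert sol.isValid("()[]{}") is True   # matched pairs in order
    assert sol.isValid("(]") is False      # mismatched closer
    assert sol.isValid("([)]") is False    # interleaved pairs
    print("all checks passed")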
| [
"[email protected]"
] | |
861a4a235ebe5bbe9d3f3015fffc82b10f920ddd | c1ea75db1da4eaa485d39e9d8de480b6ed0ef40f | /helper/conf/__init__.py | 0f8d87b95106d30140f7ae12f00266728f66965c | [
"Apache-2.0"
] | permissive | gasbarroni8/VideoCrawlerEngine | a4f092b0a851dc0487e4dcf4c98b62d6282a6180 | 994933d91d85bb87ae8dfba1295f7a69f6d50097 | refs/heads/master | 2023-04-06T07:59:29.269894 | 2021-02-10T16:09:15 | 2021-02-10T16:09:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 390 | py |
from .base import get_conf as __get_conf, ConfMeta, iter_conf
from typing import ClassVar
import importlib
import sys
DEBUG = True
def get_conf(__name: str, **kwargs) -> ClassVar[ConfMeta]:
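    # Lazily import the named conf submodule (once) so its configuration class
    # gets registered, then look that class up and return a configured instance.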
package = f'{get_conf.__module__}.{__name}'
if not sys.modules.get(package, None):
importlib.import_module(package)
conf_cls = __get_conf(__name)
return conf_cls(**kwargs)
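# Hypothetical usage (assumes a sibling conf submodule, e.g. helper/conf/app.py,
# defines a ConfMeta-based configuration class):
#   app_conf = get_conf('app', debug=DEBUG)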
| [
"[email protected]"
] | |
bf18c68b958da2b5499bf7ff92df74368e1f21ef | db552a9fa9d1a3f45ff3a8fb668ad47998084aa1 | /ROS_projects_study/service_use/devel/lib/python2.7/dist-packages/my_service/srv/_AddInts.py | 73e64b34dc3500768888b91c9ba4926e7d1f796a | [] | no_license | gdamion/ROS_projects | 026529e99898152de0facfa854b01e1b2af55319 | a3e9c8d199fd7577ce183689428f159bf29acd41 | refs/heads/master | 2020-05-01T15:20:46.689036 | 2019-08-18T09:52:58 | 2019-08-18T09:52:58 | 177,544,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,402 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from my_service/AddIntsRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class AddIntsRequest(genpy.Message):
_md5sum = "05577f62131ad26921bff0de6b2cb722"
_type = "my_service/AddIntsRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 first
int32 second
"""
__slots__ = ['first','second']
_slot_types = ['int32','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
first,second
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddIntsRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.first is None:
self.first = 0
if self.second is None:
self.second = 0
else:
self.first = 0
self.second = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.first, _x.second))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.first, _x.second,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2i().pack(_x.first, _x.second))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.first, _x.second,) = _get_struct_2i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from my_service/AddIntsResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class AddIntsResponse(genpy.Message):
_md5sum = "0ba699c25c9418c0366f3595c0c8e8ec"
_type = "my_service/AddIntsResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """int32 sum
"""
__slots__ = ['sum']
_slot_types = ['int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
sum
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(AddIntsResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.sum is None:
self.sum = 0
else:
self.sum = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_i().pack(self.sum))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(self.sum,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_i().pack(self.sum))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(self.sum,) = _get_struct_i().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
class AddInts(object):
_type = 'my_service/AddInts'
_md5sum = '85a734c776d49ce7e013b15b395d3f69'
_request_class = AddIntsRequest
_response_class = AddIntsResponse
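# Illustrative only (not generated by genpy): typical use of this service with rospy.
# Server side:
#   import rospy
#   from my_service.srv import AddInts, AddIntsResponse
#   def handle(req):
#       return AddIntsResponse(sum=req.first + req.second)
#   rospy.init_node('add_ints_server')
#   rospy.Service('add_ints', AddInts, handle)
#   rospy.spin()
# Client side:
#   rospy.wait_for_service('add_ints')
#   add_ints = rospy.ServiceProxy('add_ints', AddInts)
#   print(add_ints(2, 3).sum)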
| [
"[email protected]"
] | |
20a8c083a677609f674239ea399cf51c63cce0ba | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/subroutines/Picture NaK-180ms levit.sub.py | 485cfa3099cbdc48d849f03c32e4ce7d57956fb9 | [] | no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,842 | py | prg_comment = ""
prg_version = "0.5.1"
def program(prg, cmd):
prg.add(-2599000, "Na Repumper1 (+) Amp", 1.000000)
prg.add(-2589000, "K probe Repumper (+) Amp", 1.000000)
prg.add(-2579000, "K Repumper 1p (+) Amp", 1.000000)
prg.add(-2569000, "K probe Cooler (-) Amp", 1.000000)
prg.add(-2559000, "Na Dark Spot Amp", 1.000000)
prg.add(-2549000, "Na Repumper MOT Amp", 1.000000)
prg.add(-1229000, "Shutter Probe K Open")
prg.add(-1219000, "Shutter RepumperMOT K Open")
prg.add(-1209000, "Shutter repump Na Open")
prg.add(-1199000, "Shutter Probe Na Open")
prg.add(-709000, "Na Probe/Push (-) Amp", 1.000000)
prg.add(-699000, "Na Probe/Push (+) Amp", 1.000000)
prg.add(-9500, "B comp y", 0.000000)
prg.add(-8970, "B comp y", 0.260000)
prg.add(-8450, "B comp y", 0.530000)
prg.add(-7920, "B comp y", 0.790000)
prg.add(-7390, "B comp y", 1.050000)
prg.add(-6870, "B comp y", 1.320000)
prg.add(-6340, "B comp y", 1.580000)
prg.add(-5820, "B comp y", 1.840000)
prg.add(-5290, "B comp y", 2.110000)
prg.add(-4760, "B comp y", 2.370000)
prg.add(-4240, "B comp y", 2.630000)
prg.add(-3710, "B comp y", 2.890000)
prg.add(-3180, "B comp y", 3.160000)
prg.add(-2660, "B comp y", 3.420000)
prg.add(-2130, "B comp y", 3.680000)
prg.add(-1610, "B comp y", 3.950000)
prg.add(-1080, "B comp y", 4.210000)
prg.add(-550, "B comp y", 4.470000)
prg.add(-30, "B comp y", 4.740000)
prg.add(0, "IGBT 1 pinch", -10.000000)
prg.add(20, "IGBT 3 Open")
prg.add(60, "IGBT 2 pinch+comp", 10.000000)
prg.add(160, "IGBT 2 pinch+comp", 9.800000)
prg.add(260, "IGBT 2 pinch+comp", 9.600000)
prg.add(360, "IGBT 2 pinch+comp", 9.400000)
prg.add(460, "IGBT 2 pinch+comp", 9.200000)
prg.add(500, "B comp y", 5.000000)
prg.add(560, "IGBT 2 pinch+comp", 9.000000)
prg.add(660, "IGBT 2 pinch+comp", 8.800000)
prg.add(760, "IGBT 2 pinch+comp", 8.600000)
prg.add(859, "IGBT 2 pinch+comp", 8.400000)
prg.add(960, "IGBT 2 pinch+comp", 8.200000)
prg.add(1060, "IGBT 2 pinch+comp", 8.000000)
prg.add(1160, "IGBT 2 pinch+comp", 7.800000)
prg.add(1260, "IGBT 2 pinch+comp", 7.600000)
prg.add(1360, "IGBT 2 pinch+comp", 7.400000)
prg.add(1460, "IGBT 2 pinch+comp", 7.200000)
prg.add(1560, "IGBT 2 pinch+comp", 7.000000)
prg.add(1660, "IGBT 2 pinch+comp", 6.800000)
prg.add(1760, "IGBT 2 pinch+comp", 6.600000)
prg.add(1860, "IGBT 2 pinch+comp", 6.400000)
prg.add(1960, "IGBT 2 pinch+comp", 6.200000)
prg.add(2060, "IGBT 2 pinch+comp", 6.000000)
prg.add(2160, "IGBT 2 pinch+comp", 5.800000)
prg.add(2260, "IGBT 2 pinch+comp", 5.600000)
prg.add(2360, "IGBT 2 pinch+comp", 5.400000)
prg.add(2460, "IGBT 2 pinch+comp", 5.200000)
prg.add(2560, "IGBT 2 pinch+comp", 5.000000)
prg.add(2660, "IGBT 2 pinch+comp", 4.800000)
prg.add(2760, "IGBT 2 pinch+comp", 4.600000)
prg.add(2859, "IGBT 2 pinch+comp", 4.400000)
prg.add(2960, "IGBT 2 pinch+comp", 4.200000)
prg.add(3060, "IGBT 2 pinch+comp", 4.000000)
prg.add(3160, "IGBT 2 pinch+comp", 3.800000)
prg.add(3260, "IGBT 2 pinch+comp", 3.600000)
prg.add(3360, "IGBT 2 pinch+comp", 3.400000)
prg.add(3459, "IGBT 2 pinch+comp", 3.200000)
prg.add(3560, "IGBT 2 pinch+comp", 3.000000)
prg.add(3660, "IGBT 2 pinch+comp", 2.800000)
prg.add(3760, "IGBT 2 pinch+comp", 2.600000)
prg.add(3860, "IGBT 2 pinch+comp", 2.400000)
prg.add(3960, "IGBT 2 pinch+comp", 2.200000)
prg.add(4060, "IGBT 2 pinch+comp", 2.000000)
prg.add(4160, "IGBT 2 pinch+comp", 1.800000)
prg.add(4260, "IGBT 2 pinch+comp", 1.600000)
prg.add(4360, "IGBT 2 pinch+comp", 1.400000)
prg.add(4460, "IGBT 2 pinch+comp", 1.200000)
prg.add(4560, "IGBT 2 pinch+comp", 1.000000)
prg.add(4660, "IGBT 2 pinch+comp", 0.800000)
prg.add(4760, "IGBT 2 pinch+comp", 0.600000)
prg.add(4860, "IGBT 2 pinch+comp", 0.400000)
prg.add(4960, "IGBT 2 pinch+comp", 0.200000)
prg.add(5060, "IGBT 2 pinch+comp", 0.000000)
prg.add(5160, "IGBT 2 pinch+comp", -0.200000)
prg.add(5260, "IGBT 2 pinch+comp", -0.400000)
prg.add(5360, "IGBT 2 pinch+comp", -0.600000)
prg.add(5460, "IGBT 2 pinch+comp", -0.800000)
prg.add(5560, "IGBT 2 pinch+comp", -1.000000)
prg.add(5659, "IGBT 2 pinch+comp", -1.200000)
prg.add(5760, "IGBT 2 pinch+comp", -1.400000)
prg.add(5860, "IGBT 2 pinch+comp", -1.600000)
prg.add(5960, "IGBT 2 pinch+comp", -1.800000)
prg.add(6060, "IGBT 2 pinch+comp", -2.000000)
prg.add(6160, "IGBT 2 pinch+comp", -2.200000)
prg.add(6260, "IGBT 2 pinch+comp", -2.400000)
prg.add(6360, "IGBT 2 pinch+comp", -2.600000)
prg.add(6460, "IGBT 2 pinch+comp", -2.800000)
prg.add(6560, "IGBT 2 pinch+comp", -3.000000)
prg.add(6660, "IGBT 2 pinch+comp", -3.200000)
prg.add(6760, "IGBT 2 pinch+comp", -3.400000)
prg.add(6860, "IGBT 2 pinch+comp", -3.600000)
prg.add(6959, "IGBT 2 pinch+comp", -3.800000)
prg.add(7060, "IGBT 2 pinch+comp", -4.000000)
prg.add(7160, "IGBT 2 pinch+comp", -4.200000)
prg.add(7260, "IGBT 2 pinch+comp", -4.400000)
prg.add(7360, "IGBT 2 pinch+comp", -4.600000)
prg.add(7460, "IGBT 2 pinch+comp", -4.800000)
prg.add(7560, "IGBT 2 pinch+comp", -5.000000)
prg.add(7660, "IGBT 2 pinch+comp", -5.200000)
prg.add(7760, "IGBT 2 pinch+comp", -5.400000)
prg.add(7860, "IGBT 2 pinch+comp", -5.600000)
prg.add(7960, "IGBT 2 pinch+comp", -5.800000)
prg.add(8060, "IGBT 2 pinch+comp", -6.000000)
prg.add(8159, "IGBT 2 pinch+comp", -6.200000)
prg.add(8260, "IGBT 2 pinch+comp", -6.400000)
prg.add(8360, "IGBT 2 pinch+comp", -6.600000)
prg.add(8460, "IGBT 2 pinch+comp", -6.800000)
prg.add(8560, "IGBT 2 pinch+comp", -7.000000)
prg.add(8660, "IGBT 2 pinch+comp", -7.200000)
prg.add(8760, "IGBT 2 pinch+comp", -7.400000)
prg.add(8860, "IGBT 2 pinch+comp", -7.600000)
prg.add(8960, "IGBT 2 pinch+comp", -7.800000)
prg.add(9060, "IGBT 2 pinch+comp", -8.000000)
prg.add(9160, "IGBT 2 pinch+comp", -8.200000)
prg.add(9260, "IGBT 2 pinch+comp", -8.400000)
prg.add(9360, "IGBT 2 pinch+comp", -8.600000)
prg.add(9460, "IGBT 2 pinch+comp", -8.800000)
prg.add(9560, "IGBT 2 pinch+comp", -9.000000)
prg.add(9660, "IGBT 2 pinch+comp", -9.200000)
prg.add(9760, "IGBT 2 pinch+comp", -9.400000)
prg.add(9860, "IGBT 2 pinch+comp", -9.600000)
prg.add(9960, "IGBT 2 pinch+comp", -9.800000)
prg.add(10060, "IGBT 2 pinch+comp", -10.000000)
prg.add(10100, "IGBT 4 Open")
prg.add(10120, "IGBT 5 Open")
prg.add(10350, "IGBT 1 pinch", -10.000000)
prg.add(10360, "IGBT 2 pinch+comp", -10.000000)
prg.add(10370, "IGBT 3 Close")
prg.add(10380, "IGBT 4 Close")
prg.add(10390, "IGBT 5 Open")
prg.add(10400, "Delta 2 Voltage", 0.000000)
prg.add(10410, "Delta 1 Current", 15.100000)
prg.add(10450, "B comp x", 0.000000)
prg.add(10500, "B comp y", 5.000000)
prg.add(11030, "B comp y", 4.740000)
prg.add(11550, "B comp y", 4.470000)
prg.add(12080, "B comp y", 4.210000)
prg.add(12609, "B comp y", 3.950000)
prg.add(13130, "B comp y", 3.680000)
prg.add(13660, "B comp y", 3.420000)
prg.add(14180, "B comp y", 3.160000)
prg.add(14710, "B comp y", 2.890000)
prg.add(15240, "B comp y", 2.630000)
prg.add(15760, "B comp y", 2.370000)
prg.add(16290, "B comp y", 2.110000)
prg.add(16820, "B comp y", 1.840000)
prg.add(17340, "B comp y", 1.580000)
prg.add(17870, "B comp y", 1.320000)
prg.add(18390, "B comp y", 1.050000)
prg.add(18920, "B comp y", 0.790000)
prg.add(19450, "B comp y", 0.530000)
prg.add(19970, "B comp y", 0.260000)
prg.add(20500, "B comp y", 0.000000)
prg.add(1782000, "B comp y", 1.000000)
prg.add(1795000, "IGBT 1 pinch", -10.000000)
prg.add(1795010, "IGBT 2 pinch+comp", -10.000000)
prg.add(1795020, "IGBT 3 Open")
prg.add(1795029, "IGBT 4 Open")
prg.add(1795040, "IGBT 5 Open")
prg.add(1795600, "K probe Cooler (-) freq", 99.500000)
prg.add(1796000, "K Cooler 2p (+) freq", 97.500000)
prg.add(1796399, "K Repumper 1p (+) Amp", 1000.000000)
prg.add(1796800, "K Repumper 1p (+) freq", 115.000000)
prg.add(1797200, "K Repumper 2p (+) freq", 96.000000)
prg.add(1798500, "Na Repumper MOT Amp", 1000.000000)
prg.add(1799000, "Na Repumper1 (+) Amp", 1000.000000)
prg.add(1799400, "Na Repumper Tune (+) freq", 1713.000000)
prg.add(1799800, "Na Probe/Push (+) freq", 110.000000)
prg.add(1800200, "Na Probe/Push (-) freq", 110.000000)
prg.add(1800500, "Trig ON Stingray 1")
prg.add(1800600, "Na Probe/Push (+) Amp", 1000.000000)
prg.add(1801000, "Na Probe/Push (-) Amp", 1000.000000)
prg.add(1801399, "K probe Cooler (-) Amp", 1000.000000)
prg.add(1802000, "Na Probe/Push (-) Amp", 1.000000)
prg.add(1802400, "K probe Cooler (-) Amp", 1.000000)
prg.add(1803000, "Trig OFF Stingray 1")
prg.add(2051000, "Shutter Probe Na Close")
prg.add(2061000, "Shutter Probe K Close")
prg.add(2800500, "Trig ON Stingray 1")
prg.add(2801000, "Na Probe/Push (-) Amp", 1000.000000)
prg.add(2801400, "K probe Cooler (-) Amp", 1000.000000)
prg.add(2802000, "Na Probe/Push (-) Amp", 1.000000)
prg.add(2802400, "K probe Cooler (-) Amp", 1.000000)
prg.add(2803000, "Trig OFF Stingray 1")
prg.add(2811000, "Na Repumper MOT Amp", 1.000000)
prg.add(2821000, "Na Repumper1 (+) Amp", 1.000000)
prg.add(2831000, "K Repumper 1p (+) Amp", 1.000000)
prg.add(3800500, "Trig ON Stingray 1")
prg.add(3802500, "Trig OFF Stingray 1")
prg.add(4800500, "Trig ON Stingray 1")
prg.add(4802500, "Trig OFF Stingray 1")
prg.add(5801000, "B comp y", 0.000000)
return prg
| [
"[email protected]"
] | |
221cec3f4089608568eafe559900dd873f9954db | ce7cd2b2f9709dbadf613583d9816c862003b38b | /oof3dtest | 8aa978a57bd7edb034f399937ceef705b49f9323 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | usnistgov/OOF3D | 32b01a25154443d29d0c44d5892387e8ef6146fa | 7614f8ea98a095e78c62c59e8952c0eb494aacfc | refs/heads/master | 2023-05-25T13:01:20.604025 | 2022-02-18T20:24:54 | 2022-02-18T20:24:54 | 29,606,158 | 34 | 7 | null | 2015-02-06T19:56:26 | 2015-01-21T19:04:14 | Python | UTF-8 | Python | false | false | 1,089 | #!python
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# [email protected].
# This is the start up script for the oof3d regression test suite. It
# just wraps regression.py. There's no difference between running
# this script and running "python regression.py" in the TEST3D
# directory except that with this script the user doesn't have to know
# how to find regression.py. (Hint: TEST3D is installed as 'ooftests'
# in the oof3d directory in site-packages, whereever that might be.)
import sys
import os
from math import *
import oof3d
sys.path.append(os.path.dirname(oof3d.__file__))
import ooftests
from ooftests import regression
homedir = os.path.dirname(regression.__file__)
regression.run(homedir)
| [
"[email protected]"
] | ||
792392790e9fed19536acbe1906d318837c248c1 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-mpc/huaweicloudsdkmpc/v1/model/video_extend_settings.py | 3ac0cb8875d30324610815939b561555434a26ff | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,866 | py | # coding: utf-8
import re
import six
class VideoExtendSettings:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'preset': 'str'
}
attribute_map = {
'preset': 'preset'
}
def __init__(self, preset=None):
"""VideoExtendSettings - a model defined in huaweicloud sdk"""
self._preset = None
self.discriminator = None
if preset is not None:
self.preset = preset
@property
def preset(self):
"""Gets the preset of this VideoExtendSettings.
        Extended encoding quality level, used to override the parameter of the same name in the template. Valid values: - SPEED - HIGHQUALITY
:return: The preset of this VideoExtendSettings.
:rtype: str
"""
return self._preset
@preset.setter
def preset(self, preset):
"""Sets the preset of this VideoExtendSettings.
        Extended encoding quality level, used to override the parameter of the same name in the template. Valid values: - SPEED - HIGHQUALITY
:param preset: The preset of this VideoExtendSettings.
:type: str
"""
self._preset = preset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VideoExtendSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
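# Minimal usage sketch (added for illustration; not part of the generated model):
#   settings = VideoExtendSettings(preset="SPEED")
#   settings.to_dict()   # -> {'preset': 'SPEED'}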
| [
"[email protected]"
] | |
84ea987a1929e5afa62a6584aa35866c79086874 | 12e78946542250f64792bc6c1d8c8ff1ffecdaf7 | /Python/Django/ninja_gold/apps/dojo_ninjas/views.py | 10a05c9d6389bbd63f012c7f6704b9a636a3e904 | [] | no_license | mkrabacher/CodingDojoAssignments | 0fde5adf7223a9eac07a4867499a243e230a300e | 4afef4aaf4f129fb56376e57d8be437d1f124521 | refs/heads/master | 2021-05-14T13:38:03.570533 | 2018-02-23T00:09:24 | 2018-02-23T00:09:24 | 113,722,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
def index(request):
return HttpResponse('hello')
| [
"[email protected]"
] | |
8f4884fe12de96cea161db86521397a440b8eef1 | 732536468e61932e7c0829934262b645effbd6d4 | /python_stack/django/django_intro/for_test/form_app/urls.py | d4277500dd0fc48dfbaa91957221c89bff1c4ca0 | [] | no_license | jignacioa/Coding-Dojo | 7a83919d09fb6ad714379dc58b1ce8e706ccc7b6 | 0e1f0d4fc528439bf34d866f4c409994741e870b | refs/heads/master | 2023-01-21T06:48:15.880635 | 2021-02-08T01:36:17 | 2021-02-08T01:36:17 | 251,421,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('users', views.create_user),
path('success', views.success)
] | [
"[email protected]"
] | |
d98b243efb145b61bf43a079eb3c779f1d21840e | 4b72cfb730e9d967ecc9ece7a5dcdff03242ce7a | /7 კლასი/7_1/ბეგიაშვილი სანდრო/რიცხვის კვადრატები-.py | 6216a18d50bc1c6d313d3539b3c0b3b1d88a0031 | [] | no_license | sc-199/2018-2019 | 20945a0aaf7a998e6038e3fd3310c8be2296e54f | 578d7aad254dc566cf5f8502f1a82c1eb267cbc2 | refs/heads/master | 2020-04-26T02:59:26.560166 | 2019-05-03T13:32:30 | 2019-05-03T13:32:30 | 168,499,541 | 4 | 0 | null | 2019-02-14T17:17:25 | 2019-01-31T09:36:13 | Python | UTF-8 | Python | false | false | 128 | py | for i in range(1,10):
print(i**2)
print( 'kenti ricxvebi:')
for i in range(1,10):
if i % 2 != 0:
print(i**2)
| [
"[email protected]"
] | |
72edaccc4f12c2a7a357dd545d3b19e07e1e876b | ac1bbabc7c1b3149711c416dd8b5f5969a0dbd04 | /Python OOP/exams/shop/deliveries/drink.py | e8ab76ba18a1bb5a6342eb4daaa653a29d11c4be | [] | no_license | AssiaHristova/SoftUni-Software-Engineering | 9e904221e50cad5b6c7953c81bc8b3b23c1e8d24 | d4910098ed5aa19770d30a7d9cdf49f9aeaea165 | refs/heads/main | 2023-07-04T04:47:00.524677 | 2021-08-08T23:31:51 | 2021-08-08T23:31:51 | 324,847,727 | 1 | 0 | null | 2021-08-08T23:31:52 | 2020-12-27T20:58:01 | Python | UTF-8 | Python | false | false | 163 | py | from shop.deliveries.product import Product
class Drink(Product):
quantity = 10
def __init__(self, name):
super().__init__(name, Drink.quantity) | [
"[email protected]"
] | |
853de356187a49bd486d2e2290adba638ee294a9 | 8f6cc0e8bd15067f1d9161a4b178383e62377bc7 | /__OLD_CODE_STORAGE/openGL_NAME/textbook/5-12.py | f132d16dda9bf39c1a4c61c443e44627b68fb309 | [] | no_license | humorbeing/python_github | 9c4dfc61a3cefbb266fefff335f6b28d05797e5e | e4b4b49bee7e7e3843c6874717779ce8d619bd02 | refs/heads/master | 2023-01-22T21:51:20.193131 | 2020-01-26T21:47:23 | 2020-01-26T21:47:23 | 163,707,778 | 0 | 0 | null | 2022-12-27T15:37:48 | 2019-01-01T01:58:18 | Python | UTF-8 | Python | false | false | 1,751 | py | from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
PALETTE = ((255, 255, 255),
(0, 255, 255),
(255, 0, 255),
(0, 0, 255),
(192, 192, 192),
(128, 128, 128),
(0, 128, 128),
(128, 0, 128),
(0, 0, 128),
(255, 255, 0),
(0, 255, 0),
(128, 128, 0),
(0, 128, 0),
(255, 0, 0),
(128, 0, 0),
(0, 0, 0),
)
Delta = 0.0
Index = 0
def MyDisplay():
global Delta, Index, PALETTE
Red = PALETTE[Index][0] / 255.0
Green = PALETTE[Index][1] / 255.0
Blue = PALETTE[Index][2] / 255.0
glColor3f(Red, Green, Blue)
glBegin(GL_LINES)
glVertex3f(-1.0 + Delta, 1.0, 0.0)
glVertex3f(1.0 - Delta, -1.0, 0.0)
glVertex3f(-1.0, -1.0 + Delta, 0.0)
glVertex3f(1.0, 1.0 - Delta, 0.0)
glEnd()
glutSwapBuffers()
def MyTimer(Value):
global Delta, Index
if Delta < 2.0:
Delta += 0.01
else:
Delta = 0.0
Index += 1
if Index == 15:
Index = 0
glutPostRedisplay()
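    # GLUT timers fire only once, so re-register the callback to keep animating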
glutTimerFunc(10, MyTimer, 1)
def main():
glutInit()
glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b"OpenGL Timer Animation Sample") # not only string, put 'b' in front of string.
glClearColor(0.0, 0.0, 0.0, 1.0)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
glutTimerFunc(10, MyTimer, 1)
glutDisplayFunc(MyDisplay)
glutMainLoop()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2fed7108c460e6c23e9fe55b399f1701ac0aeeeb | d9a633b01da49ed38ca7c9c736b271ba671d9c4f | /pygameTest/pygame_test_01.py | 83fc17e7c9fa2f2a2183dcbddc176324564775b4 | [] | no_license | chmberl/sys | 4b90263f86d070f3bb15de4bd788c32f56ee17d0 | 7b7258796157d1b4417715ababc1d1cc5b7dbc1b | refs/heads/master | 2021-01-22T02:58:53.055702 | 2018-01-08T06:41:47 | 2018-01-08T06:41:47 | 22,147,604 | 0 | 0 | null | 2018-01-08T06:41:48 | 2014-07-23T12:54:34 | Python | UTF-8 | Python | false | false | 658 | py | import pygame
from pygame.locals import *
from sys import exit
pygame.init()
SCREEN_SIZE = (640, 480)
screen = pygame.display.set_mode(SCREEN_SIZE, 0, 32)
font = pygame.font.SysFont("arial", 16)
font_height = font.get_linesize()
event_text = []
while True:
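    # pygame.event.wait() blocks until an event arrives, so the screen only
    # redraws in response to input instead of spinning at full speed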
event = pygame.event.wait()
event_text.append(str(event))
    event_text = event_text[-SCREEN_SIZE[1] // font_height:]  # floor division: a float slice index raises TypeError on Python 3
if event.type == QUIT:
exit()
screen.fill((255, 255, 255))
y = SCREEN_SIZE[1] - font_height
for text in reversed(event_text):
screen.blit(font.render(text, True, (0, 0, 0)), (0, y))
y -= font_height
pygame.display.update()
| [
"[email protected]"
] | |
cc2bb833560bf80beae9e1a7cf4d46f75bf7a75b | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/arc053/C/1254642.py | b4ebd66ee9b1032de9f913be200469beb2744bab | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | import math,string,itertools,fractions,heapq,collections,re,array,bisect,sys,random,time,copy,functools
sys.setrecursionlimit(10**7)
inf = 10**20
mod = 10**9 + 7
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def LI_(): return [int(x)-1 for x in sys.stdin.readline().split()]
def LF(): return [float(x) for x in sys.stdin.readline().split()]
def LS(): return sys.stdin.readline().split()
def I(): return int(sys.stdin.readline())
def F(): return float(sys.stdin.readline())
def S(): return input()
def main():
n = I()
a = []
b = []
d = []
for _ in range(n):
x,y = LI()
if x < y:
a.append([x,y])
else:
b.append([x,y])
r = 0
c = 0
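    # Greedy schedule: each job raises the running load c by x, then releases y,
    # while r records the peak. Jobs with x < y (net decrease) run first in
    # increasing x; the remaining jobs run in decreasing y.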
a = sorted(a)
for x,y in a:
c += x
if r < c:
r = c
c -= y
b = sorted(b, key=lambda x: [-x[1],-x[0]])
for x,y in b:
c += x
if r < c:
r = c
c -= y
return r
print(main()) | [
"[email protected]"
] | |
79e3985a32a36c053f62393485d4a2dbc3ca86c3 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/ADTRAN-FRPerform-MIB.py | 4ee4d3d812057d1a18c0ba29eadf92d333cd83c3 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 74,864 | py | #
# PySNMP MIB module ADTRAN-FRPerform-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ADTRAN-FRPerform-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:14:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
adProdPhysAddress, adMgmt, adtran, adProducts = mibBuilder.importSymbols("ADTRAN-MIB", "adProdPhysAddress", "adMgmt", "adtran", "adProducts")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Gauge32, Unsigned32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, Integer32, Counter32, IpAddress, Counter64, MibIdentifier, ObjectIdentity, NotificationType, ModuleIdentity, enterprises, NotificationType, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "Integer32", "Counter32", "IpAddress", "Counter64", "MibIdentifier", "ObjectIdentity", "NotificationType", "ModuleIdentity", "enterprises", "NotificationType", "iso", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
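# Illustrative only (not produced by pysmi): once this module is on the pysnmp
# MIB search path, the objects defined below can be queried by name, e.g.
# (host address and community string are hypothetical):
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#   g = getCmd(SnmpEngine(), CommunityData('public'),
#              UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
#              ObjectType(ObjectIdentity('ADTRAN-FRPerform-MIB',
#                                        'adFRPerformCompletedDays', 0)))
#   errInd, errStat, errIdx, varBinds = next(g)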
adPerform = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4))
adFRPerformmg = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1))
adFRPerformHistoryControl = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 1))
adFRPerformCurrentPvcStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 2))
adFRPerformIntPvcStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 3))
adFRPerformIntPortStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 4))
adFRPerformIntPortError = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 5))
adFRPerformIntHistoryTime = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 6))
adFRPerformDayPvcStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 7))
adFRPerformDayPortStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 8))
adFRPerformDayPortError = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 9))
adFRPerformDayHistoryTime = MibIdentifier((1, 3, 6, 1, 4, 1, 664, 4, 1, 10))
adFRPerformHistoryIntLength = MibScalar((1, 3, 6, 1, 4, 1, 664, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("min5", 1), ("min10", 2), ("min15", 3), ("min20", 4), ("min30", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: adFRPerformHistoryIntLength.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformHistoryIntLength.setDescription('This value selects the sampling interval period for data collected in the interval tables.')
adFRPerformCurrentIntTimeRemaining = MibScalar((1, 3, 6, 1, 4, 1, 664, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800))).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCurrentIntTimeRemaining.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentIntTimeRemaining.setDescription('Seconds remaining in the current interval.')
adFRPerformCompletedInts = MibScalar((1, 3, 6, 1, 4, 1, 664, 4, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCompletedInts.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCompletedInts.setDescription('Number of completed intervals in interval tables.')
adFRPerformCompletedDays = MibScalar((1, 3, 6, 1, 4, 1, 664, 4, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCompletedDays.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCompletedDays.setDescription('Number of completed days in day table.')
adFRPerformCurrentPvcStatusTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1), )
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusTable.setDescription('n/a.')
adFRPerformCurrentPvcStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformCurrentPvcIfIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformCurrentPvcStatusIndex"))
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusEntry.setDescription('n/a')
adFRPerformCurrentPvcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCurrentPvcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcIfIndex.setDescription('n/a.')
adFRPerformCurrentPvcStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatusIndex.setDescription('n/a.')
adFRPerformCurrentPvcState = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("active", 1), ("inactive", 2), ("unknown", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCurrentPvcState.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcState.setDescription('Current state for this PVC.')
adFRPerformCurrentPvcStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("not-available", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatistics.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformCurrentPvcStatistics.setDescription('Indicates whether this PVC is managed and has statistics available or is not managed.')
adFRPerformIntPvcStatusTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1), )
if mibBuilder.loadTexts: adFRPerformIntPvcStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPvcStatusTable.setDescription('n/a.')
adFRPerformIntPvcStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPvcIfIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPvcIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPvcSlotIndex"))
if mibBuilder.loadTexts: adFRPerformIntPvcStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPvcStatusEntry.setDescription('n/a')
adFRPerformIntPvcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPvcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPvcIfIndex.setDescription('n/a.')
adFRPerformIntPvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPvcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPvcIndex.setDescription('n/a.')
adFRPerformIntPvcSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPvcSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPvcSlotIndex.setDescription('n/a.')
adFRPerformIntPVCStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCStateChange.setDescription('The number of state changes for this PVC for the interval.')
adFRPerformIntPVCInactiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCInactiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCInactiveTime.setDescription('Time in seconds the PVC has been in the inactive state for the interval.')
adFRPerformIntPVCFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCFramesRx.setDescription('The number of Frames the PVC has received for the interval.')
adFRPerformIntPVCFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCFramesTx.setDescription('The number of Frames the PVC has transmitted for the interval.')
adFRPerformIntPVCBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBytesRx.setDescription('The number of bytes the PVC has received for the interval.')
adFRPerformIntPVCBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBytesTx.setDescription('The number of bytes the PVC has transmitted for the interval.')
adFRPerformIntPVCAvgThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgThruputTx.setDescription('Average Throughput the PVC has transmitted for the interval.')
adFRPerformIntPVCAvgThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgThruputRx.setDescription('Average throughput the PVC has received for the interval.')
adFRPerformIntPVCMaxThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxThruputTx.setDescription('The Maximum Throughput the PVC has transmitted for the interval.')
adFRPerformIntPVCMaxThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxThruputRx.setDescription('The Maximum Throughput the PVC has received for the interval.')
adFRPerformIntPVCAvgUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgUtilizationTx.setDescription('The Average Utilization the PVC has transmitted for the interval.')
adFRPerformIntPVCAvgUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgUtilizationRx.setDescription('The Average Utilization the PVC has received for the interval.')
adFRPerformIntPVCMaxUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxUtilizationTx.setDescription('The Maximum Utilization the PVC has transmitted for the interval.')
adFRPerformIntPVCMaxUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxUtilizationRx.setDescription('The Maximum Utilization the PVC has received for the interval.')
adFRPerformIntPVCBurstTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBurstTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBurstTx.setDescription('Amount of time (in seconds) that throughput in the transmit direction is greater than CIR.')
adFRPerformIntPVCBurstRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBurstRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBurstRx.setDescription('Amount of time (in seconds) that throughput in the receive direction is greater than CIR.')
adFRPerformIntPVCFecnRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCFecnRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCFecnRx.setDescription('The number of FECNs the PVC has received for the interval.')
adFRPerformIntPVCFecnTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCFecnTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCFecnTx.setDescription('The number of FECNs the PVC has transmitted for the interval.')
adFRPerformIntPVCBecnRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBecnRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBecnRx.setDescription('The number of BECNs the PVC has received for the interval.')
adFRPerformIntPVCBecnTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCBecnTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCBecnTx.setDescription('The number of BECNs the PVC has transmitted for the interval.')
adFRPerformIntPVCDeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCDeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCDeRx.setDescription('The number of DEs the PVC has received for the interval.')
adFRPerformIntPVCDeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCDeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCDeTx.setDescription('The number of DEs the PVC has transmitted for the interval.')
adFRPerformIntPVCCrRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCCrRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCCrRx.setDescription('The number of CRs the PVC has received for the interval.')
adFRPerformIntPVCCrTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCCrTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCCrTx.setDescription('The number of CRs the PVC has transmitted for the interval.')
adFRPerformIntPVCMinFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMinFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMinFrameSizeRx.setDescription('The Minimum Frame Size the PVC received for the interval.')
adFRPerformIntPVCMinFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMinFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMinFrameSizeTx.setDescription('The Minimum Frame Size the PVC transmitted for the interval.')
adFRPerformIntPVCMaxFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxFrameSizeRx.setDescription('The Maximum Frame Size the PVC received for the interval.')
adFRPerformIntPVCMaxFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxFrameSizeTx.setDescription('The Maximum Frame Size the PVC transmitted for the interval.')
adFRPerformIntPVCAvgFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgFrameSizeRx.setDescription('The Average Frame Size the PVC received for the interval.')
adFRPerformIntPVCAvgFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 33), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgFrameSizeTx.setDescription('The Average Frame Size the PVC transmitted for the interval.')
adFRPerformIntPVCLostFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCLostFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCLostFrames.setDescription('The number of Lost Frames on the PVC for the interval. Applies only if Sequence Numbering is Enabled on the PVC.')
adFRPerformIntPVCRemoteLostFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCRemoteLostFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCRemoteLostFrames.setDescription('The number of Remote Lost Frames on the PVC for the interval. Applies only if Sequence Numbering is Enabled on the PVC.')
adFRPerformIntPVCMaxDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 36), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMaxDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMaxDelay.setDescription('The Maximum Delay in milliseconds on the PVC for the interval. Applies only if Delay Measurement is Enabled for the PVC or PVC Diagnostics are being performed.')
adFRPerformIntPVCMinDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 37), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCMinDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCMinDelay.setDescription('The Minimum Delay in milliseconds on the PVC for the interval. Applies only if Delay Measurement is Enabled for the PVC or PVC Diagnostics are being performed.')
adFRPerformIntPVCAvgDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 38), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCAvgDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCAvgDelay.setDescription('The Average Delay in milliseconds on the PVC for the interval. Applies only if Delay Measurement is Enabled for the PVC or PVC Diagnostics are being performed.')
adFRPerformIntPVCTimeInDBU = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 3, 1, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPVCTimeInDBU.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPVCTimeInDBU.setDescription('Time in seconds the PVC is in the DBU state.')
adFRPerformIntPortStatusTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1), )
if mibBuilder.loadTexts: adFRPerformIntPortStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortStatusTable.setDescription('n/a.')
adFRPerformIntPortStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformIntIfIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPortSlotIndex"))
if mibBuilder.loadTexts: adFRPerformIntPortStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortStatusEntry.setDescription('n/a')
adFRPerformIntIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntIfIndex.setDescription('n/a.')
adFRPerformIntPortSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortSlotIndex.setDescription('n/a.')
adFRPerformIntPortFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortFramesRx.setDescription('The number of Frames the Port received for the interval.')
adFRPerformIntPortFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortFramesTx.setDescription('The number of Frames the Port transmitted for the interval.')
adFRPerformIntPortBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortBytesRx.setDescription('The number of Bytes the Port received for the interval.')
adFRPerformIntPortBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortBytesTx.setDescription('The number of Bytes the Port transmitted for the interval.')
adFRPerformIntPortAvgThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortAvgThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortAvgThruputTx.setDescription('The Average Throughput the Port transmitted for the interval.')
adFRPerformIntPortAvgThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortAvgThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortAvgThruputRx.setDescription('The Average Throughput the Port received for the interval.')
adFRPerformIntPortMaxThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortMaxThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortMaxThruputTx.setDescription('The Maximum Throughput the Port transmitted for the interval.')
adFRPerformIntPortMaxThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortMaxThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortMaxThruputRx.setDescription('The Maximum Throughput the Port received for the interval.')
adFRPerformIntPortAvgUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortAvgUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortAvgUtilizationTx.setDescription('The Average Utilization the Port transmitted for the interval.')
adFRPerformIntPortAvgUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortAvgUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortAvgUtilizationRx.setDescription('The Average Utilization the Port received for the interval.')
adFRPerformIntPortMaxUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortMaxUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortMaxUtilizationTx.setDescription('The Maximum Utilization the Port transmitted for the interval.')
adFRPerformIntPortMaxUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortMaxUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortMaxUtilizationRx.setDescription('The Maximum Utilization the Port received for the interval.')
adFRPerformIntPortFullStatusRX = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortFullStatusRX.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortFullStatusRX.setDescription('Number of PVC signaling full status frames received.')
adFRPerformIntPortFullStatusTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortFullStatusTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortFullStatusTx.setDescription('Number of PVC signaling full status frames transmitted.')
adFRPerformIntPortLIOnlyRX = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortLIOnlyRX.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortLIOnlyRX.setDescription('Number of PVC signaling link integrity only frames received.')
adFRPerformIntPortLIOnlyTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortLIOnlyTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortLIOnlyTx.setDescription('Number of PVC signaling link integrity only frames transmitted.')
adFRPerformIntPortAsyncStatusFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 4, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortAsyncStatusFrame.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortAsyncStatusFrame.setDescription('Number of single PVC status frames received.')
adFRPerformIntPortErrorTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1), )
if mibBuilder.loadTexts: adFRPerformIntPortErrorTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortErrorTable.setDescription('n/a.')
adFRPerformIntPortErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPortIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformIntPortErrorSlotIndex"))
if mibBuilder.loadTexts: adFRPerformIntPortErrorEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortErrorEntry.setDescription('n/a')
adFRPerformIntPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortIndex.setDescription('n/a.')
adFRPerformIntPortErrorSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortErrorSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortErrorSlotIndex.setDescription('n/a.')
adFRPerformIntPortUnavailableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntPortUnavailableTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntPortUnavailableTime.setDescription('Time in seconds the port is unavailable due to a physical or frame relay outage.')
adFRPerformIntCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntCrcErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntCrcErrors.setDescription('Number of frames received with CRC errors.')
adFRPerformIntAbortFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntAbortFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntAbortFrames.setDescription('Number of frames received without proper flag termination.')
adFRPerformIntOctectViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntOctectViolations.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntOctectViolations.setDescription('Number of frames received with a bit count not divisible by eight.')
adFRPerformIntDiscardFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntDiscardFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntDiscardFrames.setDescription('Number of frames discarded by the IQ unit.')
adFRPerformIntLengthErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntLengthErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntLengthErrors.setDescription('Number of frames received that are less than 5 bytes or greater than 4500 bytes.')
adFRPerformIntEAViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntEAViolations.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntEAViolations.setDescription('Number of frames received with errors in the EA field of the frame relay header.')
adFRPerformIntEncapsulationErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntEncapsulationErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntEncapsulationErrors.setDescription('Number of frames destined for the IQ IP stack that do not meet the FRF.3 IA.')
adFRPerformIntInactiveDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntInactiveDLCI.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntInactiveDLCI.setDescription('Number of frames received while the PVC is in the inactive state.')
adFRPerformIntInvalidDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntInvalidDLCI.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntInvalidDLCI.setDescription('Number of frames received with a DLCI value less than 16 or greater than 1007 not including PVC signaling frames.')
adFRPerformIntUnroutable = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntUnroutable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntUnroutable.setDescription('Number of frames received on a management DLCI destined for the IQ unit and have the wrong IP address.')
adFRPerformIntSignalDownTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntSignalDownTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntSignalDownTime.setDescription('Time in seconds the signaling state has been down.')
adFRPerformIntSignalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntSignalErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntSignalErrors.setDescription('Number of PVC signaling frames received with protocol violations.')
adFRPerformIntSignalTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntSignalTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntSignalTimeOut.setDescription('Number of PVC signal timeouts. Either T391 seconds elapsed without receiving a response to a poll, or T392 seconds elapsed without receiving a poll.')
adFRPerformIntSignalStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 5, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntSignalStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntSignalStateChange.setDescription('Number of state changes for the PVC signaling protocol. This includes transitions from down state to up state and vice versa.')
adFRPerformIntHistoryTimeTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 6, 1), )
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeTable.setDescription('n/a.')
adFRPerformIntHistoryTimeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 6, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformIntHistoryTimeSlotIndex"))
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeEntry.setDescription('n/a')
adFRPerformIntHistoryTimeSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 6, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeSlotIndex.setDescription('n/a.')
adFRPerformIntHistorySlotTotalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 6, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntHistorySlotTotalTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntHistorySlotTotalTime.setDescription('Total time in seconds this interval slot represents.')
adFRPerformIntHistoryTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 6, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformIntHistoryTimeStamp.setDescription('Time the interval started. Format H:M if not midnight, else M-D.')
adFRPerformDayPvcStatusTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1), )
if mibBuilder.loadTexts: adFRPerformDayPvcStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPvcStatusTable.setDescription('n/a.')
adFRPerformDayPvcStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPvcIfIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPvcIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPvcSlotIndex"))
if mibBuilder.loadTexts: adFRPerformDayPvcStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPvcStatusEntry.setDescription('n/a')
adFRPerformDayPvcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPvcIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPvcIfIndex.setDescription('n/a.')
adFRPerformDayPvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPvcIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPvcIndex.setDescription('n/a.')
adFRPerformDayPvcSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPvcSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPvcSlotIndex.setDescription('n/a.')
adFRPerformDayPVCStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCStateChange.setDescription('The number of State Changes on the PVC for the day.')
adFRPerformDayPVCInactiveTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCInactiveTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCInactiveTime.setDescription('Time in seconds the PVC has been in the inactive state.')
adFRPerformDayPVCFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCFramesRx.setDescription('The number of Frames the PVC received for the day.')
adFRPerformDayPVCFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCFramesTx.setDescription('The number of Frames the PVC transmitted for the day.')
adFRPerformDayPVCBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBytesRx.setDescription('The number of Bytes the PVC received for the day.')
adFRPerformDayPVCBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBytesTx.setDescription('The number of Bytes the PVC transmitted for the day.')
adFRPerformDayPVCAvgThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 10), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgThruputTx.setDescription('The Average Throughput the PVC transmitted for the day.')
adFRPerformDayPVCAvgThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgThruputRx.setDescription('The Average Throughput the PVC received for the day.')
adFRPerformDayPVCMaxThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxThruputTx.setDescription('The Maximum Throughput the PVC transmitted for the day.')
adFRPerformDayPVCMaxThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxThruputRx.setDescription('The Maximum Throughput the PVC received for the day.')
adFRPerformDayPVCAvgUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 14), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgUtilizationTx.setDescription('The Average Utilization the PVC transmitted for the day.')
adFRPerformDayPVCAvgUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 15), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgUtilizationRx.setDescription('The Average Utilization the PVC received for the day.')
adFRPerformDayPVCMaxUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxUtilizationTx.setDescription('The Maximum Utilization the PVC transmitted for the day.')
adFRPerformDayPVCMaxUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxUtilizationRx.setDescription('The Maximum Utilization the PVC received for the day.')
adFRPerformDayPVCBurstTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBurstTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBurstTx.setDescription('n/a')
adFRPerformDayPVCBurstRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBurstRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBurstRx.setDescription('n/a')
adFRPerformDayPVCFecnRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCFecnRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCFecnRx.setDescription('The number of FECNs the PVC received for the day.')
adFRPerformDayPVCFecnTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCFecnTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCFecnTx.setDescription('The number of FECNs the PVC transmitted for the day.')
adFRPerformDayPVCBecnRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBecnRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBecnRx.setDescription('The number of BECNs the PVC received for the day.')
adFRPerformDayPVCBecnTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCBecnTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCBecnTx.setDescription('The number of BECNs the PVC transmitted for the day.')
adFRPerformDayPVCDeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCDeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCDeRx.setDescription('The number of DEs the PVC received for the day.')
adFRPerformDayPVCDeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCDeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCDeTx.setDescription('The number of DEs the PVC transmitted for the day.')
adFRPerformDayPVCCrRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCCrRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCCrRx.setDescription('The number of CRs the PVC received for the day.')
adFRPerformDayPVCCrTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCCrTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCCrTx.setDescription('The number of CRs the PVC transmitted for the day.')
adFRPerformDayPVCMinFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMinFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMinFrameSizeRx.setDescription('The Minimum Frame Size the PVC received for the day.')
adFRPerformDayPVCMinFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMinFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMinFrameSizeTx.setDescription('The Minimum Frame Size the PVC transmitted for the day.')
adFRPerformDayPVCMaxFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxFrameSizeRx.setDescription('The Maximum Frame Size the PVC received for the day.')
adFRPerformDayPVCMaxFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxFrameSizeTx.setDescription('The Maximum Frame Size the PVC transmitted for the day.')
adFRPerformDayPVCAvgFrameSizeRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 32), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgFrameSizeRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgFrameSizeRx.setDescription('The Average Frame Size the PVC received for the day.')
adFRPerformDayPVCAvgFrameSizeTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 33), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgFrameSizeTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgFrameSizeTx.setDescription('The Average Frame Size the PVC transmitted for the day.')
adFRPerformDayPVCLostFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCLostFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCLostFrames.setDescription('The number of Lost Frames on the PVC for the day. Applies only if Sequence Numbering is Enabled for the PVC.')
adFRPerformDayPVCRemoteLostFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 35), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCRemoteLostFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCRemoteLostFrames.setDescription('The number of Remote Lost Frames on the PVC for the day. Applies only if Sequence Numbering is Enabled for the PVC.')
adFRPerformDayPVCMaxDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 36), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMaxDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMaxDelay.setDescription('The Maximum Delay on the PVC for the day. Applies only if Delay Measurement or PVC Diagnostics are Enabled for the PVC.')
adFRPerformDayPVCMinDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 37), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCMinDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCMinDelay.setDescription('The Minimum Delay on the PVC for the day. Applies only if Delay Measurement or PVC Diagnostics are Enabled for the PVC.')
adFRPerformDayPVCAvgDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 38), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCAvgDelay.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCAvgDelay.setDescription('The Average Delay on the PVC for the day. Applies only if Delay Measurement or PVC Diagnostics are Enabled for the PVC.')
adFRPerformDayPVCTimeInDBU = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 7, 1, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPVCTimeInDBU.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPVCTimeInDBU.setDescription('Time in seconds the PVC is in the DBU state.')
adFRPerformDayPortStatusTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1), )
if mibBuilder.loadTexts: adFRPerformDayPortStatusTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortStatusTable.setDescription('n/a.')
adFRPerformDayPortStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformDayIfIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPortSlotIndex"))
if mibBuilder.loadTexts: adFRPerformDayPortStatusEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortStatusEntry.setDescription('n/a')
adFRPerformDayIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayIfIndex.setDescription('n/a.')
adFRPerformDayPortSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortSlotIndex.setDescription('n/a.')
adFRPerformDayPortFramesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortFramesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortFramesRx.setDescription('The number of Frames the Port received for the day.')
adFRPerformDayPortFramesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortFramesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortFramesTx.setDescription('The number of Frames the Port transmitted for the day.')
adFRPerformDayPortBytesRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortBytesRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortBytesRx.setDescription('The number of Bytes the Port received for the day.')
adFRPerformDayPortBytesTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortBytesTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortBytesTx.setDescription('The number of Bytes the Port transmitted for the day.')
adFRPerformDayPortAvgThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 7), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortAvgThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortAvgThruputTx.setDescription('The Average Throughput the Port transmitted for the day.')
adFRPerformDayPortAvgThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortAvgThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortAvgThruputRx.setDescription('The Average Throughput the Port received for the day.')
adFRPerformDayPortMaxThruputTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortMaxThruputTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortMaxThruputTx.setDescription('The Maximum Throughput the Port transmitted for the day.')
adFRPerformDayPortMaxThruputRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortMaxThruputRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortMaxThruputRx.setDescription('The Maximum Throughput the Port received for the day.')
adFRPerformDayPortAvgUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortAvgUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortAvgUtilizationTx.setDescription('The Average Utilization the Port transmitted for the day.')
adFRPerformDayPortAvgUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 12), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortAvgUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortAvgUtilizationRx.setDescription('The Average Utilization the Port received for the day.')
adFRPerformDayPortMaxUtilizationTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortMaxUtilizationTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortMaxUtilizationTx.setDescription('The Maximum Utilization the Port transmitted for the day.')
adFRPerformDayPortMaxUtilizationRx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortMaxUtilizationRx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortMaxUtilizationRx.setDescription('The Maximum Utilization the Port received for the day.')
adFRPerformDayPortFullStatusRX = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortFullStatusRX.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortFullStatusRX.setDescription('Number of PVC signaling full status frames received.')
adFRPerformDayPortFullStatusTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortFullStatusTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortFullStatusTx.setDescription('Number of PVC signaling full status frames transmitted.')
adFRPerformDayPortLIOnlyRX = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortLIOnlyRX.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortLIOnlyRX.setDescription('Number of PVC signaling link integrity only frames received.')
adFRPerformDayPortLIOnlyTx = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortLIOnlyTx.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortLIOnlyTx.setDescription('Number of PVC signaling link integrity only frames transmitted.')
adFRPerformDayPortAsyncStatusFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 8, 1, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortAsyncStatusFrame.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortAsyncStatusFrame.setDescription('Number of single PVC status frames received.')
adFRPerformDayPortErrorTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1), )
if mibBuilder.loadTexts: adFRPerformDayPortErrorTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortErrorTable.setDescription('n/a.')
adFRPerformDayPortErrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPortIndex"), (0, "ADTRAN-FRPerform-MIB", "adFRPerformDayPortErrorSlotIndex"))
if mibBuilder.loadTexts: adFRPerformDayPortErrorEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortErrorEntry.setDescription('n/a')
adFRPerformDayPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortIndex.setDescription('n/a.')
adFRPerformDayPortErrorSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortErrorSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortErrorSlotIndex.setDescription('n/a.')
adFRPerformDayPortUnavailableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayPortUnavailableTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayPortUnavailableTime.setDescription('Time in seconds the port is unavailable due to a physical or frame relay outage.')
adFRPerformDayCrcErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayCrcErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayCrcErrors.setDescription('Number of frames received with CRC errors.')
adFRPerformDayAbortFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayAbortFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayAbortFrames.setDescription('Number of frames received without proper flag termination.')
adFRPerformDayOctectViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayOctectViolations.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayOctectViolations.setDescription('Number of frames received with a bit count not divisible by eight.')
adFRPerformDayDiscardFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayDiscardFrames.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayDiscardFrames.setDescription('Number of frames discarded by the IQ unit.')
adFRPerformDayLengthErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayLengthErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayLengthErrors.setDescription('Number of frames received that are less than 5 bytes or greater than 4500 bytes.')
adFRPerformDayEAViolations = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayEAViolations.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayEAViolations.setDescription('Number of frames received with errors in the EA field of the frame relay header.')
adFRPerformDayEncapsulationErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayEncapsulationErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayEncapsulationErrors.setDescription('Number of frames destined for the IQ IP stack that do not meet the FRF.3 IA.')
adFRPerformDayInactiveDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayInactiveDLCI.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayInactiveDLCI.setDescription('Number of frames received while the PVC is in the inactive state.')
adFRPerformDayInvalidDLCI = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayInvalidDLCI.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayInvalidDLCI.setDescription('Number of frames received with a DLCI value less than 16 or greater than 1007 not including PVC signaling frames.')
adFRPerformDayUnroutable = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayUnroutable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayUnroutable.setDescription('Number of frames received on a management DLCI destined for the IQ unit and have the wrong IP address.')
adFRPerformDaySignalDownTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDaySignalDownTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDaySignalDownTime.setDescription('Time in seconds the signaling state has been down.')
adFRPerformDaySignalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDaySignalErrors.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDaySignalErrors.setDescription('Number of PVC signaling frames received with protocol violations.')
adFRPerformDaySignalTimeOut = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDaySignalTimeOut.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDaySignalTimeOut.setDescription('Number of PVC signal timeouts. Either T391 seconds elapsed without receiving a response to a poll, or T392 seconds elapsed without receiving a poll.')
adFRPerformDaySignalStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 9, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDaySignalStateChange.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDaySignalStateChange.setDescription('Number of state changes for the PVC signaling protocol. This includes transitions from down state to up state and vice versa.')
adFRPerformDayHistoryTimeTable = MibTable((1, 3, 6, 1, 4, 1, 664, 4, 1, 10, 1), )
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeTable.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeTable.setDescription('n/a.')
adFRPerformDayHistoryTimeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 664, 4, 1, 10, 1, 1), ).setIndexNames((0, "ADTRAN-FRPerform-MIB", "adFRPerformDayHistoryTimeSlotIndex"))
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeEntry.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeEntry.setDescription('n/a')
adFRPerformDayHistoryTimeSlotIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 10, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeSlotIndex.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeSlotIndex.setDescription('n/a.')
adFRPerformDayHistorySlotTotalTime = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 10, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayHistorySlotTotalTime.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayHistorySlotTotalTime.setDescription('Time in seconds this day slot represents.')
adFRPerformDayHistoryTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 664, 4, 1, 10, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeStamp.setStatus('mandatory')
if mibBuilder.loadTexts: adFRPerformDayHistoryTimeStamp.setDescription('Date the day slot started. Format M-D.')
mibBuilder.exportSymbols("ADTRAN-FRPerform-MIB", adFRPerformIntPVCAvgThruputRx=adFRPerformIntPVCAvgThruputRx, adFRPerformIntPortMaxThruputRx=adFRPerformIntPortMaxThruputRx, adFRPerformIntInactiveDLCI=adFRPerformIntInactiveDLCI, adFRPerformIntPVCBurstTx=adFRPerformIntPVCBurstTx, adFRPerformIntDiscardFrames=adFRPerformIntDiscardFrames, adFRPerformDayPvcStatusEntry=adFRPerformDayPvcStatusEntry, adFRPerformDayPVCBurstRx=adFRPerformDayPVCBurstRx, adFRPerformIntPVCFramesTx=adFRPerformIntPVCFramesTx, adFRPerformIntPVCMinDelay=adFRPerformIntPVCMinDelay, adFRPerformIntPVCAvgDelay=adFRPerformIntPVCAvgDelay, adFRPerformDaySignalErrors=adFRPerformDaySignalErrors, adFRPerformDayEAViolations=adFRPerformDayEAViolations, adFRPerformDayPvcSlotIndex=adFRPerformDayPvcSlotIndex, adFRPerformCurrentPvcIfIndex=adFRPerformCurrentPvcIfIndex, adFRPerformDayPortLIOnlyRX=adFRPerformDayPortLIOnlyRX, adFRPerformDayPVCBytesTx=adFRPerformDayPVCBytesTx, adFRPerformCurrentPvcStatus=adFRPerformCurrentPvcStatus, adFRPerformDayPortErrorSlotIndex=adFRPerformDayPortErrorSlotIndex, adFRPerformDaySignalStateChange=adFRPerformDaySignalStateChange, adFRPerformDayHistoryTimeSlotIndex=adFRPerformDayHistoryTimeSlotIndex, adFRPerformDayPVCFecnTx=adFRPerformDayPVCFecnTx, adFRPerformIntPVCBytesRx=adFRPerformIntPVCBytesRx, adFRPerformDayEncapsulationErrors=adFRPerformDayEncapsulationErrors, adFRPerformIntPortErrorEntry=adFRPerformIntPortErrorEntry, adFRPerformDayPVCMaxUtilizationRx=adFRPerformDayPVCMaxUtilizationRx, adFRPerformDayPortErrorTable=adFRPerformDayPortErrorTable, adFRPerformDayPVCDeTx=adFRPerformDayPVCDeTx, adFRPerformDayOctectViolations=adFRPerformDayOctectViolations, adFRPerformDayPVCAvgThruputRx=adFRPerformDayPVCAvgThruputRx, adFRPerformDayUnroutable=adFRPerformDayUnroutable, adFRPerformIntPvcIfIndex=adFRPerformIntPvcIfIndex, adFRPerformIntSignalErrors=adFRPerformIntSignalErrors, adFRPerformIntPortErrorTable=adFRPerformIntPortErrorTable, adFRPerformDayPVCBytesRx=adFRPerformDayPVCBytesRx, adFRPerformIntPortStatusTable=adFRPerformIntPortStatusTable, adFRPerformIntPortLIOnlyRX=adFRPerformIntPortLIOnlyRX, adFRPerformDayPortSlotIndex=adFRPerformDayPortSlotIndex, adFRPerformIntSignalTimeOut=adFRPerformIntSignalTimeOut, adFRPerformDayPortIndex=adFRPerformDayPortIndex, adFRPerformIntEncapsulationErrors=adFRPerformIntEncapsulationErrors, adFRPerformIntPVCMaxUtilizationRx=adFRPerformIntPVCMaxUtilizationRx, adFRPerformIntPVCMaxDelay=adFRPerformIntPVCMaxDelay, adFRPerformDayPortAvgUtilizationTx=adFRPerformDayPortAvgUtilizationTx, adFRPerformDayPortFramesTx=adFRPerformDayPortFramesTx, adFRPerformIntPortError=adFRPerformIntPortError, adFRPerformIntPortMaxUtilizationTx=adFRPerformIntPortMaxUtilizationTx, adFRPerformIntPVCBurstRx=adFRPerformIntPVCBurstRx, adFRPerformDayPVCAvgUtilizationRx=adFRPerformDayPVCAvgUtilizationRx, adFRPerformDaySignalTimeOut=adFRPerformDaySignalTimeOut, adFRPerformIntHistoryTime=adFRPerformIntHistoryTime, adFRPerformIntLengthErrors=adFRPerformIntLengthErrors, adFRPerformDayPortFullStatusRX=adFRPerformDayPortFullStatusRX, adFRPerformIntPortUnavailableTime=adFRPerformIntPortUnavailableTime, adFRPerformDayInvalidDLCI=adFRPerformDayInvalidDLCI, adFRPerformIntPVCBecnRx=adFRPerformIntPVCBecnRx, adFRPerformDayPVCMaxFrameSizeRx=adFRPerformDayPVCMaxFrameSizeRx, adFRPerformCurrentPvcStatusEntry=adFRPerformCurrentPvcStatusEntry, adFRPerformDayPvcIfIndex=adFRPerformDayPvcIfIndex, adFRPerformIntOctectViolations=adFRPerformIntOctectViolations, adFRPerformmg=adFRPerformmg, 
adFRPerformCurrentPvcStatusIndex=adFRPerformCurrentPvcStatusIndex, adFRPerformIntPortAvgUtilizationRx=adFRPerformIntPortAvgUtilizationRx, adFRPerformIntPVCAvgUtilizationTx=adFRPerformIntPVCAvgUtilizationTx, adFRPerformIntPvcSlotIndex=adFRPerformIntPvcSlotIndex, adFRPerformDayPVCFramesTx=adFRPerformDayPVCFramesTx, adFRPerformIntUnroutable=adFRPerformIntUnroutable, adFRPerformIntPortMaxThruputTx=adFRPerformIntPortMaxThruputTx, adFRPerformIntInvalidDLCI=adFRPerformIntInvalidDLCI, adFRPerformIntCrcErrors=adFRPerformIntCrcErrors, adFRPerformDayPvcStatus=adFRPerformDayPvcStatus, adFRPerformIntPvcStatusEntry=adFRPerformIntPvcStatusEntry, adFRPerformIntPvcIndex=adFRPerformIntPvcIndex, adFRPerformDayPVCDeRx=adFRPerformDayPVCDeRx, adFRPerformDayPortStatusEntry=adFRPerformDayPortStatusEntry, adFRPerformDayPvcStatusTable=adFRPerformDayPvcStatusTable, adFRPerformDayPVCInactiveTime=adFRPerformDayPVCInactiveTime, adFRPerformIntPVCCrRx=adFRPerformIntPVCCrRx, adFRPerformDayInactiveDLCI=adFRPerformDayInactiveDLCI, adFRPerformIntPortLIOnlyTx=adFRPerformIntPortLIOnlyTx, adFRPerformIntPortFullStatusTx=adFRPerformIntPortFullStatusTx, adFRPerformIntPortStatusEntry=adFRPerformIntPortStatusEntry, adFRPerformDayPVCMaxFrameSizeTx=adFRPerformDayPVCMaxFrameSizeTx, adFRPerformIntHistoryTimeTable=adFRPerformIntHistoryTimeTable, adFRPerformIntPortErrorSlotIndex=adFRPerformIntPortErrorSlotIndex, adFRPerformIntEAViolations=adFRPerformIntEAViolations, adFRPerformDayPortAsyncStatusFrame=adFRPerformDayPortAsyncStatusFrame, adFRPerformIntPVCAvgThruputTx=adFRPerformIntPVCAvgThruputTx, adFRPerformCurrentPvcStatistics=adFRPerformCurrentPvcStatistics, adFRPerformDayPVCFecnRx=adFRPerformDayPVCFecnRx, adFRPerformIntHistoryTimeSlotIndex=adFRPerformIntHistoryTimeSlotIndex, adFRPerformDayPortAvgThruputTx=adFRPerformDayPortAvgThruputTx, adFRPerformDayPortStatus=adFRPerformDayPortStatus, adFRPerformIntAbortFrames=adFRPerformIntAbortFrames, adFRPerformDayPVCBecnRx=adFRPerformDayPVCBecnRx, adFRPerformDayPortFramesRx=adFRPerformDayPortFramesRx, adFRPerformIntPortFullStatusRX=adFRPerformIntPortFullStatusRX, adFRPerformDayPVCMinDelay=adFRPerformDayPVCMinDelay, adFRPerformIntPVCMaxFrameSizeRx=adFRPerformIntPVCMaxFrameSizeRx, adFRPerformIntPVCBytesTx=adFRPerformIntPVCBytesTx, adFRPerformDayPVCFramesRx=adFRPerformDayPVCFramesRx, adFRPerformDayPortMaxUtilizationRx=adFRPerformDayPortMaxUtilizationRx, adFRPerformIntPvcStatusTable=adFRPerformIntPvcStatusTable, adFRPerformIntPVCMinFrameSizeRx=adFRPerformIntPVCMinFrameSizeRx, adFRPerformDayPVCTimeInDBU=adFRPerformDayPVCTimeInDBU, adFRPerformDayPVCAvgThruputTx=adFRPerformDayPVCAvgThruputTx, adFRPerformDayPVCRemoteLostFrames=adFRPerformDayPVCRemoteLostFrames, adFRPerformIntPVCAvgUtilizationRx=adFRPerformIntPVCAvgUtilizationRx, adFRPerformDayPVCMinFrameSizeRx=adFRPerformDayPVCMinFrameSizeRx, adFRPerformIntPVCBecnTx=adFRPerformIntPVCBecnTx, adFRPerformIntPVCStateChange=adFRPerformIntPVCStateChange, adFRPerformDayPortErrorEntry=adFRPerformDayPortErrorEntry, adFRPerformDayPortStatusTable=adFRPerformDayPortStatusTable, adFRPerformIntPVCAvgFrameSizeRx=adFRPerformIntPVCAvgFrameSizeRx, adFRPerformIntHistorySlotTotalTime=adFRPerformIntHistorySlotTotalTime, adFRPerformDayPVCStateChange=adFRPerformDayPVCStateChange, adFRPerformIntPVCCrTx=adFRPerformIntPVCCrTx, adFRPerformDayPvcIndex=adFRPerformDayPvcIndex, adFRPerformCurrentPvcStatusTable=adFRPerformCurrentPvcStatusTable, adFRPerformDayCrcErrors=adFRPerformDayCrcErrors, adFRPerformDayPortError=adFRPerformDayPortError, 
adFRPerformIntPVCFramesRx=adFRPerformIntPVCFramesRx, adFRPerformIntPVCMaxThruputRx=adFRPerformIntPVCMaxThruputRx, adFRPerformIntPortFramesRx=adFRPerformIntPortFramesRx, adFRPerformDayHistoryTimeTable=adFRPerformDayHistoryTimeTable, adFRPerformIntPVCAvgFrameSizeTx=adFRPerformIntPVCAvgFrameSizeTx, adFRPerformIntPVCInactiveTime=adFRPerformIntPVCInactiveTime, adFRPerformCurrentIntTimeRemaining=adFRPerformCurrentIntTimeRemaining, adFRPerformIntPvcStatus=adFRPerformIntPvcStatus, adFRPerformIntHistoryTimeStamp=adFRPerformIntHistoryTimeStamp, adFRPerformDayHistoryTimeEntry=adFRPerformDayHistoryTimeEntry, adFRPerformIntPVCFecnRx=adFRPerformIntPVCFecnRx, adFRPerformDayPVCAvgDelay=adFRPerformDayPVCAvgDelay, adFRPerformCompletedInts=adFRPerformCompletedInts, adFRPerformIntPortAsyncStatusFrame=adFRPerformIntPortAsyncStatusFrame, adFRPerformDayPortBytesRx=adFRPerformDayPortBytesRx, adFRPerformIntPortBytesTx=adFRPerformIntPortBytesTx, adFRPerformDayPVCLostFrames=adFRPerformDayPVCLostFrames, adFRPerformDayPVCMaxThruputRx=adFRPerformDayPVCMaxThruputRx, adFRPerformIntPortSlotIndex=adFRPerformIntPortSlotIndex, adFRPerformIntPVCMaxFrameSizeTx=adFRPerformIntPVCMaxFrameSizeTx, adFRPerformIntPortAvgThruputTx=adFRPerformIntPortAvgThruputTx, adFRPerformDayPortUnavailableTime=adFRPerformDayPortUnavailableTime, adFRPerformDayHistoryTime=adFRPerformDayHistoryTime, adFRPerformDayPVCAvgFrameSizeTx=adFRPerformDayPVCAvgFrameSizeTx, adFRPerformDayPVCCrRx=adFRPerformDayPVCCrRx, adFRPerformIntPVCDeTx=adFRPerformIntPVCDeTx, adFRPerformIntPVCLostFrames=adFRPerformIntPVCLostFrames, adFRPerformIntPVCMaxUtilizationTx=adFRPerformIntPVCMaxUtilizationTx, adFRPerformDayLengthErrors=adFRPerformDayLengthErrors, adFRPerformIntSignalDownTime=adFRPerformIntSignalDownTime, adFRPerformDayPVCAvgFrameSizeRx=adFRPerformDayPVCAvgFrameSizeRx, adPerform=adPerform, adFRPerformDaySignalDownTime=adFRPerformDaySignalDownTime, adFRPerformIntPortIndex=adFRPerformIntPortIndex, adFRPerformIntPortFramesTx=adFRPerformIntPortFramesTx, adFRPerformDayHistoryTimeStamp=adFRPerformDayHistoryTimeStamp, adFRPerformDayPVCBecnTx=adFRPerformDayPVCBecnTx, adFRPerformDayPVCMinFrameSizeTx=adFRPerformDayPVCMinFrameSizeTx, adFRPerformIntPortBytesRx=adFRPerformIntPortBytesRx, adFRPerformCompletedDays=adFRPerformCompletedDays, adFRPerformIntPortAvgThruputRx=adFRPerformIntPortAvgThruputRx, adFRPerformIntIfIndex=adFRPerformIntIfIndex, adFRPerformDayPVCBurstTx=adFRPerformDayPVCBurstTx, adFRPerformIntPVCMaxThruputTx=adFRPerformIntPVCMaxThruputTx, adFRPerformIntHistoryTimeEntry=adFRPerformIntHistoryTimeEntry, adFRPerformDayPVCCrTx=adFRPerformDayPVCCrTx, adFRPerformDayPVCAvgUtilizationTx=adFRPerformDayPVCAvgUtilizationTx, adFRPerformDayIfIndex=adFRPerformDayIfIndex, adFRPerformIntPortStatus=adFRPerformIntPortStatus, adFRPerformDayPortBytesTx=adFRPerformDayPortBytesTx, adFRPerformDayPVCMaxThruputTx=adFRPerformDayPVCMaxThruputTx, adFRPerformDayPortMaxThruputRx=adFRPerformDayPortMaxThruputRx, adFRPerformDayHistorySlotTotalTime=adFRPerformDayHistorySlotTotalTime, adFRPerformIntSignalStateChange=adFRPerformIntSignalStateChange, adFRPerformIntPVCRemoteLostFrames=adFRPerformIntPVCRemoteLostFrames, adFRPerformIntPortAvgUtilizationTx=adFRPerformIntPortAvgUtilizationTx, adFRPerformIntPortMaxUtilizationRx=adFRPerformIntPortMaxUtilizationRx, adFRPerformDayAbortFrames=adFRPerformDayAbortFrames, adFRPerformDayPortMaxThruputTx=adFRPerformDayPortMaxThruputTx, adFRPerformIntPVCMinFrameSizeTx=adFRPerformIntPVCMinFrameSizeTx, adFRPerformCurrentPvcState=adFRPerformCurrentPvcState, 
adFRPerformIntPVCFecnTx=adFRPerformIntPVCFecnTx, adFRPerformDayPortMaxUtilizationTx=adFRPerformDayPortMaxUtilizationTx, adFRPerformDayPortAvgThruputRx=adFRPerformDayPortAvgThruputRx, adFRPerformIntPVCDeRx=adFRPerformIntPVCDeRx, adFRPerformDayPVCMaxDelay=adFRPerformDayPVCMaxDelay, adFRPerformDayDiscardFrames=adFRPerformDayDiscardFrames, adFRPerformHistoryIntLength=adFRPerformHistoryIntLength, adFRPerformDayPortAvgUtilizationRx=adFRPerformDayPortAvgUtilizationRx, adFRPerformDayPortFullStatusTx=adFRPerformDayPortFullStatusTx, adFRPerformDayPortLIOnlyTx=adFRPerformDayPortLIOnlyTx, adFRPerformIntPVCTimeInDBU=adFRPerformIntPVCTimeInDBU, adFRPerformDayPVCMaxUtilizationTx=adFRPerformDayPVCMaxUtilizationTx, adFRPerformHistoryControl=adFRPerformHistoryControl)
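# Illustrative usage sketch (not part of the generated module): assuming this
# compiled MIB is on pysnmp's MIB search path and an agent is reachable, a
# manager could read one of the exported objects, e.g. the per-day port byte
# count. The host, community string and the 1.1 index below are placeholders.
#
#   from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                             ContextData, ObjectType, ObjectIdentity, getCmd)
#
#   errorIndication, errorStatus, errorIndex, varBinds = next(getCmd(
#       SnmpEngine(),
#       CommunityData('public'),
#       UdpTransportTarget(('192.0.2.1', 161)),
#       ContextData(),
#       ObjectType(ObjectIdentity('ADTRAN-FRPerform-MIB',
#                                 'adFRPerformDayPortBytesRx', 1, 1))))
#   if errorIndication is None and not errorStatus:
#       for name, value in varBinds:
#           print(name.prettyPrint(), '=', value.prettyPrint())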
# --- /mysite2/urls.py (NIRVANALAN/Django-showpart) ---
"""mysite2 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('main/', include('main.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from django.conf.urls import static
from django.conf import settings
from main import views
import main.urls
urlpatterns = [
path('admin/', admin.site.urls),
    path('main/', include(main.urls)),
# path('github/',)
]
# --- /script/runTest.py (enterpriseih/easyTest) ---
# coding:utf-8
import os
import sys
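# put the repository root on sys.path so the 'script' and 'SRC' packages resolve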
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from script import addPathToPython, initSettings, selectModel
addPathToPython()
initSettings()
selectModel()
from SRC.main import Main
Main('login.xml').run()
# --- /page/migrations/0003_postcategory_link.py (euskate/django-page, MIT license) ---
# Generated by Django 2.0.5 on 2018-11-14 16:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0002_auto_20181114_0429'),
]
operations = [
migrations.AddField(
model_name='postcategory',
name='link',
            field=models.CharField(blank=True, max_length=1024, null=True, verbose_name='category page link'),
),
]
# --- /all_data/exercism_data/python/anagram/d8df0609726548d9a6083d84700d3f3e.py (itsolutionscorp/AutoStyle-Clustering) ---
from collections import Counter
def detect_anagrams(word, options):
    word = word.lower()  # ignore case
    word_char_count = Counter(word)
    res = []
    for option in options:
        # a word does not count as an anagram of itself
        if option.lower() != word:
            if Counter(option.lower()) == word_char_count:
                res.append(option)
return res
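# Example (illustrative):
#   detect_anagrams('listen', ['enlists', 'google', 'inlets', 'banana'])  # -> ['inlets']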
# --- /work/models.py (ronscoder/pmu) ---
from django.db import models
import re
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from simple_history.models import HistoricalRecords
# from work.functions import getHabID, getSiteProgress, formatString
def getHabID(**kwargs):
    return re.sub(r'[\W]+', '', "{}{}".format(kwargs['census'], kwargs['habitation'])).upper()
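# Illustrative: getHabID(census='123456', habitation='New Hab-2') -> '123456NEWHAB2'
# (non-word characters [^A-Za-z0-9_] are stripped and the result upper-cased)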
# class Test(models.Model):
# def __str__(self):
# return self.hab_id
# ref_id = models.CharField(max_length=200)
# Create your models here.
# DPR sites plus surveyed
class Timestamp(models.Model):
created_at = models.DateTimeField(
auto_now_add=True, blank=True, null=True)
updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)
class Meta:
abstract = True
class Common(Timestamp):
changeid = models.CharField(max_length=50, blank=True, null=True)
history = HistoricalRecords(inherit=True)
# to save without history!!
def _save(self, *args, **kwargs):
self.skip_history_when_saving = True
try:
ret = self.save(*args, **kwargs)
finally:
del self.skip_history_when_saving
return ret
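    # Setting `skip_history_when_saving` is the documented django-simple-history
    # way to suppress the history record for a single save call; the attribute
    # is removed afterwards so later saves are tracked again.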
class Meta:
abstract = True
class Log(Common):
def __str__(self):
return '{}-{}'.format(self.updated_at, self.changeid)
model = models.CharField(max_length=50, blank=True, null=True)
class Project(models.Model):
name = models.CharField(max_length=100, unique=True)
code = models.CharField(max_length=20, unique=True)
desc = models.TextField(default="", blank=True)
def __str__(self):
return self.name
class SiteMeta(models.Model):
class Meta:
abstract = True
hab_id = models.CharField(max_length=50, unique=True, null=True, blank=True)
approve_id = models.CharField(max_length=50, blank=True, null=True)
village = models.CharField(max_length=50)
census = models.CharField(max_length=6, blank=True)
habitation = models.CharField(max_length=50)
district = models.CharField(max_length=50)
division = models.CharField(max_length=50)
category = models.CharField(max_length=50, null=True, blank=True)
block = models.CharField(max_length=50, blank=True, null=True)
remark = models.TextField(default="", blank=True)
    project = models.ForeignKey(Project, on_delete=models.SET_NULL, null=True, blank=True)
def save(self, *args, **kwargs):
self.hab_id = getHabID(census=self.census, habitation=self.habitation)
self.habitation = str(self.habitation).upper()
self.village = str(self.village).upper()
print('saving... {}'.format(self.hab_id))
# super(SiteMeta, self).save(*args, **kwargs)
super().save(*args, **kwargs)
class Qfields(models.Model):
ht = models.FloatField(default=0)
ht_conductor = models.FloatField(default=0)
lt_1p = models.FloatField(default=0)
lt_3p = models.FloatField(default=0)
dtr_25 = models.IntegerField(default=0)
dtr_63 = models.IntegerField(default=0)
dtr_100 = models.IntegerField(default=0)
pole_lt_8m = models.IntegerField(default=0)
pole_ht_8m = models.IntegerField(default=0)
pole_9m = models.IntegerField(default=0)
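    # derived convenience total of 8 m poles (LT + HT); a property, not a DB column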
    pole_8m = property(lambda self: sum(qty for qty in [self.pole_lt_8m, self.pole_ht_8m] if qty is not None))
class Meta:
abstract = True
class Site(Common, SiteMeta):
def __str__(self):
return '[{}|{}|{}]'.format(self.village, self.census, self.habitation)
origin = models.ForeignKey(
"self", on_delete=models.SET_NULL, blank=True, null=True)
# def save(self, *args, **kwargs):
# self.hab_id = getHabID(census=self.census, habitation=self.habitation)
# super(Site, self).save(*args, **kwargs)
class DprQty(Common, Qfields):
def __str__(self):
return "{}".format(self.site)
site = models.OneToOneField(Site, on_delete=models.CASCADE)
category = models.CharField(max_length=50, blank=True, null=True)
mode = models.CharField(max_length=50, blank=True, null=True)
status = models.CharField(max_length=50, blank=True, null=True)
type = models.CharField(max_length=50, blank=True, null=True)
hh_bpl = models.IntegerField(default=0)
hh_bpl_metered = models.IntegerField(default=0)
hh_metered = models.IntegerField(default=0)
hh_unmetered = models.IntegerField(default=0)
hh_apl_free = models.IntegerField(default=0)
hh_apl_not_free = models.IntegerField(default=0)
# ht = models.FloatField(blank=True, null=True)
# lt_3p = models.FloatField(blank=True, null=True)
# lt_1p = models.FloatField(blank=True, null=True)
# dtr_100 = models.IntegerField(blank=True, null=True)
# dtr_63 = models.IntegerField(blank=True, null=True)
# dtr_25 = models.IntegerField(blank=True, null=True)
remark = models.CharField(max_length=100, blank=True, null=True)
has_infra = models.BooleanField(null=True, blank=True)
is_dpr_scope = models.BooleanField(default=False)
project = models.CharField(max_length=10, default='main')
def save(self, *args, **kwargs):
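        # any non-zero line length or DTR count marks the habitation as having infra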
if(sum([self.ht, self.lt_3p, self.lt_1p, self.dtr_100, self.dtr_63, self.dtr_25]) > 0):
self.has_infra = True
else:
self.has_infra = False
super().save(*args, **kwargs)
class SurveyQty(Common, Qfields):
def __str__(self):
return "{} - {}".format(self.site, self.status)
site = models.OneToOneField(Site, on_delete=models.CASCADE)
status = models.CharField(default="pending", max_length=200)
remark = models.CharField(max_length=200, blank=True, null=True)
class ShiftedMeta(models.Model):
class Meta:
abstract = True
acsr = models.FloatField(blank=True, null=True)
cable_3p = models.FloatField(blank=True, null=True)
cable_1p = models.FloatField(blank=True, null=True)
pole_8m = models.IntegerField(blank=True, null=True)
pole_9m = models.IntegerField(blank=True, null=True)
dtr_100 = models.IntegerField(blank=True, null=True)
dtr_63 = models.IntegerField(blank=True, null=True)
dtr_25 = models.IntegerField(blank=True, null=True)
remark = models.CharField(max_length=200, blank=True, null=True)
class ShiftedQty(Common, ShiftedMeta):
def __str__(self):
return "{}".format(self.site)
site = models.OneToOneField(Site, on_delete=models.CASCADE)
def habCompletionDocPath(instance, filename):
return 'CompletionDocuments/{}/{}'.format(instance.site.district, instance.site.hab_id + "-" + filename)
class ProgressMeta(Common, Qfields):
class Meta:
abstract = True
remark = models.CharField(max_length=200, blank=True, null=True)
status = models.CharField(default = 'not started', max_length=200,
choices=(
('completed', 'completed'),
('ongoing', 'ongoing'),
('not started', 'not started'),
('canceled', 'canceled'),
))
cert = models.BooleanField(default=False)
document = models.FileField(
upload_to=habCompletionDocPath, null=True, blank=True)
review = models.CharField(default='not reviewed', max_length=50, blank=True, null=True,
choices=(
('ok', 'ok'),
('issue', 'issue'),
('freeze', 'freeze'),
('not reviewed', 'not reviewed'),
))
review_text = models.TextField(null=True, blank=True)
has_infra = models.BooleanField(default=False)
def save(self, *args, **kwargs):
if(sum([self.ht, self.lt_3p, self.lt_1p, self.dtr_100, self.dtr_63, self.dtr_25]) > 0):
self.has_infra = True
else:
self.has_infra = False
super().save(*args, **kwargs)
# def _pole_8m(self):
# return sum([qty for qty in [self.pole_lt_8m, self.pole_ht_8m] if qty != None])
# pole_8m = property(lambda self: sum(
# [qty for qty in [self.pole_lt_8m, self.pole_ht_8m] if qty != None]))
# pole_9m = property(lambda self: sum(self.dtr_100, self.dtr_63, self.dtr_25))
class ProgressQty(ProgressMeta):
def __str__(self):
return "{}".format(self.site)
site = models.OneToOneField(Site, on_delete=models.CASCADE)
class SiteExtra(Common, SiteMeta):
def __str__(self):
return '[{}|{}|{}](additional)'.format(self.village, self.census, self.habitation)
site = models.ForeignKey(
Site, on_delete=models.CASCADE, blank=True, null=True)
# def save(self, *args, **kwargs):
# self.hab_id = getHabID(census=self.census, habitation=self.habitation)
# super(SiteExtra, self).save(*args, **kwargs)
class ProgressQtyExtra(ProgressMeta):
def __str__(self):
return "{}".format(self.site)
site = models.OneToOneField(SiteExtra, on_delete=models.CASCADE)
class ShiftedQtyExtra(Common, ShiftedMeta):
def __str__(self):
return "{}".format(self.site)
site = models.OneToOneField(SiteExtra, on_delete=models.CASCADE)
def HeadlineDocPath(instance, filename):
return 'Resolutions/{}-{}'.format(instance.created_at, filename)
class Resolution(Timestamp):
def __str__(self):
if(self.status == 'pending'):
status = "🔴"
elif(self.status == 'done'):
status = "✅"
else:
status = "🌕"
width = 30
return "{} {} | {}".format(status, self.statement[:width], self.resolution[:width])
# return "{} | {}".format(status, self.__dict__)
statement = models.TextField()
resolution = models.TextField(null=True, blank=True)
deadline = models.DateField(null=True, blank=True)
status = models.CharField(null=True, blank=True,
max_length=10,
choices=(
("done", "done"),
("pending", "pending"),
("deferred", "deferred"),
("info", "info"),
)
)
document = models.FileField(
upload_to=HeadlineDocPath, blank=True, null=True)
history = HistoricalRecords(inherit=True)
class ResolutionLink(models.Model):
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
resolution = models.ForeignKey(
Resolution, on_delete=models.CASCADE, null=True)
# user1 = models.CharField(max_length=100, null=True, blank=True)
def LoaDocPath(instance, filename):
return 'LOA/{}-{}'.format(instance.area, filename)
class Loa(Qfields):
area = models.CharField(max_length=50, unique=True)
supply_cost = models.FloatField(blank=True, null=True)
erection_cost = models.FloatField(blank=True, null=True)
document = models.FileField(
upload_to=LoaDocPath, blank=True, null=True)
class Variations(models.Model):
variant = models.CharField(max_length=50)
variantof = models.ManyToManyField("self")
class HabitationVariations(models.Model):
site = models.ManyToManyField(Site)
habitation = models.CharField(max_length=50)
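# --- Editor's sketch (not part of the original file) ---
# SiteMeta.save() upper-cases habitation/village and refreshes hab_id via
# getHabID (defined earlier in this module). A minimal illustration with
# hypothetical values, assuming migrations are applied:
#
# >>> s = Site(village='rampur', census='123456', habitation='rampur khurd',
# ...          district='D1', division='V1')
# >>> s.save()
# >>> s.village, s.habitation
# ('RAMPUR', 'RAMPUR KHURD')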
| [
"[email protected]"
] | |
c1e12dad4fdd03f0ad5fadb0a2918fe560bb2501 | c237dfae82e07e606ba9385b336af8173d01b251 | /lib/python/Products/ZGadflyDA/db.py | 409dcc6774b9ca8d71c9382f168eeff4d9eb2e18 | [
"ZPL-2.0"
] | permissive | OS2World/APP-SERVER-Zope | 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | dedc799bd7eda913ffc45da43507abe2fa5113be | refs/heads/master | 2020-05-09T18:29:47.818789 | 2014-11-07T01:48:29 | 2014-11-07T01:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,563 | py | ##############################################################################
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
'''$Id: db.py,v 1.13 2002/08/14 22:25:17 mj Exp $'''
__version__='$Revision: 1.13 $'[11:-2]
import os
from string import strip, split
import gadfly
import Globals, Shared.DC.ZRDB.THUNK
from DateTime import DateTime
data_dir=os.path.join(Globals.data_dir,'gadfly')
def manage_DataSources():
if not os.path.exists(data_dir):
try:
os.mkdir(data_dir)
os.mkdir(os.path.join(data_dir,'demo'))
except:
raise 'Gadfly Error', (
"""
The Zope Gadfly Database Adapter requires the
existence of the directory, <code>%s</code>. An error
occurred while trying to create this directory.
""" % data_dir)
if not os.path.isdir(data_dir):
raise 'Gadfly Error', (
"""
The Zope Gadfly Database Adapter requires the
existence of the directory, <code>%s</code>. This
exists, but is not a directory.
""" % data_dir)
return map(
lambda d: (d,''),
filter(lambda f, i=os.path.isdir, d=data_dir, j=os.path.join:
i(j(d,f)),
os.listdir(data_dir))
)
class DB(Shared.DC.ZRDB.THUNK.THUNKED_TM):
database_error=gadfly.error
opened=''
def tables(self,*args,**kw):
if self.db is None: self.open()
return map(
lambda name: {
'TABLE_NAME': name,
'TABLE_TYPE': 'TABLE',
},
filter(self.db.database.datadefs.has_key, self.db.table_names())
)
def columns(self, table_name):
if self.db is None: self.open()
return map(lambda col: {
'Name': col.colid, 'Type': col.datatype, 'Precision': 0,
'Scale': 0, 'Nullable': 'with Null'
}, self.db.database.datadefs[table_name].colelts)
def open(self):
connection=self.connection
path=os.path
dir=path.join(data_dir,connection)
if not path.isdir(dir):
raise self.database_error, 'invalid database error, ' + connection
if not path.exists(path.join(dir,connection+".gfd")):
db=gadfly.gadfly()
db.startup(connection,dir)
else: db=gadfly.gadfly(connection,dir)
self.db=db
self.opened=DateTime()
def close(self):
self.db=None
del self.opened
def __init__(self,connection):
self.connection=connection
self.open()
def query(self,query_string, max_rows=9999999):
if self.db is None: self.open()
self._register()
c=self.db.cursor()
queries=filter(None, map(strip,split(query_string, '\0')))
if not queries: raise 'Query Error', 'empty query'
desc=None
result=[]
for qs in queries:
c.execute(qs)
d=c.description
if d is None: continue
if desc is None: desc=d
elif d != desc:
raise 'Query Error', (
'Multiple incompatible selects in '
'multiple sql-statement query'
)
if not result: result=c.fetchmany(max_rows)
elif len(result) < max_rows:
result=result+c.fetchmany(max_rows-len(result))
if desc is None: return (),()
items=[]
for name, type, width, ds, p, scale, null_ok in desc:
if type=='NUMBER':
if scale==0: type='i'
else: type='n'
elif type=='DATE':
type='d'
else: type='s'
items.append({
'name': name,
'type': type,
'width': width,
'null': null_ok,
})
return items, result
# Gadfly needs the extra checkpoint call.
def _abort(self):
self.db.rollback()
self.db.checkpoint()
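# --- Editor's sketch (not part of the original file) ---
# DB.query() maps Gadfly column descriptions to one-letter type codes:
# NUMBER with scale 0 -> 'i', other NUMBER -> 'n', DATE -> 'd', else 's'.
# A standalone restatement of that mapping:
#
# def _type_code(gf_type, scale):
#     if gf_type == 'NUMBER':
#         return 'i' if scale == 0 else 'n'
#     return 'd' if gf_type == 'DATE' else 's'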
| [
"[email protected]"
] | |
7aa5681a1475a9d4efeaf93110d2d95cf57166aa | 528f910908885c3ded4ecc6380b9603c8dcacbd6 | /tbapi/top/api/rest/WlbTmsorderQueryRequest.py | bcac48c2b08a01a46b9956084883246e889adc8d | [] | no_license | Monica-ckd/data007 | 15fe9c4c898a51a58100138b6b064211199d2ed1 | 0e54ae57eb719b86ec14ce9f77b027882a3398a8 | refs/heads/master | 2023-03-16T05:26:14.257318 | 2016-05-25T06:57:05 | 2016-05-25T06:57:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 363 | py | '''
Created by auto_sdk on 2013-04-01 16:44:41
'''
from top.api.base import RestApi
class WlbTmsorderQueryRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.order_code = None
self.page_no = None
self.page_size = None
def getapiname(self):
return 'taobao.wlb.tmsorder.query'
| [
"[email protected]"
] | |
1330a1d78ccbfcb3b204dddbc5a7a1770d985167 | 9e3d8cca75de767f948736c7cde601221c4ef8ab | /core/migrations/0006_pontoturistico_enderecos.py | 7918f3e394208fee0d5e855cc4eed65f5a8b96d4 | [] | no_license | miradouro/pontosturisticos | b6d744c0b33f9ecdc8dca405615c7cc8a3ba202f | 4ea382281b8999eceab8add13ab2bd511319d58c | refs/heads/master | 2023-03-30T16:11:14.359286 | 2021-04-07T11:47:18 | 2021-04-07T11:47:18 | 354,115,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # Generated by Django 3.1.7 on 2021-04-03 11:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('enderecos', '0001_initial'),
('core', '0005_pontoturistico_avaliacoes'),
]
operations = [
migrations.AddField(
model_name='pontoturistico',
name='enderecos',
field=models.ManyToManyField(to='enderecos.Endereco'),
),
]
| [
"[email protected]"
] | |
718c8ce1a5105ec6d7e129d1115b09c10add018e | eb61e15895b7f76ff093b972fc369336880b97cb | /project/hn-sinc-nsf-9/model.py | 011cdb5ed1e754cfb6e5f05a6e33121422c72a1d | [
"BSD-3-Clause"
] | permissive | xhtian/project-NN-Pytorch-scripts | f85221e234979c5caa7078cde7cd32444616220f | 8c8318612e467c61c9d7d9315714e522bce3f2fe | refs/heads/master | 2022-10-11T16:13:39.630670 | 2020-06-05T04:45:53 | 2020-06-05T04:45:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,478 | py | #!/usr/bin/env python
"""
model.py for harmonic-plus-noise NSF with trainable sinc filter
version: 9
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import numpy as np
import torch
import torch.nn as torch_nn
import torch.nn.functional as torch_nn_func
import core_scripts.other_tools.debug as nii_debug
import config as prj_conf
__author__ = "Xin Wang"
__email__ = "[email protected]"
__copyright__ = "Copyright 2020, Xin Wang"
##############
# Building blocks (torch.nn modules + dimension operation)
#
# For blstm
class BLSTMLayer(torch_nn.Module):
""" Wrapper over dilated conv1D
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
Recurrency is conducted along "length"
"""
def __init__(self, input_dim, output_dim):
super(BLSTMLayer, self).__init__()
if output_dim % 2 != 0:
print("Output_dim of BLSTMLayer is {:d}".format(output_dim))
print("BLSTMLayer expects a layer size of even number")
sys.exit(1)
# bi-directional LSTM
self.l_blstm = torch_nn.LSTM(input_dim, output_dim // 2, \
bidirectional=True)
def forward(self, x):
# permute to (length, batchsize=1, dim)
blstm_data, _ = self.l_blstm(x.permute(1, 0, 2))
# permute it back to (batchsize=1, length, dim)
return blstm_data.permute(1, 0, 2)
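# Editor's sketch (not in the original file): a minimal shape check,
# assuming a working PyTorch install and the (batchsize=1, length, dim)
# layout documented above:
#
# >>> _blstm = BLSTMLayer(input_dim=8, output_dim=16)
# >>> _blstm(torch.zeros(1, 100, 8)).shape
# torch.Size([1, 100, 16])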
#
# 1D dilated convolution that keeps the input/output length
class Conv1dKeepLength(torch_nn.Conv1d):
""" Wrapper over Conv1d that keeps the input/output length (optionally causal)
Input tensor: (batchsize=1, length, dim_in)
Output tensor: (batchsize=1, length, dim_out)
https://github.com/pytorch/pytorch/issues/1333
Note: Tanh is optional
"""
def __init__(self, input_dim, output_dim, dilation_s, kernel_s,
causal = False, stride = 1, groups=1, bias=True, \
tanh = True, pad_mode='constant'):
super(Conv1dKeepLength, self).__init__(
input_dim, output_dim, kernel_s, stride=stride,
padding = 0, dilation = dilation_s, groups=groups, bias=bias)
self.pad_mode = pad_mode
self.causal = causal
# input & output length will be the same
if self.causal:
# left pad to make the convolution causal
self.pad_le = dilation_s * (kernel_s - 1)
self.pad_ri = 0
else:
# pad on both sizes
self.pad_le = dilation_s * (kernel_s - 1) // 2
self.pad_ri = dilation_s * (kernel_s - 1) - self.pad_le
if tanh:
self.l_ac = torch_nn.Tanh()
else:
self.l_ac = torch_nn.Identity()
def forward(self, data):
# permute to (batchsize=1, dim, length)
# add one dimension (batchsize=1, dim, ADDED_DIM, length)
# pad to ADDED_DIM
# squeeze and return to (batchsize=1, dim, length)
# https://github.com/pytorch/pytorch/issues/1333
x = torch_nn_func.pad(data.permute(0, 2, 1).unsqueeze(2), \
(self.pad_le, self.pad_ri, 0, 0),
mode = self.pad_mode).squeeze(2)
# tanh(conv1())
# permmute back to (batchsize=1, length, dim)
output = self.l_ac(super(Conv1dKeepLength, self).forward(x))
return output.permute(0, 2, 1)
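# Editor's sketch (not in the original file): padding is chosen so the
# output length always equals the input length, causal or not:
#
# >>> _conv = Conv1dKeepLength(4, 6, dilation_s=2, kernel_s=3, causal=True)
# >>> _conv(torch.zeros(1, 50, 4)).shape
# torch.Size([1, 50, 6])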
#
# Moving average
class MovingAverage(Conv1dKeepLength):
""" Wrapper to define a moving average smoothing layer
Note: MovingAverage can be implemented using TimeInvFIRFilter too.
Here we define another Module directly on Conv1dKeepLength
"""
def __init__(self, feature_dim, window_len, causal=False, \
pad_mode='replicate'):
super(MovingAverage, self).__init__(
feature_dim, feature_dim, 1, window_len, causal,
groups=feature_dim, bias=False, tanh=False, \
pad_mode=pad_mode)
# set the weighting coefficients
torch_nn.init.constant_(self.weight, 1/window_len)
# turn off grad for this layer
for p in self.parameters():
p.requires_grad = False
def forward(self, data):
return super(MovingAverage, self).forward(data)
#
# FIR filter layer
class TimeInvFIRFilter(Conv1dKeepLength):
""" Wrapper to define a FIR filter over Conv1d
Note: FIR Filtering is conducted on each dimension (channel)
independently: groups=channel_num in conv1d
"""
def __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False):
""" __init__(self, feature_dim, filter_coef,
causal=True, flag_train=False)
feature_dim: dimension of input data
filter_coef: 1-D tensor of filter coefficients
causal: FIR is causal or not (default: true)
flag_train: whether train the filter coefficients (default false)
Input data: (batchsize=1, length, feature_dim)
Output data: (batchsize=1, length, feature_dim)
"""
super(TimeInvFIRFilter, self).__init__(
feature_dim, feature_dim, 1, filter_coef.shape[0], causal,
groups=feature_dim, bias=False, tanh=False)
if filter_coef.ndim == 1:
# initialize weight using provided filter_coef
with torch.no_grad():
tmp_coef = torch.zeros([feature_dim, 1,
filter_coef.shape[0]])
tmp_coef[:, 0, :] = filter_coef
tmp_coef = torch.flip(tmp_coef, dims=[2])
self.weight = torch.nn.Parameter(tmp_coef,
requires_grad=flag_train)
else:
print("TimeInvFIRFilter expects filter_coef to be 1-D tensor")
print("Please implement the code in __init__ if necessary")
sys.exit(1)
def forward(self, data):
return super(TimeInvFIRFilter, self).forward(data)
class TimeVarFIRFilter(torch_nn.Module):
""" TimeVarFIRFilter
Given sequences of filter coefficients and a signal, do filtering
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
For batch 0:
For n in [1, sequence_length):
output(0, n, 1) = \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
Note: filter coef (0, n, :) is only used to compute the output
at (0, n, 1)
"""
def __init__(self):
super(TimeVarFIRFilter, self).__init__()
def forward(self, signal, f_coef):
"""
Filter coefs: (batchsize=1, signal_length, filter_order = K)
Signal: (batchsize=1, signal_length, 1)
Output: (batchsize=1, signal_length, 1)
For n in [1, sequence_length):
output(0, n, 1)= \sum_{k=1}^{K} signal(0, n-k, 1)*coef(0, n, k)
This method may not be efficient:
Suppose signal [x_1, ..., x_N], filter [a_1, ..., a_K]
output [y_1, y_2, y_3, ..., y_N, *, * ... *]
= a_1 * [x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_2 * [ 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
+ a_3 * [ 0, 0, x_1, x_2, x_3, ..., x_N, 0, ..., 0]
"""
signal_l = signal.shape[1]
order_k = f_coef.shape[-1]
# pad to (batchsize=1, signal_length + filter_order-1, dim)
padded_signal = torch_nn_func.pad(signal, (0, 0, 0, order_k - 1))
y = torch.zeros_like(signal)
# roll and weighted sum, only take [0:signal_length]
for k in range(order_k):
y += torch.roll(padded_signal, k, dims=1)[:, 0:signal_l, :] \
* f_coef[:, :, k:k+1]
# done
return y
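# Editor's sketch (not in the original file): with time-invariant taps,
# TimeVarFIRFilter reduces to an ordinary causal FIR filter; e.g. a 2-tap
# moving sum y[n] = x[n] + x[n-1]:
#
# >>> _x = torch.arange(5.).view(1, 5, 1)
# >>> _c = torch.ones(1, 5, 2)
# >>> TimeVarFIRFilter()(_x, _c).view(-1)
# tensor([0., 1., 3., 5., 7.])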
# Sinc filter generator
class SincFilter(torch_nn.Module):
""" SincFilter
Given the cut-off-frequency, produce the low-pass and high-pass
windowed-sinc-filters.
If input cut-off-frequency is (batchsize=1, signal_length, 1),
output filter coef is (batchsize=1, signal_length, filter_order).
For each time step in [1, signal_length), we calculate one
filter for low-pass sinc filter and another for high-pass filter.
Example:
import scipy
import scipy.signal
import numpy as np
filter_order = 31
cut_f = 0.2
sinc_layer = SincFilter(filter_order)
lp_coef, hp_coef = sinc_layer(torch.ones(1, 10, 1) * cut_f)
w, h1 = scipy.signal.freqz(lp_coef[0, 0, :].numpy(), [1])
w, h2 = scipy.signal.freqz(hp_coef[0, 0, :].numpy(), [1])
plt.plot(w, 20*np.log10(np.abs(h1)))
plt.plot(w, 20*np.log10(np.abs(h2)))
plt.plot([cut_f * np.pi, cut_f * np.pi], [-100, 0])
"""
def __init__(self, filter_order):
super(SincFilter, self).__init__()
# Make the filter order an odd number
# [-(M-1)/2, ... 0, (M-1)/2]
#
self.half_k = (filter_order - 1) // 2
self.order = self.half_k * 2 +1
def hamming_w(self, n_index):
""" prepare hamming window for each time step
n_index (batchsize=1, signal_length, filter_order)
For each time step, n_index will be [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 0, :] = [-(M-1)/2, ... 0, (M-1)/2]
n_index[0, 1, :] = [-(M-1)/2, ... 0, (M-1)/2]
...
output (batchsize=1, signal_length, filter_order)
output[0, 0, :] = hamming_window
output[0, 1, :] = hamming_window
...
"""
# Hamming window
return 0.54 + 0.46 * torch.cos(2 * np.pi * n_index / self.order)
def sinc(self, x):
""" Normalized sinc-filter sin( pi * x) / pi * x
https://en.wikipedia.org/wiki/Sinc_function
Assume x (batchsize, signal_length, filter_order) and
x[0, 0, :] = [-half_order, - half_order+1, ... 0, ..., half_order]
x[:, :, self.half_k] -> time index = 0, sinc(0)=1
"""
y = torch.zeros_like(x)
y[:,:,0:self.half_k]=torch.sin(np.pi * x[:, :, 0:self.half_k]) \
/ (np.pi * x[:, :, 0:self.half_k])
y[:,:,self.half_k+1:]=torch.sin(np.pi * x[:, :, self.half_k+1:]) \
/ (np.pi * x[:, :, self.half_k+1:])
y[:,:,self.half_k] = 1
return y
def forward(self, cut_f):
""" lp_coef, hp_coef = forward(self, cut_f)
cut-off frequency cut_f (batchsize=1, length, dim = 1)
lp_coef: low-pass filter coefs (batchsize, length, filter_order)
hp_coef: high-pass filter coefs (batchsize, length, filter_order)
"""
# create the filter order index
with torch.no_grad():
# [- (M-1) / 2, ..., 0, ..., (M-1)/2]
lp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
# [[[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ],
# [[- (M-1) / 2, ..., 0, ..., (M-1)/2],
# [- (M-1) / 2, ..., 0, ..., (M-1)/2],
# ...
# ]]
lp_coef = lp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
hp_coef = torch.arange(-self.half_k, self.half_k + 1,
device=cut_f.device)
hp_coef = hp_coef.repeat(cut_f.shape[0], cut_f.shape[1], 1)
# temporary buffer of [-1^n] for gain norm in hp_coef
tmp_one = torch.pow(-1, hp_coef)
# unnormalized filter coefs with hamming window
lp_coef = cut_f * self.sinc(cut_f * lp_coef) \
* self.hamming_w(lp_coef)
hp_coef = (self.sinc(hp_coef) \
- cut_f * self.sinc(cut_f * hp_coef)) \
* self.hamming_w(hp_coef)
# normalize the coef to make gain at 0/pi is 0 dB
# sum_n lp_coef[n]
lp_coef_norm = torch.sum(lp_coef, axis=2).unsqueeze(-1)
# sum_n hp_coef[n] * -1^n
hp_coef_norm = torch.sum(hp_coef * tmp_one, axis=2).unsqueeze(-1)
lp_coef = lp_coef / lp_coef_norm
hp_coef = hp_coef / hp_coef_norm
# return normed coef
return lp_coef, hp_coef
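# Editor's sketch (not in the original file): by construction the returned
# taps are gain-normalized -- the low-pass taps sum to 1 (0 dB at DC) and
# the alternating-sign sum of the high-pass taps is 1 (0 dB at Nyquist).
# Quick check, assuming the PyTorch version this code was written against:
#
# >>> _lp, _hp = SincFilter(31)(torch.full((1, 4, 1), 0.3))
# >>> bool(torch.allclose(_lp.sum(-1), torch.ones(1, 4)))
# True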
#
# Up sampling
class UpSampleLayer(torch_nn.Module):
""" Wrapper over up-sampling
Input tensor: (batchsize=1, length, dim)
Output tensor: (batchsize=1, length * up-sampling_factor, dim)
"""
def __init__(self, feature_dim, up_sampling_factor, smoothing=False):
super(UpSampleLayer, self).__init__()
# wrap a up_sampling layer
self.scale_factor = up_sampling_factor
self.l_upsamp = torch_nn.Upsample(scale_factor=self.scale_factor)
if smoothing:
self.l_ave1 = MovingAverage(feature_dim, self.scale_factor)
self.l_ave2 = MovingAverage(feature_dim, self.scale_factor)
else:
self.l_ave1 = torch_nn.Identity()
self.l_ave2 = torch_nn.Identity()
return
def forward(self, x):
# permute to (batchsize=1, dim, length)
up_sampled_data = self.l_upsamp(x.permute(0, 2, 1))
# permute it back to (batchsize=1, length, dim)
# and do two moving average
return self.l_ave1(self.l_ave2(up_sampled_data.permute(0, 2, 1)))
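# Editor's sketch (not in the original file): the length axis is simply
# multiplied by the up-sampling factor (smoothing off by default):
#
# >>> _up = UpSampleLayer(feature_dim=2, up_sampling_factor=80)
# >>> _up(torch.zeros(1, 10, 2)).shape
# torch.Size([1, 800, 2])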
# Neural filter block (1 block)
class NeuralFilterBlock(torch_nn.Module):
""" Wrapper over a single filter block
"""
def __init__(self, signal_size, hidden_size,\
kernel_size=3, conv_num=10):
super(NeuralFilterBlock, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.conv_num = conv_num
self.dilation_s = [np.power(2, x) for x in np.arange(conv_num)]
# ff layer to expand dimension
self.l_ff_1 = torch_nn.Linear(signal_size, hidden_size, \
bias=False)
self.l_ff_1_tanh = torch_nn.Tanh()
# dilated conv layers
tmp = [Conv1dKeepLength(hidden_size, hidden_size, x, \
kernel_size, causal=True, bias=False) \
for x in self.dilation_s]
self.l_convs = torch_nn.ModuleList(tmp)
# ff layer to de-expand dimension
self.l_ff_2 = torch_nn.Linear(hidden_size, hidden_size//4, \
bias=False)
self.l_ff_2_tanh = torch_nn.Tanh()
self.l_ff_3 = torch_nn.Linear(hidden_size//4, signal_size, \
bias=False)
self.l_ff_3_tanh = torch_nn.Tanh()
# a simple scale
self.scale = torch_nn.Parameter(torch.tensor([0.1]),
requires_grad=False)
return
def forward(self, signal, context):
"""
Assume: signal (batchsize=1, length, signal_size)
context (batchsize=1, length, hidden_size)
Output: (batchsize=1, length, signal_size)
"""
# expand dimension
tmp_hidden = self.l_ff_1_tanh(self.l_ff_1(signal))
# loop over dilated convs
# output of a d-conv is input + context + d-conv(input)
for l_conv in self.l_convs:
tmp_hidden = tmp_hidden + l_conv(tmp_hidden) + context
# to be consistent with legacy configuration in CURRENNT
tmp_hidden = tmp_hidden * self.scale
# compress the dimension and skip-add
tmp_hidden = self.l_ff_2_tanh(self.l_ff_2(tmp_hidden))
tmp_hidden = self.l_ff_3_tanh(self.l_ff_3(tmp_hidden))
output_signal = tmp_hidden + signal
return output_signal
#
# Sine waveform generator
class SineGen(torch_nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
sine_amp: amplitude of sine-waveform (default 0.1)
noise_std: std of Gaussian noise (default 0.003)
voiced_threshold: F0 threshold for U/V classification (default 0)
flag_for_pulse: this SinGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
# convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2],\
device = f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
# instantaneous phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
if not self.flag_for_pulse:
# for normal case
sines = torch.sin(torch.cumsum(rad_values, dim=1) *2*np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
# get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
# each voiced segments
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
phase_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
phase_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
phase_buf[:, :, idx+1] = phase_buf[:, :, 0] * (idx+2)
# generate sine waveforms
sine_waves = self._f02sine(phase_buf) * self.sine_amp
# generate uv signal
#uv = torch.ones(f0.shape)
#uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
# for voiced regions, std is self.noise_std
noise_amp = uv * self.noise_std + (1-uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
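# Editor's sketch (not in the original file): 100 samples of a constant
# 100 Hz tone plus 7 overtones at 16 kHz; steps with f0 == 0 would come
# out as scaled noise because uv masks the sines:
#
# >>> _gen = SineGen(16000, harmonic_num=7)
# >>> _sines, _uv, _noise = _gen(torch.full((1, 100, 1), 100.0))
# >>> _sines.shape, _uv.shape
# (torch.Size([1, 100, 8]), torch.Size([1, 100, 1]))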
#####
## Model definition
##
## For condition module only provide Spectral feature to Filter block
class CondModuleHnSincNSF(torch_nn.Module):
""" Condition module for hn-sinc-NSF
Upsample and transform input features
CondModuleHnSincNSF(input_dimension, output_dimension, up_sample_rate,
blstm_dimension = 64, cnn_kernel_size = 3)
Spec, F0, cut_off_freq = CondModuleHnSincNSF(features, F0)
Both input features should be frame-level features
If x doesn't contain F0, just ignore the returned F0
CondModuleHnSincNSF(input_dim, output_dim, up_sample,
blstm_s = 64, cnn_kernel_s = 3,
voiced_threshold = 0):
input_dim: sum of dimensions of input features
output_dim: dim of the feature Spec to be used by neural filter-block
up_sample: up sampling rate of input features
blstm_s: dimension of the features from blstm (default 64)
cnn_kernel_s: kernel size of CNN in condition module (default 3)
voiced_threshold: f0 > voiced_threshold is voiced, otherwise unvoiced
"""
def __init__(self, input_dim, output_dim, up_sample, \
blstm_s = 64, cnn_kernel_s = 3, voiced_threshold = 0):
super(CondModuleHnSincNSF, self).__init__()
# input feature dimension
self.input_dim = input_dim
self.output_dim = output_dim
self.up_sample = up_sample
self.blstm_s = blstm_s
self.cnn_kernel_s = cnn_kernel_s
self.cut_f_smooth = up_sample * 4
self.voiced_threshold = voiced_threshold
# the blstm layer
self.l_blstm = BLSTMLayer(input_dim, self.blstm_s)
# the CNN layer (+1 dim for cut_off_frequence of sinc filter)
self.l_conv1d = Conv1dKeepLength(self.blstm_s, \
self.output_dim, \
dilation_s = 1, \
kernel_s = self.cnn_kernel_s)
# Upsampling layer for hidden features
self.l_upsamp = UpSampleLayer(self.output_dim, \
self.up_sample, True)
# separate layer for up-sampling normalized F0 values
self.l_upsamp_f0_hi = UpSampleLayer(1, self.up_sample, True)
# Upsampling for F0: don't smooth up-sampled F0
self.l_upsamp_F0 = UpSampleLayer(1, self.up_sample, False)
# Another smoothing layer to smooth the cut-off frequency
# for sinc filters. Use a larger window to smooth
self.l_cut_f_smooth = MovingAverage(1, self.cut_f_smooth)
def get_cut_f(self, hidden_feat, f0):
""" cut_f = get_cut_f(self, feature, f0)
feature: (batchsize, length, dim=1)
f0: (batchsize, length, dim=1)
"""
# generate uv signal
uv = torch.ones_like(f0) * (f0 > self.voiced_threshold)
# hidden_feat is between (-1, 1) after conv1d with tanh
# (-0.2, 0.2) + 0.3 = (0.1, 0.5)
# voiced: (0.1, 0.5) + 0.4 = (0.5, 0.9)
# unvoiced: (0.1, 0.5) = (0.1, 0.5)
return hidden_feat * 0.2 + uv * 0.4 + 0.3
def forward(self, feature, f0):
""" spec, f0 = forward(self, feature, f0)
feature: (batchsize, length, dim)
f0: (batchsize, length, dim=1), which should be F0 at frame-level
spec: (batchsize, length, self.output_dim), at wave-level
f0: (batchsize, length, 1), at wave-level
"""
tmp = self.l_upsamp(self.l_conv1d(self.l_blstm(feature)))
# concatenate normed F0 with hidden spectral features
context = torch.cat((tmp[:, :, 0:self.output_dim-1], \
self.l_upsamp_f0_hi(feature[:, :, -1:])), \
dim=2)
# hidden feature for cut-off frequency
hidden_cut_f = tmp[:, :, self.output_dim-1:]
# directly up-sample F0 without smoothing
f0_upsamp = self.l_upsamp_F0(f0)
# get the cut-off-frequency from output of CNN
cut_f = self.get_cut_f(hidden_cut_f, f0_upsamp)
# smooth the cut-off-frequency using fixed average smoothing
cut_f_smoothed = self.l_cut_f_smooth(cut_f)
# return
return context, f0_upsamp, cut_f_smoothed, hidden_cut_f
# For source module
class SourceModuleHnNSF(torch_nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshold=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshold)
# to merge source harmonics into a single excitation
self.l_linear = torch_nn.Linear(harmonic_num+1, 1)
self.l_tanh = torch_nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
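# Editor's sketch (not in the original file): the linear+tanh merge
# collapses the 8 harmonic sines into a single excitation channel:
#
# >>> _src = SourceModuleHnNSF(16000, harmonic_num=7)
# >>> _sine, _noise, _uv = _src(torch.full((1, 100, 1), 100.0))
# >>> _sine.shape
# torch.Size([1, 100, 1])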
# For Filter module
class FilterModuleHnSincNSF(torch_nn.Module):
""" Filter for Hn-sinc-NSF
FilterModuleHnSincNSF(signal_size, hidden_size, sinc_order = 31,
block_num = 5, kernel_size = 3,
conv_num_in_block = 10)
signal_size: signal dimension (should be 1)
hidden_size: dimension of hidden features inside neural filter block
sinc_order: order of the sinc filter
block_num: number of neural filter blocks in harmonic branch
kernel_size: kernel size in dilated CNN
conv_num_in_block: number of d-conv1d in one neural filter block
Usage:
output = FilterModuleHnSincNSF(har_source, noi_source, cut_f, context)
har_source: source for harmonic branch (batchsize, length, dim=1)
noi_source: source for noise branch (batchsize, length, dim=1)
cut_f: cut-off-frequency of sinc filters (batchsize, length, dim=1)
context: hidden features to be added (batchsize, length, dim)
output: (batchsize, length, dim=1)
"""
def __init__(self, signal_size, hidden_size, sinc_order = 31, \
block_num = 5, kernel_size = 3, conv_num_in_block = 10):
super(FilterModuleHnSincNSF, self).__init__()
self.signal_size = signal_size
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.block_num = block_num
self.conv_num_in_block = conv_num_in_block
self.sinc_order = sinc_order
# filter blocks for harmonic branch
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block) \
for x in range(self.block_num)]
self.l_har_blocks = torch_nn.ModuleList(tmp)
# filter blocks for noise branch (only one block, 5 sub-blocks)
tmp = [NeuralFilterBlock(signal_size, hidden_size, \
kernel_size, conv_num_in_block // 2) \
for x in range(1)]
self.l_noi_blocks = torch_nn.ModuleList(tmp)
# sinc filter generators and time-variant filtering layer
self.l_sinc_coef = SincFilter(self.sinc_order)
self.l_tv_filtering = TimeVarFIRFilter()
# done
def forward(self, har_component, noi_component, cond_feat, cut_f):
"""
"""
# harmonic component
for l_har_block in self.l_har_blocks:
har_component = l_har_block(har_component, cond_feat)
# noise component
for l_noi_block in self.l_noi_blocks:
noi_component = l_noi_block(noi_component, cond_feat)
# get sinc filter coefficients
lp_coef, hp_coef = self.l_sinc_coef(cut_f)
# time-variant filtering
har_signal = self.l_tv_filtering(har_component, lp_coef)
noi_signal = self.l_tv_filtering(noi_component, hp_coef)
# get output
return har_signal + noi_signal
## FOR MODEL
class Model(torch_nn.Module):
""" Model definition
"""
def __init__(self, in_dim, out_dim, args, mean_std=None):
super(Model, self).__init__()
torch.manual_seed(1)
# mean std of input and output
in_m, in_s, out_m, out_s = self.prepare_mean_std(in_dim,out_dim,\
args, mean_std)
self.input_mean = torch_nn.Parameter(in_m, requires_grad=False)
self.input_std = torch_nn.Parameter(in_s, requires_grad=False)
self.output_mean = torch_nn.Parameter(out_m, requires_grad=False)
self.output_std = torch_nn.Parameter(out_s, requires_grad=False)
self.input_dim = in_dim
self.output_dim = out_dim
# configurations
# amplitude of sine waveform (for each harmonic)
self.sine_amp = 0.1
# standard deviation of Gaussian noise for additive noise
self.noise_std = 0.003
# dimension of hidden features in filter blocks
self.hidden_dim = 64
# upsampling rate on input acoustic features (16kHz * 5ms = 80)
# assume input_reso has the same value
self.upsamp_rate = prj_conf.input_reso[0]
# sampling rate (Hz)
self.sampling_rate = prj_conf.wav_samp_rate
# CNN kernel size in filter blocks
self.cnn_kernel_s = 3
# number of filter blocks (for harmonic branch)
# noise branch only uses 1 block
self.filter_block_num = 5
# number of dilated CNN in each filter block
self.cnn_num_in_block = 10
# number of harmonic overtones in source
self.harmonic_num = 7
# order of sinc-windowed-FIR-filter
self.sinc_order = 31
# the three modules
self.m_cond = CondModuleHnSincNSF(self.input_dim, \
self.hidden_dim, \
self.upsamp_rate, \
cnn_kernel_s=self.cnn_kernel_s)
self.m_source = SourceModuleHnNSF(self.sampling_rate,
self.harmonic_num,
self.sine_amp, self.noise_std)
self.m_filter = FilterModuleHnSincNSF(self.output_dim, \
self.hidden_dim, \
self.sinc_order, \
self.filter_block_num, \
self.cnn_kernel_s, \
self.cnn_num_in_block)
# done
return
def prepare_mean_std(self, in_dim, out_dim, args, data_mean_std=None):
"""
"""
if data_mean_std is not None:
in_m = torch.from_numpy(data_mean_std[0])
in_s = torch.from_numpy(data_mean_std[1])
out_m = torch.from_numpy(data_mean_std[2])
out_s = torch.from_numpy(data_mean_std[3])
if in_m.shape[0] != in_dim or in_s.shape[0] != in_dim:
print("Input dim: {:d}".format(in_dim))
print("Mean dim: {:d}".format(in_m.shape[0]))
print("Std dim: {:d}".format(in_s.shape[0]))
print("Input dimension incompatible")
sys.exit(1)
if out_m.shape[0] != out_dim or out_s.shape[0] != out_dim:
print("Output dim: {:d}".format(out_dim))
print("Mean dim: {:d}".format(out_m.shape[0]))
print("Std dim: {:d}".format(out_s.shape[0]))
print("Output dimension incompatible")
sys.exit(1)
else:
in_m = torch.zeros([in_dim])
in_s = torch.zeros([in_dim])
out_m = torch.zeros([out_dim])
out_s = torch.zeros([out_dim])
return in_m, in_s, out_m, out_s
def normalize_input(self, x):
""" normalizing the input data
"""
return (x - self.input_mean) / self.input_std
def normalize_target(self, y):
""" normalizing the target data
"""
return (y - self.output_mean) / self.output_std
def denormalize_output(self, y):
""" denormalizing the generated output from network
"""
return y * self.output_std + self.output_mean
def forward(self, x):
""" definition of forward method
Assume x (batchsize=1, length, dim)
Return output(batchsize=1, length)
"""
# assume x[:, :, -1] is F0, denormalize F0
f0 = x[:, :, -1:]
# normalize the input features data
feat = self.normalize_input(x)
# condition module
# feature-to-filter-block, f0-up-sampled, cut-off-f-for-sinc,
# hidden-feature-for-cut-off-f
cond_feat, f0_upsamped, cut_f, hid_cut_f = self.m_cond(feat, f0)
# source module
# harmonic-source, noise-source (for noise branch), uv
har_source, noi_source, uv = self.m_source(f0_upsamped)
# neural filter module (including sinc-based FIR filtering)
# output
output = self.m_filter(har_source, noi_source, cond_feat, cut_f)
if self.training:
# just in case we need to penalize the hidden feature for
# cut-off-freq.
return [output.squeeze(-1), hid_cut_f]
else:
return output.squeeze(-1)
class Loss():
""" Wrapper to define loss function
"""
def __init__(self, args):
"""
"""
# frame shift (number of points)
self.frame_hops = [80, 40, 640]
# frame length
self.frame_lens = [320, 80, 1920]
# fft length
self.fft_n = [512, 128, 2048]
# window type in stft
self.win = torch.hann_window
# floor in log-spectrum-amplitude calculation
self.amp_floor = 0.00001
# loss function
self.loss = torch_nn.MSELoss()
# weight to penalize hidden features for cut-off-frequency
# for experiments on CMU-arctic, ATR-F009, VCTK, cutoff_w = 0.0
self.cutoff_w = 0.0
def compute(self, outputs, target):
""" Loss().compute(outputs, target) should return
the Loss in torch.tensor format
Assume output and target as (batchsize=1, length)
"""
# hidden-feature for cut-off-frequency
cut_f = outputs[1]
# generated signal
output = outputs[0]
# convert from (batchsize=1, length, dim=1) to (1, length)
if target.ndim == 3:
target.squeeze_(-1)
# compute loss
loss = 0
for frame_shift, frame_len, fft_p in \
zip(self.frame_hops, self.frame_lens, self.fft_n):
x_stft = torch.stft(output, fft_p, frame_shift, frame_len, \
window=self.win(frame_len), onesided=True,
pad_mode="constant")
y_stft = torch.stft(target, fft_p, frame_shift, frame_len, \
window=self.win(frame_len), onesided=True,
pad_mode="constant")
x_sp_amp = torch.log(torch.norm(x_stft, 2, -1).pow(2) + \
self.amp_floor)
y_sp_amp = torch.log(torch.norm(y_stft, 2, -1).pow(2) + \
self.amp_floor)
loss += self.loss(x_sp_amp, y_sp_amp)
# A norm on cut_f, which forces sinc-cut-off-frequency
# to be close to the U/V-decided value
# Experiments on CMU-arctic, ATR-F009, and VCTK don't use it
# by setting self.cutoff_w = 0.0
# However, just in case
loss += self.cutoff_w * self.loss(cut_f, torch.zeros_like(cut_f))
return loss
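# Editor's sketch (not in the original file): Loss.__init__ ignores `args`,
# so the multi-resolution STFT loss can be exercised standalone (with the
# pre-1.8 torch.stft signature this file was written against); cut_f only
# contributes when cutoff_w != 0:
#
# >>> _y = torch.randn(1, 16000)
# >>> _l = Loss(None).compute([_y, torch.zeros(1, 100, 1)], _y.clone())
# >>> float(_l)
# 0.0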
if __name__ == "__main__":
print("Definition of model")
| [
"[email protected]"
] | |
28b89f558948ad6bda3670a0422fe88aae42ce52 | c3113792bfe44d160b7cc9d79203d7f6da02aec5 | /news/migrations/0023_remove_post_thumbnail.py | e249ea056b565076d9b4e11801fb1189d4dae220 | [] | no_license | eyobofficial/sport-news-app | 9eb07afeeeae2bf7143405fbc0d1964292e5a11d | 2da74b1c458b34a7a8f9a306234f067436eda9da | refs/heads/master | 2020-03-18T20:08:18.213627 | 2018-06-04T19:07:16 | 2018-06-04T19:07:16 | 135,197,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # Generated by Django 2.0.5 on 2018-06-02 18:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0022_auto_20180602_1834'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='thumbnail',
),
]
| [
"[email protected]"
] | |
952ed8e0e406b3182690925434ed4df9d05e24fc | dabaa419581f4905833c495c2801be49429b272d | /apps/bloodhound/bloodhound_multiproduct/multiproduct/config.py | 3d47abf9683e7c3ebf781f4cd4a38aedc4166449 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Stackato-Apps/stackato-apps | 222ea90b280d7f80e53a259e86662839c8db4a71 | e95077a8255f7042f9414d4570b03ab984fd9587 | refs/heads/master | 2021-01-19T06:25:29.565845 | 2015-05-29T21:01:54 | 2015-05-29T21:01:54 | 30,000,576 | 1 | 3 | null | 2015-04-21T18:05:10 | 2015-01-29T02:53:02 | PHP | UTF-8 | Python | false | false | 12,356 | py |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configuration objects for Bloodhound product environments"""
__all__ = 'Configuration', 'Section'
import os.path
from trac.config import Configuration, ConfigurationError, Option, \
OrderedExtensionsOption, Section, _use_default
from trac.resource import ResourceNotFound
from trac.util.text import to_unicode
from multiproduct.model import ProductSetting
from multiproduct.perm import MultiproductPermissionPolicy
class Configuration(Configuration):
"""Product-aware settings repository equivalent to instances of
`trac.config.Configuration` (and thus `ConfigParser` from the
Python Standard Library) but retrieving configuration values
from the database.
"""
def __init__(self, env, product, parents=None):
"""Initialize configuration object with an instance of
`trac.env.Environment` and product prefix.
Optionally it is possible to inherit settings from parent
Configuration objects. Environment's configuration will not
be added to parents list.
"""
self.env = env
self.product = to_unicode(product)
self._sections = {}
self._setup_parents(parents)
def __getitem__(self, name):
"""Return the configuration section with the specified name.
"""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set(to_unicode(s) \
for s in ProductSetting.get_sections(self.env, self.product))
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
trac.ini or one of the parents, or is available through the Option
registry.
(since Trac 0.11)
"""
if ProductSetting.exists(self.env, self.product, section, option):
return True
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
return True
return defaults and (section, option) in Option.registry
def save(self):
"""Nothing to do.
Notice: Opposite to Trac's Configuration objects Bloodhound's
product configuration objects commit changes to the database
immediately. Thus there's no much to do in this method.
"""
def parse_if_needed(self, force=False):
"""Just invalidate options cache.
Notice: Opposite to Trac's Configuration objects Bloodhound's
product configuration objects commit changes to the database
immediately. Thus there's no much to do in this method.
"""
for section in self.sections():
self[section]._cache.clear()
def touch(self):
pass
def set_defaults(self, compmgr=None):
"""Retrieve all default values and store them explicitly in the
configuration, so that they can be saved to file.
Values already set in the configuration are not overridden.
"""
for section, default_options in self.defaults(compmgr).items():
for name, value in default_options.items():
if not ProductSetting.exists(self.env, self.product,
section, name):
if any(parent[section].contains(name, defaults=False)
for parent in self.parents):
value = None
self.set(section, name, value)
# Helper methods
def _setup_parents(self, parents=None):
"""Inherit configuration from parent `Configuration` instances.
If there's a value set to 'file' option in 'inherit' section then
it will be considered as a list of paths to .ini files
that will be added to parents list as well.
"""
from trac import config
self.parents = (parents or [])
for filename in self.get('inherit', 'file').split(','):
filename = Section._normalize_path(filename.strip(), self.env)
self.parents.append(config.Configuration(filename))
class Section(Section):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', 'overridden', '_cache']
@staticmethod
def optionxform(optionstr):
return to_unicode(optionstr.lower())
def __init__(self, config, name):
self.config = config
self.name = to_unicode(name)
self.overridden = {}
self._cache = {}
@property
def env(self):
return self.config.env
@property
def product(self):
return self.config.product
def contains(self, key, defaults=True):
key = self.optionxform(key)
if ProductSetting.exists(self.env, self.product, self.name, key):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
return defaults and Option.registry.has_key((self.name, key))
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
name_str = self.name
for setting in ProductSetting.select(self.env,
where={'product':self.product, 'section':name_str}):
option = self.optionxform(setting.option)
options.add(option)
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = self.optionxform(option)
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).keys():
if section == self.name and \
self.optionxform(option) not in options:
yield option
__iter__ = iterate
def __repr__(self):
return '<%s [%s , %s]>' % (self.__class__.__name__, \
self.product, self.name)
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
key = self.optionxform(key)
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
name_str = self.name
key_str = to_unicode(key)
settings = ProductSetting.select(self.env,
where={'product':self.product, 'section':name_str,
'option':key_str})
if len(settings) > 0:
value = settings[0].value
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option.default if option else _use_default
else:
value = _use_default
if value is _use_default:
return default
if not value:
value = u''
elif isinstance(value, basestring):
value = to_unicode(value)
self._cache[key] = value
return value
def getpath(self, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to `conf` subfolder
of the target global environment. This approach is consistent
with TracIni path resolution.
Valid default input is a string. Returns a normalized path.
(enabled since Trac 0.11.5)
"""
path = self.get(key, default)
if not path:
return default
return self._normalize_path(path, self.env)
def remove(self, key):
"""Delete a key from this section.
Like for `set()`, the changes won't persist until `save()` gets called.
"""
key_str = self.optionxform(key)
option_key = {
'product' : self.product,
'section' : self.name,
'option' : key_str,
}
try:
setting = ProductSetting(self.env, keys=option_key)
except ResourceNotFound:
self.env.log.warning("No record for product option %s", option_key)
else:
self._cache.pop(key, None)
setting.delete()
self.env.log.info("Removing product option %s", option_key)
def set(self, key, value):
"""Change a configuration value.
These changes will be persistent right away.
"""
key_str = self.optionxform(key)
value_str = to_unicode(value)
self._cache.pop(key_str, None)
option_key = {
'product' : self.product,
'section' : self.name,
'option' : key_str,
}
try:
setting = ProductSetting(self.env, option_key)
except ResourceNotFound:
if value is not None:
# Insert new record in the database
setting = ProductSetting(self.env)
setting._data.update(option_key)
setting._data['value'] = value_str
self.env.log.debug('Writing option %s', setting._data)
setting.insert()
else:
if value is None:
# Delete existing record from the database
# FIXME : Why bother with setting overriden
self.overridden[key] = True
setting.delete()
else:
# Update existing record
setting._data['value'] = value
setting.update()
# Helper methods
@staticmethod
def _normalize_path(path, env):
if not os.path.isabs(path):
path = os.path.join(env.path, 'conf', path)
return os.path.normcase(os.path.realpath(path))
#--------------------
# Option override classes
#--------------------
class ProductPermissionPolicyOption(OrderedExtensionsOption):
"""Prepend an instance of `multiproduct.perm.MultiproductPermissionPolicy`
"""
def __get__(self, instance, owner):
# FIXME: Better handling of recursive imports
from multiproduct.env import ProductEnvironment
if instance is None:
return self
components = OrderedExtensionsOption.__get__(self, instance, owner)
env = getattr(instance, 'env', None)
return [MultiproductPermissionPolicy(env)] + components \
if isinstance(env, ProductEnvironment) \
else components
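# --- Editor's sketch (not part of the original file) ---
# Section.get() resolves an option in this order: the product-specific
# ProductSetting row, then each parent Configuration in turn, then the
# Option registry default, then the caller-supplied default. A standalone
# restatement of that precedence with plain values:
#
# def _resolve(db_value, parent_values, registry_default, default=u''):
#     for candidate in [db_value] + list(parent_values) + [registry_default]:
#         if candidate is not None:
#             return candidate
#     return default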
| [
"[email protected]"
] | |
a74223ae54fe94a82e612222bbbeff4f6a24e933 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /leet_code/331. Verify Preorder Serialization of a Binary Tree.py | 58f4b9c19280f69a0cbf987f798fba83c5888e2b | [] | no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import time
from util_list import *
from util_tree import *
import copy
import collections
class Solution:
def isValidSerialization(self, preorder: str) -> bool:
stk = []
for val in preorder.split(','):
stk += val,
while len(stk) >= 2 and stk[-2] == '#' and stk[-1] == '#':
stk.pop()
stk.pop()
if not stk:
return False
stk.pop()
stk += '#',
return stk == ['#']
stime = time.time()
print(True == Solution().isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#"))
print('elapse time: {} sec'.format(time.time() - stime)) | [
"[email protected]"
] | |
b2450cfef5fe4b41d70cd89869059ea22707ec59 | 437b1f6c3450a53e6e51eda62c85e3b4e098c8d2 | /operadores/Idades.py | ed6f27b28bdc85390c51fcbcd39dc168e5c6fbc8 | [] | no_license | pocceschi/aprendendo_git | 16d5194b2372970b469ff8db42290f7f152b538b | 704e4e40cd0e36b02e09bf411f42f23ab931d5fc | refs/heads/main | 2023-04-07T05:25:03.549470 | 2021-04-01T23:29:04 | 2021-04-01T23:29:04 | 353,849,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | print("Dados da primeira pessoa: ")
nome1 = str(input("Nome: "))
idade1 = int(input("Idade: "))
print("Dados da segunda pessoa: ")
nome2 = str(input("Nome: "))
idade2 = int(input("Idade: "))
media = (idade1 + idade2) / 2
print(f"A idade média de {nome1} e {nome2} é de {media:.2f}")
| [
"[email protected]"
] | |
1d83c4355f9a26123159aa9e8e48d5b0fe6a3a75 | 37194bcee20e66e84360010d98a45adcced57963 | /Algorithem_my/IM_Motherboard/6190/6190.py | fc9d929b23a4e98d9078e6f893607835ebc2a934 | [] | no_license | dmdekf/algo | edcd1bbd067102a622ff1d55b2c3f6274126414a | 544a531799295f0f9879778a2d092f23a5afc4ce | refs/heads/master | 2022-09-13T14:53:31.593307 | 2020-06-05T07:06:03 | 2020-06-05T07:06:03 | 237,857,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | import sys
sys.stdin = open('input.txt')
# p(num): True when the digits of num are non-decreasing from the most
# significant digit to the least significant one
def p(num):
while num:
r = num % 10  # current lowest digit
num = num // 10
if r < num % 10:  # compare against the digit to its left
return False
return True
T = int(input())
for tc in range(1, T+1):
N = int(input())
d = list(map(int, input().split()))
d = sorted(d)
# check products of adjacent pairs in the sorted list, from the largest
# pair downward; the first product with non-decreasing digits is printed
for i in range(N - 1, 0, -1):
x = d[i] * d[i - 1]
if p(x):
print('#{} {}'.format(tc, x))
break
| [
"[email protected]"
] | |
bc7c8910ea133c2a685757bc62f84d72cabcac8b | ffe4c155e228f1d3bcb3ff35265bb727c684ec1a | /UCL/Algorithms/Sorting & Searching/heap_sort.py | 9e3cb91645279c9eff8750c1ff7939fbd07e1d04 | [] | no_license | yuuee-www/Python-Learning | 848407aba39970e7e0058a4adb09dd35818c1d54 | 2964c9144844aed576ea527acedf1a465e9a8664 | refs/heads/master | 2023-03-12T00:55:06.034328 | 2021-02-28T13:43:14 | 2021-02-28T13:43:14 | 339,406,816 | 0 | 0 | null | 2021-02-28T11:27:40 | 2021-02-16T13:26:46 | Jupyter Notebook | UTF-8 | Python | false | false | 231 | py | from binary_heap import*
def heapSort(alist):
bh = BinHeap()
bh.buildHeap(alist)
lst = []
while not bh.isEmpty():
lst.append(bh.delMin())
return lst
alist = [1,2,3,4,5,8,7,6,6]
print(heapSort(alist))
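# Editor's sketch (not part of the original file): the same heap-sort idea
# using the standard-library heapq module, for environments where the
# local binary_heap helper is unavailable:
import heapq

def heapSortStdlib(alist):
    heap = list(alist)
    heapq.heapify(heap)  # O(n) heap construction, like bh.buildHeap(alist)
    return [heapq.heappop(heap) for _ in range(len(heap))]

print(heapSortStdlib(alist))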
| [
"[email protected]"
] | |
feeb494e05682a099d4143630355ca8874f95592 | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/common/system/zipfileset_mock.py | 24ac8cba05f8d3289e404b3fb87d9f92fe1e400c | [
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 2,166 | py | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def make_factory(ziphashes):
"""ZipFileSet factory routine that looks up zipfiles in a dict;
each zipfile should also be a dict of member names -> contents."""
class MockZipFileSet(object):
def __init__(self, url):
self._url = url
self._ziphash = ziphashes[url]
def namelist(self):
return self._ziphash.keys()
def read(self, member):
return self._ziphash[member]
def close(self):
pass
def maker(url):
# We return None because there's no tempfile to delete.
return (None, MockZipFileSet(url))
return maker
| [
"[email protected]"
] | |
a3bb51288c892782c7b8c94d0503d2598d5cec18 | 05297c66b8881b734bd3d8cb01b69eb927157f53 | /src/ava/job/errors.py | 3fa89547c8404e6f5dfdf84142bd377e2814214a | [
"Apache-2.0"
] | permissive | nickchen-mitac/fork | 4b75c2204cede08e4c71d85261da2e826330ff9f | 64dab56012da47465b4923f30f26925476c87afc | refs/heads/master | 2021-01-10T06:27:57.701844 | 2015-11-03T09:01:31 | 2015-11-03T09:01:31 | 45,453,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from ava.core import AvaError
class JobCancelledError(AvaError):
pass
class ScriptSyntaxError(AvaError):
def __init__(self, *args, **kwargs):
super(ScriptSyntaxError, self).__init__(*args, **kwargs)
| [
"[email protected]"
] | |
2dba634705e99a2bad5fca280fc4c0d56088c2d8 | 78456214246b3fca7636d9f91d15f1c25ae00f77 | /flowmeter/config/core/log.py | 9e79c6f0bb73290c74854b5ce5ec16488b7f6851 | [] | no_license | Zhuuhn/flowmeter | 4404bbead2c7e7804364623b612a89ec80a69fb9 | 9ea4f497fb2a4de64f7b2d8c453066df7ec8a483 | refs/heads/master | 2022-08-22T01:01:45.687526 | 2020-05-27T10:02:21 | 2020-05-27T10:02:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # coding=utf-8
from flowmeter.config.db.log_table import AlarmLog, OprLog, SystemLog
def add_alarm_log(log):
return AlarmLog.objects.create(alarm_type=log['alarm_type'], meter_id=log['meter_id'], opr_time=log['opr_time'])
def add_opr_log(log):
opr = OprLog.objects.create(**log)
return opr
def add_system_log(log):
log = SystemLog.objects.create(**log)
return log
def find_one_opr_log(log_info):
try:
opr = OprLog.objects.get(**log_info)
return opr
except OprLog.DoesNotExist:
return None
def find_one_system_log(log_info):
try:
log = SystemLog.objects.get(**log_info)
return log
except SystemLog.DoesNotExist:
return None
| [
"[email protected]"
] | |
8ffc68f6def3a710f9c3285915e486060990e051 | b0f2c47881f39ceb5a989b9638483f7439bfb5cf | /Problem77.py | 2d72d0b64c35bd5631ecaf1a7efff986981eaefb | [] | no_license | chrisvail/Project_Euler | 9ba264c8ec9d158b33ec677811e59d1e0e52fef2 | 41623c27b3e1344f9d8ebdfac4df297d0666cc07 | refs/heads/main | 2023-02-13T20:26:42.752780 | 2021-01-15T16:38:27 | 2021-01-15T16:38:27 | 329,964,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from StandardFunctions import genprimes
from itertools import count
primes = genprimes(1000)
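# Coin-change style DP over the primes: after the loops, ways[j] holds the
# number of ways to write j as an unordered sum of primes.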
for target in count():
ways = [0 for _ in range(target + 1)]
ways[0] = 1
for i in primes:
for j in range(i, target + 1):
ways[j] += ways[j - i]
print(ways[-1] - 1)
if ways[-1] - 1 > 5000:
print(target)
break | [
"[email protected]"
] | |
cd211fae8a73579ec113ee9bc0d541ad3e3dd4b7 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /kjJWvK9XtdbEJ2EKe_22.py | 279d9872a60ea239f541d40214d2a846a569506d | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | """
Python has a beautiful built-in function `sorted` that sorts an iterable,
usually an array of numbers, sorting them in ascending order, but using `key=`
you can sort the iterable in different ways.
Create a function that takes an array of integers as an argument and returns
the same array in ascending order. Using `sorted()` would be easy, but for
this challenge YOU have to sort the array creating your own algorithm.
### Examples
sort_array([2, -5, 1, 4, 7, 8]) ➞ [-5, 1, 2, 4, 7, 8]
sort_array([23, 15, 34, 17, -28]) ➞ [-28, 15, 17, 23, 34]
sort_array([38, 57, 45, 18, 47, 39]) ➞ [18, 38, 39, 45, 47, 57]
### Notes
* The arrays can contain either positive or negative elements.
* The arrays will only contain integers.
* The arrays won't contain duplicate numbers.
* This is a challenge to enhance your ability, using the sorted built-in won't enhance your skills.
"""
def sort_array(lst):
return [lst.pop(lst.index(min(lst))) for x in range(len(lst))]
| [
"[email protected]"
] | |
c1d7b781f24649f27e2a1e76543ce918281badf7 | 1113c8d5689685106fd77363e5561006d8ecef0d | /confbusterplusplus/aligner.py | 2ec09dc3fe54ecf09bc33b94b1e27766826602c5 | [
"MIT"
] | permissive | dsvatunek/ConfBusterPlusPlus | 238f73ab48e6d1d1491cbf4406acf828d76a56f9 | 2de751f409ffdb791d8b04fd4b3d08645beebaa6 | refs/heads/master | 2022-11-09T18:28:26.880541 | 2020-06-24T05:50:35 | 2020-06-24T05:50:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 448 | py |
from rdkit.Chem import AllChem
class MolAligner:
def __init__(self, max_iters):
self.max_iters = max_iters
def align_global(self, mol):
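        # align every conformer of mol to the first one; RDKit appends each
        # conformer's RMSD to rmsd in place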
rmsd = []
AllChem.AlignMolConformers(mol, maxIters=self.max_iters, RMSlist=rmsd)
return rmsd
def align_atoms(self, mol, atoms):
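        # same alignment, but computed over the given atom ids only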
rmsd = []
AllChem.AlignMolConformers(mol, maxIters=self.max_iters, atomIds=atoms, RMSlist=rmsd)
return rmsd
| [
"[email protected]"
] | |
b4e86b17979bb6357a4ba72329709913ee08d6b1 | 05e2277cf1af409123f43fc0a3226014dd170556 | /11286.py | bda540ef1205168f0af5aa383953c4a6c163a4e4 | [] | no_license | 2021-01-06/baekjoon | 4dec386574ce9f51f589a944b71436ce1eb2521e | ca8f02ecbed11fe98adfd1c18ce265b10f1298bc | refs/heads/main | 2023-05-06T08:19:53.943479 | 2021-05-14T03:25:55 | 2021-05-14T03:25:55 | 327,730,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | import heapq
import sys
h=[]
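# min-heap of (|x|, x) tuples: heappop returns the smallest absolute value,
# with ties resolved to the negative number first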
n=int(sys.stdin.readline())
for _ in range(n):
o=int(sys.stdin.readline())
if o==0:
if len(h)==0:
print(0)
else:
print(heapq.heappop(h)[1])
else:
heapq.heappush(h,(abs(o),o)) | [
"[email protected]"
] | |
07b1e3108279514ee49174b1f35c9e3067165c51 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/cherry.su/b.py | 41f0dc05a41ed75223d02f95d2bd1c67fc0af7a0 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,190 | py | def flip(S, n):
for i in range((n+1)//2):
tmp = S[n-1-i]
S[n-1-i] = not S[i]
S[i] = not tmp
def num_upright_bottom(S):
c = 0
for p in reversed(S):
if p:
c += 1
else:
break
return c
def num_down_top(S):
c = 0
for p in S:
if not p:
c += 1
else:
break
return c
def num_upright_top(S):
c = 0
for p in S:
if p:
c += 1
else:
break
return c
def solve(S):
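    # greedy: expose a '-' on top when needed, then flip the whole unsorted
    # prefix; the bottom run of '+' grows every pass, so at most 2 flips per run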
N = len(S)
k = num_upright_bottom(S)
n_flips = 0
k_prev = N
while k < N:
n_down_top = num_down_top(S)
if n_down_top == 0:
n_up_top = num_upright_top(S)
flip(S, n_up_top)
n_flips += 1
"""top of stack should have some facing down
"""
flip(S, N - k)
n_flips += 1
k_prev = k
k = num_upright_bottom(S)
assert k_prev < k, "extra work"
return n_flips
if '__main__' == __name__:
T = int(raw_input())
for _t in range(T):
S = [p == '+' for p in raw_input().strip()]
print "Case #%d: %d" % (_t+1, solve(S))
| [
"[[email protected]]"
] | |
4d1ec5bf8d2d82b6cf5987de69f24df3b6806f4f | 1c2ed80f77782ebee5b90480dbfc74a2d145d53f | /python-base/src/learn/function/func_param.py | d44d060af6242c40aa3893ff506a424f0c5fc7fd | [] | no_license | icesx/IPython | 970dfe7260906d85706b7117044510b5929e9098 | 452c63e17c6f05cb0540974f7c01c1e73f9836fe | refs/heads/master | 2021-07-05T02:30:29.956369 | 2021-02-02T07:09:58 | 2021-02-02T07:09:58 | 220,771,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/python
# Filename: func_param.py
def printMax(a,b):
if a > b:
print(a,'is maximum')
else:
print(b,'is maximum')
printMax(3,4) # directly give literal values
x = 5
y = 7
printMax(x,y) # give variables as arguments
| [
"[email protected]"
] | |
15f896154eed2c30dc1b256275a7d295e611e2d9 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-7820.py | 32d4c4451b3612b1447cac9aa0671ee79b4518de | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,744 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
        return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
b4cc2afa134e8d011f1cf4f1848b20a0d3abc505 | 5c9b70a21636cd64a3d9ccfd224740d4ca049e19 | /rest/app.py | fb43feb693bc0233c6893c8bd082e3a9cc4b1e94 | [
"MIT"
] | permissive | shaneramey/wq.db | 14d8c711796ab29b6d84cca6c2d52db8e3ebb2b6 | bc14412e5a7be7964d6d0fcddca931c348120b82 | refs/heads/master | 2021-01-15T19:13:14.601893 | 2015-08-11T19:53:44 | 2015-08-11T19:53:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | import warnings
warnings.warn(
"The wq.db.rest.app API has been moved to wq.db.rest. "
"The actual Router implementation has moved to wq.db.rest.routers",
DeprecationWarning
)
from . import ModelRouter as Router, router, autodiscover # NOQA
| [
"[email protected]"
] | |
c36022c620246c3970cc742c543e4e7be05fb805 | 92b9a9253c63ba56ff4bd58af268c1e304634841 | /backend/chat/chat/settings.py | fdd9a88f6827828de5a2564632bedf1457f7533d | [] | no_license | alisamadzadeh46/ChatApplication | 264a48a3747235d5f30c254f6fcd7cd145b595ac | 77bb86ccc26685a6e4dbf9a84d9d950bd1574549 | refs/heads/main | 2023-05-10T23:07:00.337906 | 2021-05-28T16:23:28 | 2021-05-28T16:23:28 | 364,041,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | """
Django settings for chat project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-nj_-*vz+@)fdsv1i8@6ior0r_1tseko#vm)jugfla5y-5r%u_+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
AUTH_USER_MODEL = "account.CustomUser"
REST_FRAMEWORK = {
'EXCEPTION_HANDLER': 'chat.custom_methods.custom_exception_handler',
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.PageNumberPagination",
"PAGE_SIZE": 20,
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# 3rd party apps
'rest_framework',
'drf_yasg',
# My app
'account.apps.AccountConfig',
'message_control.apps.MessageControlConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chat.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chat.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# STATICFILES_DIRS = [
# BASE_DIR / "static",
# ]
# STATIC_ROOT = BASE_DIR / 'static'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# ARVAN CLOUD STORAGE
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_ACCESS_KEY_ID = 'your access key id'
AWS_SECRET_ACCESS_KEY = 'your secret access key'
AWS_STORAGE_BUCKET_NAME = 'your bucket name'
AWS_SERVICE_NAME = 's3'
AWS_S3_ENDPOINT_URL = 'https://s3.ir-thr-at1.arvanstorage.com'
AWS_S3_FILE_OVERWRITE = False
AWS_LOCAL_STORAGE = f'{BASE_DIR}/aws/'
SOCKET_SERVER = config("SOCKET_SERVER")
| [
"[email protected]"
] | |
66234f05a7f6f820e92d03a4479a6148be56e6a3 | 1fe56144905244643dbbab69819720bc16031657 | /.history/books/models_20210422134752.py | 92cb170912298100d4da3d6feccd653ccf98d187 | [] | no_license | RaghdaMadiane/django | 2052fcdd532f9678fefb034bd60e44f466bd9759 | 6ca3f87f0b72880f071d90968f0a63ea5badcca8 | refs/heads/master | 2023-04-15T17:28:25.939823 | 2021-04-24T22:33:21 | 2021-04-24T22:33:21 | 361,279,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | from django.db import models
from django.contrib.auth.models import User
import uuid
class Category(models.Model):
class Meta:
verbose_name_plural="categories"
name = models.CharField(max_length=50)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
# class Metric(models.Model):
# visits = models.IntegerField(null=True,blank=True)
# ratio = models.DecimalField(null=True,blank=True,max_digits=2,decimal_places=1)
# def __str__(self):
# return f"{self.visits} visits | ratio: {self.ratio}"
class Isbn(models.Model):
    # completed from a broken stub; storing the code as a 13-character
    # CharField is an assumption
    number = models.CharField(max_length=13)
    def __str__(self):
        return self.number
class Book(models.Model):
title=models.CharField( max_length=250)
author=models.CharField(max_length=100)
user=models.ForeignKey(User,on_delete=models.CASCADE,related_name="Books")
categories=models.ManyToManyField(Category)
# metrics=models.OneToOneField(Metric,on_delete=models.CASCADE,null=True,blank=True)
# tag=models.ForeignKey(Tag,null=True,blank=True,on_delete=models.CASCADE)
def __str__(self):
return self.title
| [
"[email protected]"
] | |
7bb45521fbdcb09cb1e5d4ca17431145a3fc3ac0 | 2e74c7339c63385172629eaa84680a85a4731ee9 | /codem/central_production_code/codem/query.py | 59b5bfcfdfe180dab0391a1244e4470dc902aeaf | [] | no_license | zhusui/ihme-modeling | 04545182d0359adacd22984cb11c584c86e889c2 | dfd2fe2a23bd4a0799b49881cb9785f5c0512db3 | refs/heads/master | 2021-01-20T12:30:52.254363 | 2016-10-11T00:33:36 | 2016-10-11T00:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,859 | py | '''
This file contains all the helper functions for performing an SQL query of the
COD database. The goal of this script is to create wrapper functions around
SQL syntax so that individuals can retrieve data from the database without a
deep understanding of its complex schema. In addition these functions will be
used within the CODEm rewrite.
'''
import sqlalchemy as sql
import pandas as pd
import queryStrings as QS
import numpy as np
import sys
def getModelParams(model_version_id, update=False):
'''
integer -> dictionary
    Given an integer that indicates a valid model version id, the function
    will return a dictionary with keys indicating the model parameters: start
    age, end age, sex, start year, cause, and whether to run covariate
    selection or not. "update" indicates whether the model's status in the
    database should be set to running while querying; the default is False.
    True should be used when running CODEm.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = "SELECT * FROM cod.model_version WHERE model_version_id = {0}"
model = conn.execute(call.format(model_version_id)).fetchone()
model = dict(model.items())
model["start_year"] = 1980
call = "SELECT acause FROM shared.cause WHERE cause_id = {0}"
aC = conn.execute(call.format(model["cause_id"])).fetchone()["acause"]
model["acause"] = aC
call = "UPDATE cod.model_version SET status = 0 WHERE model_version_id = {0}"
if update: conn.execute(call.format(model_version_id))
conn.close()
return model
def codQuery(cause_id, sex, start_year, start_age, end_age, location_set_version_id):
'''
strings indicating model parameters -> Pandas Data Frame
Given a list of model parameters will query from the COD database and
return a pandas data frame. The data frame contains the base variables
used in the CODEm process.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.codQueryStr.format(c=cause_id, s=sex, sy=start_year, sa=start_age,
ea=end_age, loc_set_id=location_set_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
df['national'] = df['national'].map(lambda x: x == 1).astype(int)
conn.close()
return df
def mortQuery(sex, start_year, start_age, end_age, location_set_version_id):
'''
strings indicating model parameters -> Pandas Data Frame
Given a set of model parameters will query from the mortality database and
return a pandas data frame. The data frame contains the base variables
used in the CODEm process.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.mortQueryStr.format(sa=start_age, ea=end_age, sy=start_year, s=sex, loc_set_id=location_set_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
return df
def locQuery(locations, location_set_version_id):
'''
list -> Pandas Data Frame
Given a list of country ID numbers will query from the mortality database
and return a pandas data frame. The data frame contains columns for
location, super region and region ID.
'''
loc = "(" + ",".join([str(l) for l in set(locations)]) + ")"
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.locQueryStr.format(loc=loc, loc_set_id=location_set_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
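    # trim the hierarchy path: drop the leading global id and keep the
    # super region, region and country ids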
df["path_to_top_parent"] = \
df["path_to_top_parent"].map(lambda x: ",".join((x[2:]).split(",")[:3]))
arr = np.array(list(df.path_to_top_parent.map(lambda x: x.split(","))))
df2 = pd.DataFrame(arr.astype(int),
columns=["super_region", "region", "country_id"])
df2.loc[df.location_id == 385, "country_id"] = 385 # patch for puerto_rico and usa subnationals
return pd.concat([df["location_id"], df2], axis=1)
def excludeRegions(df, regionsExclude):
'''
(Pandas data frame, list of regions) -> Pandas data frame
Given a pandas data frame and a list of regions to exclude, which
can include id codes for super region, region, country or subnational,
will remove all of the regions of the data frame.
'''
exclude = np.array(regionsExclude.split()).astype(int)
SN_remove = df.location_id.map(lambda x: x not in exclude)
C_remove = df.country_id.map(lambda x: x not in exclude)
R_remove = df.region.map(lambda x: x not in exclude)
SR_remove = df.super_region.map(lambda x: x not in exclude)
df2 = df[(SN_remove) & (C_remove) & (R_remove) & (SR_remove)]
df2.reset_index(drop=True, inplace=True)
return df2
def data_variance(df, response):
'''
(data frame, string) -> array
Given a data frame and a response type generates an estimate of the variance
for that response based on sample size. A single array is returned where
each observation has been sampled 100 times from a normal distribution to
find the estimate.
'''
cf = df.cf.values
N = df.sample_size.values
env = df.envelope.values
pop = df["pop"].values
cf[cf <= 0.00000001] = np.NaN
cf[cf >= 1.] = np.NaN
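    # binomial standard error of the observed cause fraction: sqrt(cf * (1 - cf) / N)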
cf_sd = (cf * (1-cf) / N)**.5
cf_sd[cf_sd > .5] = .5 # cap cf_sd
f = lambda i: np.random.normal(cf[i], cf_sd[i], 100) * (env[i]/pop[i])
if response == "lt_cf":
f = lambda i: np.random.normal(cf[i], cf_sd[i], 100)
draws = np.array(map(f, range(len(cf))))
draws[draws <= 0] = np.NaN
if response == "lt_cf":
draws = np.log(draws/ (1 - draws))
elif response == "ln_rate":
draws = np.log(draws)
draws_masked = np.ma.masked_array(draws, np.isnan(draws))
sd_final = np.array(draws_masked.std(axis=1))
sd_final[sd_final == 0.] = np.NaN
return sd_final
def data_process(df):
'''
Pandas data frame -> Pandas data frame
Given a pandas data frame that was queried for CODEm returns a
Pandas data frame that has columns added for mixed effect analysis and
is re-indexed after removing countries with full sub-national data.
'''
df2 = df.copy()
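    # countries that report subnational locations: their national-level rows
    # are dropped below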
remove = df2[df2.country_id != df2.location_id].country_id.unique()
df2 = df2[df2.location_id.map(lambda x: x not in remove)]
df2 = df2.replace([np.inf, -np.inf], np.nan)
df2["region_nest"] = df2.super_region.map(str) + ":" + df2.region.map(str)
df2["age_nest"] = df2.region_nest + ":" + df2.age.map(str)
df2["country_nest"] = df2.region_nest + ":" + df2.country_id.map(str)
df2["sub_nat_nest"] = df2.country_nest + ":" + df2.location_id.map(str)
df2["ln_rate_sd"] = data_variance(df2, "ln_rate")
df2["lt_cf_sd"] = data_variance(df2, "lt_cf")
df2.reset_index(inplace=True, drop=True)
return df2
def queryCodData(cause_id, sex, start_year, start_age, end_age, regionsExclude, location_set_version_id):
'''
list -> Pandas data frame
Given a set of model parameters, will return a pandas data frame
which contains the identification variables necessary to complete
the algorithms in CODEm.
'''
cod = codQuery(cause_id, sex, start_year, start_age, end_age, location_set_version_id)
mort = mortQuery(sex, start_year, start_age, end_age, location_set_version_id)
loc = locQuery(mort.location_id.values, location_set_version_id)
loc = excludeRegions(loc, regionsExclude)
mortDF = mort.merge(loc, how='right', on=['location_id'])
codDF = cod.merge(mortDF, how='right',
on=['location_id', 'age', 'sex', 'year'])
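    # CODEm's two response variables: log of the death rate and logit of the
    # cause fraction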
codDF['ln_rate'] = np.log(codDF['cf'] * codDF['envelope'] / codDF['pop'])
codDF['lt_cf'] = np.log(codDF['cf'].map(lambda x: x/(1.0-x)))
codDF.loc[codDF["cf"] == 1, "ln_rate"] = np.NAN
df = data_process(codDF)
return df
def covMetaData(model_version_id):
'''
integer -> Pandas data frame
    Given an integer that represents a valid model version ID, will
    return a pandas data frame which contains the covariate model IDs
    for that model as well as the metadata needed for covariate selection.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.metaQueryStr.format(model_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
return df
def covQuery(covID, location_set_version_id):
'''
integer -> Pandas data frame
Given an integer which represents a valid covariate ID will return a data
frame which contains a unique value for each country, year, age group.
This data may be aggregated in some form as well.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.cvQueryStr.format(mvid=covID, loc_set_id=location_set_version_id)
result = conn.execute(call)
try:
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
except ValueError:
sys.stderr.write("There appears to be an error with covariate id {0}".format(covID))
sys.exit()
df = df.rename(columns={"mean_value":df["name"][0]})
conn.close()
return df
def transform(data, trans):
'''
(array, string) -> array
Given an array of numeric data and a string indicating the type of
transformation desired will return an array with the desired transformation
applied. If the string supplied is invalid the same array will be returned.
'''
if trans == "ln": return np.log(data)
elif trans == "lt": return np.log(data / (1. - data))
elif trans == "sq": return data**2
elif trans == "sqrt": return data**.05
elif trans == "scale1000": return data * 1000.
else: return data
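# Illustrative check of the transforms above: transform(np.array([1., 10.]), "ln")
# returns array([0., 2.3026...]), while an unrecognized trans string returns
# the data unchanged.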
def transDF(df, var, trans):
'''
(Pandas data frame, string, string) -> Pandas data frame
    Given a pandas data frame, a string that names a valid numeric variable
    in that data frame and a string representing a type of transformation,
    will return a Pandas data frame with the variable transformed as specified.
Additionally the name of the variable will be changed to note the
transformation.
'''
df2 = df
df2[var] = transform(df2[var].values, trans)
if trans in ["ln", "lt", "sq", "sqrt", "scale1000"]:
df2 = df2.rename(columns={var: (trans + "_" + var)})
return df2
def lagIt(df, var, lag):
'''
    (Pandas data frame, string, integer) -> Pandas data frame
    Given a pandas data frame, a string that names a valid numeric variable
    in that data frame and an integer representing the number of years to
lag, will return a Pandas data frame with the specified lag applied.
Additionally, the name of the variable will be changed to note the
transformation.
'''
if lag is None: return df
if np.isnan(lag): return df
df2 = df
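    # adding lag to year keys each covariate value to a year lag years later,
    # i.e. observations in year t pick up the covariate from year t - lag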
df2["year"] = df2["year"] + lag
df2 = df2.rename(columns={var: ("lag" + str(lag) + "_" + var)})
return df2
def createAgeDF():
'''
None -> Pandas data frame
    Creates a Pandas data frame with two columns: all the age groups currently
    used in analysis at IHME, as recorded in the database, and a column with
    the code used for the aggregate (all-ages) group.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call ="SELECT age_group_id AS all_ages FROM age_group WHERE age_group_plot = 1"
result = conn.execute(call)
ageDF = pd.DataFrame(result.fetchall(), columns=["all_ages"])
ageDF['age'] = 22
ageDF = ageDF[(ageDF.all_ages >= 2) & (ageDF.all_ages <= 21)]
conn.close()
return ageDF
def ageSexData(df, sex):
'''
(Pandas data frame, integer) -> Pandas Data frame
Given a Pandas data frame and an integer which represents the desired sex
of the analysis, will return a data frame with a value for each age group
and only for the desired sex.
'''
df2 = df.copy(); ageDF = createAgeDF()
if len(df2["age"].unique()) == 1:
df2 = df2.merge(ageDF, on="age")
df2 = df2.drop("age", 1)
df2 = df2.rename(columns={"all_ages":"age"})
if len(df2["sex"].unique()) == 1: df2["sex"] = sex
df2 = df2[df2["sex"] == sex]
return df2
def getCVDF(covID, trans, lag, offset, sex, location_set_version_id):
'''
    (integer, string, integer, float, integer, integer) -> Pandas data frame
    Given a covariate model id number, a string representing a transformation
    type, an integer number of years to lag the variable, a numeric offset
    added before transforming, an integer representing which sex to restrict
    the data to and a location set version id, will return a data frame
    which contains the values for that covariate transformed as specified.
'''
df = covQuery(covID, location_set_version_id)
df[df.columns.values[0]] = df[df.columns.values[0]] + offset
df = transDF(df, df.columns.values[0], trans)
df = lagIt(df, df.columns.values[0], lag)
df = ageSexData(df, sex)
df = df.drop("name", 1)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.astype("float32")
df = df[df.year >= 1980]
return df
def getCovData(model_version_id, location_set_version_id):
'''
integer -> (Pandas data frame, Pandas data frame)
Given an integer which represents a valid model version ID, returns
two Pandas data frames. The first is a data frame which contains the
    covariate data for that model. The second is the metadata of those
    same covariates, which will be used for the covariate selection process.
'''
covs = covMetaData(model_version_id)
sex = getModelParams(model_version_id)["sex_id"]
df = getCVDF(covs.covariate_model_id[0], covs.transform_type_short[0],
covs.lag[0], covs.offset[0], sex, location_set_version_id)
for i in range(1, len(covs)):
dfTemp = getCVDF(covs.covariate_model_id[i],
covs.transform_type_short[i], covs.lag[i], covs.offset[i], sex, location_set_version_id)
df = df.merge(dfTemp, how="outer", on=["location_id", "age", "sex", "year"])
n = df.drop(["location_id", "age", "sex", "year"], axis=1).columns.values
covs["name"] = n
return df, covs
def getCodemInputData(model_version_id):
'''
    integer -> (Pandas data frame, Pandas data frame, Pandas data frame)
    Given an integer which represents a valid model version ID, returns
    three pandas data frames: the input data needed for running CODEm
    models, the matching covariate values, and a data frame of meta data
    needed for covariate selection.
'''
model = getModelParams(model_version_id)
df = queryCodData(cause_id=model["cause_id"], sex=model["sex_id"],
start_year=model["start_year"],
start_age=model["age_start"], end_age=model["age_end"],
regionsExclude=model["locations_exclude"],
location_set_version_id=model["location_set_version_id"])
cvDF, priors = getCovData(model_version_id, model["location_set_version_id"])
df = df[(df.year >= model["start_year"]) & (model["age_start"] <= df.age) &
(df.age <= model["age_end"])]
df2 = df.merge(cvDF, how="left", on=["location_id", "age", "sex", "year"])
covs = df2[priors.name.values]
df = df.drop_duplicates()
covs = covs.loc[df.index]
df.reset_index(drop=True, inplace=True)
covs.reset_index(drop=True, inplace=True)
columns = df.columns.values[df.dtypes.values == np.dtype('float64')]
df[columns] = df[columns].astype('float32')
return df, covs, priors
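# Rough data-flow sketch of getCodemInputData (names as defined above, shapes
# illustrative):
#
#   model        = getModelParams(model_version_id)   # run parameters
#   df           = queryCodData(...)                   # CoD observations
#   cvDF, priors = getCovData(...)                     # covariate values + meta data
#   df2          = df.merge(cvDF, on=["location_id", "age", "sex", "year"])
#   covs         = df2[priors.name.values]             # covariate columns only
#
# df and covs are returned index-aligned, so covs.iloc[i] holds the covariate
# vector for observation df.iloc[i].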
def get_site_data(path, var, trans, lag):
'''
(string, string, string, integer) -> Pandas Data Frame
Given a valid path within the J drive returns a transformed Pandas data
frame of the specified transformation type and lag time.
'''
df = pd.read_csv("/home/j/" + path)
df = transDF(df, var, trans)
df = lagIt(df, var, lag)
return df
def get_raw_reference(priorsDF, loc):
'''
    (Pandas data frame, string) -> list of Pandas data frames
    Given a priors data frame, attempts to retrieve all the site-specific or
    reference data based on the chosen value of [loc].
'''
l = []
for i in range(len(priorsDF)):
if priorsDF[loc][i] != '':
try:
l.append(get_site_data(priorsDF[loc][i],
priorsDF.var[i],
priorsDF.transform_type_short[i],
priorsDF.lag[i]))
            except Exception:
                pass  # skip site files that fail to load or transform
return l
def get_raw_reference_data(priorsDF, df, loc):
'''
(Pandas data frame, Pandas data frame, string)
Given a priors data frame, a data frame for each country, age, year of
interest and a string [loc] indicating a variable in the pandas data frame
retrieves all the data from the specified column to be attached to the
country, age, year data frame.
'''
l = get_raw_reference(priorsDF, loc)
sub = priorsDF[priorsDF[loc] != ""]
for d in l:
df = df.merge(d, how="left")
try:
return df[sub.name.values]
    except Exception:
        return pd.DataFrame()  # none of the requested reference columns exist
def write_submodel(model_version_id, submodel_type_id, submodel_dep_id, weight, rank):
'''
(int, int, int, float, int) -> int
Write a submodel to the table and get the id back
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.submodel_query_str.format(model_version_id, submodel_type_id,
submodel_dep_id, weight, rank)
conn.execute(call)
call = QS.submodel_get_id.format(model_version_id, rank)
result = conn.execute(call)
submodel_id = result.fetchone()["submodel_version_id"]
conn.close()
return submodel_id
def write_submodel_covariate(submodel_id, list_of_covariate_ids):
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
for cov in list_of_covariate_ids:
call = QS.submodel_cov_write_str.format(submodel_id, cov)
conn.execute(call)
conn.close()
def write_model_pv(tag, value, model_version_id):
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.pv_write.format(tag, value, model_version_id)
conn.execute(call)
conn.close()
def write_model_output(df_true, model_version_id, sex_id):
df = df_true.copy()
df["sex"] = sex_id
columns = ["draw_%d" % i for i in range(1000)]
df[columns] = df[columns].values / df["envelope"].values[..., np.newaxis]
df["mean_cf"] = df[columns].mean(axis=1)
df["lower_cf"] = df[columns].quantile(.025, axis=1)
df["upper_cf"] = df[columns].quantile(.975, axis=1)
df = df[["mean_cf", "lower_cf", "upper_cf", "year", "age", "sex", "location_id"]]
df["model_version_id"] = model_version_id
df.rename(columns={'year': 'year_id', 'sex': 'sex_id', 'age': 'age_group_id'}, inplace=True)
DB = "strConnection"
engine = sql.create_engine(DB); con = engine.connect().connection
df.to_sql("model", con, flavor="mysql", if_exists="append", index=False, chunksize=15000)
con.close()
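# Note on the normalization in write_model_output: the stored draws are death
# counts, while the model table expects cause fractions, so every draw column
# is divided row-wise by the all-cause mortality envelope. For one cell, e.g.:
#
#   deaths_draw = 42.0, envelope = 1000.0  ->  cf_draw = 0.042
#
# mean_cf / lower_cf / upper_cf are then the mean and the 2.5th / 97.5th
# percentiles of those per-row cause-fraction draws.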
def get_submodel_summary(model_version_id):
'''
(int) -> data_frame
Retrieves the summary submodel rank table for a particular model.
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.submodel_summary_query.format(model_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
return df
def get_codem_run_time(model_version_id):
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = QS.codem_run_time.format(model_version_id=model_version_id)
result = conn.execute(call)
minutes = np.array(result.fetchall())
conn.close()
return float(minutes[0, 0])
def submodel_covs(submodel_version_id):
"""
:param submodel_version_id: integer representing a codem submodel version id
:return: Pandas data frame with information on submodel covariates
Given a submodel version id returns the covariates that were used in the
construction of that model.
"""
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = '''
SELECT covariate_name_short FROM shared.covariate
WHERE covariate_id IN (SELECT covariate_id from covariate.data_version WHERE data_version_id IN
(SELECT data_version_id FROM covariate.model_version
WHERE model_version_id IN
(SELECT covariate_model_version_id FROM cod.submodel_version_covariate
WHERE submodel_version_id={submodel_version_id})))
'''.format(submodel_version_id=submodel_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
df["submodel_version_id"] = submodel_version_id
return df
def get_submodels(model_version_id):
"""
:param model_version_id: integer representing a codem model version id
:return: Pandas Data frame with submodels and corresponding information
"""
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
call = '''
SELECT submodel_version_id, rank, weight, submodel_type_id, submodel_dep_id
FROM cod.submodel_version
WHERE model_version_id = {model_version_id}
'''.format(model_version_id=model_version_id)
result = conn.execute(call)
df = pd.DataFrame(result.fetchall()); df.columns = result.keys()
conn.close()
return df
def all_submodel_covs(model_version_id):
"""
:param model_version_id: integer representing a codem model version id
:return: Pandas Data frame with submodels, covariates, and corresponding information
"""
submodels = get_submodels(model_version_id)
covs = pd.concat([submodel_covs(x) for x in submodels.submodel_version_id],
axis=0).reset_index(drop=True)
df = covs.merge(submodels, how="left")
df = df.sort(["rank", "covariate_name_short"])
call = '''
SELECT submodel_type_id, submodel_type_name FROM cod.submodel_type;
'''
DB = "strConnection"
engine = sql.create_engine(DB); conn = engine.connect()
result = conn.execute(call)
df2 = pd.DataFrame(result.fetchall()); df2.columns = result.keys()
conn.close()
df = df.merge(df2, how="left")
call = '''
SELECT submodel_dep_id, submodel_dep_name FROM cod.submodel_dep;
'''
engine = sql.create_engine(DB); conn = engine.connect()
result = conn.execute(call)
df2 = pd.DataFrame(result.fetchall()); df2.columns = result.keys()
conn.close()
df = df.merge(df2, how="left")
df.drop(["submodel_type_id", "submodel_dep_id"],inplace=True, axis=1)
df = df.sort(["rank", "covariate_name_short"])
df["approximate_draws"] = np.round(df.weight.values * 1000.)
return df
def truncate_draws(mat, percent=95):
"""
:param mat: array where rows correspond to observations and columns draws
:param percent: a value between 0 and 100 corresponding to the amount of
data to keep
    :return: array where row values outside the row-wise percentile bounds
        have been replaced with the mean of that row's remaining values.
"""
assert 0 < percent < 100, "percent is out of range"
low_bound = (100. - float(percent)) / 2.
hi_bound = 100. - low_bound
matrix = np.copy(mat)
row_lower_bound = np.percentile(matrix, low_bound, axis=1)
row_upper_bound = np.percentile(matrix, hi_bound, axis=1)
replacements = (matrix.T < row_lower_bound).T | (matrix.T > row_upper_bound).T
replacements[matrix.std(axis=1) < 10**-5, :] = False
masked_matrix = np.ma.masked_array(matrix, replacements)
row_mean_masked = np.mean(masked_matrix, axis=1)
row_replacements = np.where(replacements)[0]
matrix[replacements] = row_mean_masked[row_replacements]
return matrix
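# Worked example for truncate_draws (percentiles use numpy's default linear
# interpolation):
#
#   mat = np.array([[0., 1., 2., 3., 100.]])
#   truncate_draws(mat, percent=60)
#
#   row percentiles: 20th = 0.8, 80th = 22.4
#   draws outside [0.8, 22.4] -> 0. and 100. are flagged for replacement
#   masked row mean over the kept draws (1, 2, 3) = 2.0
#   result: [[2., 1., 2., 3., 2.]]
#
# Rows whose draws are numerically constant are left untouched by the
# std < 1e-5 guard above.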
def acause_from_id(model_version_id):
"""
Given a valid model version id returns the acause associated with it.
:param model_version_id: int
valid model version id
:return: str
string representing an acause
"""
DB = "strConnection"
call = '''
SELECT
acause
FROM
shared.cause
WHERE
cause_id = (SELECT cause_id
FROM cod.model_version
WHERE model_version_id = {})
'''.format(model_version_id)
engine = sql.create_engine(DB); conn = engine.connect()
acause = conn.execute(call).fetchone()["acause"]
conn.close()
return acause
| [
"[email protected]"
] | |
2cb94f2886fd17bfec48f28e55cc1d7bdcaa1b10 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/apimanagement/v20200601preview/get_api_operation_policy.py | 8bd64155864e351bfc26b33f84d54382ac902c40 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,468 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApiOperationPolicyResult',
'AwaitableGetApiOperationPolicyResult',
'get_api_operation_policy',
]
@pulumi.output_type
class GetApiOperationPolicyResult:
"""
Policy Contract details.
"""
def __init__(__self__, format=None, id=None, name=None, type=None, value=None):
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value")
class AwaitableGetApiOperationPolicyResult(GetApiOperationPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiOperationPolicyResult(
format=self.format,
id=self.id,
name=self.name,
type=self.type,
value=self.value)
def get_api_operation_policy(api_id: Optional[str] = None,
format: Optional[str] = None,
operation_id: Optional[str] = None,
policy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiOperationPolicyResult:
"""
Policy Contract details.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['format'] = format
__args__['operationId'] = operation_id
__args__['policyId'] = policy_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20200601preview:getApiOperationPolicy', __args__, opts=opts, typ=GetApiOperationPolicyResult).value
return AwaitableGetApiOperationPolicyResult(
format=__ret__.format,
id=__ret__.id,
name=__ret__.name,
type=__ret__.type,
value=__ret__.value)
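# Minimal usage sketch (all resource identifiers below are placeholders, not
# values from any real deployment):
#
# import pulumi
# import pulumi_azure_native.apimanagement.v20200601preview as apim
#
# policy = apim.get_api_operation_policy(
#     api_id="echo-api",                  # hypothetical API identifier
#     operation_id="create-resource",     # hypothetical operation identifier
#     policy_id="policy",
#     resource_group_name="rg",
#     service_name="apim-service")
# pulumi.export("policyXml", policy.value)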
| [
"[email protected]"
] | |
e807a9fb612e9c8b3680313ec76bcc3edd87e3e7 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/zsdonghao_tensorlayer/tensorlayer-master/example/tutorial_generate_text.py | bf1921c1e6582cb14548e163888dce3762549139 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 21,447 | py | #! /usr/bin/python
# -*- coding: utf8 -*-
# Copyright 2016 TensorLayer. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Example of Synced sequence input and output.
Generate text using LSTM.
"""
import tensorflow as tf
import tensorlayer as tl
import numpy as np
import time
import re
_UNK = "_UNK"
def basic_clean_str(string):
"""Tokenization/string cleaning for a datasets.
"""
string = re.sub(r"\n", " ", string) # '\n' --> ' '
string = re.sub(r"\'s", " \'s", string) # it's --> it 's
string = re.sub(r"\’s", " \'s", string)
string = re.sub(r"\'ve", " have", string) # they've --> they have
string = re.sub(r"\’ve", " have", string)
string = re.sub(r"\'t", " not", string) # can't --> can not
string = re.sub(r"\’t", " not", string)
string = re.sub(r"\'re", " are", string) # they're --> they are
string = re.sub(r"\’re", " are", string)
string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I
string = re.sub(r"\’d", "", string)
string = re.sub(r"\'ll", " will", string) # I'll --> I will
string = re.sub(r"\’ll", " will", string)
string = re.sub(r"\“", " ", string) # “a” --> “ a ”
string = re.sub(r"\”", " ", string)
string = re.sub(r"\"", " ", string) # "a" --> " a "
string = re.sub(r"\'", " ", string) # they' --> they '
string = re.sub(r"\’", " ", string) # they’ --> they ’
string = re.sub(r"\.", " . ", string) # they. --> they .
string = re.sub(r"\,", " , ", string) # they, --> they ,
string = re.sub(r"\!", " ! ", string)
string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost
string = re.sub(r"\(", " ", string) # (they) --> ( they)
string = re.sub(r"\)", " ", string) # ( they) --> ( they )
string = re.sub(r"\]", " ", string) # they] --> they ]
string = re.sub(r"\[", " ", string) # they[ --> they [
string = re.sub(r"\?", " ", string) # they? --> they ?
string = re.sub(r"\>", " ", string) # they> --> they >
string = re.sub(r"\<", " ", string) # they< --> they <
string = re.sub(r"\=", " ", string) # easier= --> easier =
string = re.sub(r"\;", " ", string) # easier; --> easier ;
string = re.sub(r"\;", " ", string)
string = re.sub(r"\:", " ", string) # easier: --> easier :
string = re.sub(r"\"", " ", string) # easier" --> easier "
string = re.sub(r"\$", " ", string) # $380 --> $ 380
string = re.sub(r"\_", " ", string) # _100 --> _ 100
string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome
return string.strip().lower() # lowercase
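# Example of the normalization above (illustrative, not exhaustive):
#
#   basic_clean_str("They're saying it's low-cost!")
#   -> "they are saying it 's low cost !"
#
# i.e. contractions are expanded or split off, punctuation is space-separated,
# hyphens and brackets are dropped, runs of whitespace collapse, and the
# result is lower-cased.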
def customized_clean_str(string):
"""Tokenization/string cleaning for a datasets.
"""
string = re.sub(r"\n", " ", string) # '\n' --> ' '
string = re.sub(r"\'s", " \'s", string) # it's --> it 's
string = re.sub(r"\’s", " \'s", string)
string = re.sub(r"\'ve", " have", string) # they've --> they have
string = re.sub(r"\’ve", " have", string)
string = re.sub(r"\'t", " not", string) # can't --> can not
string = re.sub(r"\’t", " not", string)
string = re.sub(r"\'re", " are", string) # they're --> they are
string = re.sub(r"\’re", " are", string)
string = re.sub(r"\'d", "", string) # I'd (I had, I would) --> I
string = re.sub(r"\’d", "", string)
string = re.sub(r"\'ll", " will", string) # I'll --> I will
string = re.sub(r"\’ll", " will", string)
string = re.sub(r"\“", " “ ", string) # “a” --> “ a ”
string = re.sub(r"\”", " ” ", string)
string = re.sub(r"\"", " “ ", string) # "a" --> " a "
string = re.sub(r"\'", " ' ", string) # they' --> they '
string = re.sub(r"\’", " ' ", string) # they’ --> they '
string = re.sub(r"\.", " . ", string) # they. --> they .
string = re.sub(r"\,", " , ", string) # they, --> they ,
string = re.sub(r"\-", " ", string) # "low-cost"--> lost cost
string = re.sub(r"\(", " ( ", string) # (they) --> ( they)
string = re.sub(r"\)", " ) ", string) # ( they) --> ( they )
string = re.sub(r"\!", " ! ", string) # they! --> they !
string = re.sub(r"\]", " ] ", string) # they] --> they ]
string = re.sub(r"\[", " [ ", string) # they[ --> they [
string = re.sub(r"\?", " ? ", string) # they? --> they ?
string = re.sub(r"\>", " > ", string) # they> --> they >
string = re.sub(r"\<", " < ", string) # they< --> they <
string = re.sub(r"\=", " = ", string) # easier= --> easier =
string = re.sub(r"\;", " ; ", string) # easier; --> easier ;
string = re.sub(r"\;", " ; ", string)
string = re.sub(r"\:", " : ", string) # easier: --> easier :
string = re.sub(r"\"", " \" ", string) # easier" --> easier "
string = re.sub(r"\$", " $ ", string) # $380 --> $ 380
string = re.sub(r"\_", " _ ", string) # _100 --> _ 100
string = re.sub(r"\s{2,}", " ", string) # Akara is handsome --> Akara is handsome
return string.strip().lower() # lowercase
def customized_read_words(input_fpath):#, dictionary):
with open(input_fpath, "r") as f:
words = f.read()
# Clean the data
words = customized_clean_str(words)
# Split each word
return words.split()
def main_restore_embedding_layer():
"""How to use Embedding layer, and how to convert IDs to vector,
IDs to words, etc.
"""
## Step 1: Build the embedding matrix and load the existing embedding matrix.
vocabulary_size = 50000
embedding_size = 128
model_file_name = "model_word2vec_50k_128"
batch_size = None
print("Load existing embedding matrix and dictionaries")
all_var = tl.files.load_npy_to_any(name=model_file_name+'.npy')
data = all_var['data']; count = all_var['count']
dictionary = all_var['dictionary']
reverse_dictionary = all_var['reverse_dictionary']
tl.nlp.save_vocab(count, name='vocab_'+model_file_name+'.txt')
del all_var, data, count
load_params = tl.files.load_npz(name=model_file_name+'.npz')
x = tf.placeholder(tf.int32, shape=[batch_size])
y_ = tf.placeholder(tf.int32, shape=[batch_size, 1])
emb_net = tl.layers.EmbeddingInputlayer(
inputs = x,
vocabulary_size = vocabulary_size,
embedding_size = embedding_size,
name ='embedding_layer')
# sess.run(tf.initialize_all_variables())
tl.layers.initialize_global_variables(sess)
tl.files.assign_params(sess, [load_params[0]], emb_net)
emb_net.print_params()
emb_net.print_layers()
## Step 2: Input word(s), output the word vector(s).
word = b'hello'
word_id = dictionary[word]
print('word_id:', word_id)
words = [b'i', b'am', b'tensor', b'layer']
word_ids = tl.nlp.words_to_word_ids(words, dictionary, _UNK)
context = tl.nlp.word_ids_to_words(word_ids, reverse_dictionary)
print('word_ids:', word_ids)
print('context:', context)
vector = sess.run(emb_net.outputs, feed_dict={x : [word_id]})
print('vector:', vector.shape)
vectors = sess.run(emb_net.outputs, feed_dict={x : word_ids})
print('vectors:', vectors.shape)
def main_lstm_generate_text():
"""Generate text by Synced sequence input and output.
"""
    # rnn model and update (description: see tutorial_ptb_lstm.py)
init_scale = 0.1
learning_rate = 1.0
max_grad_norm = 5
num_steps = 4
hidden_size = 200
max_epoch = 4
max_max_epoch = 100
keep_prob = 0.8
lr_decay = 0.9
batch_size = 20
## word embedding
vocab_size = 10000
embedding_size = 200
## text generation
# diversity_list = [None, 1.0]
top_k_list = [5, 10, 50, 100]
print_length = 100
resume = False # load existing model, data and dictionaries
model_file_name = "model_generate_text"
if resume:
print("Load existing data and dictionaries" + "!"*10)
all_var = tl.files.load_npy_to_any(name=model_file_name+'.npy')
data = all_var['data']; count = all_var['count']
dictionary = all_var['dictionary']
reverse_dictionary = all_var['reverse_dictionary']
else:
print("Load data and creat dictionaries ....")
## You can read any txt file by using this:
# words = customized_read_words(input_fpath="tensorlayer/data/trump_twitter.txt")
## Alternatively, you can use the Nietzsche dataset as follow:
words = tl.files.load_nietzsche_dataset()
words = basic_clean_str(words)
words = words.split()
## Build the data and dictionaries from word to id and id to word.
data, count, dictionary, reverse_dictionary = \
tl.nlp.build_words_dataset(words, vocab_size, True, _UNK)
# data = tl.nlp.words_to_word_ids(words, dictionary, unk_key = _UNK)
data = np.asarray(data)
del words # save memory
print('Data size %d' % len(data))
print('Most 5 common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
train_data = data
print('len(train_data) {}'.format(len(train_data)))
# Set the seed to generate sentence.
seed = "it should be good"
seed = basic_clean_str(seed).split()
print('seed : %s' % seed)
sess = tf.InteractiveSession()
# One int represents one word, the meaning of batch_size here is not the
# same with MNIST example, it is the number of concurrent processes for
# computational reasons.
# Training and Validing
input_data = tf.placeholder(tf.int32, [batch_size, num_steps])
targets = tf.placeholder(tf.int32, [batch_size, num_steps])
    # Testing (Evaluation), for generating text
input_data_test = tf.placeholder(tf.int32, [1, 1])
targets_test = tf.placeholder(tf.int32, [1, 1])
def inference(x, is_training, num_steps, reuse=None):
"""If reuse is True, the inferences use the existing parameters,
then different inferences share the same parameters.
"""
print("\nnum_steps : %d, is_training : %s, reuse : %s" %
(num_steps, is_training, reuse))
        initializer = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope("model", reuse=reuse):
tl.layers.set_name_reuse(reuse)
network = tl.layers.EmbeddingInputlayer(
inputs = x,
vocabulary_size = vocab_size,
embedding_size = embedding_size,
E_init = tf.random_uniform_initializer(-init_scale, init_scale),
name ='embedding_layer')
if is_training:
network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop1')
network = tl.layers.RNNLayer(network,
cell_fn=tf.contrib.rnn.BasicLSTMCell, #tf.nn.rnn_cell.BasicLSTMCell,
cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True},
n_hidden=hidden_size,
initializer=tf.random_uniform_initializer(-init_scale, init_scale),
n_steps=num_steps,
return_last=False,
name='basic_lstm_layer1')
lstm1 = network
if is_training:
network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop2')
network = tl.layers.RNNLayer(network,
cell_fn=tf.contrib.rnn.BasicLSTMCell,#tf.nn.rnn_cell.BasicLSTMCell,
cell_init_args={'forget_bias': 0.0, 'state_is_tuple': True},
n_hidden=hidden_size,
initializer=tf.random_uniform_initializer(-init_scale, init_scale),
n_steps=num_steps,
return_last=False,
return_seq_2d=True,
name='basic_lstm_layer2')
lstm2 = network
## Alternatively, if return_seq_2d=False, in the above RNN layer,
## you can reshape the outputs as follow:
# network = tl.layers.ReshapeLayer(network,
# shape=[-1, int(network.outputs._shape[-1])], name='reshape')
if is_training:
network = tl.layers.DropoutLayer(network, keep=keep_prob, name='drop3')
network = tl.layers.DenseLayer(network,
n_units=vocab_size,
W_init=tf.random_uniform_initializer(-init_scale, init_scale),
b_init=tf.random_uniform_initializer(-init_scale, init_scale),
act = tf.identity, name='output_layer')
return network, lstm1, lstm2
# Inference for Training
network, lstm1, lstm2 = inference(input_data,
is_training=True, num_steps=num_steps, reuse=None)
    # Inference for Testing (Evaluation), to generate text
network_test, lstm1_test, lstm2_test = inference(input_data_test,
is_training=False, num_steps=1, reuse=True)
y_linear = network_test.outputs
y_soft = tf.nn.softmax(y_linear)
# y_id = tf.argmax(tf.nn.softmax(y), 1)
# sess.run(tf.initialize_all_variables())
def loss_fn(outputs, targets, batch_size, num_steps):
# Returns the cost function of Cross-entropy of two sequences, implement
# softmax internally.
# outputs : 2D tensor [n_examples, n_outputs]
# targets : 2D tensor [n_examples, n_outputs]
# n_examples = batch_size * num_steps
# so
# cost is the averaged cost of each mini-batch (concurrent process).
loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example( # loss = tf.nn.seq2seq.sequence_loss_by_example( # TF0.12
[outputs],
[tf.reshape(targets, [-1])],
[tf.ones([batch_size * num_steps])])
cost = tf.reduce_sum(loss) / batch_size
return cost
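    # Shape sketch for loss_fn with the settings above (batch_size=20,
    # num_steps=4, vocab_size=10000):
    #
    #   network.outputs : [batch_size * num_steps, vocab_size] = [80, 10000]
    #   targets         : [batch_size, num_steps] -> reshaped to [80]
    #   weights         : tf.ones([80])
    #
    # sequence_loss_by_example returns one cross-entropy value per
    # (example, step) pair; summing and dividing by batch_size gives the cost
    # that feeds the perplexity computation np.exp(costs / iters) below.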
## Cost for Training
cost = loss_fn(network.outputs, targets, batch_size, num_steps)
## Truncated Backpropagation for training
with tf.variable_scope('learning_rate'):
lr = tf.Variable(0.0, trainable=False)
## You can get all trainable parameters as follow.
# tvars = tf.trainable_variables()
## Alternatively, you can specific the parameters for training as follw.
# tvars = network.all_params $ all parameters
# tvars = network.all_params[1:] $ parameters except embedding matrix
## Train the whole network.
tvars = network.all_params
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
max_grad_norm)
optimizer = tf.train.GradientDescentOptimizer(lr)
train_op = optimizer.apply_gradients(zip(grads, tvars))
tl.layers.initialize_global_variables(sess)
network.print_params()
network.print_layers()
tl.layers.print_all_variables()
if resume:
print("Load existing model" + "!"*10)
load_params = tl.files.load_npz(name=model_file_name+'.npz')
tl.files.assign_params(sess, load_params, network)
print("\nStart learning a model to generate text")
for i in range(max_max_epoch):
        # decrease the learning_rate after ``max_epoch``, by multiplying lr_decay.
new_lr_decay = lr_decay ** max(i - max_epoch, 0.0)
sess.run(tf.assign(lr, learning_rate * new_lr_decay))
print("Epoch: %d/%d Learning rate: %.8f" % (i + 1, max_max_epoch, sess.run(lr)))
epoch_size = ((len(train_data) // batch_size) - 1) // num_steps
start_time = time.time()
costs = 0.0; iters = 0
## reset all states at the begining of every epoch
state1 = tl.layers.initialize_rnn_state(lstm1.initial_state)
state2 = tl.layers.initialize_rnn_state(lstm2.initial_state)
for step, (x, y) in enumerate(tl.iterate.ptb_iterator(train_data,
batch_size, num_steps)):
feed_dict = {input_data: x, targets: y,
lstm1.initial_state: state1,
lstm2.initial_state: state2,
}
## For training, enable dropout
feed_dict.update( network.all_drop )
_cost, state1, state2, _ = sess.run([cost,
lstm1.final_state,
lstm2.final_state,
train_op],
feed_dict=feed_dict
)
costs += _cost; iters += num_steps
if step % (epoch_size // 10) == 1:
print("%.3f perplexity: %.3f speed: %.0f wps" %
(step * 1.0 / epoch_size, np.exp(costs / iters),
iters * batch_size / (time.time() - start_time)))
train_perplexity = np.exp(costs / iters)
# print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
print("Epoch: %d/%d Train Perplexity: %.3f" % (i + 1, max_max_epoch,
train_perplexity))
# for diversity in diversity_list:
for top_k in top_k_list:
# Testing, generate some text from a given seed.
state1 = tl.layers.initialize_rnn_state(lstm1_test.initial_state)
state2 = tl.layers.initialize_rnn_state(lstm2_test.initial_state)
# prepare the seed
outs_id = tl.nlp.words_to_word_ids(seed, dictionary, _UNK)
# feed the seed to initialize the state for generation.
for ids in outs_id[:-1]:
a_id = np.asarray(ids).reshape(1,1)
feed_dict = {input_data_test: a_id,
lstm1_test.initial_state: state1,
lstm2_test.initial_state: state2,
}
_, state1, state2 = sess.run([y_soft, #y_linear, #y_soft, #y_id,
lstm1_test.final_state,
lstm2_test.final_state],
feed_dict=feed_dict
)
# feed the last word in seed, and start to generate sentence.
a_id = outs_id[-1]
for _ in range(print_length):
a_id = np.asarray(a_id).reshape(1,1)
feed_dict = {input_data_test: a_id,
lstm1_test.initial_state: state1,
lstm2_test.initial_state: state2,
}
out, state1, state2 = sess.run([y_soft, #y_linear, #y_soft, #y_id,
lstm1_test.final_state,
lstm2_test.final_state],
feed_dict=feed_dict
)
## Without sampling
# a_id = np.argmax(out[0])
## Sample from all words, if vocab_size is large,
# this may have numeric error.
# a_id = tl.nlp.sample(out[0], diversity)
## Sample from the top k words.
a_id = tl.nlp.sample_top(out[0], top_k=top_k)
outs_id.append(a_id)
sentence = tl.nlp.word_ids_to_words(outs_id, reverse_dictionary)
sentence = " ".join(sentence)
# print(diversity, ':', sentence)
print(top_k, ':', sentence)
if i % 5 == 0:
print("Save model, data and dictionaries" + "!"*10);
tl.files.save_npz(network_test.all_params, name=model_file_name+'.npz')
tl.files.save_any_to_npy(save_dict={'data': data, 'count': count,
'dictionary': dictionary, 'reverse_dictionary':
reverse_dictionary}, name=model_file_name+'.npy')
if __name__ == '__main__':
sess = tf.InteractiveSession()
"""Restore a pretrained embedding matrix."""
# main_restore_embedding_layer()
"""How to generate text from a given context."""
main_lstm_generate_text()
#
| [
"[email protected]"
] | |
46fbfb2944c9774a7d1991d1a1b0dc3d4e26a4fd | 01733042e84a768b77f64ec24118d0242b2f13b8 | /uhd_restpy/testplatform/sessions/ixnetwork/topology/isisdcepseudonode_6416adf40a9c10f6ed462f220e3c16ff.py | b86e582922a40968d95ae27c74430a4bffca970b | [
"MIT"
] | permissive | slieberth/ixnetwork_restpy | e95673905854bc57e56177911cb3853c7e4c5e26 | 23eeb24b21568a23d3f31bbd72814ff55eb1af44 | refs/heads/master | 2023-01-04T06:57:17.513612 | 2020-10-16T22:30:55 | 2020-10-16T22:30:55 | 311,959,027 | 0 | 0 | NOASSERTION | 2020-11-11T12:15:34 | 2020-11-11T12:06:00 | null | UTF-8 | Python | false | false | 9,978 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class IsisDcePseudoNode(Base):
"""Fabric-Path Pseudo Node Configuration
The IsisDcePseudoNode class encapsulates a list of isisDcePseudoNode resources that are managed by the system.
A list of resources can be retrieved from the server using the IsisDcePseudoNode.find() method.
"""
__slots__ = ()
_SDM_NAME = 'isisDcePseudoNode'
_SDM_ATT_MAP = {
'Active': 'active',
'BroadcastRootPriority': 'broadcastRootPriority',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'Nickname': 'nickname',
}
def __init__(self, parent):
super(IsisDcePseudoNode, self).__init__(parent)
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def BroadcastRootPriority(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Broadcast Root Priority
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BroadcastRootPriority']))
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def Nickname(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Nickname
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Nickname']))
def update(self, Name=None):
"""Updates isisDcePseudoNode resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def find(self, Count=None, DescriptiveName=None, Name=None):
"""Finds and retrieves isisDcePseudoNode resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve isisDcePseudoNode resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all isisDcePseudoNode resources from the server.
Args
----
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with matching isisDcePseudoNode resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of isisDcePseudoNode data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the isisDcePseudoNode resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, BroadcastRootPriority=None, Nickname=None):
"""Base class infrastructure that gets a list of isisDcePseudoNode device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- BroadcastRootPriority (str): optional regex of broadcastRootPriority
- Nickname (str): optional regex of nickname
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
return self._execute('abort', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
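# Minimal usage sketch (hedged): the exact container chain from the session
# root down to this node list is an assumption here, but the find()/Start()
# pattern mirrors the documented operations above.
#
#   nodes = topology.DeviceGroup.find().IsisDcePseudoNode.find()
#   nodes.Start()                      # promote to negotiated state
#   print(nodes.Nickname.Values)       # read back the multivalue pattern
#   nodes.Stop()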
| [
"[email protected]"
] | |
ad4ec0230be02dee98e2add102f733659a731918 | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/git/v4_1/models/git_commit_changes.py | 5986784ad2ae58eca95cc064936a557c15a18a50 | [] | no_license | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class GitCommitChanges(Model):
"""GitCommitChanges.
:param change_counts:
:type change_counts: dict
:param changes:
:type changes: list of :class:`object <git.v4_1.models.object>`
"""
_attribute_map = {
'change_counts': {'key': 'changeCounts', 'type': '{int}'},
'changes': {'key': 'changes', 'type': '[object]'}
}
def __init__(self, change_counts=None, changes=None):
super(GitCommitChanges, self).__init__()
self.change_counts = change_counts
self.changes = changes
| [
"[email protected]"
] | |
2362371a28d13cb9cafaadbc182329e9fbd924c5 | 679fc3c015f31859899543c6844c76da44941a28 | /main/migrations/0003_auto_20151024_1811.py | d9fd8c07a24c7e40590184cead52757839f2608c | [] | no_license | swheatley/twitter | 2323a2c0270b4cd13795e8f0bf342aa428fd5274 | ce118d22368d958696710baf44dcd07115e1ce66 | refs/heads/master | 2021-01-10T03:12:42.113952 | 2015-11-18T05:18:13 | 2015-11-18T05:18:13 | 46,396,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,576 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0002_auto_20151023_1738'),
]
operations = [
migrations.DeleteModel(
name='Place',
),
migrations.RemoveField(
model_name='tweet',
name='favorites',
),
migrations.RemoveField(
model_name='tweet',
name='lang',
),
migrations.AddField(
model_name='tweet',
name='created_at',
field=models.CharField(max_length=200, null=True, blank=True),
),
migrations.AddField(
model_name='tweet',
name='location',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='tweet',
name='profile_image_url',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='tweet',
name='screen_name',
field=models.CharField(max_length=100, null=True, blank=True),
),
migrations.AddField(
model_name='tweet',
name='source',
field=models.CharField(max_length=150, null=True, blank=True),
),
migrations.AddField(
model_name='tweet',
name='time_zone',
field=models.IntegerField(null=True, blank=True),
),
]
| [
"[email protected]"
] | |
afea4008f8c4c5c9538d98a64af56f9314244cb0 | 2e8f0de7a1526ef511927783235edc93f7c90036 | /communicare/core/migrations/0033_expense.py | bfde8cbaa2f90f3e72d5fa3cfcba4cecd9ac021e | [] | no_license | ConTTudOweb/CommunicareProject | 3d663578dfdeb455bc49419b3d103daec69c8fab | 211a1124c8c4549c609832ad71069a55c714a430 | refs/heads/master | 2022-12-21T12:59:35.424560 | 2021-05-10T22:16:15 | 2021-05-10T22:16:15 | 163,891,380 | 0 | 1 | null | 2022-12-08T07:43:22 | 2019-01-02T21:27:42 | HTML | UTF-8 | Python | false | false | 922 | py | # Generated by Django 2.1.8 on 2019-09-23 16:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0032_registration_net_value'),
]
operations = [
migrations.CreateModel(
name='Expense',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255, verbose_name='descrição')),
('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=15, null=True, verbose_name='valor')),
('event', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='core.Event', verbose_name='evento')),
],
options={
'verbose_name': 'despesa',
},
),
]
| [
"[email protected]"
] | |
70221007808c9d0fe7c30493eff55d871b67547e | 46279163a543cd8820bdc38133404d79e787c5d2 | /torch/distributed/rpc/rref_proxy.py | f087514d92a8deea48d50c6d77e6e353ff2fe889 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | erwincoumans/pytorch | 31738b65e7b998bfdc28d0e8afa7dadeeda81a08 | ae9f39eb580c4d92157236d64548b055f71cf14b | refs/heads/master | 2023-01-23T10:27:33.628897 | 2020-12-06T01:22:00 | 2020-12-06T01:23:40 | 318,930,000 | 5 | 1 | NOASSERTION | 2020-12-06T01:58:57 | 2020-12-06T01:58:56 | null | UTF-8 | Python | false | false | 1,197 | py | from functools import partial
from . import functions
import torch
def _local_invoke(rref, func_name, args, kwargs):
return getattr(rref.local_value(), func_name)(*args, **kwargs)
@functions.async_execution
def _local_invoke_async_execution(rref, func_name, args, kwargs):
return getattr(rref.local_value(), func_name)(*args, **kwargs)
def _invoke_rpc(rref, rpc_api, func_name, *args, **kwargs):
rref_type = rref._get_type()
_invoke_func = _local_invoke
# Bypass ScriptModules when checking for async function attribute.
bypass_type = issubclass(rref_type, torch.jit.ScriptModule) or issubclass(
rref_type, torch._C.ScriptModule
)
if not bypass_type:
func = getattr(rref_type, func_name)
if hasattr(func, "_wrapped_async_rpc_function"):
_invoke_func = _local_invoke_async_execution
return rpc_api(
rref.owner(),
_invoke_func,
args=(rref, func_name, args, kwargs)
)
class RRefProxy:
def __init__(self, rref, rpc_api):
self.rref = rref
self.rpc_api = rpc_api
def __getattr__(self, func_name):
return partial(_invoke_rpc, self.rref, self.rpc_api, func_name)
| [
"[email protected]"
] | |
a462a36edc996a0c6c8e2ae42759f8a01a7cb609 | 66b1748a1238eda820345f914f60da434c668cf0 | /CodeUp/CodeUp1050.py | aafd819b919e0592b26cd76f597c92f3e9bb81f6 | [] | no_license | kwangminini/Algorhitm | 5d3140021584239e30468d3dcb353b119b935e76 | 4d9a3b9284c90d141c1a73e14329152455373c53 | refs/heads/master | 2023-09-03T07:33:51.228150 | 2023-08-28T13:39:52 | 2023-08-28T13:39:52 | 225,879,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py |
a,b=input().split()
if a==b:
print(1)
else:
print(0) | [
"[email protected]"
] | |
596324358e29714d198611478dfc5c42b15d24f5 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/185.py | 9299bc6923e7f47d089372fb9fc20d6aba78a70c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | file = open('input', 'r')
problems = int(file.readline())
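# Greedy rule used below: with current production rate F_t, buying one more
# farm (cost C, extra rate F) pays off only if finishing later at the faster
# rate beats finishing now:
#
#   C / F_t + X / (F_t + F) < X / F_t   <=>   X / (F_t + F) < (X - C) / F_t
#
# so the loop keeps buying farms while that holds, and otherwise banks the
# remaining X cookies at the current rate.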
for i in range(1, problems+1):
values = file.readline().split()
C = float(values[0])
F = float(values[1])
X = float(values[2])
    F_t = 2 # current cookie production rate (cookies per second)
    T_t = 0.0 # total time elapsed
while True:
if (X-C) / F_t < X / (F_t + F):
T_t = T_t + X / F_t
print 'Case #' + str(i) + ': ' + str(T_t)
break # exit loop
else:
T_t = T_t + C / F_t
F_t = F_t + F
| [
"[email protected]"
] | |
a969fda4550ba3a2812a6c95c059b51c7a6b534a | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/scaleform/daapi/view/lobby/fortifications/components/sorties_dps.py | b8a5d2e3ac266597a634520df829f0586537324c | [] | no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 18,600 | py | # 2016.02.14 12:39:36 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/components/sorties_dps.py
import random
import BigWorld
from UnitBase import UNIT_FLAGS, SORTIE_DIVISION
from debug_utils import LOG_ERROR
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.fort_formatters import getIconLevel
from gui.Scaleform.daapi.view.lobby.rally.vo_converters import getUnitMaxLevel, makeFortBattleShortVO
from gui.Scaleform.daapi.view.lobby.rally.vo_converters import makeSortieShortVO
from gui.Scaleform.framework.entities.DAAPIDataProvider import DAAPIDataProvider, SortableDAAPIDataProvider
from gui.Scaleform.genConsts.FORTIFICATION_ALIASES import FORTIFICATION_ALIASES
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS as I18N_FORTIFICATIONS
from gui.Scaleform.locale.RES_ICONS import RES_ICONS
from gui.prb_control.items.sortie_items import getDivisionsOrderData
from gui.prb_control.prb_helpers import unitFunctionalProperty
from gui.shared.formatters import icons, text_styles
from gui.shared.fortifications.fort_seqs import BATTLE_ITEM_TYPE, getDivisionSettings
from gui.shared.utils import sortByFields
from helpers import i18n, time_utils
from shared_utils import CONST_CONTAINER
from messenger import g_settings
from messenger.m_constants import USER_GUI_TYPE
from messenger.storage import storage_getter
from unit_roster_config import SortieSlot6, SortieSlot8, SortieSlot10
MIN_MAX_VEH_LVLS_MAPPING = {SORTIE_DIVISION.MIDDLE: SortieSlot6,
SORTIE_DIVISION.CHAMPION: SortieSlot8,
SORTIE_DIVISION.ABSOLUTE: SortieSlot10}
def makeDivisionData(nameGenerator = None):
result = []
for name, divisionID, rosterTypeID in getDivisionsOrderData():
settings = getDivisionSettings(name)
if settings:
profit = settings.resourceBonus
else:
profit = 0
result.append({'profit': profit,
'level': divisionID,
'label': nameGenerator or I18N_FORTIFICATIONS.sortie_division_name(name),
'data': rosterTypeID,
'vehLvls': MIN_MAX_VEH_LVLS_MAPPING[divisionID].DEFAULT_LEVELS})
return result
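# Shape of each entry produced by makeDivisionData (values illustrative):
#
#   {'profit': <resourceBonus from division settings, 0 if missing>,
#    'level': <SORTIE_DIVISION id>,
#    'label': <localized division name>,
#    'data': <rosterTypeID, consumed by the divisions dropdown below>,
#    'vehLvls': <default vehicle level bounds for that division>}
#
# DivisionsDataProvider.buildList() prepends an "ALL" pseudo-entry with
# data == 0, so index 0 means "no division filter".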
class DivisionsDataProvider(DAAPIDataProvider):
def __init__(self):
super(DivisionsDataProvider, self).__init__()
self.__list = []
@property
def collection(self):
return self.__list
def emptyItem(self):
return {'label': '',
'data': 0}
def clear(self):
self.__list = []
def init(self, flashObject):
self.buildList()
self.setFlashObject(flashObject)
def fini(self):
self.clear()
self._dispose()
def buildList(self):
self.__list = [{'label': I18N_FORTIFICATIONS.sortie_division_name('ALL'),
'data': 0}]
self.__list.extend(makeDivisionData())
def getTypeIDByIndex(self, index):
rosterTypeID = 0
if -1 < index < len(self.__list):
rosterTypeID = self.__list[index]['data']
return rosterTypeID
def getIndexByTypeID(self, rosterTypeID):
found = 0
for index, item in enumerate(self.__list[1:]):
if item['data'] == rosterTypeID:
found = index + 1
break
return found
class SortiesDataProvider(SortableDAAPIDataProvider):
def __init__(self):
super(SortiesDataProvider, self).__init__()
self._list = []
self._listMapping = {}
self._mapping = {}
self._selectedID = None
return
@unitFunctionalProperty
def unitFunctional(self):
return None
@property
def collection(self):
return self._list
def emptyItem(self):
return None
def clear(self):
self._list = []
self._listMapping.clear()
self._mapping.clear()
self._selectedID = None
return
def fini(self):
self.clear()
self._dispose()
def getSelectedIdx(self):
if self._selectedID in self._mapping:
return self._mapping[self._selectedID]
return -1
def setSelectedID(self, id):
self._selectedID = id
def getVO(self, index):
vo = None
if index > -1:
try:
vo = self.sortedCollection[index]
except IndexError:
LOG_ERROR('Item not found', index)
return vo
def getUnitVO(self, clientIdx):
return makeSortieShortVO(self.unitFunctional, unitIdx=clientIdx)
def getUnitMaxLevel(self, clientIdx):
return getUnitMaxLevel(self.unitFunctional, unitIdx=clientIdx)
def buildList(self, cache):
self.clear()
for index, item in enumerate(cache.getIterator()):
self._list.append(self._makeVO(index, item))
self._listMapping[item.getID()] = index
self._rebuildMapping()
def rebuildList(self, cache):
self.buildList(cache)
self.refresh()
def updateItem(self, cache, item):
sortieID = item.getID()
if sortieID in self._mapping and item.filter(cache.getRosterTypeID()):
index = self._listMapping[sortieID]
try:
self._list[index] = self._makeVO(index, item)
except IndexError:
LOG_ERROR('Item is not found', sortieID, index)
self.flashObject.update([index])
self._rebuildMapping()
return self.getSelectedIdx()
else:
self.rebuildList(cache)
return None
def removeItem(self, cache, removedID):
if removedID in self._mapping:
dropSelection = removedID == self._selectedID
self.rebuildList(cache)
return dropSelection
return False
def pyGetSelectedIdx(self):
return self.getSelectedIdx()
def pySortOn(self, fields, order):
super(SortiesDataProvider, self).pySortOn(fields, order)
self._rebuildMapping()
self.refresh()
@storage_getter('users')
def usersStorage(self):
return None
def _rebuildMapping(self):
self._mapping = dict(map(lambda item: (item[1]['sortieID'], item[0]), enumerate(self.sortedCollection)))
def _makeVO(self, index, item):
isInBattle = item.getFlags() & UNIT_FLAGS.IN_ARENA > 0 or item.getFlags() & UNIT_FLAGS.IN_QUEUE > 0 or item.getFlags() & UNIT_FLAGS.IN_SEARCH > 0
user = self.usersStorage.getUser(item.getCommanderDatabaseID())
scheme = g_settings.getColorScheme('rosters')
if user:
colors = scheme.getColors(user.getGuiType())
color = colors[0] if user.isOnline() else colors[1]
else:
colors = scheme.getColors(USER_GUI_TYPE.OTHER)
color = colors[1]
return {'sortieID': item.getID(),
'creatorName': item.getCommanderFullName(),
'divisionName': I18N_FORTIFICATIONS.sortie_division_name(item.getDivisionName()),
'description': text_styles.standard(item.getDescription()),
'descriptionForTT': item.getDescription(),
'isInBattle': isInBattle,
'division': item.getDivision(),
'playersCount': item.itemData.count,
'commandSize': item.itemData.maxCount,
'rallyIndex': index,
'igrType': item.getIgrType(),
'color': color}
class IntelligenceDataProvider(SortableDAAPIDataProvider):
def __init__(self):
super(IntelligenceDataProvider, self).__init__()
self._list = []
self._listMapping = {}
self.__mapping = {}
self.__selectedID = None
return
@unitFunctionalProperty
def unitFunctional(self):
return None
@property
def collection(self):
return self._list
def emptyItem(self):
return None
def clear(self):
self._list = []
self._listMapping.clear()
self.__mapping.clear()
self.__selectedID = None
return
def fini(self):
self.clear()
self._dispose()
def getSelectedIdx(self):
if self.__selectedID in self.__mapping:
return self.__mapping[self.__selectedID]
return -1
def setSelectedID(self, id):
self.__selectedID = id
def getVO(self, index):
vo = None
if index > -1:
try:
vo = self.sortedCollection[index]
except IndexError:
LOG_ERROR('Item not found', index)
return vo
def buildList(self, cache):
self.clear()
favorites = cache.getFavorites()
for index, item in enumerate(cache.getIterator()):
self._list.append(self._makeVO(index, item, favorites))
self._listMapping[item.getClanDBID()] = index
self.__rebuildMapping()
def rebuildList(self, cache):
self.buildList(cache)
self.refresh()
def refreshItem(self, cache, clanDBID):
isSelected = self.__selectedID == clanDBID
self.buildList(cache)
if isSelected and clanDBID not in self.__mapping:
return True
return False
def pyGetSelectedIdx(self):
return self.getSelectedIdx()
def pySortOn(self, fields, order):
super(IntelligenceDataProvider, self).pySortOn(fields, order)
self.__rebuildMapping()
self.refresh()
def deleteBrackets(self, element):
element['clanTag'] = element['clanTag'][1:-1]
return element
def addBrackets(self, element):
element['clanTag'] = '[%s]' % element['clanTag']
return element
@property
def sortedCollection(self):
return map(self.addBrackets, sortByFields(self._sort, map(self.deleteBrackets, self.collection)))
def __rebuildMapping(self):
self.__mapping = dict(map(lambda item: (item[1]['clanID'], item[0]), enumerate(self.sortedCollection)))
def _makeVO(self, index, item, favorites):
timestamp = item.getAvailability()
defHour, defMin = item.getDefHourFor(timestamp)
defenceStart = time_utils.getTimeForLocal(timestamp, defHour, defMin)
defenceFinish = defenceStart + time_utils.ONE_HOUR
defenceTime = '%s - %s' % (BigWorld.wg_getShortTimeFormat(defenceStart), BigWorld.wg_getShortTimeFormat(defenceFinish))
return {'clanID': item.getClanDBID(),
'levelIcon': getIconLevel(item.getLevel()),
'clanTag': '[%s]' % item.getClanAbbrev(),
'defenceTime': defenceTime,
'defenceStartTime': int('%02d%02d' % (defHour, defMin)),
'avgBuildingLvl': round(item.getAvgBuildingLevel(), 1),
'isFavorite': item.getClanDBID() in favorites,
'clanLvl': item.getLevel()}
class FortBattlesDataProvider(SortableDAAPIDataProvider):
class DAY_OF_BATTLE(CONST_CONTAINER):
TODAY = 0
TOMORROW = 1
OTHER = 2
def __init__(self):
super(FortBattlesDataProvider, self).__init__()
self._list = []
self._listMapping = {}
self._mapping = {}
self._selectedID = None
return
@unitFunctionalProperty
def unitFunctional(self):
return None
@property
def collection(self):
return self._list
def emptyItem(self):
return None
def clear(self):
self._list = []
self._listMapping.clear()
self._mapping.clear()
self._selectedID = None
return
def fini(self):
self.clear()
self._dispose()
def getSelectedIdx(self):
if self._selectedID in self._mapping:
return self._mapping[self._selectedID]
return -1
def setSelectedID(self, id):
self._selectedID = id
def getVO(self, index):
vo = None
if index > -1:
try:
vo = self.sortedCollection[index]
except IndexError:
LOG_ERROR('Item not found', index)
return vo
def getUnitVO(self, clientIdx):
return makeFortBattleShortVO(self.unitFunctional, unitIdx=clientIdx)
def getUnitMaxLevel(self, clientIdx):
return getUnitMaxLevel(self.unitFunctional, unitIdx=clientIdx)
def buildList(self, cache):
self.clear()
if not BigWorld.player().isLongDisconnectedFromCenter:
for index, (item, battleItem) in enumerate(cache.getIterator()):
self._list.append(self._makeVO(index, item, battleItem))
self._listMapping[item.getBattleID()] = index
self._rebuildMapping()
def rebuildList(self, cache):
self.buildList(cache)
self.refresh()
def updateItem(self, cache, item, battleItem):
fortBattleID = item.getBattleID()
if fortBattleID in self._mapping and item.filter():
index = self._listMapping[fortBattleID]
try:
self._list[index] = self._makeVO(index, item, battleItem)
except IndexError:
LOG_ERROR('Item is not found', fortBattleID, index)
self.flashObject.update([index])
self._rebuildMapping()
return self.getSelectedIdx()
else:
self.rebuildList(cache)
return None
def removeItem(self, cache, removedID):
if removedID in self._mapping:
dropSelection = removedID == self._selectedID
self.rebuildList(cache)
return dropSelection
return False
def pyGetSelectedIdx(self):
return self.getSelectedIdx()
def pySortOn(self, fields, order):
super(FortBattlesDataProvider, self).pySortOn(fields, order)
self._rebuildMapping()
self.refresh()
def _rebuildMapping(self):
self._mapping = dict(map(lambda item: (item[1]['sortieID'][0], item[0]), enumerate(self.sortedCollection)))
def _makeVO(self, index, item, battleItem):
if item.getType() == BATTLE_ITEM_TYPE.DEFENCE:
battleType = FORTIFICATION_ALIASES.CLAN_BATTLE_DEFENCE
else:
battleType = FORTIFICATION_ALIASES.CLAN_BATTLE_OFFENCE
if battleItem:
startTime = battleItem.getRoundStartTime()
startTimeLeft = battleItem.getRoundStartTimeLeft()
isBattleRound = battleItem.isBattleRound()
else:
startTime = item.getStartTime()
startTimeLeft = item.getStartTimeLeft()
isBattleRound = False
dayOfBattle = self.DAY_OF_BATTLE.TODAY
if startTimeLeft > time_utils.QUARTER_HOUR:
stateOfBattle = FORTIFICATION_ALIASES.CLAN_BATTLE_BATTLE_TOMORROW
if time_utils.isTimeThisDay(startTime):
stateOfBattle = FORTIFICATION_ALIASES.CLAN_BATTLE_BATTLE_TODAY
elif time_utils.isTimeNextDay(startTime):
dayOfBattle = self.DAY_OF_BATTLE.TOMORROW
else:
dayOfBattle = self.DAY_OF_BATTLE.OTHER
elif startTimeLeft > 0 and not isBattleRound:
stateOfBattle = FORTIFICATION_ALIASES.CLAN_BATTLE_BEGINS
else:
stateOfBattle = FORTIFICATION_ALIASES.CLAN_BATTLE_IS_IN_BATTLE
return {'sortieID': (item.getBattleID(), item.getPeripheryID()),
'battleType': battleType,
'battleName': self.__makeBattleName(item, battleType),
'battleDirection': self.__makeBattleDirection(item),
'dayOfBattle': self.__makeDayOfBattle(dayOfBattle, startTime),
'beforeBegins': self.__makeTimeOfBattle(item, battleItem, stateOfBattle),
'stateOfBattle': stateOfBattle,
'startTimeLeft': startTimeLeft,
'direction': item.getDirection()}
def __makeBattleName(self, item, battleType):
_, clanAbbrev, _ = item.getOpponentClanInfo()
clanName = '[%s]' % clanAbbrev
result = i18n.makeString(I18N_FORTIFICATIONS.fortclanbattlelist_renderbattlename(battleType), clanName=clanName)
result = text_styles.middleTitle(result)
return result
def __makeBattleDirection(self, item):
direction = i18n.makeString('#fortifications:General/directionName%d' % item.getDirection())
directionName = i18n.makeString(I18N_FORTIFICATIONS.FORTCLANBATTLELIST_RENDERDIRECTION, directionName=direction)
return text_styles.standard(directionName)
def __makeDayOfBattle(self, dayOfBattle, timestamp):
if dayOfBattle == self.DAY_OF_BATTLE.TODAY:
availability = i18n.makeString(I18N_FORTIFICATIONS.fortclanbattlelist_renderdayofbattle('today'))
elif dayOfBattle == self.DAY_OF_BATTLE.TOMORROW:
availability = i18n.makeString(I18N_FORTIFICATIONS.fortclanbattlelist_renderdayofbattle('tomorrow'))
else:
availability = BigWorld.wg_getShortDateFormat(timestamp)
return text_styles.main(availability)
def __makeTimeOfBattle(self, item, battleItem, currentState):
result = {}
if currentState == FORTIFICATION_ALIASES.CLAN_BATTLE_IS_IN_BATTLE:
icon = icons.makeImageTag(RES_ICONS.MAPS_ICONS_LIBRARY_BATTLERESULTICON_1, 16, 16, -3, 0)
formattedText = text_styles.error(i18n.makeString(I18N_FORTIFICATIONS.FORTCLANBATTLELIST_RENDERCURRENTTIME_ISBATTLE))
result['text'] = icon + ' ' + formattedText
elif currentState == FORTIFICATION_ALIASES.CLAN_BATTLE_BEGINS:
battleID = item.getBattleID()
timer = {}
htmlFormatter = text_styles.alert('###')
locale = text_styles.main(i18n.makeString(I18N_FORTIFICATIONS.FORTCLANBATTLELIST_RENDERCURRENTTIME_BEFOREBATTLE))
result['text'] = locale
if battleItem:
startTimeLeft = battleItem.getRoundStartTimeLeft()
else:
startTimeLeft = item.getStartTimeLeft()
timer['useUniqueIdentifier'] = True
timer['uniqueIdentifier'] = battleID
timer['deltaTime'] = startTimeLeft
timer['htmlFormatter'] = htmlFormatter
timer['timerDefaultValue'] = '00'
result['timer'] = timer
else:
lastBattleTimeUserString = '%s - %s' % (BigWorld.wg_getShortTimeFormat(item.getStartTime()), BigWorld.wg_getShortTimeFormat(item.getFinishTime()))
result['text'] = text_styles.main(lastBattleTimeUserString)
return result
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\fortifications\components\sorties_dps.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:39:36 Central Europe (Standard Time)
| [
"[email protected]"
] | |
06a17dd5afb93cb7d5552f15f447eb8cc99404d0 | 0125bbe0ce453e94604ff5834fbc280fe44f3220 | /examples/word_level/wmt_2018/de_en/microtransquest.py | 8303eb872b8574349a419fb78ec7a163952b894a | [
"Apache-2.0"
] | permissive | mfomicheva/TransQuest | fc51bcb90e386534845841fd75a3860054e76dd7 | 4225f7195a703414ed13ce597854cc1a59703229 | refs/heads/master | 2023-06-12T14:52:49.066705 | 2021-05-07T10:35:21 | 2021-05-07T10:35:21 | 263,876,762 | 6 | 1 | Apache-2.0 | 2020-05-14T09:52:07 | 2020-05-14T09:52:06 | null | UTF-8 | Python | false | false | 8,955 | py | import os
import shutil
from sklearn.model_selection import train_test_split
from examples.word_level.common.util import reader, prepare_testdata
from examples.word_level.wmt_2018.de_en.microtransquest_config import TRAIN_PATH, TRAIN_SOURCE_FILE, \
TRAIN_SOURCE_TAGS_FILE, \
TRAIN_TARGET_FILE, \
TRAIN_TARGET_TAGS_FLE, MODEL_TYPE, MODEL_NAME, microtransquest_config, TEST_PATH, TEST_SOURCE_FILE, \
TEST_TARGET_FILE, TEMP_DIRECTORY, TEST_SOURCE_TAGS_FILE, SEED, TEST_TARGET_TAGS_FILE, TEST_TARGET_GAPS_FILE, \
DEV_PATH, DEV_SOURCE_FILE, DEV_TARGET_FILE, DEV_SOURCE_TAGS_FILE, DEV_TARGET_TAGS_FLE, DEV_SOURCE_TAGS_FILE_SUB, \
DEV_TARGET_TAGS_FILE_SUB, DEV_TARGET_GAPS_FILE_SUB
from transquest.algo.word_level.microtransquest.run_model import MicroTransQuestModel
if not os.path.exists(TEMP_DIRECTORY):
os.makedirs(TEMP_DIRECTORY)
raw_train_df = reader(TRAIN_PATH, TRAIN_SOURCE_FILE, TRAIN_TARGET_FILE, TRAIN_SOURCE_TAGS_FILE,
TRAIN_TARGET_TAGS_FLE)
raw_dev_df = reader(DEV_PATH, DEV_SOURCE_FILE, DEV_TARGET_FILE, DEV_SOURCE_TAGS_FILE,
DEV_TARGET_TAGS_FLE)
raw_test_df = reader(TEST_PATH, TEST_SOURCE_FILE, TEST_TARGET_FILE)
test_sentences = prepare_testdata(raw_test_df)
dev_sentences = prepare_testdata(raw_dev_df)
fold_sources_tags = []
fold_targets_tags = []
dev_fold_sources_tags = []
dev_fold_targets_tags = []
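# Train one model per fold; the per-fold tag predictions are combined by majority vote below.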
for i in range(microtransquest_config["n_fold"]):
if os.path.exists(microtransquest_config['output_dir']) and os.path.isdir(microtransquest_config['output_dir']):
shutil.rmtree(microtransquest_config['output_dir'])
if microtransquest_config["evaluate_during_training"]:
raw_train, raw_eval = train_test_split(raw_train_df, test_size=0.1, random_state=SEED * i)
model = MicroTransQuestModel(MODEL_TYPE, MODEL_NAME, labels=["OK", "BAD"], args=microtransquest_config)
model.train_model(raw_train, eval_data=raw_eval)
model = MicroTransQuestModel(MODEL_TYPE, microtransquest_config["best_model_dir"], labels=["OK", "BAD"],
args=microtransquest_config)
else:
model = MicroTransQuestModel(MODEL_TYPE, MODEL_NAME, labels=["OK", "BAD"], args=microtransquest_config)
model.train_model(raw_train_df)
sources_tags, targets_tags = model.predict(test_sentences, split_on_space=True)
fold_sources_tags.append(sources_tags)
fold_targets_tags.append(targets_tags)
dev_sources_tags, dev_targets_tags = model.predict(dev_sentences, split_on_space=True)
dev_fold_sources_tags.append(dev_sources_tags)
dev_fold_targets_tags.append(dev_targets_tags)
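# For each test sentence, take the per-word majority label across the folds.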
source_predictions = []
for sentence_id in range(len(test_sentences)):
majority_prediction = []
predictions = []
for fold_prediction in fold_sources_tags:
predictions.append(fold_prediction[sentence_id])
sentence_length = len(predictions[0])
for word_id in range(sentence_length):
word_prediction = []
for prediction in predictions:
word_prediction.append(prediction[word_id])
majority_prediction.append(max(set(word_prediction), key=word_prediction.count))
source_predictions.append(majority_prediction)
target_predictions = []
for sentence_id in range(len(test_sentences)):
majority_prediction = []
predictions = []
for fold_prediction in fold_targets_tags:
predictions.append(fold_prediction[sentence_id])
sentence_length = len(predictions[0])
for word_id in range(sentence_length):
word_prediction = []
for prediction in predictions:
word_prediction.append(prediction[word_id])
majority_prediction.append(max(set(word_prediction), key=word_prediction.count))
target_predictions.append(majority_prediction)
test_source_sentences = raw_test_df["source"].tolist()
test_target_sentences = raw_test_df["target"].tolist()
with open(os.path.join(TEMP_DIRECTORY, TEST_SOURCE_TAGS_FILE), 'w') as f:
for sentence_id, (test_source_sentence, source_prediction) in enumerate(
zip(test_source_sentences, source_predictions)):
words = test_source_sentence.split()
for word_id, (word, word_prediction) in enumerate(zip(words, source_prediction)):
f.write("MicroTransQuest" + "\t" + "source" + "\t" +
str(sentence_id) + "\t" + str(word_id) + "\t"
+ word + "\t" + word_prediction + '\n')
with open(os.path.join(TEMP_DIRECTORY, TEST_TARGET_TAGS_FILE), 'w') as target_f, open(
os.path.join(TEMP_DIRECTORY, TEST_TARGET_GAPS_FILE), 'w') as gap_f:
for sentence_id, (test_target_sentence, target_prediction) in enumerate(
zip(test_target_sentences, target_predictions)):
# target_sentence = test_sentence.split("[SEP]")[1]
words = test_target_sentence.split()
# word_predictions = target_prediction.split()
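        # WMT target-side tags alternate GAP, WORD, GAP, ...: even indices are gap
        # tags, odd indices are MT word tags (hence the prediction_id % 2 split below).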
gap_index = 0
word_index = 0
for prediction_id, prediction in enumerate(target_prediction):
if prediction_id % 2 == 0:
gap_f.write("MicroTransQuest" + "\t" + "gap" + "\t" +
str(sentence_id) + "\t" + str(gap_index) + "\t"
+ "gap" + "\t" + prediction + '\n')
gap_index += 1
else:
target_f.write("MicroTransQuest" + "\t" + "mt" + "\t" +
str(sentence_id) + "\t" + str(word_index) + "\t"
+ words[word_index] + "\t" + prediction + '\n')
word_index += 1
# Predictions for dev file
dev_source_predictions = []
for sentence_id in range(len(dev_sentences)):
majority_prediction = []
predictions = []
for fold_prediction in dev_fold_sources_tags:
predictions.append(fold_prediction[sentence_id])
sentence_length = len(predictions[0])
for word_id in range(sentence_length):
word_prediction = []
for prediction in predictions:
word_prediction.append(prediction[word_id])
majority_prediction.append(max(set(word_prediction), key=word_prediction.count))
dev_source_predictions.append(majority_prediction)
dev_target_predictions = []
for sentence_id in range(len(dev_sentences)):
majority_prediction = []
predictions = []
for fold_prediction in dev_fold_targets_tags:
predictions.append(fold_prediction[sentence_id])
sentence_length = len(predictions[0])
for word_id in range(sentence_length):
word_prediction = []
for prediction in predictions:
word_prediction.append(prediction[word_id])
majority_prediction.append(max(set(word_prediction), key=word_prediction.count))
dev_target_predictions.append(majority_prediction)
dev_source_sentences = raw_dev_df["source"].tolist()
dev_target_sentences = raw_dev_df["target"].tolist()
dev_source_gold_tags = raw_dev_df["source_tags"].tolist()
dev_target_gold_tags = raw_dev_df["target_tags"].tolist()
with open(os.path.join(TEMP_DIRECTORY, DEV_SOURCE_TAGS_FILE_SUB), 'w') as f:
for sentence_id, (dev_source_sentence, dev_source_prediction, source_gold_tag) in enumerate(
zip(dev_source_sentences, dev_source_predictions, dev_source_gold_tags)):
words = dev_source_sentence.split()
gold_predictions = source_gold_tag.split()
for word_id, (word, word_prediction, gold_prediction) in enumerate(
zip(words, dev_source_prediction, gold_predictions)):
f.write("MicroTransQuest" + "\t" + "source" + "\t" +
str(sentence_id) + "\t" + str(word_id) + "\t"
+ word + "\t" + word_prediction + "\t" + gold_prediction + '\n')
with open(os.path.join(TEMP_DIRECTORY, DEV_TARGET_TAGS_FILE_SUB), 'w') as target_f, open(
os.path.join(TEMP_DIRECTORY, DEV_TARGET_GAPS_FILE_SUB), 'w') as gap_f:
for sentence_id, (dev_sentence, dev_target_prediction, dev_target_gold_tag) in enumerate(
zip(dev_target_sentences, dev_target_predictions, dev_target_gold_tags)):
words = dev_sentence.split()
gold_predictions = dev_target_gold_tag.split()
gap_index = 0
word_index = 0
for prediction_id, (prediction, gold_prediction) in enumerate(zip(dev_target_prediction, gold_predictions)):
if prediction_id % 2 == 0:
gap_f.write("MicroTransQuest" + "\t" + "gap" + "\t" +
str(sentence_id) + "\t" + str(gap_index) + "\t"
+ "gap" + "\t" + prediction + "\t" + gold_prediction + '\n')
gap_index += 1
else:
target_f.write("MicroTransQuest" + "\t" + "mt" + "\t" +
str(sentence_id) + "\t" + str(word_index) + "\t"
+ words[word_index] + "\t" + prediction + "\t" + gold_prediction + '\n')
word_index += 1
| [
"[email protected]"
] | |
2ddbc6fa0cef023e1fcc6a98054552d1885b1d99 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/exercises/_algorithms_challenges/pybites/beginner/143_v2/test_merge.py | fea2b47c842e7412cac0de3aad7246451f21676d | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 438 | py | # ____ ? _______ ? ?
#
#
# ___ test_regular_name
# ... ? 'tim' __ 30
# ... ? 'helen' __ 26
# ... ? 'otto' __ 44
#
#
# ___ test_case_insensitive_lookup
# ... ? 'Tim' __ 30
# ... ? 'BOB' __ 17
# ... ? 'BrEnDa' __ 17
#
#
# ___ test_name_not_found
# ... ? 'timothy' __ ?
# ... ? N.. __ ?
# ... ? F.. __ ?
# ... ?(-1) __ ?
#
#
# ___ test_duplicate_name
# ... ? 'thomas' __ 46
# ... ? 'ana' __ 26 | [
"[email protected]"
] | |
47958a14553a0915ab6b56f000a447ea7dd84437 | 35bad5d9982b5d4f107fc39c41f16e99a5eae1f3 | /leaflet/views/admin/sitetext.py | 8b98796f1183721c1c59d37f075bbd3ea4447018 | [] | no_license | umeboshi2/leaflet | 17c502ce87076633e3b98c6c85efcb07cda2db6b | 63a60d43b93bd07d7d04165ebbe63cb97a44d537 | refs/heads/master | 2020-04-25T12:19:17.824626 | 2014-01-31T19:28:15 | 2014-01-31T19:28:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,338 | py | from cStringIO import StringIO
from datetime import datetime
import transaction
from pyramid.httpexceptions import HTTPFound, HTTPNotFound
from pyramid.security import authenticated_userid
from pyramid.renderers import render
from pyramid.response import Response
from trumpet.models.sitecontent import SiteText
from trumpet.resources import MemoryTmpStore
from trumpet.managers.admin.images import ImageManager
from trumpet.views.menus import BaseMenu
from leaflet.views.base import AdminViewer
from leaflet.views.admin.base import make_main_menu
from leaflet.managers.wiki import WikiArchiver
import colander
import deform
tmpstore = MemoryTmpStore()
class EditSiteTextSchema(colander.Schema):
name = colander.SchemaNode(
colander.String(),
title='Name')
content = colander.SchemaNode(
colander.String(),
title='Content',
widget=deform.widget.TextAreaWidget(rows=10, cols=60))
class SiteTextViewer(AdminViewer):
def __init__(self, request):
super(SiteTextViewer, self).__init__(request)
self.layout.main_menu = make_main_menu(request)
self.images = ImageManager(self.request.db)
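        # URL 'context' values map to handler methods (likely consumed by dispatch() in the base viewer).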
self._dispatch_table = dict(
list=self.list_site_text,
add=self.create_site_text,
delete=self.main,
confirmdelete=self.main,
viewentry=self.view_site_text,
editentry=self.edit_site_text,
create=self.create_site_text,
download_wiki_archive=self.download_wiki_archive,)
self.context = self.request.matchdict['context']
self._view = self.context
self._set_options_menu()
self.dispatch()
def _set_options_menu(self):
menu = BaseMenu()
menu.set_header('Site Text Actions')
url = self.url(context='list', id='all')
menu.append_new_entry('List Entries', url)
url = self.url(context='create', id='new')
menu.append_new_entry('Create New Entry', url)
url = self.url(context='download_wiki_archive', id='all')
menu.append_new_entry('Download Wiki Archive', url)
self.layout.options_menus = dict(actions=menu)
def main(self):
content = '<h1>Here is where we manage site text.</h1>'
self.layout.content = content
def manage_site_text(self):
action = None
if 'action' in self.request.GET:
action = self.request.GET['action']
return self._manage_site_text_action_map[action]()
def view_site_text(self):
id = int(self.request.matchdict['id'])
self.layout.footer = str(type(id))
entry = self.request.db.query(SiteText).get(id)
self.layout.subheader = entry.name
self.layout.content = '<pre width="80">%s</pre>' % entry.content
def list_site_text(self):
template = 'leaflet:templates/list-site-text.mako'
entries = self.request.db.query(SiteText).all()
env = dict(viewer=self, entries=entries)
self.layout.content = self.render(template, env)
def _edit_site_text_form(self):
schema = EditSiteTextSchema()
submit_button = deform.form.Button(name='submit_site_text',
title='Update Content')
form = deform.Form(schema, buttons=(submit_button,))
self.layout.resources.deform_auto_need(form)
return form
def _validate_site_text(self, form, create=False):
controls = self.request.POST.items()
try:
data = form.validate(controls)
except deform.ValidationFailure, e:
self.layout.content = e.render()
return {}
if create:
db = self.request.db
query = db.query(SiteText).filter_by(name=data['name'])
rows = query.all()
if rows:
h1 = '<h1>Site Text "%s" already exists.</h1>'
h1 = h1 % data['name']
self.layout.content = h1 + form.render(data)
return {}
else:
self.layout.subheader = str(rows)
return data
def _submit_site_text(self, form, data={}):
rendered = form.render(data)
if 'submit_site_text' in self.request.params:
if not self._validate_site_text(form):
return
else:
self.layout.content = rendered
self.layout.subheader = 'Please edit content'
def create_site_text(self):
form = self._edit_site_text_form()
# check submission
if 'submit_site_text' in self.request.params:
valid = self._validate_site_text(form, create=True)
if not valid:
return
transaction.begin()
entry = SiteText(valid['name'], valid['content'])
self.request.db.add(entry)
transaction.commit()
self.layout.content = 'Submitted for approval.'
else:
self.layout.content = form.render()
self.layout.subheader = 'Please edit content'
def edit_site_text(self):
form = self._edit_site_text_form()
rendered = form.render()
id = int(self.request.matchdict['id'])
entry = self.request.db.query(SiteText).get(id)
data = dict(name=entry.name, content=entry.content)
if 'submit_site_text' in self.request.params:
valid = self._validate_site_text(form)
if not valid:
return
transaction.begin()
entry.content = valid['content']
self.request.db.add(entry)
transaction.commit()
self.layout.content = 'Submitted for approval.'
else:
self.layout.content = form.render(data)
self.layout.subheader = 'Please edit content'
def download_wiki_archive(self):
archiver = WikiArchiver(self.request.db)
archiver.create_new_zipfile()
archive = archiver.archive_pages()
content_type = 'application/zip'
r = Response(content_type=content_type, body=archive)
r.content_disposition = 'attachment; filename="tutwiki-archive.zip"'
self.response = r
| [
"[email protected]"
] | |
dcba76ba54dce28cee2bc6891fc957ec5889ac55 | 1bed2f766620acf085ed2d7fd3e354a3482b8960 | /homeassistant/components/hassio/websocket_api.py | eb0d6c5407700eb07de2f6ea5f08199c18eccb70 | [
"Apache-2.0"
] | permissive | elupus/home-assistant | 5cbb79a2f25a2938a69f3988534486c269b77643 | 564150169bfc69efdfeda25a99d803441f3a4b10 | dev | 2023-08-28T16:36:04.304864 | 2022-09-16T06:35:12 | 2022-09-16T06:35:12 | 114,460,522 | 2 | 2 | Apache-2.0 | 2023-02-22T06:14:54 | 2017-12-16T12:50:55 | Python | UTF-8 | Python | false | false | 4,002 | py | """Websocket API handlers for the hassio integration."""
import logging
from numbers import Number
import re
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api.connection import ActiveConnection
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import Unauthorized
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from . import HassioAPIError
from .const import (
ATTR_DATA,
ATTR_ENDPOINT,
ATTR_METHOD,
ATTR_RESULT,
ATTR_TIMEOUT,
ATTR_WS_EVENT,
DOMAIN,
EVENT_SUPERVISOR_EVENT,
WS_ID,
WS_TYPE,
WS_TYPE_API,
WS_TYPE_EVENT,
WS_TYPE_SUBSCRIBE,
)
from .handler import HassIO
SCHEMA_WEBSOCKET_EVENT = vol.Schema(
{vol.Required(ATTR_WS_EVENT): cv.string},
extra=vol.ALLOW_EXTRA,
)
# Endpoints needed for ingress can't require admin because addons can set `panel_admin: false`
# pylint: disable=implicit-str-concat
WS_NO_ADMIN_ENDPOINTS = re.compile(
r"^(?:" r"|/ingress/(session|validate_session)" r"|/addons/[^/]+/info" r")$"
)
# pylint: enable=implicit-str-concat
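# Illustrative matches for the pattern above (endpoint names are hypothetical examples):
#   WS_NO_ADMIN_ENDPOINTS.match("/ingress/validate_session")  -> match
#   WS_NO_ADMIN_ENDPOINTS.match("/addons/core_ssh/info")      -> match
#   WS_NO_ADMIN_ENDPOINTS.match("/supervisor/info")           -> None (admin required)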
_LOGGER: logging.Logger = logging.getLogger(__package__)
@callback
def async_load_websocket_api(hass: HomeAssistant):
"""Set up the websocket API."""
websocket_api.async_register_command(hass, websocket_supervisor_event)
websocket_api.async_register_command(hass, websocket_supervisor_api)
websocket_api.async_register_command(hass, websocket_subscribe)
@websocket_api.require_admin
@websocket_api.websocket_command({vol.Required(WS_TYPE): WS_TYPE_SUBSCRIBE})
@websocket_api.async_response
async def websocket_subscribe(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
"""Subscribe to supervisor events."""
@callback
def forward_messages(data):
"""Forward events to websocket."""
connection.send_message(websocket_api.event_message(msg[WS_ID], data))
connection.subscriptions[msg[WS_ID]] = async_dispatcher_connect(
hass, EVENT_SUPERVISOR_EVENT, forward_messages
)
connection.send_message(websocket_api.result_message(msg[WS_ID]))
@websocket_api.websocket_command(
{
vol.Required(WS_TYPE): WS_TYPE_EVENT,
vol.Required(ATTR_DATA): SCHEMA_WEBSOCKET_EVENT,
}
)
@websocket_api.async_response
async def websocket_supervisor_event(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
"""Publish events from the Supervisor."""
connection.send_result(msg[WS_ID])
async_dispatcher_send(hass, EVENT_SUPERVISOR_EVENT, msg[ATTR_DATA])
@websocket_api.websocket_command(
{
vol.Required(WS_TYPE): WS_TYPE_API,
vol.Required(ATTR_ENDPOINT): cv.string,
vol.Required(ATTR_METHOD): cv.string,
vol.Optional(ATTR_DATA): dict,
vol.Optional(ATTR_TIMEOUT): vol.Any(Number, None),
}
)
@websocket_api.async_response
async def websocket_supervisor_api(
hass: HomeAssistant, connection: ActiveConnection, msg: dict
):
"""Websocket handler to call Supervisor API."""
if not connection.user.is_admin and not WS_NO_ADMIN_ENDPOINTS.match(
msg[ATTR_ENDPOINT]
):
raise Unauthorized()
supervisor: HassIO = hass.data[DOMAIN]
try:
result = await supervisor.send_command(
msg[ATTR_ENDPOINT],
method=msg[ATTR_METHOD],
timeout=msg.get(ATTR_TIMEOUT, 10),
payload=msg.get(ATTR_DATA, {}),
)
if result.get(ATTR_RESULT) == "error":
raise HassioAPIError(result.get("message"))
except HassioAPIError as err:
_LOGGER.error("Failed to to call %s - %s", msg[ATTR_ENDPOINT], err)
connection.send_error(
msg[WS_ID], code=websocket_api.ERR_UNKNOWN_ERROR, message=str(err)
)
else:
connection.send_result(msg[WS_ID], result.get(ATTR_DATA, {}))
| [
"[email protected]"
] | |
843ac0689c6874df72452c43da5315fc594fa5c1 | 03fa87e03f752d22908e6d4b9eadcd6543ea8584 | /setup.py | 6f096efc88129420c5443a2f5ff5aa368241570d | [
"MIT"
] | permissive | chrisberkhout/piecash | 59cfbafda2ac5a1d3b7235a7f2d4a4efbaaeae8b | 956f9af61e28e80a80d03aa138c579f43a77e661 | refs/heads/master | 2021-07-11T13:32:20.365289 | 2021-01-29T05:14:36 | 2021-01-29T05:14:36 | 37,768,805 | 0 | 0 | null | 2015-06-20T12:16:22 | 2015-06-20T12:16:21 | null | UTF-8 | Python | false | false | 8,324 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import imp
import os
import subprocess
import sys
# # Python 2.6 subprocess.check_output compatibility. Thanks Greg Hewgill!
if "check_output" not in dir(subprocess):
def check_output(cmd_args, *args, **kwargs):
proc = subprocess.Popen(cmd_args, *args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
out, err = proc.communicate()
if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, cmd_args)
return out
subprocess.check_output = check_output
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
# from setuptools.command.test import test
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
# self.test_args = []
# self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
try:
import colorama
colorama.init() # Initialize colorama on Windows
except ImportError:
# Don't require colorama just for running paver tasks. This allows us to
# run `paver install' without requiring the user to first have colorama
# installed.
pass
# Add the current directory to the module search path.
sys.path.append(".")
# # Constants
CODE_DIRECTORY = "piecash"
DOCS_DIRECTORY = "docs"
TESTS_DIRECTORY = "tests"
DATA_DIRECTORY = "gnucash_books"
PYTEST_FLAGS = ["--doctest-modules"]
# Import metadata. Normally this would just be:
#
# from piecash import metadata
#
# However, when we do this, we also import `piecash/__init__.py'. If this
# imports names from some other modules and these modules have third-party
# dependencies that need installing (which happens after this file is run), the
# script will crash. What we do instead is to load the metadata module by path
# instead, effectively side-stepping the dependency problem. Please make sure
# metadata has no dependencies, otherwise they will need to be added to
# the setup_requires keyword.
metadata = imp.load_source("metadata", os.path.join(CODE_DIRECTORY, "metadata.py"))
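# A modern equivalent sketch (the `imp` module is deprecated on Python 3; this
# assumes the same piecash/metadata.py layout):
#   import importlib.util
#   spec = importlib.util.spec_from_file_location(
#       "metadata", os.path.join(CODE_DIRECTORY, "metadata.py"))
#   metadata = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(metadata)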
# # Miscellaneous helper functions
def get_project_files():
"""Retrieve a list of project files, ignoring hidden files.
:return: sorted list of project files
:rtype: :class:`list`
"""
if is_git_project():
return get_git_project_files()
project_files = []
for top, subdirs, files in os.walk("."):
for subdir in subdirs:
if subdir.startswith("."):
subdirs.remove(subdir)
for f in files:
if f.startswith("."):
continue
project_files.append(os.path.join(top, f))
return project_files
def is_git_project():
return os.path.isdir(".git")
def get_git_project_files():
"""Retrieve a list of all non-ignored files, including untracked files,
excluding deleted files.
:return: sorted list of git project files
:rtype: :class:`list`
"""
cached_and_untracked_files = git_ls_files(
"--cached", # All files cached in the index
"--others", # Untracked files
# Exclude untracked files that would be excluded by .gitignore, etc.
"--exclude-standard",
)
uncommitted_deleted_files = git_ls_files("--deleted")
# Since sorting of files in a set is arbitrary, return a sorted list to
# provide a well-defined order to tools like flake8, etc.
return sorted(cached_and_untracked_files - uncommitted_deleted_files)
def git_ls_files(*cmd_args):
"""Run ``git ls-files`` in the top-level project directory. Arguments go
directly to execution call.
:return: set of file names
:rtype: :class:`set`
"""
cmd = ["git", "ls-files"]
cmd.extend(cmd_args)
return set(subprocess.check_output(cmd).splitlines())
def print_success_message(message):
"""Print a message indicating success in green color to STDOUT.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.GREEN + message + colorama.Fore.RESET)
except ImportError:
print(message)
def print_failure_message(message):
"""Print a message indicating failure in red color to STDERR.
:param message: the message to print
:type message: :class:`str`
"""
try:
import colorama
print(colorama.Fore.RED + message + colorama.Fore.RESET, file=sys.stderr)
except ImportError:
print(message, file=sys.stderr)
def read(filename):
"""Return the contents of a file.
:param filename: file path
:type filename: :class:`str`
:return: the file's content
:rtype: :class:`str`
"""
with open(os.path.join(os.path.dirname(__file__), filename)) as f:
return f.read()
def _lint():
"""Run lint and return an exit code."""
# Flake8 doesn't have an easy way to run checks using a Python function, so
# just fork off another process to do it.
# Python 3 compat:
# - The result of subprocess call outputs are byte strings, meaning we need
# to pass a byte string to endswith.
project_python_files = [filename for filename in get_project_files() if filename.endswith(b".py")]
retcode = subprocess.call(
["flake8", "--ignore=E126,E121", "--max-line-length=99", "--max-complexity=10"] + project_python_files
)
if retcode == 0:
print_success_message("No style errors")
return retcode
## package dependencies
install_requires = ["SQLAlchemy>=1.0, <1.4", "SQLAlchemy-Utils!=0.36.8", "pytz", "tzlocal", "click"]
extras_require = {
"postgres": ["psycopg2"],
"mysql": ["PyMySQL"],
"ledger": ["money", "babel"],
"pandas": ["pandas"],
"qif": ["qifparse"],
"yahoo": ["requests"],
"test": ["pytest", "pytest-cov", "tox"],
"doc": ["sphinx", "sphinxcontrib-napoleon", "sphinxcontrib-programoutput", "sphinx-rtd-theme", "ipython"],
}
# build an 'all' option covering all options
extras_require["all"] = deps_all = sum(
(extras_require[k] for k in ["postgres", "mysql", "pandas", "yahoo", "ledger"]), []
)
# add 'all' for both doc and test
extras_require["test"].extend(deps_all)
extras_require["doc"].extend(deps_all)
setup_dict = dict(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=read("README.rst"),
keywords=["GnuCash", "python", "binding", "interface", "sqlalchemy"],
license="MIT",
platforms="any",
# Find a list of classifiers here:
# <http://pypi.python.org/pypi?%3Aaction=list_classifiers>
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Financial and Insurance Industry",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Office/Business",
"Topic :: Office/Business :: Financial",
"Topic :: Office/Business :: Financial :: Accounting",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=find_packages(exclude=(TESTS_DIRECTORY, DATA_DIRECTORY)),
install_requires=install_requires,
extras_require=extras_require,
# Allow tests to be run with `python setup.py test'.
tests_require=["pytest"] + deps_all,
entry_points={"console_scripts": ["piecash = piecash.scripts.export:cli"]},
cmdclass={"test": PyTest},
test_suite="tests",
zip_safe=False, # don't use eggs
)
def main():
setup(**setup_dict)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6cfb72ed1c7f118da4e8fdb27bbc290e666deaa6 | a560269290749e10466b1a29584f06a2b8385a47 | /Notebooks/py/manvindra/titanic-predict-survival-first-kernel/titanic-predict-survival-first-kernel.py | 7ac300ed6cf1a1d0ad59edfeafea0ade56325486 | [] | no_license | nischalshrestha/automatic_wat_discovery | c71befad1aa358ae876d5494a67b0f4aa1266f23 | 982e700d8e4698a501afffd6c3a2f35346c34f95 | refs/heads/master | 2022-04-07T12:40:24.376871 | 2020-03-15T22:27:39 | 2020-03-15T22:27:39 | 208,379,586 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,093 | py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#This cell contains basic code from Kaggle; the following cells follow the outline and code from Manav Sehgal's notebook (Titanic Data Science Solutions)
# Also took learning with code from https://www.kaggle.com/arthurtok/introduction-to-ensembling-stacking-in-python
# Learning Python using the above notebooks.
#This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# In[ ]:
# visualization libs
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
plt.rc('font', family='sans-serif')
plt.rc('font', serif='Helvetica Neue')
plt.rc('text', usetex='false')
plt.rcParams.update({'font.size': 10})
# In[ ]:
#Import ML Classification libs
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.model_selection import KFold;
import xgboost as xgb
# In[ ]:
#Acquire data
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df] #will be helpful in finding all distinct titles.
# **Exploring dataset**
# In[ ]:
print(train_df.columns.values)
train_df.head()
# In[ ]:
#Data Type of features
train_df.info()
print("------------------")
test_df.info()
# From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship:
# - **Survived**: Outcome of survival (0 = No; 1 = Yes)
# - **Pclass**: Socio-economic class (1 = Upper class; 2 = Middle class; 3 = Lower class)
# - **Name**: Name of passenger
# - **Sex**: Sex of the passenger
# - **Age**: Age of the passenger (Some entries contain `NaN`)
# - **SibSp**: Number of siblings and spouses of the passenger aboard
# - **Parch**: Number of parents and children of the passenger aboard
# - **Ticket**: Ticket number of the passenger
# - **Fare**: Fare paid by the passenger
# - **Cabin** Cabin number of the passenger (Some entries contain `NaN`)
# - **Embarked**: Port of embarkation of the passenger (C = Cherbourg; Q = Queenstown; S = Southampton)
# **Distribution of numerical features from training data**
# * 891 samples in total, representing 40% of the passengers aboard (2,224)
# * 38% of passengers in the train dataset survived; the actual survival rate is 32%
# * More than 75% of passengers didn't travel with a parent or child (Parch 75th percentile = 0)
# * Less than 25% of passengers had siblings or a spouse aboard
# In[ ]:
#Distribution of numerical features
train_df.describe()
# **Distribution of categorical features**
# * Names are unique. The column can be dropped, but we will extract Title from it as a new feature
# * Two values of Sex; most passengers are male
# * Families or friends may share a single ticket, so tickets aren't unique per passenger. The column can be dropped: it has many duplicates and doesn't relate to survival
# * Of the 3 Embarked values, S tops the list
# * Cabins can be shared
# In[ ]:
#Distribution of Categorical data
train_df.describe(include=['O'])
#Cabin has lot of null, drop it
train_df.drop("Cabin",axis=1,inplace=True)
test_df.drop("Cabin",axis=1,inplace=True)
#Drop ticket number also
train_df.drop("Ticket",axis=1,inplace=True)
test_df.drop("Ticket",axis=1,inplace=True)
# **Hypothesis we can think of**
# * Women and children are more likely to have survived
# * Aged passengers less likely to have survived
# * Higher Class passengers more likely to have survived
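# A quick check of these hypotheses (illustrative; uses only columns already present in train_df):
# In[ ]:
print(train_df[['Sex', 'Survived']].groupby('Sex', as_index=False).mean())
print(train_df[['Pclass', 'Survived']].groupby('Pclass', as_index=False).mean())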
# **Visualization**
# In[ ]:
train_df.hist(bins=10,figsize=(10, 10),grid=True);
# In[ ]:
# Passengers with Embarked=S, Pclass=3, or no SibSp form a large share of those who didn't survive
fig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0],ax=axis1)
sns.countplot(x='Survived', hue="Pclass", data=train_df, order=[1,0],ax=axis2)
sns.countplot(x='Survived', hue="SibSp", data=train_df, order=[1,0],ax=axis3)
# In[ ]:
# Remove all NULLS in the Embarked column
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna('S')
# In[ ]:
g = sns.FacetGrid(train_df, col='Survived')
g.map(plt.hist, 'Age', bins=20)
# Most passengers above 60 years of age didn't survive.
# Children's survival rate is higher.
#Consider this for model training
# In[ ]:
#Age is an important variable; fill null values with random draws within +-1 SD of the mean
average_age_train = train_df["Age"].mean()
std_age_train = train_df["Age"].std()
count_nan_age_train = train_df["Age"].isnull().sum()
average_age_test = test_df["Age"].mean()
std_age_test = test_df["Age"].std()
count_nan_age_test = test_df["Age"].isnull().sum()
random_age1=np.random.randint(average_age_train - std_age_train, average_age_train + std_age_train, size = count_nan_age_train)
random_age2=np.random.randint(average_age_test - std_age_test, average_age_test + std_age_test, size = count_nan_age_test)
train_df["Age"][np.isnan(train_df["Age"])] = random_age1
test_df["Age"][np.isnan(test_df["Age"])] = random_age2
train_df['Age'] = train_df['Age'].astype(int)
test_df['Age'] = test_df['Age'].astype(int)
train_df['CategoricalAge'] = pd.cut(train_df['Age'], 5)
# In[ ]:
g = sns.FacetGrid(train_df, col="Sex", row="Survived", margin_titles=True)
g.map(plt.hist, "Age");
#Females have a higher survival rate
# In[ ]:
g = sns.FacetGrid(train_df, hue="Survived", col="Pclass", margin_titles=True)
g=g.map(plt.scatter, "Fare", "Age",edgecolor="w").add_legend();
#Higher class and fare are associated with a better survival rate. Create fare bands (think decision-tree splits)
#Fill nulls
test_df["Fare"].fillna(test_df["Fare"].median(), inplace=True)
# In[ ]:
#Fare null values imputation
for dataset in combine:
dataset['Fare'] = dataset['Fare'].fillna(train_df['Fare'].median())
# In[ ]:
#Fare categories
train_df['CategoricalFare'] = pd.qcut(train_df['Fare'], 4)
# In[ ]:
# from https://www.kaggle.com/arthurtok/introduction-to-ensembling-stacking-in-python
#Title from names
# Define function to extract titles from passenger names
import re
def get_title(name):
title_search = re.search(' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
# Create a new feature Title, containing the titles of passenger names
for dataset in combine:
dataset['Title'] = dataset['Name'].apply(get_title)
# Group all non-common titles into one single grouping "Rare"
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
for dataset in combine:
# Mapping Sex
dataset['Sex'] = dataset['Sex'].map( {'female': 0, 'male': 1} ).astype(int)
# Mapping titles
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Mapping Embarked
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
# Mapping Fare
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
# Mapping Age
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4 ;
# In[ ]:
train_df.head()
# In[ ]:
# Feature selection
drop_elements = ['PassengerId', 'Name']
train_set = train_df.drop(train_df.columns[3], axis = 1)
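# train_df.columns[3] is 'Name' at this point (Ticket and Cabin were already dropped in place)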
train_set = train_set.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test_set = test_df.drop(drop_elements, axis = 1)
# In[ ]:
test_set.head()
# In[ ]:
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_set.astype(float).corr(),linewidths=0.1,vmax=1.0, square=True, cmap=colormap, linecolor='white', annot=True)
# **Ensemble and stacking models**
# [https://www.kaggle.com/arthurtok/introduction-to-ensembling-stacking-in-python/notebook](http://)
# In[ ]:
#training ,test data
train = train_set.drop(["Survived","PassengerId"] , axis=1)
x_train = train.values
y_train = train_set["Survived"].ravel()
x_test = test_set.values
#x_train.shape, y_train.shape, x_test.shape
# In[ ]:
# Some useful parameters which will come in handy later on
ntrain = train_set.shape[0]
ntest = test_set.shape[0]
print(ntrain,ntest)
SEED = 0 # for reproducibility
NFOLDS = 5 # set folds for out-of-fold prediction
kf = KFold(n_splits= NFOLDS, random_state=SEED)
# Class to extend the Sklearn classifier
class SklearnHelper(object):
def __init__(self, clf, seed=0, params=None):
params['random_state'] = seed
self.clf = clf(**params)
def train(self, x_train, y_train):
self.clf.fit(x_train, y_train)
def predict(self, x):
return self.clf.predict(x)
def fit(self,x,y):
return self.clf.fit(x,y)
def feature_importances(self,x,y):
return(self.clf.fit(x,y).feature_importances_)
# In[ ]:
#Out of Fold Prediction
def get_oof(clf, x_train, y_train, x_test):
oof_train = np.zeros((ntrain,))
oof_test = np.zeros((ntest,))
oof_test_skf = np.empty((NFOLDS, ntest))
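    # One row of test-set predictions per fold; the rows are averaged into oof_test below.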
i=0;
for train_index, test_index in kf.split(x_train):
x_tr, x_te = x_train[train_index], x_train[test_index]
y_tr = y_train[train_index]
clf.train(x_tr, y_tr)
oof_train[test_index] = clf.predict(x_te)
oof_test_skf[i, :] = clf.predict(x_test)
i=i+1
oof_test[:] = oof_test_skf.mean(axis=0)
return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)
# In[ ]:
#Setting params for classifiers
# Random Forest params
rf_params = {
'n_jobs' : -1,
'n_estimators': 500,
'warm_start': True,
#'max_features': 0.2,
'max_depth': 6,
'min_samples_leaf': 2,
'max_features' : 'sqrt'
}
# Extra Trees Parameters
et_params = {
'n_jobs': -1,
'n_estimators':500,
#'max_features': 0.5,
'max_depth': 86,
'min_samples_leaf': 2
}
# AdaBoost parameters
ada_params = {
'n_estimators': 500,
'learning_rate' : 0.75
}
# Gradient Boosting parameters
gb_params = {
'n_estimators': 500,
#'max_features': 0.2,
'max_depth': 5,
'min_samples_leaf': 2
}
# Support Vector Classifier parameters
svc_params = {
'kernel' : 'linear',
'C' : 0.025
}
# In[ ]:
#Create object of each classifier
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)
# In[ ]:
#fit
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test) # Extra Trees
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier
print("_____ Complete")
# In[ ]:
#Feature Importance
rf_feature=rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)
# Note: was not able to store these as plain lists via
# rf_features=list(rf_feature), et_features=list(et_feature),
# ada_features=list(ada_feature), gb_features=list(gb_feature),
# so the arrays are used directly below.
# In[ ]:
cols = train.columns.values
# Create a dataframe with features
feature_dataframe = pd.DataFrame( {'features': cols,
'Random Forest Feat': rf_feature,
'Extra Trees Feat': et_feature,
'AdaBoost Feat': ada_feature,
'GB Feat': gb_feature
})
# In[ ]:
feature_dataframe.head()
# In[ ]:
fig, axs = plt.subplots(figsize=(20,10), ncols=2, nrows=2)
g=sns.stripplot(y=feature_dataframe['Random Forest Feat'].values,
x=feature_dataframe['features'].values, data=feature_dataframe
,size=20,ax=axs[0][0]);
g.axes.set_title('Randrom Forest feature importance', fontsize=20,color="r")
g=sns.stripplot(y=feature_dataframe['Extra Trees Feat'].values,
x=feature_dataframe['features'].values, data=feature_dataframe
,size=20,ax=axs[0][1]);
g.axes.set_title('Extra Trees feature importance', fontsize=20,color="r")
g=sns.stripplot(y=feature_dataframe['AdaBoost Feat'].values,
x=feature_dataframe['features'].values, data=feature_dataframe
,size=20,ax=axs[1][0]);
g.axes.set_title('Adaboost feature importance', fontsize=20,color="r")
g=sns.stripplot(y=feature_dataframe['GB Feat'].values,
x=feature_dataframe['features'].values, data=feature_dataframe
,size=20,ax=axs[1][1]);
g.axes.set_title('GB feature importance', fontsize=20,color="r")
# In[ ]:
# Create the new column containing the average of values
feature_dataframe['mean'] = feature_dataframe.mean(axis= 1) # axis = 1 computes the mean row-wise
feature_dataframe.head(3)
# In[ ]:
base_predictions_train = pd.DataFrame( {'RandomForest': rf_oof_train.ravel(),
'ExtraTrees': et_oof_train.ravel(),
'AdaBoost': ada_oof_train.ravel(),
'GradientBoost': gb_oof_train.ravel()
})
base_predictions_train.head()
# In[ ]:
sns.heatmap(base_predictions_train.astype(float).corr().values,
xticklabels=base_predictions_train.columns.values,
yticklabels=base_predictions_train.columns.values)
# In[ ]:
x_train = np.concatenate(( et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)
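# Second-level (meta) features: one column per base learner's out-of-fold predictions.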
# In[ ]:
x_train.shape
# In[ ]:
gbm = xgb.XGBClassifier(
#learning_rate = 0.02,
n_estimators= 2000,
max_depth= 4,
min_child_weight= 2,
#gamma=1,
gamma=0.9,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
n_jobs= -1,
scale_pos_weight=1).fit(x_train, y_train)
predictions = gbm.predict(x_test)
# In[ ]:
# Generate Submission File
Submission = pd.DataFrame({ 'PassengerId': test_df['PassengerId'],
'Survived': predictions })
Submission.to_csv("Submission.csv", index=False)
| [
"[email protected]"
] | |
a57c77dce103123e048352084a78464c0e3ba38a | 72ea5f014282c4d1a4a0c70f9553bde7882962b4 | /mufins-project/mufins/experiments/lang_ent_max_tag/training_process.py | e95cbea01fb8a3198d535016a206add2927f8f83 | [
"MIT"
] | permissive | werywjw/mBERT-FineTuning | 6eb2b93a0f328f4af3cbd1ba5e042baadcb67428 | 7b64d0a91e9aa23eb3ace8c5c19262e2574f24d7 | refs/heads/master | 2023-08-08T01:41:06.716441 | 2021-09-14T20:21:25 | 2021-09-14T20:21:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,118 | py | '''
Training process for the model.
'''
import os
from typing import Iterator, Optional, Mapping, Sequence
import numpy as np
from mufins.common.log.log import Log
from mufins.common.file.csv_file import CsvFile
from mufins.common.dataset.dataset import Dataset
from mufins.common.random.random_number_generator import RandomNumberGenerator
from mufins.common.model.training_process_adversarial import TrainingProcessAdversarial
from mufins.dataprocs.udpos.data_row import UDPOSDataRow
from mufins.dataprocs.udpos.data_spec import UDPOSDataSpec
from mufins.dataprocs.wikipedia.data_row import WikipediaDataRow
from mufins.dataprocs.wikipedia.data_spec import WikipediaDataSpec
from mufins.experiments.lang_ent_max_tag.evaluate import (
eval_label, eval_lang,
LabelEncoderType, LangEncoderType, LabelPredictorType, LangPredictorType
)
#########################################
class ModelTrainingProcess(TrainingProcessAdversarial):
'''
The training process specification for this model.
'''
# pylint: disable=too-few-public-methods
#########################################
def __init__(
self,
rng: RandomNumberGenerator,
batch_size: int,
label_encoder: LabelEncoderType,
lang_encoder: LangEncoderType,
label_predictor: LabelPredictorType,
lang_predictor: LangPredictorType,
dset_label_train: Dataset[UDPOSDataRow],
dset_lang_train: Dataset[WikipediaDataRow],
dset_label_val: Dataset[UDPOSDataRow],
dset_lang_val: Dataset[WikipediaDataRow],
label_spec: UDPOSDataSpec,
lang_spec: WikipediaDataSpec,
training_main_module: bool,
hyperparameter_search_mode: bool,
train_history_path: str,
log: Log,
) -> None:
'''
Constructor.
:param rng: The random number generator to use.
:param batch_size: The maximum number of data items to process at once.
:param label_encoder: A function that encodes label texts into vectors.
:param lang_encoder: A function that encodes language texts into vectors.
:param label_predictor: A function that predicts labels.
:param lang_predictor: A function that predicts languages.
:param dset_label_train: The label training set.
:param dset_lang_train: The language training set.
:param dset_label_val: The label validation set.
:param dset_lang_val: The language validation set.
:param label_spec: The label data specification.
:param lang_spec: The language data specification.
:param training_main_module: Whether the main module is being trained, otherwise the
language module will be trained.
:param hyperparameter_search_mode: Whether to enter into hyperparameter search mode where
minimal output and evaluation is produced.
:param train_history_path: The path to the folder that will contain the training history.
:param log: The log.
'''
super().__init__(training_main_module)
self.rng: RandomNumberGenerator = rng
self.batch_size: int = batch_size
self.label_encoder: LabelEncoderType = label_encoder
self.lang_encoder: LangEncoderType = lang_encoder
self.label_predictor: LabelPredictorType = label_predictor
self.lang_predictor: LangPredictorType = lang_predictor
self.dset_label_train: Dataset[UDPOSDataRow] = dset_label_train
self.dset_lang_train: Dataset[WikipediaDataRow] = dset_lang_train
self.dset_label_val: Dataset[UDPOSDataRow] = dset_label_val
self.dset_lang_val: Dataset[WikipediaDataRow] = dset_lang_val
self.label_spec: UDPOSDataSpec = label_spec
self.lang_spec: WikipediaDataSpec = lang_spec
self.hyperparameter_search_mode: bool = hyperparameter_search_mode
self.train_history_path: str = train_history_path
self.log: Log = log
self.training_main_module: bool = training_main_module
self.post_disc_score_lang_val: float = 0.0
self.lang_val_min_prob_entropy: float = 0.0
self.lang_val_mean_prob_entropy: float = 0.0
self.lang_val_max_prob_entropy: float = 0.0
self.score_label_label_val: float = 0.0
self.score_label_lang_val: float = 0.0
self.score_lang_val: float = 0.0
self.train_history_file = CsvFile(
os.path.join(train_history_path, 'train_history.csv'),
)
if not self.hyperparameter_search_mode:
self.train_history_file.init([
'phase',
'epoch',
'new_best',
'patience_left',
'post_disc_lang_val_macro_f1_score',
'label_label_val_macro_f1_score',
'label_lang_val_macro_f1_score',
'lang_val_macro_f1_score',
'lang_val_min_entropy',
'lang_val_mean_entropy',
'lang_val_max_entropy',
])
#########################################
def _get_minibatches_disc(
self,
epoch_num: int,
minibatch_size: int,
) -> Iterator[Sequence[Mapping[str, np.ndarray]]]:
'''
Get an iterator over all the batches in the discriminator's training set.
These batches will be passed to `batch_fit` in the backend model.
:param epoch_num: The epoch number about to start.
:param minibatch_size: The number of items in a minibatch.
        :return: An iterator of minibatches.
'''
assert self.training_main_module
capped_size = min(self.dset_label_train.size, self.dset_lang_train.size)
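        # zip() over the single iterator yields 1-tuples, matching the
        # Sequence[Mapping] minibatch shape that batch_fit expects.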
return zip(
self.dset_lang_train.get_stochastic_batches(
minibatch_size, self.rng, capped_size=capped_size,
)
)
#########################################
def _on_discriminator_trained(
self,
epoch_num: int,
) -> None:
'''
        Listener for when the discriminator has been trained, before training the rest of the model.
:param epoch_num: The epoch number about to start.
'''
assert self.training_main_module
if not self.hyperparameter_search_mode:
(
lang_score_f1_macro,
_,
_,
_,
) = eval_lang(
lang_spec=self.lang_spec,
dset_lang=self.dset_lang_val,
lang_predictor=self.lang_predictor,
batch_size=self.batch_size,
log=self.log,
)
self.post_disc_score_lang_val = lang_score_f1_macro
#########################################
def _get_minibatches(
# pylint: disable=unused-argument
self,
epoch_num: int,
minibatch_size: int
) -> Iterator[Sequence[Mapping[str, np.ndarray]]]:
'''
Get an iterator over all the batches in the training set.
These batches will be passed to `batch_fit` in the backend model.
:param epoch_num: The epoch number about to start.
:param minibatch_size: The number of items in a minibatch.
:return: The iterator of batches.
'''
if self.training_main_module:
return zip(
self.dset_label_train.get_stochastic_batches(
minibatch_size, self.rng,
),
self.dset_lang_train.get_stochastic_batches(
minibatch_size, self.rng,
capped_size=self.dset_label_train.size,
),
)
return zip(
self.dset_lang_train.get_stochastic_batches(
minibatch_size, self.rng,
)
)
#########################################
def _get_val_score(
# pylint: disable=unused-argument
self,
epoch_num: int,
) -> float:
'''
Get the validation score using the current model (score is to be maximised).
:param epoch_num: The current epoch number.
:return: The score.
'''
if self.training_main_module:
(
label_label_score_f1_macro,
_,
_,
label_lang_score_f1_macro,
) = eval_label(
label_spec=self.label_spec,
lang_spec=self.lang_spec,
dset_label=self.dset_label_val,
label_predictor=self.label_predictor,
lang_predictor=self.lang_predictor,
hyperparameter_search_mode=self.hyperparameter_search_mode,
batch_size=self.batch_size,
log=self.log,
)
self.score_label_label_val = label_label_score_f1_macro
self.score_label_lang_val = label_lang_score_f1_macro
if not self.hyperparameter_search_mode:
(
lang_score_f1_macro,
min_prob_entropy,
mean_prob_entropy,
max_prob_entropy,
) = eval_lang(
lang_spec=self.lang_spec,
dset_lang=self.dset_lang_val,
lang_predictor=self.lang_predictor,
batch_size=self.batch_size,
log=self.log,
)
self.score_lang_val = lang_score_f1_macro
self.lang_val_min_prob_entropy = min_prob_entropy
self.lang_val_mean_prob_entropy = mean_prob_entropy
self.lang_val_max_prob_entropy = max_prob_entropy
if self.training_main_module:
return self.score_label_label_val
return self.score_lang_val
#########################################
def _on_validation_check_end(
# pylint: disable=unused-argument
self,
epoch_num: int,
new_best: bool,
patience_left: Optional[int],
curr_val_score: float,
best_val_score: float,
duration: float,
) -> None:
'''
Listener for when a validation check just finished.
:param epoch_num: The current epoch number.
:param new_best: Whether the validation score obtained was a new best score.
:param patience_left: The amount of patience left after the validation check (can be zero,
which means that training will end now).
:param curr_val_score: The validation score obtained from the current validation check.
:param best_val_score: The best validation score up to now.
:param duration: The number of seconds elapsed since the last validation check or beginning
of training if this was the first validation check.
'''
if not self.hyperparameter_search_mode:
if self.training_main_module:
self.train_history_file.append([
'main',
str(epoch_num),
'yes' if new_best else 'no',
str(patience_left),
'{:.10f}'.format(self.post_disc_score_lang_val),
'{:.10f}'.format(self.score_label_label_val),
'{:.10f}'.format(self.score_label_lang_val),
'{:.10f}'.format(self.score_lang_val),
'{:.10f}'.format(self.lang_val_min_prob_entropy),
'{:.10f}'.format(self.lang_val_mean_prob_entropy),
'{:.10f}'.format(self.lang_val_max_prob_entropy),
])
else:
self.train_history_file.append([
'lang',
str(epoch_num),
'yes' if new_best else 'no',
str(patience_left),
'',
'',
'',
'{:.10f}'.format(self.score_lang_val),
'{:.10f}'.format(self.lang_val_min_prob_entropy),
'{:.10f}'.format(self.lang_val_mean_prob_entropy),
'{:.10f}'.format(self.lang_val_max_prob_entropy),
])
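# ---------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original module). It shows how
# zipping two stochastic batch streams pairs a label minibatch with a language
# minibatch per step, with the larger language stream capped to the labelled
# set's size. `get_stochastic_batches` and its capped_size semantics here are
# assumptions for the example, not the project's real implementation.
def _sketch_stochastic_batches(data, minibatch_size, rng, capped_size=None):
    size = len(data) if capped_size is None else capped_size
    # resample (with replacement only when stretching the data) and shuffle
    idx = rng.choice(len(data), size=size, replace=size > len(data))
    for start in range(0, size, minibatch_size):
        yield data[idx[start:start + minibatch_size]]

def _sketch_paired_epoch():
    import numpy as np
    rng = np.random.RandomState(0)
    labels, lang = np.arange(10), np.arange(100)
    return list(zip(
        _sketch_stochastic_batches(labels, 4, rng),
        _sketch_stochastic_batches(lang, 4, rng, capped_size=len(labels)),
    ))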
58d550ac0a1237bb4221e2f0f80bac28fda11a38 | 8d9318a33afc2c3b5ca8ac99fce0d8544478c94a | /Books/Casandra DB/opscenter-5.1.0/lib/py-unpure/twisted/cred/portal.py | 753b403a49430029b67597bf02961652837e848a | [] | no_license | tushar239/git-large-repo | e30aa7b1894454bf00546312a3fb595f6dad0ed6 | 9ee51112596e5fc3a7ab2ea97a86ec6adc677162 | refs/heads/master | 2021-01-12T13:48:43.280111 | 2016-11-01T22:14:51 | 2016-11-01T22:14:51 | 69,609,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d41d59435554fcf0301c2274fa426aa4626a815369ad9324a386bc1b55e624a4
size 5349
2f28dde2b92df647ed1092a4ab1882c6ecc08c6a | 08c066c8eecf553ff3beb5b7687e36cd8b4c8812 | /tests/test_psf.py | 96c5b89be6f8234447a7a87a173409d1dff54cbd | [
"BSD-2-Clause"
] | permissive | tobias-liaudat/batoid | 668dc0eec627da7cecaabae961a68e3b3b76db41 | e3c8833763597cd3ec73b83a74e8bae1a5cc7cf9 | refs/heads/master | 2022-12-30T07:17:21.964219 | 2020-08-18T22:09:55 | 2020-08-18T22:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,380 | py | import numpy as np
import batoid
from test_helpers import timer
@timer
def test_huygens_psf():
try:
import galsim
except ImportError:
print("Huygens PSF test requires GalSim")
# Could do the integral directly without GalSim?
return
if __name__ == '__main__':
obscurations = [0.0, 0.25, 0.5, 0.75]
else:
obscurations = [0.25]
print("Testing HuygensPSF")
# Just do a single parabolic mirror test
focalLength = 1.5
diam = 0.3
R = 2*focalLength
for obscuration in obscurations:
telescope = batoid.CompoundOptic(
items = [
batoid.Mirror(
batoid.Paraboloid(R),
name="Mirror",
obscuration=batoid.ObscNegation(
batoid.ObscAnnulus(0.5*obscuration*diam, 0.5*diam)
)
),
batoid.Detector(
batoid.Plane(),
name="detector",
coordSys=batoid.CoordSys().shiftGlobal([0,0,focalLength])
)
],
pupilSize=diam,
backDist=10.0,
inMedium=batoid.ConstMedium(1.0)
)
airy_size = 1.22*500e-9/diam * 206265
print()
print("Airy radius: {:4.2f} arcsec".format(airy_size))
# Start with the HuygensPSF
npix = 96
size = 3.0
dsize = size/npix # arcsec
dsize_X = dsize*focalLength/206265 # meters
psf = batoid.huygensPSF(telescope, 0.0, 0.0, 500e-9, nx=npix, dx=dsize_X, dy=dsize_X)
psf.array /= np.max(psf.array)
scale = np.sqrt(np.abs(np.linalg.det(psf.primitiveVectors))) # meters
scale *= 206265/focalLength # arcsec
obj = galsim.Airy(lam=500, diam=diam, obscuration=obscuration)
# Need to shift by half a pixel.
obj = obj.shift(scale/2, scale/2)
im = obj.drawImage(nx=npix, ny=npix, scale=scale, method='no_pixel')
arr = im.array/np.max(im.array)
gs_mom = galsim.hsm.FindAdaptiveMom(im)
psfim = galsim.Image(psf.array)
jt_mom = galsim.hsm.FindAdaptiveMom(psfim)
print("GalSim shape: ", gs_mom.observed_shape)
print("batoid shape: ", jt_mom.observed_shape)
print("GalSim centroid: ", gs_mom.moments_centroid)
print("batoid centroid: ", jt_mom.moments_centroid)
print("GalSim size: ", gs_mom.moments_sigma)
print("batoid size: ", jt_mom.moments_sigma)
print("GalSim rho4: ", gs_mom.moments_rho4)
print("batoid rho4: ", jt_mom.moments_rho4)
np.testing.assert_allclose(gs_mom.observed_shape.g1, jt_mom.observed_shape.g1, rtol=0.0, atol=3e-3)
np.testing.assert_allclose(gs_mom.observed_shape.g2, jt_mom.observed_shape.g2, rtol=0.0, atol=3e-3)
np.testing.assert_allclose(gs_mom.moments_centroid.x, jt_mom.moments_centroid.x, rtol=0.0, atol=1e-9)
np.testing.assert_allclose(gs_mom.moments_centroid.y, jt_mom.moments_centroid.y, rtol=0.0, atol=1e-9)
np.testing.assert_allclose(gs_mom.moments_sigma, jt_mom.moments_sigma, rtol=1e-2) # why not better?!
np.testing.assert_allclose(gs_mom.moments_rho4, jt_mom.moments_rho4, rtol=2e-2)
if __name__ == '__main__':
size = scale*npix
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15, 4))
ax1 = fig.add_subplot(131)
im1 = ax1.imshow(np.log10(arr), extent=np.r_[-1,1,-1,1]*size/2, vmin=-7, vmax=0)
plt.colorbar(im1, ax=ax1, label=r'$\log_{10}$ flux')
ax1.set_title('GalSim')
ax1.set_xlabel("arcsec")
ax1.set_ylabel("arcsec")
sizeX = dsize_X * npix * 1e6 # microns
ax2 = fig.add_subplot(132)
im2 = ax2.imshow(np.log10(psf.array), extent=np.r_[-1,1,-1,1]*sizeX/2, vmin=-7, vmax=0)
plt.colorbar(im2, ax=ax2, label=r'$\log_{10}$ flux')
ax2.set_title('batoid')
ax2.set_xlabel(r"$\mu m$")
ax2.set_ylabel(r"$\mu m$")
ax3 = fig.add_subplot(133)
im3 = ax3.imshow((psf.array-arr)/np.max(arr), vmin=-0.01, vmax=0.01, cmap='seismic')
plt.colorbar(im3, ax=ax3, label="(batoid-GalSim)/max(GalSim)")
ax3.set_title('resid')
ax3.set_xlabel(r"$\mu m$")
ax3.set_ylabel(r"$\mu m$")
fig.tight_layout()
plt.show()
@timer
def test_lsst_psf():
# Just testing that doesn't crash for the moment
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
stampSize = 0.5 # arcsec
nx = 64
focalLength = 1.234*8.36 # meters
if __name__ == '__main__':
thetas = [0.0, 1200.0, 3600.0, 6300.0] # arcsec
else:
thetas = [6300.0]
for theta in thetas:
print(theta/3600.0)
dirCos = batoid.utils.gnomonicToDirCos(0.0, theta/206265)
rays = batoid.circularGrid(10.0, 4.2, 2.55,
dirCos[0], dirCos[1], dirCos[2],
10, 100, 620e-9, 1.0, batoid.Air())
telescope.trace(rays)
rays.trimVignetted()
xs = rays.x - np.mean(rays.x)
ys = rays.y - np.mean(rays.y)
xs *= 206265/focalLength
ys *= 206265/focalLength
# Need to add half-pixel offset
xs += stampSize/nx/2
ys += stampSize/nx/2
dx = stampSize/nx * focalLength/206265 # meters
psf = batoid.huygensPSF(telescope, 0.0, theta/206265, 620e-9, nx=64, dx=dx, dy=dx)
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.imshow(psf.array, extent=np.r_[-1,1,-1,1]*stampSize/2)
ax.scatter(xs, ys, s=5, c='r', alpha=0.5)
ax.set_title("LSST PSF field={:5.2f}".format(theta/3600.0))
ax.set_xlabel("arcsec")
ax.set_ylabel("arcsec")
fig.tight_layout()
plt.show()
@timer
def test_hsc_psf():
# Just testing that doesn't crash for the moment
telescope = batoid.Optic.fromYaml("HSC.yaml")
stampSize = 0.75 # arcsec
nx = 64
focalLength = 15.0 # guess
if __name__ == '__main__':
thetas = [0.0, 1350.0, 2700.0] # arcsec
else:
thetas = [2700.0]
for theta in thetas:
print(theta/3600.0)
dirCos = batoid.utils.gnomonicToDirCos(0.0, theta/206265)
rays = batoid.circularGrid(20.0, 4.1, 0.9,
dirCos[0], dirCos[1], dirCos[2],
10, 100, 620e-9, 1.0, batoid.ConstMedium(1.0))
telescope.trace(rays)
rays.trimVignetted()
xs = rays.x - np.mean(rays.x)
ys = rays.y - np.mean(rays.y)
xs *= 206265/focalLength # meters to arcsec
ys *= 206265/focalLength
# Need to add half-pixel offset
xs += stampSize/nx/2
ys += stampSize/nx/2
dx = stampSize/nx * focalLength/206265 # meters
psf = batoid.huygensPSF(telescope, 0.0, theta/206265, 620e-9, nx=nx, dx=dx, dy=dx)
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.imshow(psf.array, extent=np.r_[-1,1,-1,1]*stampSize/2)
ax.scatter(xs, ys, s=5, c='r', alpha=0.5)
ax.set_title("HSC PSF field={:5.2f}".format(theta/3600.0))
ax.set_xlabel("arcsec")
ax.set_ylabel("arcsec")
fig.tight_layout()
plt.show()
@timer
def test_decam_psf():
# Just testing that doesn't crash for the moment
telescope = batoid.Optic.fromYaml("DECam.yaml")
stampSize = 1.0 # arcsec
nx = 64
focalLength = 10.0 # guess
if __name__ == '__main__':
thetas = [0.0, 1800.0, 3960.0] # arcsec
else:
thetas = [3960.0]
for theta in thetas:
print(theta/3600.0)
dirCos = batoid.utils.gnomonicToDirCos(0.0, theta/206265)
rays = batoid.circularGrid(10.0, 1.95, 0.5,
dirCos[0], dirCos[1], dirCos[2],
10, 100, 620e-9, 1.0, batoid.Air())
telescope.trace(rays)
rays.trimVignetted()
xs = rays.x - np.mean(rays.x)
ys = rays.y - np.mean(rays.y)
xs *= 206265/focalLength # meters to arcsec
ys *= 206265/focalLength
# Need to add half-pixel offset
xs += stampSize/nx/2
ys += stampSize/nx/2
dx = stampSize/nx * focalLength/206265 # meters
psf = batoid.huygensPSF(telescope, 0.0, theta/206265, 620e-9, nx=nx, dx=dx, dy=dx)
if __name__ == '__main__':
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
ax.imshow(psf.array, extent=np.r_[-1,1,-1,1]*stampSize/2)
ax.scatter(xs, ys, s=5, c='r', alpha=0.5)
ax.set_title("DECam PSF field={:5.2f}".format(theta/3600.0))
ax.set_xlabel("arcsec")
ax.set_ylabel("arcsec")
fig.tight_layout()
plt.show()
if __name__ == '__main__':
test_huygens_psf()
test_lsst_psf()
test_hsc_psf()
test_decam_psf()
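# ---------------------------------------------------------------------------
# Supplementary sketch (added; not from the batoid test suite): a plain-numpy
# unweighted second-moment size, handy as a rough cross-check of the adaptive
# (HSM) moments compared above. The Gaussian image is a stand-in for a PSF.
def _sketch_moments_sigma(img):
    y, x = np.indices(img.shape)
    tot = img.sum()
    xbar, ybar = (x * img).sum() / tot, (y * img).sum() / tot
    ixx = ((x - xbar) ** 2 * img).sum() / tot
    iyy = ((y - ybar) ** 2 * img).sum() / tot
    return np.sqrt(0.5 * (ixx + iyy))  # equals sigma for a circular Gaussian

if __name__ == '__main__':
    yy, xx = np.mgrid[-32:32, -32:32]
    print(_sketch_moments_sigma(np.exp(-(xx ** 2 + yy ** 2) / (2 * 3.0 ** 2))))  # ~3.0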
760d089693d1a60a72a09c0a91b878dd7725787c | 409cec0b3346bbb88fdaf1df9adbd38de842f6b6 | /UploadDataToES/test/testCovertStrToDatatime.py | 06098c1dc36750a7f7fe48522406cb6fdaaa567f | [] | no_license | parahaoer/pythoncode | 3276815d9cffbff768128ad83db1e344702f7bd8 | 7cbf7c4e0df7678335e6fbbb48115373c6a8a0df | refs/heads/master | 2020-11-26T06:55:07.685666 | 2020-05-05T02:20:33 | 2020-05-05T02:20:33 | 228,995,890 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | '''
You need to subtract the smaller time from the larger one to get the difference in seconds.
If you subtract the larger time from the smaller one, the result is 86399.
'''
import datetime
str_p = "2019-05-14T23:16:11.010Z"
str_q = "2019-05-14T23:16:11.011Z"
str_r = "2019-05-14T23:16:09.819Z"
dateTime_p = datetime.datetime.strptime(str_r,'%Y-%m-%dT%H:%M:%S.%fZ')  # note: parses str_r; str_p is left unused
dateTime_q = datetime.datetime.strptime(str_q,'%Y-%m-%dT%H:%M:%S.%fZ')
if dateTime_p > dateTime_q:
print((dateTime_p - dateTime_q).seconds)
else:
print((dateTime_q - dateTime_p).seconds)
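# ---------------------------------------------------------------------------
# Added note (standard library behaviour): timedelta.seconds is the
# non-negative "seconds within the day" component, which is why the
# smaller-minus-larger order gives 86399. total_seconds() is signed and
# avoids the ordering pitfall entirely.
diff = dateTime_q - dateTime_p
print(diff.total_seconds())        # 1.192 for the values above
print(abs(diff).total_seconds())   # magnitude, whichever operand is larger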
9d5335f025da0b78549226cfe60197b70b82deec | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_81/295.py | 320b05162c6c66e77f1bf427cf3bcdffd862d6cf | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,758 | py | #
# Google Code Jam 2011
# Round 1B: A. RPI
# submission by EnTerr
#
'''
Input
2
3
.10
0.1
10.
4
.11.
0.00
01.1
.10.
Output
Case #1:
0.5
0.5
0.5
Case #2:
0.645833333333
0.368055555556
0.604166666667
0.395833333333
'''
import sys
try:
    import psyco  # Python 2-only JIT; optional
    psyco.full()
except ImportError:
    pass
inf = open(sys.argv[1])
def input(): return inf.readline().strip()
def avg(scores):
num = 0.
denum = 0.
#print scores
for v in scores:
num += v[0]
denum += v[1]
return num / denum
def rpi(games):
global guest
global home
global P
guest = {}
home = {}
P = {}
for x,y,_ in games:
guest[x] = []
guest[y] = []
home[x] = []
home[y] = []
for x,y,win in games:
f = 14 - 8 * win
guest[x].append( (win*f, f, y) )
guest[y].append( (f - win*f, f, x) )
home[x].append( (win, 1, y) )
home[y].append( (1-win, 1, x) )
for t in home:
P[t] = avg([ (avg([(x,y) for x,y,z in home[o] if z!=t]), 2)
for i,j,o in home[t]
])
#print 'home', home
#print 'guest', guest
for t in home:
home[t] = avg( [(P[o],2) for x,y,o in home[t]])
home[t] += avg( guest[t] )/4
home[t] += P[t]
return home
for caseNo in range(1, int(input())+1):
    print(caseNo, file=sys.stderr)
    print('Case #%d:' % caseNo)
sco = []
for i in range(int(input())):
s = input()
for j in range(len(s)):
if s[j] in '01':
sco.append( [i,j, int(s[j])] )
r = rpi(sco)
for i in range(len(r)):
        print(r[i])
448549db9aca16d831fdea6643397e4b1a98a416 | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv5/v6_production/2016/SKIM10/HMVAR10_Full_SBI/FilterMelaReweights.py | 43c5460e94e2d470c6b9a5fb095fd1194f74e2d1 | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 5,858 | py | import ROOT
from math import sqrt
from LatinoAnalysis.Tools.commonTools import *
def GetMinMaxCut(filelist,model,mode,nsigma=2):
#Root.gROOT.SetBatch(True)
Sum=0
Sum2=0
Nentry=0
print "===="+model+mode+"====="
print filelist[0]
for f in filelist:
if '###' in f:
f=f.replace("###","")
myfile=ROOT.TFile.Open(f,"read")
mytree=myfile.Get("Events")
mytree.Draw(model+mode)
htemp=ROOT.gPad.GetPrimitive("htemp")
#print 'MEAN=',htemp.GetMean()
#print 'DEV =',htemp.GetStdDev()
#print "Integrals=", htemp.Integral()
#print "GetEntries=",htemp.GetEntries()
Sum+=htemp.GetMean()*htemp.GetEntries()
Sum2+=(htemp.GetStdDev()*htemp.GetStdDev()+htemp.GetMean()*htemp.GetMean())*htemp.GetEntries()
Nentry+=htemp.GetEntries()
myfile.Close()
#print "Total MEAN=",Sum/Nentry
#print "Total DEV=",sqrt((Sum2/Nentry)-(Sum/Nentry)*(Sum/Nentry))
#print "GetEntries=",Nentry
Mean=Sum/Nentry
dev=sqrt((Sum2/Nentry)-(Sum/Nentry)*(Sum/Nentry))
return Mean-nsigma*dev, Mean+nsigma*dev
##--get cuts for multiple modes
def GetMinMaxCuts(filelist,model,modes=['','_I','_B','_H','_I_HB'],nsigma=2):
ROOT.gROOT.SetBatch(True)
mydict={}
for mode in modes:
mydict[mode]={}
mydict[mode]['Sum']=0
mydict[mode]['Sum2']=0
mydict[mode]['Nentry']=0
mydict[mode]['Mean']=0
mydict[mode]['dev']=0
mydict[mode]['min']=0
mydict[mode]['max']=0
mydict['Nveto']=0
mydict['Npass']=0
mydict['cut']='1'
#Sum=0
#Sum2=0
#Nentry=0
    print(filelist[0])
for f in filelist:
if '###' in f:
f=f.replace("###","")
myfile=ROOT.TFile.Open(f,"read")
mytree=myfile.Get("Events")
for mode in modes:
#print "===="+model+mode+"====="
mytree.Draw(model+mode)
#print "after draw"
htemp=ROOT.gPad.GetPrimitive("htemp")
#print 'MEAN=',htemp.GetMean()
#print 'DEV =',htemp.GetStdDev()
#print "Integrals=", htemp.Integral()
#print "GetEntries=",htemp.GetEntries()
mydict[mode]['Sum']+=htemp.GetMean()*htemp.GetEntries()
mydict[mode]['Sum2']+=(htemp.GetStdDev()*htemp.GetStdDev()+htemp.GetMean()*htemp.GetMean())*htemp.GetEntries()
mydict[mode]['Nentry']+=htemp.GetEntries()
myfile.Close()
#print "Total MEAN=",Sum/Nentry
#print "Total DEV=",sqrt((Sum2/Nentry)-(Sum/Nentry)*(Sum/Nentry))
#print "GetEntries=",Nentry
for mode in modes:
mydict[mode]['Mean']=mydict[mode]['Sum']/mydict[mode]['Nentry']
mydict[mode]['dev']=sqrt((mydict[mode]['Sum2']/mydict[mode]['Nentry'])-(mydict[mode]['Sum']/mydict[mode]['Nentry'])*(mydict[mode]['Sum']/mydict[mode]['Nentry']))
mydict[mode]['min']=mydict[mode]['Mean']-nsigma*mydict[mode]['dev']
mydict[mode]['max']=mydict[mode]['Mean']+nsigma*mydict[mode]['dev']
for mode in modes:
mydict['cut']+='&&('+model+mode+'<='+str(mydict[mode]['max'])+')&&('+model+mode+'>='+str(mydict[mode]['min'])+')'
mydict['cut']="("+mydict['cut']+')'
for f in filelist:
if '###' in f:
f=f.replace("###","")
myfile=ROOT.TFile.Open(f,"read")
mytree=myfile.Get("Events")
cut='0'
for mode in modes:
#print "===="+model+mode+"====="
#mytree.Draw(model+mode, model+mode+'>'+str(mydict[mode]['max'])+'||'+model+mode+'<'+str(mydict[mode]['min']))
cut+='||('+model+mode+'>'+str(mydict[mode]['max'])+')||('+model+mode+'<'+str(mydict[mode]['min'])+')'
#print cut
mytree.Draw('abs('+model+mode+')',cut)
#print "after draw"
htemp=ROOT.gPad.GetPrimitive("htemp")
#print "veto's mean=",htemp.GetMean()
try:
#print "veto's mean=",htemp.GetMean()
mydict['Nveto']+=htemp.GetEntries()
except AttributeError:
#print "veto's mean=0"
mydict['Nveto']+=0
cut='1'
for mode in modes:
cut+='&&('+model+mode+'<='+str(mydict[mode]['max'])+')&&('+model+mode+'>='+str(mydict[mode]['min'])+')'
mytree.Draw('1',cut)
htemp=ROOT.gPad.GetPrimitive("htemp")
try:
mydict['Npass']+=htemp.GetEntries()
except AttributeError:
mydict['Npass']+=0
return mydict
#def GetStatement(filelist,model,modes=['','_I','_B','_H','_HB'],nsigma=2):
# dict_min_max=GetMinMaxCuts(filelist,model,modes,nsigma)
if __name__ == '__main__':
    import os  # os.uname() below needs this; the wildcard import above may not provide it
    SITE=os.uname()[1]
xrootdPath = 'root://cms-xrdr.private.lo:2094'
treeBaseDir = "/xrootd/store/user/jhchoi/Latino/HWWNano/"
CAMPAIGN='Fall2017_102X_nAODv5_Full2017v6'
STEP="MCl1loose2017v6__MCCorr2017v6__HMSemilepSKIMv6_10__BWReweight__HMFull_jhchoi10_nom__HMLHEAna"
directory=treeBaseDir+CAMPAIGN+'/'+STEP
import sys
sys.path.insert(0, "MassPoints")
from List_MX import *
from List_MX_VBF import *
model="cprime1.0BRnew0.0"
for MX in List_MX:
        print(MX)
MELA_cuts=GetMinMaxCuts(getSampleFiles(directory,'GluGluHToWWToLNuQQ_M'+str(MX),False,'nanoLatino_'),model)
cut=MELA_cuts['cut']
        print(cut)
'''
MX=4000
mode='_I'
thisdict=GetMinMaxCuts(getSampleFiles(directory,'GluGluHToWWToLNuQQ_M'+str(MX),False,'nanoLatino_'),'MSSModel',['','_I','_B','_I_HB','_H'])
vlist=['Mean','Nentry','dev','min','max']
#,'Nveto','Npass']
print mode
for v in vlist:
print v,thisdict[mode][v]
print 'Nveto=',thisdict['Nveto']
print 'Npass=',thisdict['Npass']
print 'cut=',thisdict['cut']
'''
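# ---------------------------------------------------------------------------
# Illustrative sketch (added; not in the original script): the 'cut' string
# assembled by GetMinMaxCuts is a conjunction of per-mode mean +/- n*sigma
# windows. This pure-Python version shows just the string construction; the
# window values below are invented for the example.
def _sketch_build_cut(model, windows):
    cut = '1'
    for mode, (lo, hi) in windows.items():
        cut += '&&(' + model + mode + '<=' + str(hi) + ')&&(' + model + mode + '>=' + str(lo) + ')'
    return '(' + cut + ')'
# e.g. _sketch_build_cut('cprime1.0BRnew0.0', {'': (-1.0, 1.0), '_I': (-0.5, 0.5)})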
0d4b7775ac08f9045cc81bf06fee576d22c7ee4d | 3fc4cac282465350d9b2983527140fc735a0d273 | /venv/bin/list_instances | e632e65514a792427c742a744945098751afb9b4 | [] | no_license | Orderlee/SBA_STUDY | 2cfeea54d4a9cbfd0c425e1de56324afcc547b81 | 4642546e7546f896fc8b06e9daba25d27c29e154 | refs/heads/master | 2022-12-25T01:08:05.168970 | 2020-09-27T14:57:23 | 2020-09-27T14:57:23 | 299,050,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | #!/Users/YoungWoo/PycharmProjects/ClassProject/venv/bin/python
import sys
from operator import attrgetter
from optparse import OptionParser
import boto
from boto.ec2 import regions
HEADERS = {
'ID': {'get': attrgetter('id'), 'length':15},
'Zone': {'get': attrgetter('placement'), 'length':15},
'Groups': {'get': attrgetter('groups'), 'length':30},
'Hostname': {'get': attrgetter('public_dns_name'), 'length':50},
'PrivateHostname': {'get': attrgetter('private_dns_name'), 'length':50},
'State': {'get': attrgetter('state'), 'length':15},
'Image': {'get': attrgetter('image_id'), 'length':15},
'Type': {'get': attrgetter('instance_type'), 'length':15},
'IP': {'get': attrgetter('ip_address'), 'length':16},
'PrivateIP': {'get': attrgetter('private_ip_address'), 'length':16},
'Key': {'get': attrgetter('key_name'), 'length':25},
'T:': {'length': 30},
}
def get_column(name, instance=None):
if name.startswith('T:'):
_, tag = name.split(':', 1)
return instance.tags.get(tag, '')
return HEADERS[name]['get'](instance)
def main():
parser = OptionParser()
parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
parser.add_option("-t", "--tab", help="Tab delimited, skip header - useful in shell scripts", action="store_true", default=False)
parser.add_option("-f", "--filter", help="Filter option sent to DescribeInstances API call, format is key1=value1,key2=value2,...", default=None)
(options, args) = parser.parse_args()
# Connect the region
for r in regions():
if r.name == options.region:
region = r
break
else:
print("Region %s not found." % options.region)
sys.exit(1)
ec2 = boto.connect_ec2(region=region)
# Read headers
if options.headers:
headers = tuple(options.headers.split(','))
else:
headers = ("ID", 'Zone', "Groups", "Hostname")
# Create format string
format_string = ""
for h in headers:
if h.startswith('T:'):
format_string += "%%-%ds" % HEADERS['T:']['length']
else:
format_string += "%%-%ds" % HEADERS[h]['length']
# Parse filters (if any)
if options.filter:
filters = dict([entry.split('=') for entry in options.filter.split(',')])
else:
filters = {}
# List and print
if not options.tab:
print(format_string % headers)
print("-" * len(format_string % headers))
for r in ec2.get_all_reservations(filters=filters):
groups = [g.name for g in r.groups]
for i in r.instances:
i.groups = ','.join(groups)
if options.tab:
print("\t".join(tuple(get_column(h, i) for h in headers)))
else:
print(format_string % tuple(get_column(h, i) for h in headers))
if __name__ == "__main__":
main()
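# ---------------------------------------------------------------------------
# Example invocations (added for reference; flags exactly as defined by the
# OptionParser above, output shapes illustrative only):
#   list_instances -r us-west-2 -H ID,Zone,State,T:Name
#   list_instances --filter instance-state-name=running,tag:Env=prod --tab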
97c3b121be36f09c69f51581c20a82d254464bc0 | 751d837b8a4445877bb2f0d1e97ce41cd39ce1bd | /codegolf/szekeress-sequence.py | 9da4b302795111ca6d2dc7d80a80fcc565d91a39 | [
"MIT"
] | permissive | qeedquan/challenges | d55146f784a3619caa4541ac6f2b670b0a3dd8ba | 56823e77cf502bdea68cce0e1221f5add3d64d6a | refs/heads/master | 2023-08-11T20:35:09.726571 | 2023-08-11T13:02:43 | 2023-08-11T13:02:43 | 115,886,967 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | #!/usr/bin/env python
"""
Definition
a(1)=1
a(2)=2
a(n) is smallest number k>a(n−1) which avoids any 3-term arithmetic progression in a(1),a(2),...,a(n−1),k.
In other words, a(n) is the smallest number k>a(n−1) such that there does not exist x,y where 0<x<y<n and a(y)−a(x)=k−a(y).
Worked out example
For n=5:
We have a(1),a(2),a(3),a(4)=1,2,4,5
If a(5)=6, then 2,4,6 form an arithmetic progression.
If a(5)=7, then 1,4,7 form an arithmetic progression.
If a(5)=8, then 2,5,8 form an arithmetic progression.
If a(5)=9, then 1,5,9 form an arithmetic progression.
If a(5)=10, no arithmetic progression can be found.
Therefore a(5)=10.
Task
Given n, output a(n).
Specs
n will be a positive integer.
You can use 0-indexed instead of 1-indexed, in which case n can be 0. Please state it in your answer if you are using 0-indexed.
Scoring
Since we are trying to avoid 3-term arithmetic progression, and 3 is a small number, your code should be as small (i.e. short) as possible, in terms of byte-count.
Testcases
The testcases are 1-indexed. You can use 0-indexed, but please specify it in your answer if you do so.
1 1
2 2
3 4
4 5
5 10
6 11
7 13
8 14
9 28
10 29
11 31
12 32
13 37
14 38
15 40
16 41
17 82
18 83
19 85
20 86
10000 1679657
References
WolframMathWorld
OEIS A003278
"""
# https://oeis.org/A003278
def szekeres(n):
return int(format(n-1, 'b'), 3) + 1
def main():
tab = [[1, 1], [2, 2], [3, 4], [4, 5], [5, 10], [6, 11], [7, 13], [8, 14], [9, 28], [10, 29], [11, 31], [12, 32], [13, 37], [14, 38], [15, 40], [16, 41], [17, 82], [18, 83], [19, 85], [20, 86], [10000, 1679657]]
for v in tab:
assert(szekeres(v[0]) == v[1])
main()
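# ---------------------------------------------------------------------------
# Added cross-check (not part of the original solution): build the sequence
# greedily straight from the definition -- accept the smallest k that closes
# no 3-term arithmetic progression -- and compare with the closed form.
def szekeres_bruteforce(n):
    seq = []
    k = 0
    while len(seq) < n:
        k += 1
        if any(2 * y - x == k for i, x in enumerate(seq) for y in seq[i + 1:]):
            continue  # k would complete the progression (x, y, k)
        seq.append(k)
    return seq[-1]

assert all(szekeres_bruteforce(n) == szekeres(n) for n in range(1, 21))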
422351a1ca13fdc6b9b94e29d06c8ce38f33fc91 | 202bfd3bc23b4aa4e7477c9ba3685517dbad592b | /geo/geo_map.py | 5b621b60069c22a66140d968fe2dca1a2c72fb1a | [] | no_license | yan7509/python_get_cityDistance | 3b493cd9c53ba3543c6febe5d95096b5175ef6f4 | ee51faa2f091352e41511749893bd6c4a74d5596 | refs/heads/master | 2022-03-27T06:59:37.566494 | 2019-12-24T14:42:15 | 2019-12-24T14:42:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#__author__: stray_camel
import urllib.request # for sending HTTP requests
from urllib import parse # for URL encoding
import json,logging,jsonpath,sys,os
absPath = os.path.abspath(__file__) # absolute path of this code, always inside some .py file
temPath = os.path.dirname(absPath) # go up one level to the directory containing the file
temPath = os.path.dirname(temPath) # go up one more level to the parent directory
sys.path.append(temPath)
from public import config
class Geo_mapInterface(object):
def __init__(self,
key:"高德地图apikey值" = '3e2235273dd2c0ca2421071fbb96def4'):
self.addList = [('湖北省武汉市江岸区', 1, '114.278760,30.592688'), ('湖北省武汉市江汉区', 1, '114.270871,30.601430'), ('湖北省武汉市乔口区', 1, '114.214920,30.582202')] #创建一个列表存放地址数据
# self.dict = dict(set(self.addList))#创建一个字典用于存放地址经纬度数据
self.key = key
def get_coordinatesViaaddress(self,
address:"地点名"
) -> "返回str类型的经纬度":
url='https://restapi.amap.com/v3/geocode/geo?address='+address+'&output=json&key='+self.key
        # URL-encode the characters that need escaping
newUrl = parse.quote(url, safe="/:=&?#+!$,;'@()*[]")
coor = json.loads(urllib.request.urlopen(newUrl).read())['geocodes'][0]['location']
logging.basicConfig(stream=open(config.src_path + '/log/syserror.log', encoding="utf-8", mode="a"), level = logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
logger.info("查询{}的经纬度:{}!".format(address,coor))
# print()
return coor
def get_disViaCoordinates(self,
addList:"一个列表存放地址数据"
) -> "{'origin':[],'destination':[],'distance':[],'route':[]}":
dict_route = {'origin':[],'destination':[],'distance':[],'route':[]}
for m in range(len(addList)):
for n in range(m,len(addList)):
if m!=n:
                    print('get_test',m,n)
                    # get the address names and coordinates from addList
origin = addList[m][2]
destination = addList[n][2]
                    url2='https://restapi.amap.com/v3/direction/driving?origin='+origin+'&destination='+destination+'&extensions=all&output=json&key='+self.key
                    # URL-encode
newUrl2 = parse.quote(url2, safe="/:=&?#+!$,;'@()*[]")
                    # send the request
                    response2 = urllib.request.urlopen(newUrl2)
                    # read the response
                    data2 = response2.read()
                    # parse the JSON payload
                    jsonData2 = json.loads(data2)
                    # extract every "road" value in the JSON
                    # print(jsonData2)
                    road=jsonpath.jsonpath(jsonData2,'$..road')
                    # extract the distance from the JSON
                    distance = jsonData2['route']['paths'][0]['distance']
                    # append the data to dict_route
dict_route.setdefault("origin",[]).append(addList[m][0])
dict_route.setdefault("destination",[]).append(addList[n][0])
dict_route.setdefault("distance",[]).append(distance)
dict_route.setdefault("route",[]).append(road)
return dict_route
if __name__ == "__main__":
test = Geo_mapInterface()
print(test.key)
print(test.get_disViaCoordinates(test.addList))
# print(test.get_coordinatesViaaddress('湖北省武汉市洪山区'))
# dict_route={"出发地":[],"目的地":[],"距离":[],"线路":[]}
    # k = len(addList) # number of elements in the name list
# print(dict_route)
# print(dict(([1,2], i )for i in range(2)))
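# ---------------------------------------------------------------------------
# Added sketch (not in the original module): straight-line great-circle
# distance in metres between two 'lng,lat' strings as returned by the
# geocoding API above -- a lower bound on the driving distance.
def _sketch_haversine_m(coor1, coor2):
    from math import radians, sin, cos, asin
    lng1, lat1 = (radians(float(v)) for v in coor1.split(','))
    lng2, lat2 = (radians(float(v)) for v in coor2.split(','))
    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lng2 - lng1) / 2) ** 2
    return 2 * 6371000 * asin(h ** 0.5)
# e.g. _sketch_haversine_m('114.278760,30.592688', '114.270871,30.601430') ~= 1.2 km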
8d4383b237d5d13addbf71367b1be45c842b7824 | 41c824ce983c2a400ca6484b365d6f7ee077c8a3 | /tools/dlrobot/robot/adhoc/__init__.py | 3c6b21535cd0135398559ace340e20caf1bf521f | [] | no_license | TI-Russia/smart_parser | 2c84c12906e308229037c2bc75299a4b227e795e | 7428904975b2cf88cb329b8da11017cdebe8fa03 | refs/heads/master | 2022-12-10T06:40:43.852974 | 2022-08-05T11:06:18 | 2022-08-05T11:06:18 | 129,266,366 | 16 | 4 | null | 2022-12-08T11:18:29 | 2018-04-12T14:44:23 | HTML | UTF-8 | Python | false | false | 558 | py | from .tomsk import tomsk_gov_ru
from .gossov_tatarstan_ru import gossov_tatarstan_ru
from .tgl_ru import tgl_ru
def process_adhoc(project):
domain_name = project.web_site_snapshots[0].get_site_url()
if domain_name == "tomsk.gov.ru":
tomsk_gov_ru(project.web_site_snapshots[0])
return True
elif domain_name == "gossov.tatarstan.ru":
gossov_tatarstan_ru(project.web_site_snapshots[0])
return True
elif domain_name == "tgl.ru":
tgl_ru(project.web_site_snapshots[0])
return True
return False
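# ---------------------------------------------------------------------------
# Possible refactor sketch (added; not in the repository): the same dispatch
# as a lookup table, which stays flat as more ad-hoc sites are added.
_ADHOC_HANDLERS = {
    "tomsk.gov.ru": tomsk_gov_ru,
    "gossov.tatarstan.ru": gossov_tatarstan_ru,
    "tgl.ru": tgl_ru,
}

def process_adhoc_v2(project):
    handler = _ADHOC_HANDLERS.get(project.web_site_snapshots[0].get_site_url())
    if handler is None:
        return False
    handler(project.web_site_snapshots[0])
    return True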
58cc6821d162a7e585a143940bbcfeae923b3ce5 | 8d35b8aa63f3cae4e885e3c081f41235d2a8f61f | /discord/ext/dl/extractor/outsidetv.py | a51556678bbcace6900d5b695026f00117f88f0c | [
"MIT"
] | permissive | alexyy802/Texus | 1255f4e54c8d3cc067f0d30daff1cf24932ea0c9 | c282a836f43dfd588d89d5c13f432896aebb540f | refs/heads/master | 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 | MIT | 2021-11-19T09:22:22 | 2021-11-18T10:43:11 | Python | UTF-8 | Python | false | false | 1,062 | py | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class OutsideTVIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?outsidetv\.com/(?:[^/]+/)*?play/[a-zA-Z0-9]{8}/\d+/\d+/(?P<id>[a-zA-Z0-9]{8})"
_TESTS = [
{
"url": "http://www.outsidetv.com/category/snow/play/ZjQYboH6/1/10/Hdg0jukV/4",
"md5": "192d968fedc10b2f70ec31865ffba0da",
"info_dict": {
"id": "Hdg0jukV",
"ext": "mp4",
"title": "Home - Jackson Ep 1 | Arbor Snowboards",
"description": "md5:41a12e94f3db3ca253b04bb1e8d8f4cd",
"upload_date": "20181225",
"timestamp": 1545742800,
},
},
{
"url": "http://www.outsidetv.com/home/play/ZjQYboH6/1/10/Hdg0jukV/4",
"only_matching": True,
},
]
def _real_extract(self, url):
jw_media_id = self._match_id(url)
return self.url_result("jwplatform:" + jw_media_id, "JWPlatform", jw_media_id)
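# ---------------------------------------------------------------------------
# Added illustration (standalone, since the extractor itself needs its
# package context): what the _VALID_URL pattern captures for the first test
# URL above.
def _sketch_match_id():
    import re
    pattern = (r"https?://(?:www\.)?outsidetv\.com/(?:[^/]+/)*?"
               r"play/[a-zA-Z0-9]{8}/\d+/\d+/(?P<id>[a-zA-Z0-9]{8})")
    url = "http://www.outsidetv.com/category/snow/play/ZjQYboH6/1/10/Hdg0jukV/4"
    return re.match(pattern, url).group("id")  # -> "Hdg0jukV"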
89597cd37a0ca9703e1d6b442608c30d8f3ba662 | 6c95a0cebf78c27b93d15c17631d208d3d4715b1 | /plotApCorr1runMany.py | e4706f6b5032da6193f965dc1c4365fc7b832ef7 | [] | no_license | standardgalactic/hipe-code | 3a7f9d4a7c4877564de1f6468a9783d1de3e90c5 | 600bb3fce7cdf2bc1e6120b3cfe4ffc1bb154d55 | refs/heads/master | 2022-02-11T02:16:35.558135 | 2014-02-26T10:26:13 | 2014-02-26T10:26:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,468 | py | import sys
from herschel.ia.gui.plot.renderer.PComponentEngine import HAlign
from herschel.ia.gui.plot.renderer.PComponentEngine import VAlign
from herschel.ia.numeric.toolbox.basic import Histogram
from herschel.ia.numeric.toolbox.basic import BinCentres
## Plot aperture photometry stuff
pc=False
pf=False
#radiusArcsec=[[22.,30.,45.,50.,60.,100.],\
# [30.,45.,55.,65.,80.,100.],\
# [42.,50.,60.,90.,100.,120.]]
##Max source radii that are valid
#radMax=[[5,6,6,6],[3,6,6,6],[3,4,6,6]]
radiusArcsec=[[22.],[30.],[42.]]
radMax=[[1],[1],[1]]
#innerArcsec=[60.,100.,200.,300.]
#outerArcsec=[90.,150.,250.,350.]
##Min BR radii that are valid
#bgMin=[[0,0,0,0,0,1],[0,0,0,1,1,1],[0,0,0,1,1,2]]
innerArcsec=[60.]
outerArcsec=[90.]
bgMin=[[0],[0],[0]]
nRad=len(radiusArcsec[0])
nBg=len(innerArcsec)
bgStr=[]
for bg in range(nBg):
bgStr.append('Background: %i"-%i"'%(int(innerArcsec[bg]),int(outerArcsec[bg])))
#print bgStr
dirPath = '/home/astrog82/spxcen/Herschel/Calibration/RelGains/Obs/'
dirPlot = '/home/astrog82/spxcen/Herschel/Calibration/RelGains/Plots/'
##Get list of ObsIDs
files=[\
'10_Hygeia_obs_All.dat',\
'173_Ino_obs_All.dat',\
'19_Fortuna_obs_All.dat',\
'1_Ceres_obs_All.dat',\
'20_Massalia_obs_All.dat',\
'21_Lutetia_obs_All.dat',\
'253_Mathilde_obs_All.dat',\
'29_Amphitrite_obs_All.dat',\
'2_Pallas_obs_All.dat',\
'372_Palma_obs_All.dat',\
'37_Fides_obs_All.dat',\
'3_Juno_obs_All.dat',\
'40_Harmonia_obs_All.dat',\
'47_Aglaja_obs_All.dat',\
'4_Vesta_obs_All.dat',\
'511_Davida_obs_All.dat',\
'52_Europa_obs_All.dat',\
'54_Alexandra_obs_All.dat',\
'65_Cybele_obs_All.dat',\
'6_Hebe_obs_All.dat',\
'7_Iris_obs_All.dat',\
'88_Thisbe_obs_All.dat',\
'8_Flora_obs_All.dat',\
'93_Minerva_obs_All.dat',\
]
nFiles=len(files)
tarNames=[]
nObsFiles=Int1d(nFiles)
obsIds=[]
for f in range(nFiles):
fileObsList=dirPath+files[f]
bandStr=["PSW","PMW","PLW"]
fObs=open(fileObsList,'r')
obsidsIn=fObs.readlines()
fObs.close()
obsids=[]
raSrcs=[]
decSrcs=[]
nobIn=len(obsidsIn)
nameFound=False
for ob in range(nobIn):
line=obsidsIn[ob]
if line.find('#') < 0:
obsids.append(int(line.split(',')[0],16))
raSrcs.append(float(line.split(',')[1]))
decSrcs.append(float(line.split(',')[2]))
else:
if line.find('Name:') > 0:
tarNames.append(line.split(':')[1][1:-1])
nameFound=True
if nameFound==False:
        tarNames.append('UNKNOWN')
obsIds.append(Int1d(obsids))
#raSrcs=Double1d(raSrcs)
#decSrcs=Double1d(decSrcs)
nob=len(obsids)
nObsFiles[f]=nob
tarNames[f]='%s (%d)'%(tarNames[f],nObsFiles[f])
#File columns
#0:srcRad
#1:bgInner
#2:bgOuter
#3:TimelineFlux
#4:TimelineErr
#5:MapFlux
#6:MapErr
#7:MapRgFlux
#8:MapRgErr
#9:SrcCorr
#10:ApCorr
#11:SrcFlux
#12:SrcErr
#13:ApFlux
#14:ApErr
#15:ApRgFlux
#16:ApRgErr
#######################################
## Read in all obsids
#######################################
fluxMeanFiles=Double5d(nFiles,nRad,nBg,3,6)
fluxRelFiles=Double5d(nFiles,nRad,nBg,3,6)
fluxErrFiles=Double5d(nFiles,nRad,nBg,3,6)
fluxMeanAll=Double4d(nRad,nBg,3,6)
fluxErrAll=Double4d(nRad,nBg,3,6)
##[object, srcRad, bgRad , band , [timeline|map|mapRg|src|ap|apRg]
#CorrFact=Double4d(nRad,nBg,3,2)
###[srcRad, bgRad , band , [src|app]
maxFlux=0.
minFlux=999.
for f in range(nFiles):
nObs=nObsFiles[f]
obsList=obsIds[f]
fluxArrOb=Double5d(nObs,nRad,nBg,3,6)
fluxErrOb=Double5d(nObs,nRad,nBg,3,6)
##[object, srcRad, bgRad , band , [timeline|map|mapRg|src|ap|apRg]
for b in range(3):
band=bandStr[b]
for ob in range(nObs):
myObsid=obsList[ob]
#print '%s Band'%band
fileDat='%s0x%x_SrcFlux1run_%s.dat'%(dirPath,myObsid,band)
fDat=open(fileDat,'r')
lines=fDat.readlines()
nLine=len(lines)
for l in range(nLine):
if lines[l].find('#') < 0:
line=lines[l].split(',')
iRad=radiusArcsec[b].index(float(line[0]))
iBg=innerArcsec.index(float(line[1]))
fluxArrOb[ob,iRad,iBg,b,0] = float(line[3]) #timeline
fluxArrOb[ob,iRad,iBg,b,1] = float(line[5]) #map
fluxArrOb[ob,iRad,iBg,b,2] = float(line[7]) #mapRg
fluxArrOb[ob,iRad,iBg,b,3] = float(line[11]) #srcFlux
fluxArrOb[ob,iRad,iBg,b,4] = float(line[13]) #apFlux
fluxArrOb[ob,iRad,iBg,b,5] = float(line[15]) #apRgFlux
if min(fluxArrOb[ob,iRad,iBg,b,:]) < minFlux:
minFlux=min(fluxArrOb[ob,iRad,iBg,b,:])
if max(fluxArrOb[ob,iRad,iBg,b,:]) > maxFlux:
maxFlux=max(fluxArrOb[ob,iRad,iBg,b,:])
fDat.close()
print iRad
print iBg
for r in [iRad]:
for bg in [iBg]:
for p in range(6):
fluxMeanFiles[f,r,bg,b,p]=MEAN(fluxArrOb[:,r,bg,b,p])
print 'File',f,'param',p,'MEAN:',MEAN(fluxArrOb[:,r,bg,b,p])
fluxErrFiles[f,r,bg,b,p]=STDDEV(fluxArrOb[:,r,bg,b,p])
print 'File',f,'param',p,'STDDEV:',STDDEV(fluxArrOb[:,r,bg,b,p])
fluxRelFiles[f,r,bg,b,p]=fluxMeanFiles[f,r,bg,b,p]/fluxMeanFiles[f,r,bg,b,0]
for b in range(3):
for r in [iRad]:
for bg in [iBg]:
for p in range(6):
fluxMeanAll[r,bg,b,p]=MEAN(fluxMeanFiles[:,r,bg,b,p])
fluxErrAll[r,bg,b,p]=STDDEV(fluxMeanFiles[:,r,bg,b,p])
rFiles=Float1d(range(nFiles)) + 0.5
#pFluxPSW=PlotXY()
#lTime=LayerXY(rFiles,fluxMeanFiles[:,0,0,0,0])
#lTime.line=Style.NONE
#lTime.name='Timeline'
#lTime.symbol=Style.DCROSS
#pFluxPSW.addLayer(lTime)
#
#lMap=LayerXY(rFiles,fluxMeanFiles[:,0,0,0,1])
#lMap.line=Style.NONE
#lMap.name='Map'
#lMap.symbol=Style.VCROSS
#pFluxPSW.addLayer(lMap)
#
#lAp=LayerXY(rFiles,fluxMeanFiles[:,0,0,0,5])
#lAp.line=Style.NONE
#lAp.name='ApCorr'
#lAp.symbol=Style.DIAMOND
#pFluxPSW.addLayer(lAp)
#
#pFluxPSW.legend.visible=1
pErrPSW=PlotXY()
lTimeRel=LayerXY(rFiles,fluxRelFiles[:,0,0,0,0])
lTimeRel.line=Style.NONE
lTimeRel.name='Timeline'
lTimeRel.symbol=Style.DCROSS
lTimeRel.setErrorY(Double1d(fluxErrFiles[:,0,0,0,0]),Double1d(fluxErrFiles[:,0,0,0,0]))
lTimeRel.setColor(java.awt.Color.black)
lTimeRel.style.stroke=2.
pErrPSW.addLayer(lTimeRel)
#lTimeRelUp=LayerXY(rFiles-0.2,fluxRelFiles[:,0,0,0,0]+fluxErrFiles[:,0,0,0,0])
#lTimeRelUp.setColor(java.awt.Color.black)
#pErrPSW.addLayer(lTimeRelUp)
#lTimeRelDown=LayerXY(rFiles-0.2,fluxRelFiles[:,0,0,0,0]-fluxErrFiles[:,0,0,0,0])
#lTimeRelDown.setColor(java.awt.Color.black)
#pErrPSW.addLayer(lTimeRelDown)
lMapRel=LayerXY(rFiles-0.2,fluxRelFiles[:,0,0,0,1])
lMapRel.line=Style.NONE
lMapRel.name='Map'
lMapRel.symbol=Style.VCROSS
#lMapRel.setErrorY(Double1d(fluxErrFiles[:,0,0,0,1]),Double1d(fluxErrFiles[:,0,0,0,1]))
lMapRel.setColor(java.awt.Color.red)
lMapRel.style.stroke=2.
pErrPSW.addLayer(lMapRel)
lApRel=LayerXY(rFiles+0.2,fluxRelFiles[:,0,0,0,5])
lApRel.line=Style.NONE
lApRel.name='Corrected'
lApRel.symbol=Style.DIAMOND
lApRel.setErrorY(Double1d(fluxErrFiles[:,0,0,0,5]),Double1d(fluxErrFiles[:,0,0,0,5]))
lApRel.setColor(java.awt.Color.blue)
lApRel.style.stroke=2.
pErrPSW.addLayer(lApRel)
#lApRelUp=LayerXY(rFiles-0.2,fluxRelFiles[:,0,0,0,5]+fluxErrFiles[:,0,0,0,5])
#lApRelUp.setColor(java.awt.Color.blue)
#pErrPSW.addLayer(lApRelUp)
#lApRelDown=LayerXY(rFiles-0.2,fluxRelFiles[:,0,0,0,5]-fluxErrFiles[:,0,0,0,5])
#lApRelDown.setColor(java.awt.Color.blue)
#pErrPSW.addLayer(lApRelDown)
pErrPSW.yaxis.setRange(0,2)
pErrPSW.yaxis.setTitleText('Flux relative to timeline flux density')
pErrPSW.legend.visible=1
pErrPSW.xaxis.setRange(0,nFiles)
pErrPSW.xaxis.tick.setFixedValues(rFiles)
pErrPSW.xaxis.tick.label.setFixedStrings(tarNames)
pErrPSW.xaxis.tick.label.setOrientation(1)
pErrPSW.xaxis.setTitleText('Object (# Obs)')
"[email protected]"
] | |
98427273cabbe338ee47cfa39fde2c69ec153cca | 47bd686ab04d8f6daba2097875dfefdba967d598 | /04_30days_codechallenge/23_bst_level_order_traversal.py | 37aa0c48d678a98f9e83a481962778e43456ddd1 | [] | no_license | EmjayAhn/DailyAlgorithm | 9633638c7cb7064baf26126cbabafd658fec3ca8 | acda1917fa1a290fe740e1bccb237d83b00d1ea4 | refs/heads/master | 2023-02-16T17:04:35.245512 | 2023-02-08T16:29:51 | 2023-02-08T16:29:51 | 165,942,743 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # https://www.hackerrank.com/challenges/30-binary-trees/tutorial
import sys
class Node:
def __init__(self,data):
self.right=self.left=None
self.data = data
class Solution:
def insert(self,root,data):
        if root is None:
return Node(data)
else:
if data <= root.data:
cur = self.insert(root.left, data)
root.left = cur
else:
cur = self.insert(root.right, data)
root.right = cur
return root
def levelOrder(self, root):
#Write your code here
queue = [root]
while queue:
current = queue.pop(0)
print(str(current.data) + ' ', end="")
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
T=int(input())
myTree=Solution()
root=None
for i in range(T):
data=int(input())
root=myTree.insert(root,data)
myTree.levelOrder(root)
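# ---------------------------------------------------------------------------
# Added note: list.pop(0) above is O(n) per dequeue; collections.deque gives
# the same breadth-first order with O(1) pops (returns the string instead of
# printing, otherwise equivalent).
from collections import deque

def level_order_deque(root):
    queue = deque([root] if root else [])
    out = []
    while queue:
        node = queue.popleft()
        out.append(str(node.data))
        if node.left:
            queue.append(node.left)
        if node.right:
            queue.append(node.right)
    return ' '.join(out)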
5e5c6c67ed787564c6fcddba3284c101b5e1b8b2 | 2b255d07420114c40f6c8aeb0fb25228588282ed | /sitecomber/apps/config/management/commands/init_site_config.py | dcea81441583eabd41af10a706aab0aa348125a0 | [] | no_license | ninapavlich/sitecomber | b48b3ee055dac1f419c98f08fffe5e9dc44bd6e3 | 6f34e5bb96ca4c119f98ee90c88881e8ca3f6f06 | refs/heads/master | 2022-12-11T20:55:07.215804 | 2020-03-13T07:58:28 | 2020-03-13T07:58:28 | 197,045,165 | 1 | 0 | null | 2022-12-08T01:47:52 | 2019-07-15T17:42:31 | JavaScript | UTF-8 | Python | false | false | 1,900 | py | import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.validators import URLValidator
from django.core.exceptions import ValidationError
from sitecomber.apps.config.models import Site, SiteDomain, SiteTestSetting
from sitecomber.apps.shared.utils import get_test_choices
logger = logging.getLogger('django')
class Command(BaseCommand):
"""
Example Usage:
python manage.py init_site_config
"""
help = 'Initialize Site Config'
def handle(self, *args, **options):
starting_url = settings.STARTING_URL
if not starting_url:
starting_url = input("Please enter starting URL: ")
validate = URLValidator()
try:
validate(starting_url)
except ValidationError:
print("Please enter a fully qualified URL")
return
user = get_user_model().objects.all().first()
if not user:
print("Please create an admin user first using the command: python manage.py createsuperuser")
return
site, site_created = Site.objects.get_or_create(
owner=user,
title=starting_url
)
print("Initializing site settings for %s" % (starting_url))
site_domain, site_domain_created = SiteDomain.objects.get_or_create(
site=site,
url=starting_url
)
test_choices = get_test_choices()
for test_choice in test_choices:
site_test_setting, site_test_setting_created = SiteTestSetting.objects.get_or_create(
site=site,
test=test_choice[0]
)
print("-- enabled %s" % test_choice[0])
print("Site settings initialized for %s. You may configure it at /admin/config/site/%s/change/" % (site, site.pk))
eaff999078aae74c52b25f7fa8882f8922f8eacf | 1cc59b6493c7579a81ddec76ea96621b934f510a | /venv/bin/jupyter-serverextension | d1da24e349738bd3a6f0e676f039527d9fb937a1 | [] | no_license | miraclemaster11/Corona_Cases | 4097c3f317c66ca6c9f31ed3bfd18af678859b6a | 804c8165dbd8f631e1b30281fb03ae133976de1c | refs/heads/master | 2022-12-10T17:40:03.304722 | 2020-09-05T05:53:51 | 2020-09-05T05:53:51 | 293,014,030 | 0 | 0 | null | 2020-09-05T06:03:48 | 2020-09-05T06:03:47 | null | UTF-8 | Python | false | false | 263 | #!/home/aman/PycharmProjects/Corona_Cases/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from notebook.serverextensions import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
0bbbdfcd49815d6b3e6e70f9b0dea685968749ae | c130a094e04eb448201ca2ab8ed4fe56cd1d80bc | /samples/server/petstore/python-fastapi/src/openapi_server/models/tag.py | 3e0b45cd787b066a6baa3cf8dd7fe960eb9dfa00 | [
"Apache-2.0"
] | permissive | janweinschenker/openapi-generator | 83fb57f9a5a94e548e9353cbf289f4b4172a724e | 2d927a738b1758c2213464e10985ee5124a091c6 | refs/heads/master | 2022-02-01T17:22:05.604745 | 2022-01-19T10:43:39 | 2022-01-19T10:43:39 | 221,860,152 | 1 | 0 | Apache-2.0 | 2019-11-15T06:36:25 | 2019-11-15T06:36:24 | null | UTF-8 | Python | false | false | 644 | py | # coding: utf-8
from __future__ import annotations
from datetime import date, datetime # noqa: F401
import re # noqa: F401
from typing import Any, Dict, List, Optional # noqa: F401
from pydantic import AnyUrl, BaseModel, EmailStr, validator # noqa: F401
class Tag(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
Tag - a model defined in OpenAPI
id: The id of this Tag [Optional].
name: The name of this Tag [Optional].
"""
id: Optional[int] = None
name: Optional[str] = None
Tag.update_forward_refs()
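# ---------------------------------------------------------------------------
# Added usage sketch (not generated code): constructing and serialising the
# model with the pydantic v1 API this generated server targets.
if __name__ == "__main__":
    tag = Tag(id=1, name="pet")
    print(tag.dict())                     # {'id': 1, 'name': 'pet'}
    print(Tag.parse_obj({"id": 2}).name)  # None -- both fields are optional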
60603cff2e3bd2aa1e8487a109e236043c5e2594 | 6be072ac0c8180d4ddf0b2ba5a71b5c0ca47d66c | /Week9/Login_Example/manage.py | ee8155a6cbfed11bc92ea34aa58c903e2fb779f5 | [] | no_license | ZachLevein/Di_Bootcamp | af7be115cddc5b2910d9ac69111b34a5de4a9d18 | 25d7e160bdfc294714d4f1069f66b50173abf873 | refs/heads/main | 2023-07-17T01:25:31.243129 | 2021-08-31T16:07:48 | 2021-08-31T16:14:43 | 401,768,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'login_example.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
f4f2545d2a5100feb14734611ceb4e35981e5375 | a8a1bc1c2223c4931aa451b7930cc838dae44b80 | /eagle_theme_ecomm/models/ks_mega_menu.py | 1e9e8c1a6965c6f2f244a275a01a651418d85fc0 | [] | no_license | ShaheenHossain/eagle_theme_ecomm | 195db37d37896e1c8f9151cfd0b47402d55aa19f | 32a290d5386782b74cf670cef13802ac7c8e4318 | refs/heads/master | 2022-09-18T22:05:03.741941 | 2020-06-01T21:05:33 | 2020-06-01T21:05:33 | 268,632,294 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,119 | py | # # -*- coding: utf-8 -*-
from eagle import models, fields, api
class Ks_WebsiteMegaMenu(models.Model):
_inherit = "website.menu"
_description = "This model will add mega menu option to the website menu"
ks_is_mega_menu = fields.Boolean("Is Dynamic Mega Menu")
ks_display_img = fields.Boolean("Display Category Images")
ks_content_id = fields.Char("Content")
ks_categories = fields.Many2many("product.public.category", string="Categories to display",
relation='website_menu_product_public_categories',
domain=[('parent_id', '=', False)])
ks_banner_img = fields.Binary("Banner Image")
ks_is_background_image = fields.Boolean("Set Background Image")
ks_background_image = fields.Binary("Background Image")
ks_item_selection_method = fields.Selection(
[('products', 'All Products'), ('brands', 'Brands'), ('Cats', 'Categories')],
string='Selection Type', default='products')
ks_products_ids = fields.Many2many("product.template", relation="website_menu_product_templates_ids",string="Products",domain=[('website_published','=',True)])
ks_product_brand_ids = fields.Many2many('ks_product_manager.ks_brand',
relation='website_menu_ks_product_manager_ks_brands', string='Brands')
ks_is_category_tab_layout = fields.Boolean(string='Set Tab Layout For Categories')
# Slider Configuration
ks_is_slider = fields.Selection(
[('image', 'Image'), ('slider', 'Slider')],
string='Show Image/Slider')
ks_item_slider_selection_method = fields.Selection(
[('products', 'All Products'), ('brands', 'Brands'), ('cats', 'Categories')],
string='Selection Type', default='products')
ks_slider_title = fields.Char("Title")
ks_slider_position = fields.Selection(
[('left', 'Left'), ('right', 'Right')],
string='Position', default='left')
ks_slider_Speed = fields.Integer("Slide Speed", default=300)
ks_slider_products_ids = fields.Many2many("product.template", string="Products",
domain=[('website_published', '=', True)])
ks_slider_product_brand_ids = fields.Many2many('ks_product_manager.ks_brand',
relation='website_menu_ks_product_manager_ks_brand', string='Brands')
ks_slider_categories = fields.Many2many("product.public.category", relation='website_menu_product_public_category',
string="Categories")
ks_side_image = fields.Binary("Image")
ks_side_image_description = fields.Char("Short Description")
ks_side_image_link = fields.Char("Link")
# Advance Configuration
ks_is_font_color_set = fields.Boolean("Set Font Color")
ks_font_color_main_cat = fields.Char(default="#000000", string="Main Heading Color")
ks_font_color_sub_cat = fields.Char(default="#000000", string="Sub Heading Color")
ks_set_number_of_columns = fields.Selection(
[('two', '2'), ('three', '3'), ('four', '4'), ('five', '5'), ('six ', '6')],
string='Set Number of Column', default='four')
# ToDo Remove this field when create a new database
ks_font_color = fields.Char()
ks_is_categories_slider = fields.Char()
# @api.multi
def ks_get_image_url(self):
for rec in self:
if rec.ks_is_background_image and rec.ks_background_image:
return '/web/image/website.menu/' + str(rec.id) + '/ks_background_image/'
else:
return ""
# @api.multi
def ks_get_side_image_url(self):
for rec in self:
if rec.ks_side_image and rec.ks_side_image:
return '/web/image/website.menu/' + str(rec.id) + '/ks_side_image/'
else:
return ""
# @api.multi
def get_current_website(self):
for rec in self:
return rec.website.id
else:
return ""
# class ks_website_top_menu(models.Model):
# _inherit = 'website'
#
# @api.model
# def copy_menu_hierarchy(self, top_menu):
# print("dfshhs")
# print("Fdfdsf")
# pass
# c = super(ks_website_top_menu, self).copy_menu_hierarchy(top_menu)
# print("dfshhs")
# # def copy_menu(menu, t_menu):
# # new_menu = menu.copy({
# # 'parent_id': t_menu.id,
# # 'website_id': self.id,
# # })
# # for submenu in menu.child_id:
# # copy_menu(submenu, new_menu)
# #
# # for website in self:
# # new_top_menu = top_menu.copy({
# # 'name': _('Top Menu for Website %s') % website.id,
# # 'website_id': website.id,
# # })
# # li = self.env.ref()
# # for submenu in top_menu.child_id:
# # copy_menu(submenu, new_top_menu)
#
# @api.multi
# def write(self, values):
# a = super(ks_website_top_menu, self).write(values)
# return a
c3d5cf62488778f4d8d76cb907368279058fb85c | 15788d9ce41189864b228494a66c7409afe3b90c | /ice_addresses/importers/csv.py | 8a66fb306f1a349acf791c64a451bf7dd6c266ac | [] | no_license | StefanKjartansson/django-icelandic-addresses | 99133e378acce66825ad8a63f1dfd9c3640a4bd3 | d602f288c031d60e8404e0b97a96602accc1024d | refs/heads/master | 2020-05-30T13:20:55.583308 | 2014-12-23T14:14:06 | 2014-12-23T14:14:06 | 12,543,305 | 0 | 1 | null | 2015-03-28T22:25:38 | 2013-09-02T15:33:55 | Python | UTF-8 | Python | false | false | 2,545 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import unicode_literals, print_function, absolute_import
import codecs
import csv
import os
import sys
from ..models import PostCode, Street, Address
DATA_ROOT = os.path.join(
os.path.abspath(os.path.dirname(__file__)), os.pardir, 'data')
PY3 = (sys.version_info[0] > 2)
def csv_unireader(f, encoding="utf-8"):
if PY3:
f = codecs.open(f, encoding=encoding)
r = csv.reader(f, delimiter='|', quotechar='"')
else:
r = csv.reader(
codecs.iterencode(codecs.iterdecode(open(f), encoding), 'utf-8'),
delimiter=b'|', quotechar=b'"')
for row in r:
if PY3:
yield row
else:
yield [e.decode("utf-8") for e in row]
def import_csv(filename=None):
uniques = set()
if not filename:
filename = os.path.join(DATA_ROOT, 'Stadfangaskra_20131028.dsv')
for fields in csv_unireader(filename, encoding='iso-8859-1'):
if fields[0] == 'HNITNUM':
continue
try:
postcode = int(fields[7])
except ValueError:
postcode = 0
try:
house_number = int(fields[10])
except ValueError:
house_number = 0
uniques.add((
postcode,
fields[8].strip(),
fields[9].strip(),
house_number,
fields[11].strip(),
float(fields[-2].replace(',', '.')),
float(fields[-1].replace(',', '.')),
))
uniques = sorted(uniques)
def get_insert_method(model):
if model.objects.count() > 0:
return model.objects.get_or_create
def _wrap(*args, **kwargs):
return model.objects.create(*args, **kwargs), True
return _wrap
codes = {}
_m = get_insert_method(PostCode)
for c in set((i[0] for i in uniques)):
pc, _ = _m(id=c)
codes[c] = pc
streets = {}
_m = get_insert_method(Street)
for key in set((i[0:3] for i in uniques)):
pc, name1, name2 = key
s, _ = _m(
postcode=codes[pc],
name_nominative=name1,
name_genitive=name2)
streets[key] = s
_m = get_insert_method(Address)
for code, name1, name2, house_number, house_chars, lat, lng in uniques:
_m(street=streets[(code, name1, name2)],
house_number=house_number,
house_characters=house_chars,
lat=lat,
lon=lng)
return len(uniques)
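# ---------------------------------------------------------------------------
# Added usage sketch: the importer expects a configured Django context, e.g.
# a shell or management command (paths and call site assumed):
#   from ice_addresses.importers.csv import import_csv
#   import_csv()                          # bundled Stadfangaskra snapshot
#   import_csv('/tmp/Stadfangaskra.dsv')  # explicit file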
458aec1ce5b13d09ab2c61afc987cbc21b0ee1ef | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0347-Top-K-Frequent-Elements/soln-1.py | 58ddccc226cee1a0915ab31e3cfff1c5815c70b1 | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 253 | py | class Solution:
def topKFrequent(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
        import collections, heapq  # provided implicitly by the LeetCode runtime; imported here so the file runs standalone
        counts = collections.Counter(nums)
        return heapq.nlargest(k, counts, key=counts.get)
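# ---------------------------------------------------------------------------
# Added note: Counter + heapq.nlargest runs in O(n log k). Classic example
# (assumed input): Solution().topKFrequent([1, 1, 1, 2, 2, 3], 2) -> [1, 2]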
"[email protected]"
] |