| Column | Type | Range / Values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, contains nulls (⌀) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, contains nulls (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, contains nulls (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |

Each row below lists these fields in order, separated by `|`, with the raw file text carried in the `content` field.
218811171578585d9acec414683ae88e25e5ede6 | a66460a46611483dfbdc94c7996893f427e60d97 | /ansible/my_env/lib/python2.7/site-packages/ansible/utils/module_docs_fragments/azure_tags.py | ff8579fde490d0ca15e0b0c7577ca688d53f5209 | [
"MIT"
]
| permissive | otus-devops-2019-02/yyashkin_infra | 06b57807dde26f94f501828c07503d6bf1d70816 | 0cd0c003884155ac922e3e301305ac202de7028c | refs/heads/master | 2020-04-29T02:42:22.056724 | 2019-05-15T16:24:35 | 2019-05-15T16:24:35 | 175,780,718 | 0 | 0 | MIT | 2019-05-15T16:24:36 | 2019-03-15T08:37:35 | HCL | UTF-8 | Python | false | false | 1,420 | py | # Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
# Azure doc fragment
DOCUMENTATION = '''
options:
tags:
description:
- >
Dictionary of string:string pairs to assign as metadata to the object.
Metadata tags on the object will be updated with any provided values. To remove tags set append_tags option to false.
append_tags:
description:
- Use to control if tags field is canonical or just appends to existing tags.
When canonical, any tags not found in the tags parameter will be removed from the object's metadata.
type: bool
default: 'yes'
'''
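# Illustrative playbook usage (assumption; shown for any azure_rm_* module that
# includes this doc fragment, not part of the original file):
#
#   tags:
#     env: production
#   append_tags: no   # treat `tags` as canonical and remove any other tags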
| [
"[email protected]"
]
| |
e6bb8d0ea125800830cc4c6c06f82f5d3bfcf228 | 5a319a47587653dab9472eab4055144bcfd25967 | /src/opendr/perception/face_recognition/algorithm/backbone/model_mobilenet.py | 2b552ba355b2561135316393c4e933e363248854 | [
"Apache-2.0"
]
| permissive | passalis/demos | 612a7e07ba125d9815e110ff483132e162759dd7 | d8aeb045ee1832418fa232bc1c73783d72d10cf7 | refs/heads/main | 2023-07-19T03:25:05.269333 | 2021-09-21T19:59:42 | 2021-09-21T19:59:42 | 397,585,032 | 1 | 0 | Apache-2.0 | 2021-09-21T19:58:51 | 2021-08-18T12:02:00 | Python | UTF-8 | Python | false | false | 6,419 | py | from torch import nn
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
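# Worked example (illustrative, not part of the original file):
# _make_divisible(37, 8) returns 40 -- the nearest multiple of 8 -- because
# rounding down to 32 would lose more than 10% of the input value.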
class ConvBNReLU(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
padding = (kernel_size - 1) // 2
super(ConvBNReLU, self).__init__(
nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
nn.BatchNorm2d(out_planes),
nn.ReLU6(inplace=True)
)
class DepthwiseSeparableConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, padding, bias=False):
super(DepthwiseSeparableConv, self).__init__()
self.depthwise = nn.Conv2d(in_planes, in_planes, kernel_size=kernel_size, padding=padding, groups=in_planes,
bias=bias)
self.pointwise = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=bias)
self.bn1 = nn.BatchNorm2d(in_planes)
self.bn2 = nn.BatchNorm2d(out_planes)
self.relu = nn.ReLU()
def forward(self, x):
x = self.depthwise(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pointwise(x)
x = self.bn2(x)
x = self.relu(x)
return x
class GDConv(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, padding, bias=False):
super(GDConv, self).__init__()
self.depthwise = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, padding=padding, groups=in_planes,
bias=bias)
self.bn = nn.BatchNorm2d(in_planes)
def forward(self, x):
x = self.depthwise(x)
x = self.bn(x)
return x
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = int(round(inp * expand_ratio))
self.use_res_connect = self.stride == 1 and inp == oup
layers = []
if expand_ratio != 1:
# pw
layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
layers.extend([
# dw
ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileFaceNet(nn.Module):
def __init__(self, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):
"""
MobileNet V2 main class
Args:
num_classes (int): Number of classes
width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
inverted_residual_setting: Network structure
round_nearest (int): Round the number of channels in each layer to be a multiple of this number
Set to 1 to turn off rounding
"""
super(MobileFaceNet, self).__init__()
block = InvertedResidual
input_channel = 64
last_channel = 512
if inverted_residual_setting is None:
inverted_residual_setting = [
# t, c, n, s
[2, 64, 5, 2],
[4, 128, 1, 2],
[2, 128, 6, 1],
[4, 128, 1, 2],
[2, 128, 2, 1],
]
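            # Each row above: t = expansion ratio, c = output channels,
            # n = number of block repeats, s = stride of the first repeat
            # (the MobileNetV2 convention for inverted residual settings).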
# Only check the first element, assuming user knows t,c,n,s are required
if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
raise ValueError("inverted_residual_setting should be non-empty "
"or a 4-element list, got {}".format(inverted_residual_setting))
# Building first layer
# input_channel = _make_divisible(input_channel * width_mult, round_nearest)
self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
self.conv1 = ConvBNReLU(3, input_channel, stride=2)
self.dw_conv = DepthwiseSeparableConv(in_planes=64, out_planes=64, kernel_size=3, padding=1)
features = list()
# Building inverted residual blocks
for t, c, n, s in inverted_residual_setting:
output_channel = _make_divisible(c * width_mult, round_nearest)
for i in range(n):
stride = s if i == 0 else 1
features.append(block(input_channel, output_channel, stride, expand_ratio=t))
input_channel = output_channel
# Building last several layers
self.conv2 = ConvBNReLU(input_channel, self.last_channel, kernel_size=1)
self.gdconv = GDConv(in_planes=512, out_planes=512, kernel_size=7, padding=0)
self.conv3 = nn.Conv2d(512, 128, kernel_size=1)
self.bn = nn.BatchNorm2d(128)
# Make it nn.Sequential
self.features = nn.Sequential(*features)
# Weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
x = self.conv1(x)
x = self.dw_conv(x)
x = self.features(x)
x = self.conv2(x)
x = self.gdconv(x)
x = self.conv3(x)
x = self.bn(x)
x = x.view(x.size(0), -1)
return x
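# Minimal smoke test (a sketch, assuming 112x112 aligned face crops as NCHW input):
#   net = MobileFaceNet()
#   emb = net(torch.randn(1, 3, 112, 112))  # -> embedding of shape (1, 128)
# A 112x112 input is downsampled 16x to the 7x7 map the GDConv kernel expects.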
| [
"[email protected]"
]
| |
06c35de26a5846395b4534c99e2ccfe287ad949d | cec68acfc0187b7d92fb7d6e5107058e3f8269ea | /GUI/GUI.py | 478f934901894f957075743f6d6cdfe1d7fa20ea | []
| no_license | vektorelpython/Python8 | 441575224100a687467c4934f7c741aa0c4bd087 | d135fbf1444d56a0da38c42fd2e8feda48646f49 | refs/heads/master | 2022-01-18T12:17:40.387422 | 2019-09-07T13:47:55 | 2019-09-07T13:47:55 | 205,534,765 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class App(QWidget):
def __init__(self):
super().__init__()
        self.title = 'PyQt5 example window'
self.left = 50
self.top = 50
self.width = 640
self.height = 480
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
| [
"Kurs"
]
| Kurs |
3bfa4d51d40d0d78e7436d184f839ab558a13f1b | 4bb0faf7b0a05b3d487ff386783adf742f26df86 | /run.py | aa3f35368664d6bc7bb5e3e1fc0855b016a2a113 | []
| no_license | AIXME/ins-scraping | f7ba1c5180489e4a497906d06b3bdcb57f05c14b | 250761c436fc0fe033c7c1493f8e0aa2335c1409 | refs/heads/master | 2021-01-04T15:56:37.476985 | 2019-07-11T08:24:34 | 2019-07-11T08:24:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from core.info import InsInfo
if __name__ == "__main__":
    username = input('Enter your Instagram account: ')
    password = input('Enter your password: ')
    target = input('Enter the user to scrape: ')
    ins = InsInfo(username,password,nickname=target)
    ins.login().followers().combo().following().export_all().combo().post_images().save()
    input('Scraping finished; press any key to exit: ')
ins.driver.close() | [
"[email protected]"
]
| |
aca277c2fb030993e12a39e51af2e0754de6ca1d | 5ba34cad2a933adfed6b5df5b1229e48038596d4 | /fabfile.py | 719cea8e51a7f90de04799ef68dbea18f7f5b9aa | [
"MIT"
]
| permissive | Firmicety/fomalhaut-panel | bececa59cd42edd8793440a652d206b250591cb9 | 3e662db65a7ca654f75a19e38cb0931be21f92e9 | refs/heads/master | 2020-06-06T07:52:27.211654 | 2019-06-20T11:38:39 | 2019-06-20T11:38:39 | 192,683,216 | 0 | 0 | MIT | 2019-06-19T07:39:07 | 2019-06-19T07:39:07 | null | UTF-8 | Python | true | false | 3,882 | py | # -*- coding: utf-8 -*-
# Created by restran on 2016/7/27
from __future__ import unicode_literals, absolute_import
from fabric.api import *
from datetime import datetime
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Login user and host names:
env.user = 'root'
# env.password = 'password'
# With multiple hosts, fabric deploys to each of them in turn
env.hosts = ['192.168.14.101']
TAR_FILE_NAME = 'fomalhaut_dashboard_deploy.tar.gz'
def pack():
"""
    Define a pack task: build a tar archive
:return:
"""
local('rm -rf dist')
local('fis3 release -d dist')
tar_files = ['*.py', 'static/*', 'templates/*', 'common/*',
'fomalhaut/*', 'dashboard/*', 'accounts/*', 'requirements.txt']
exclude_files = ['fabfile.py', 'deploy/*', '*.tar.gz', '.DS_Store', '*/.DS_Store', '__pycache__/*', '*.log']
exclude_files = ['--exclude=\'%s\'' % t for t in exclude_files]
local('rm -f %s' % TAR_FILE_NAME)
with lcd('dist'):
local('tar -czvf %s %s %s' %
(TAR_FILE_NAME, ' '.join(exclude_files), ' '.join(tar_files)))
    print('Created package file in the current directory: %s' % TAR_FILE_NAME)
def backup():
now = datetime.now().strftime('%Y%m%d_%H%M%S')
backup_file = '/home/backup/fomalhaut_dashboard_%s.tar.gz' % now
    # Create the folder if it does not exist
run('mkdir -p /home/backup')
exclude_files = ['*.log', '*.pyc']
exclude_files = ['--exclude=\'%s\'' % t for t in exclude_files]
run('tar -czvf %s %s /home/python/fomalhaut_dashboard' % (backup_file, ' '.join(exclude_files)))
    print('Created backup file: %s' % backup_file)
def deploy():
"""
    Define a deploy task
:return:
"""
    # Pack first
    pack()
    # Back up the version currently on the server
    backup()
    # Temporary file on the remote server
    remote_tmp_tar = '/tmp/%s' % TAR_FILE_NAME
    run('rm -f %s' % remote_tmp_tar)
    # Upload the tar file to the remote server
    put('dist/%s' % TAR_FILE_NAME, remote_tmp_tar)
    # Extract
    remote_dist_dir = '/home/python/fomalhaut_dashboard'
    # Create the folder if it does not exist
run('mkdir -p %s' % remote_dist_dir)
name = 'fomalhaut_dashboard'
with cd(remote_dist_dir):
        print('Extracting files to directory: %s' % remote_dist_dir)
        run('tar -xzvf %s' % remote_tmp_tar)
        print('Installing dependencies from requirements.txt')
run('pip install -r requirements.txt')
remote_settings_file = '%s/fomalhaut/settings.py' % remote_dist_dir
settings_file = 'deploy/settings.py'
    print('Uploading settings.py file %s' % settings_file)
    put(settings_file, remote_settings_file)
    # Create the logs folder; the django process runs as nobody and would
    # otherwise lack write permission
    remote_logs_path = '%s/logs' % remote_dist_dir
    # Create the folder if it does not exist
    run('mkdir -p %s' % remote_logs_path)
    nginx_file = 'deploy/%s.conf' % name
    remote_nginx_file = '/etc/nginx/conf.d/%s.conf' % name
    print('Uploading nginx config file %s' % nginx_file)
    put(nginx_file, remote_nginx_file)
    print('Setting folder permissions')
    run('chown -R oxygen /home/python/%s' % name)
    supervisor_file = 'deploy/%s.ini' % name
    remote_supervisor_file = '/etc/supervisord.d/%s.ini' % name
    print('Uploading supervisor config file %s' % supervisor_file)
put(supervisor_file, remote_supervisor_file)
run('supervisorctl reload')
run('nginx -s reload')
run('nginx -t')
run('supervisorctl restart fomalhaut_dashboard:')
run('supervisorctl restart fomalhaut_celery_beat:')
run('supervisorctl restart fomalhaut_celery_worker:')
# run('service nginx restart')
    # Delete the local package files
local('rm -f %s' % TAR_FILE_NAME)
local('rm -rf dist')
# run('supervisorctl restart ')
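# Typical invocation (assumption, not stated in the original file): `fab pack`
# builds the tarball locally; `fab deploy` packs, backs up the remote copy,
# uploads, and restarts the services under supervisor.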
| [
"[email protected]"
]
| |
1d71cdc28e7df4c73083aeda6d2500856049954c | 1c17aaf0f96c2ba2e2eaf6b552017fb1393a5eaf | /week_1_5/05_3.py | b7fd80b0fad08bacb17e3d113e80b11a72308940 | []
| no_license | ljwon0312/academy_python | b5372387ce317f487e7b498f5f875058661878d3 | b87c3ba38616f3785a1ba587472a3407e113d57a | refs/heads/master | 2020-06-17T03:33:29.989733 | 2019-07-10T07:53:31 | 2019-07-10T07:53:31 | 195,782,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | codon_dict = {'TTT':'F', 'TTC':'F', 'TTA':'L', 'TTG':'L', 'TCT':'S', 'TCC':'S', 'TCA':'S', 'TCG':'S', 'TAT':'Y', 'TAC':'Y', 'TAA':'*', 'TAG':'*', 'TGT':'C', 'TGC':'C', 'TGA':'*', 'TGG':'W', 'CTT':'L', 'CTC':'L', 'CTA':'L', 'CTG':'L', 'CCT':'P', 'CCC':'P', 'CCA':'P', 'CCG':'P', 'CAT':'H', 'CAC':'H', 'CAA':'Q', 'CAG':'Q', 'CGT':'R', 'CGC':'R', 'CGA':'R', 'CGG':'R', 'ATT':'I', 'ATC':'I', 'ATA':'I', 'ATG':'M', 'ACT':'T', 'ACC':'T', 'ACA':'T', 'ACG':'T', 'AAT':'N', 'AAC':'N', 'AAA':'K', 'AAG':'K', 'AGT':'S', 'AGC':'S', 'AGA':'R', 'AGG':'R', 'GTT':'V', 'GTC':'V', 'GTA':'V', 'GTG':'V', 'GCT':'A', 'GCC':'A', 'GCA':'A', 'GCG':'A', 'GAT':'D', 'GAC':'D', 'GAA':'E', 'GAG':'E', 'GGT':'G', 'GGC':'G', 'GGA':'G', 'GGG':'G'}
# Read the FASTA file: drop the header line and join the sequence lines
with open('sequence.nucleotide.fasta','r') as f:
    ll = f.read().split('\n')[1:]
    sequence = ''.join(ll)
# Build the complementary strand, swapping A<->T and G<->C via a placeholder X
compl = sequence.replace("A","X")
compl = compl.replace("T","A")
compl = compl.replace("X","T")
compl = compl.replace("G","X")
compl = compl.replace("C","G")
compl = compl.replace("X","C")
# Translate the three forward reading frames (f1-f3) and, from the reversed
# complement, the three reverse frames (r1-r3), one codon (3 bases) at a time
f1 = ' '.join([codon_dict[sequence[n-3:n]] for n in range(3,2332) if n%3==0])
f2 = ' '.join([codon_dict[sequence[n-3:n]] for n in range(3,2332) if n%3==1])
f3 = ' '.join([codon_dict[sequence[n-3:n]] for n in range(3,2332) if n%3==2])
r1 = ' '.join([codon_dict[compl[::-1][n-3:n]] for n in range(3,2332) if n%3==0])
r2 = ' '.join([codon_dict[compl[::-1][n-3:n]] for n in range(3,2332) if n%3==1])
r3 = ' '.join([codon_dict[compl[::-1][n-3:n]] for n in range(3,2332) if n%3==2])
print(f1+'\n '+f2+'\n '+f3+'\n'+sequence+'\n'+compl+'\n'+
'%2331s\n%2330s\n%2329s' %(r1[::-1],r2[::-1],r3[::-1]))
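# The printout stacks the three forward-frame translations above the sequence
# and its complement, with the three reverse-frame translations right-aligned
# beneath them: a six-frame translation display for the 2331-base sequence.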
| [
"[email protected]"
]
| |
004d960888711abd2459097ce1f9fb8af8aa03cc | c15899ee195d5e4ee978f271767b899bcfe6ea60 | /plugins/nzbhydra/resources/nzbhydraUI/freenas/forms.py | 811acb34f1fa212e7ff1bb245b9e07eae08afc28 | []
| no_license | josh4trunks/freenas-plugins | d39159aa8454ea6be02923cf276348124c84033c | ff7a4276fe4d6584700d271b10d350b23f3c03c5 | refs/heads/master | 2020-12-01T13:04:10.856791 | 2018-10-16T13:50:30 | 2018-10-16T13:50:30 | 26,624,312 | 34 | 13 | null | 2017-08-02T10:38:23 | 2014-11-14T05:46:58 | Python | UTF-8 | Python | false | false | 888 | py | from subprocess import Popen, PIPE
import hashlib
import json
import pwd
import urllib
from django.utils.translation import ugettext_lazy as _
from dojango import forms
from nzbhydraUI.freenas import models, utils
class NZBHydraForm(forms.ModelForm):
class Meta:
model = models.NZBHydra
exclude = (
'enable',
)
def __init__(self, *args, **kwargs):
self.jail_path = kwargs.pop('jail_path')
super(NZBHydraForm, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
obj = super(NZBHydraForm, self).save(*args, **kwargs)
if obj.enable:
Popen(["/usr/sbin/sysrc", "nzbhydra_enable=YES"],
stdout=PIPE,
stderr=PIPE)
else:
Popen(["/usr/sbin/sysrc", "nzbhydra_enable=NO"],
stdout=PIPE,
stderr=PIPE)
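        # sysrc(8) is the FreeBSD utility that edits rc.conf; the calls above
        # toggle the nzbhydra service on or off inside the plugin jail.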
| [
"[email protected]"
]
| |
e8e4f0a3e63dd64e48c24cae5de0e03b153797e9 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/permutations_20200723155624.py | 1a43bb06b64892a0a05e131276152208c87546a0 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | def perm(arr):
    # Check whether arr is a permutation of 1..N (each value exactly once)
    if len(arr) == 0:
        return 0
    distinct = set(arr)
    # A permutation of 1..N has N distinct values whose maximum is N;
    # comparing against len(arr) as well rules out duplicates
    if len(distinct) == len(arr) == max(arr):
        return 1
    return 0
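# Expected behaviour (illustrative): perm([4, 1, 3, 2]) -> 1, perm([4, 1, 3]) -> 0,
# and perm([1, 1, 1]) -> 0, since duplicates can never form a permutation of 1..N.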
print(perm([1,1,1])) | [
"[email protected]"
]
| |
5ed1806e1cd1f81e5b98badd1e41c57ac7224ed8 | 6f0ceee714bccf2a89c34a06aabd3bcb781a2fa4 | /contrib/tvmop/basic/__init__.py | fc0fa720009f2779778d0c249f9578111e8d5106 | [
"Apache-2.0",
"MIT",
"Unlicense",
"BSL-1.0",
"NCSA",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"OFL-1.0",
"BSD-2-Clause-Views",
"Zlib"
]
| permissive | yajiedesign/mxnet | 5a495fd06dd1730c17d2d27d7e46c8a770847f17 | 8e5a16cf673db5aceb48d2cf7a0fc1abd0ee5e51 | refs/heads/master | 2021-03-30T22:37:18.603396 | 2020-10-23T06:40:17 | 2020-10-23T06:40:17 | 43,763,550 | 214 | 59 | Apache-2.0 | 2020-06-01T23:31:15 | 2015-10-06T16:36:40 | C++ | UTF-8 | Python | false | false | 822 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
from . import ufunc
| [
"[email protected]"
]
| |
b1d37d62e22effe849327ad9efec359561e3190b | c413fce92172f3503fd4790b68ad6bbddbf4c919 | /20170501/02_make_exception/02_mexception.py | b803391aede525e909f53d4899cb32a49b572b0d | []
| no_license | JohnHaan/cloudtechpython | 9dbd80e71b7151e3ba1be416b798f086785a1f19 | 17b9b8224cdc29a4b51e592e86f8c9863309c5b1 | refs/heads/master | 2021-01-22T21:22:36.733331 | 2017-11-28T13:54:32 | 2017-11-28T13:54:32 | 85,414,710 | 0 | 1 | null | 2017-04-21T02:51:09 | 2017-03-18T16:15:17 | Python | UTF-8 | Python | false | false | 299 | py |
class MyError(Exception):
pass
def say_nick(nickname):
if nickname == 'dolbam':
raise MyError()
print(nickname)
if __name__ == '__main__':
for i in ['zeroxkim', 'dolbam']:
try:
say_nick(i)
except MyError:
print("wrong nickname") | [
"[email protected]"
]
| |
4c4313dd1ac27c37e207f29bdec56e2ffde9a46a | a81c1492783e7cafcaf7da5f0402d2d283b7ce37 | /google/ads/google_ads/v6/services/geographic_view_service_client_config.py | 0bb6c499223798af61ddd383cefd941113c27d62 | [
"Apache-2.0"
]
| permissive | VincentFritzsche/google-ads-python | 6650cf426b34392d1f58fb912cb3fc25b848e766 | 969eff5b6c3cec59d21191fa178cffb6270074c3 | refs/heads/master | 2023-03-19T17:23:26.959021 | 2021-03-18T18:18:38 | 2021-03-18T18:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | config = {
"interfaces": {
"google.ads.googleads.v6.services.GeographicViewService": {
"retry_codes": {
"retry_policy_1_codes": [
"UNAVAILABLE",
"DEADLINE_EXCEEDED"
],
"no_retry_codes": []
},
"retry_params": {
"retry_policy_1_params": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
},
"no_retry_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 0.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 0,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 0,
"total_timeout_millis": 0
}
},
"methods": {
"GetGeographicView": {
"timeout_millis": 60000,
"retry_codes_name": "retry_policy_1_codes",
"retry_params_name": "retry_policy_1_params"
}
}
}
}
}
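# Reading the policy above (illustrative): GetGeographicView calls failing with
# UNAVAILABLE or DEADLINE_EXCEEDED are retried with exponential backoff (5 s
# initial delay, x1.3 per attempt, capped at 60 s) within a 3600 s total budget.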
| [
"[email protected]"
]
| |
873fa6fdc46a377613d15a63e3fa52a0075972ea | 4e5141121d8b4015db233cbc71946ec3cfbe5fe6 | /samples/basic/executor/models/cisco-ios-xr/Cisco-IOS-XR-snmp-test-trap-act/nc-execute-xr-snmp-test-trap-act-210-ydk.py | 1eab0af3fe816529cbffcdfeb619368a0a50f197 | [
"Apache-2.0"
]
| permissive | itbj/ydk-py-samples | 898c6c9bad9d6f8072892300d42633d82ec38368 | c5834091da0ebedbb11af7bbf780f268aad7040b | refs/heads/master | 2022-11-20T17:44:58.844428 | 2020-07-25T06:18:02 | 2020-07-25T06:18:02 | 282,382,442 | 1 | 0 | null | 2020-07-25T06:04:51 | 2020-07-25T06:04:50 | null | UTF-8 | Python | false | false | 2,657 | py | #!/usr/bin/env python
#
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Execute RPC for model Cisco-IOS-XR-snmp-test-trap-act.
usage: nc-execute-xr-snmp-test-trap-act-210-ydk.py [-h] [-v] device
positional arguments:
device NETCONF device (ssh://user:password@host:port)
optional arguments:
-h, --help show this help message and exit
-v, --verbose print debugging messages
"""
from argparse import ArgumentParser
from urlparse import urlparse
from ydk.services import ExecutorService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_snmp_test_trap_act \
as xr_snmp_test_trap_act
import logging
if __name__ == "__main__":
"""Execute main program."""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="NETCONF device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
# log debug messages if verbose argument specified
if args.verbose:
logger = logging.getLogger("ydk")
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
handler.setFormatter(formatter)
logger.addHandler(handler)
# create NETCONF provider
provider = NetconfServiceProvider(address=device.hostname,
port=device.port,
username=device.username,
password=device.password,
protocol=device.scheme)
# create executor service
executor = ExecutorService()
infra_syslog_message_generated = xr_snmp_test_trap_act.InfraSyslogMessageGenerated() # create object
# execute RPC on NETCONF device
executor.execute_rpc(provider, infra_syslog_message_generated)
exit()
# End of script
| [
"[email protected]"
]
| |
aad98f93d11fbf4c2a25a15a9d71445b4030e96d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2882/60705/254962.py | 1ccdafc2880c8dd65ea508aff7136b3cee8594b5 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | line1 = int(input())
line2 = input()
if line2=="3 2 1":
print("YES")
else:
print(line2) | [
"[email protected]"
]
| |
77f4f391c4a589f687c1673464cd42ba2b0ce694 | 4855a73f552c79f9123990a66b4fd29f829cd07d | /JZOffer/JZOffer14_2.py | c55d60a02a04ffcc0d79604611fbc389a45350b9 | []
| no_license | EmlynQuan/Programming-Python | e2d47de71c4d5981b79bba3b4bf9ac9cf7115484 | 11bbc8aa58b5f16d7cfadd30c7eea241e6dd7bdd | refs/heads/master | 2023-06-24T06:50:27.255834 | 2021-07-29T15:47:50 | 2021-07-29T15:47:50 | 362,500,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # coding=utf-8
def cuttingRope_dp(n):
"""
:type n: int
:rtype: int
"""
if n <= 3:
return n-1
dp = [0 for i in range(n+1)]
dp[1] = 1
dp[2] = 1
dp[3] = 2
for i in range(3, n+1):
        for j in range(1, i // 2 + 1):  # floor division so range() gets an int on Python 3
dp[i] = max(dp[i], j*max(dp[i-j], i-j))
return dp[n] % 1000000007
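# Worked example (illustrative): cuttingRope_dp(10) == 36, i.e. 3 * 3 * 4; the
# inner max compares leaving the remaining piece whole (i-j) against cutting it
# further (dp[i-j]).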
| [
"[email protected]"
]
| |
ba9eb14de7b40eaeee426d39736d20fa3a6fb304 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_MovingAverage_Seasonal_DayOfMonth_MLP.py | 72d984f241059cea41ee8f1d543e8cd2d2152544 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 169 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['MovingAverage'] , ['Seasonal_DayOfMonth'] , ['MLP'] ); | [
"[email protected]"
]
| |
bba9ea3018221f89eb8650ff3f46c4a24e7e06a1 | 4ede275efc8bc9f9ef121dc37215d2f0d8453e36 | /14pl.py | c9485786e19ce057f3e6bd2acf6e59c83c2fbfd7 | []
| no_license | shanthivimalanataraajan01/code | bfa8a441b0c360aebd02248ad4433cc21889c3d2 | ea467ae1eefd68a5dceaa53aab7149d31bd5faf6 | refs/heads/master | 2020-04-15T05:01:03.625422 | 2019-05-17T09:35:45 | 2019-05-17T09:35:45 | 164,405,963 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | n=int(input())
s=input()
r=["a","e","i","o","u","A","E","I","O","U"]
l=[]
for i in range(len(s)):
if s[i] not in r:
l.append(s[i])
for i in reversed(l):
print(i,end="")
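# Illustrative run: input "5" then "hello" collects the consonants h, l, l and
# prints them reversed as "llh" (n is read but otherwise unused).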
| [
"[email protected]"
]
| |
e885485fe7640445f3916e2dac76b67a331dde18 | 65c001b5f572a6b0ca09dd9821016d628b745009 | /frappe-bench/env/lib/python2.7/site-packages/faker/providers/date_time/__init__.py | 6b36346973a5f57a1572ed39f4a0aaa475a4859b | [
"MIT"
]
| permissive | ibrahmm22/library-management | 666dffebdef1333db122c2a4a99286e7c174c518 | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | refs/heads/master | 2022-10-30T17:53:01.238240 | 2020-06-11T18:36:41 | 2020-06-11T18:36:41 | 271,620,992 | 0 | 1 | MIT | 2022-10-23T05:04:57 | 2020-06-11T18:36:21 | CSS | UTF-8 | Python | false | false | 82,423 | py | # coding=utf-8
from __future__ import unicode_literals
import re
from calendar import timegm
from datetime import MAXYEAR, timedelta
from dateutil import relativedelta
from dateutil.tz import tzlocal, tzutc
from faker.utils import is_string
from faker.utils.datetime_safe import date, datetime, real_date, real_datetime
from .. import BaseProvider
localized = True
def datetime_to_timestamp(dt):
if getattr(dt, 'tzinfo', None) is not None:
dt = dt.astimezone(tzutc())
return timegm(dt.timetuple())
def timestamp_to_datetime(timestamp, tzinfo):
if tzinfo is None:
pick = datetime.fromtimestamp(timestamp, tzlocal())
pick = pick.astimezone(tzutc()).replace(tzinfo=None)
else:
pick = datetime.fromtimestamp(timestamp, tzinfo)
return pick
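# Illustrative round trip (assumption, not part of the original file): for a
# timezone-aware dt, timestamp_to_datetime(datetime_to_timestamp(dt), dt.tzinfo)
# recovers the same instant; with tzinfo=None the result is a naive UTC datetime.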
class ParseError(ValueError):
pass
timedelta_pattern = r''
for name, sym in [('years', 'y'), ('months', 'M'), ('weeks', 'w'), ('days', 'd'),
('hours', 'h'), ('minutes', 'm'), ('seconds', 's')]:
timedelta_pattern += r'((?P<{}>(?:\+|-)\d+?){})?'.format(name, sym)
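# The assembled pattern parses offsets such as '-30y' or '+1w+2d-3h', capturing
# each signed amount into a named group ('years', 'weeks', 'days', ...).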
class Provider(BaseProvider):
centuries = [
'I',
'II',
'III',
'IV',
'V',
'VI',
'VII',
'VIII',
'IX',
'X',
'XI',
'XII',
'XIII',
'XIV',
'XV',
'XVI',
'XVII',
'XVIII',
'XIX',
'XX',
'XXI']
countries = [{'timezones': ['Europe/Andorra'],
'alpha-2-code': 'AD',
'alpha-3-code': 'AND',
'continent': 'Europe',
'name': 'Andorra',
'capital': 'Andorra la Vella'},
{'timezones': ['Asia/Kabul'],
'alpha-2-code': 'AF',
'alpha-3-code': 'AFG',
'continent': 'Asia',
'name': 'Afghanistan',
'capital': 'Kabul'},
{'timezones': ['America/Antigua'],
'alpha-2-code': 'AG',
'alpha-3-code': 'ATG',
'continent': 'North America',
'name': 'Antigua and Barbuda',
'capital': "St. John's"},
{'timezones': ['Europe/Tirane'],
'alpha-2-code': 'AL',
'alpha-3-code': 'ALB',
'continent': 'Europe',
'name': 'Albania',
'capital': 'Tirana'},
{'timezones': ['Asia/Yerevan'],
'alpha-2-code': 'AM',
'alpha-3-code': 'ARM',
'continent': 'Asia',
'name': 'Armenia',
'capital': 'Yerevan'},
{'timezones': ['Africa/Luanda'],
'alpha-2-code': 'AO',
'alpha-3-code': 'AGO',
'continent': 'Africa',
'name': 'Angola',
'capital': 'Luanda'},
{'timezones': ['America/Argentina/Buenos_Aires',
'America/Argentina/Cordoba',
'America/Argentina/Jujuy',
'America/Argentina/Tucuman',
'America/Argentina/Catamarca',
'America/Argentina/La_Rioja',
'America/Argentina/San_Juan',
'America/Argentina/Mendoza',
'America/Argentina/Rio_Gallegos',
'America/Argentina/Ushuaia'],
'alpha-2-code': 'AR',
'alpha-3-code': 'ARG',
'continent': 'South America',
'name': 'Argentina',
'capital': 'Buenos Aires'},
{'timezones': ['Europe/Vienna'],
'alpha-2-code': 'AT',
'alpha-3-code': 'AUT',
'continent': 'Europe',
'name': 'Austria',
'capital': 'Vienna'},
{'timezones': ['Australia/Lord_Howe',
'Australia/Hobart',
'Australia/Currie',
'Australia/Melbourne',
'Australia/Sydney',
'Australia/Broken_Hill',
'Australia/Brisbane',
'Australia/Lindeman',
'Australia/Adelaide',
'Australia/Darwin',
'Australia/Perth'],
'alpha-2-code': 'AU',
'alpha-3-code': 'AUS',
'continent': 'Oceania',
'name': 'Australia',
'capital': 'Canberra'},
{'timezones': ['Asia/Baku'],
'alpha-2-code': 'AZ',
'alpha-3-code': 'AZE',
'continent': 'Asia',
'name': 'Azerbaijan',
'capital': 'Baku'},
{'timezones': ['America/Barbados'],
'alpha-2-code': 'BB',
'alpha-3-code': 'BRB',
'continent': 'North America',
'name': 'Barbados',
'capital': 'Bridgetown'},
{'timezones': ['Asia/Dhaka'],
'alpha-2-code': 'BD',
'alpha-3-code': 'BGD',
'continent': 'Asia',
'name': 'Bangladesh',
'capital': 'Dhaka'},
{'timezones': ['Europe/Brussels'],
'alpha-2-code': 'BE',
'alpha-3-code': 'BEL',
'continent': 'Europe',
'name': 'Belgium',
'capital': 'Brussels'},
{'timezones': ['Africa/Ouagadougou'],
'alpha-2-code': 'BF',
'alpha-3-code': 'BFA',
'continent': 'Africa',
'name': 'Burkina Faso',
'capital': 'Ouagadougou'},
{'timezones': ['Europe/Sofia'],
'alpha-2-code': 'BG',
'alpha-3-code': 'BGR',
'continent': 'Europe',
'name': 'Bulgaria',
'capital': 'Sofia'},
{'timezones': ['Asia/Bahrain'],
'alpha-2-code': 'BH',
'alpha-3-code': 'BHR',
'continent': 'Asia',
'name': 'Bahrain',
'capital': 'Manama'},
{'timezones': ['Africa/Bujumbura'],
'alpha-2-code': 'BI',
'alpha-3-code': 'BDI',
'continent': 'Africa',
'name': 'Burundi',
'capital': 'Bujumbura'},
{'timezones': ['Africa/Porto-Novo'],
'alpha-2-code': 'BJ',
'alpha-3-code': 'BEN',
'continent': 'Africa',
'name': 'Benin',
'capital': 'Porto-Novo'},
{'timezones': ['Asia/Brunei'],
'alpha-2-code': 'BN',
'alpha-3-code': 'BRN',
'continent': 'Asia',
'name': 'Brunei Darussalam',
'capital': 'Bandar Seri Begawan'},
{'timezones': ['America/La_Paz'],
'alpha-2-code': 'BO',
'alpha-3-code': 'BOL',
'continent': 'South America',
'name': 'Bolivia',
'capital': 'Sucre'},
{'timezones': ['America/Noronha',
'America/Belem',
'America/Fortaleza',
'America/Recife',
'America/Araguaina',
'America/Maceio',
'America/Bahia',
'America/Sao_Paulo',
'America/Campo_Grande',
'America/Cuiaba',
'America/Porto_Velho',
'America/Boa_Vista',
'America/Manaus',
'America/Eirunepe',
'America/Rio_Branco'],
'alpha-2-code': 'BR',
'alpha-3-code': 'BRA',
'continent': 'South America',
'name': 'Brazil',
'capital': 'Bras\xc3\xadlia'},
{'timezones': ['America/Nassau'],
'alpha-2-code': 'BS',
'alpha-3-code': 'BHS',
'continent': 'North America',
'name': 'Bahamas',
'capital': 'Nassau'},
{'timezones': ['Asia/Thimphu'],
'alpha-2-code': 'BT',
'alpha-3-code': 'BTN',
'continent': 'Asia',
'name': 'Bhutan',
'capital': 'Thimphu'},
{'timezones': ['Africa/Gaborone'],
'alpha-2-code': 'BW',
'alpha-3-code': 'BWA',
'continent': 'Africa',
'name': 'Botswana',
'capital': 'Gaborone'},
{'timezones': ['Europe/Minsk'],
'alpha-2-code': 'BY',
'alpha-3-code': 'BLR',
'continent': 'Europe',
'name': 'Belarus',
'capital': 'Minsk'},
{'timezones': ['America/Belize'],
'alpha-2-code': 'BZ',
'alpha-3-code': 'BLZ',
'continent': 'North America',
'name': 'Belize',
'capital': 'Belmopan'},
{'timezones': ['America/St_Johns',
'America/Halifax',
'America/Glace_Bay',
'America/Moncton',
'America/Goose_Bay',
'America/Blanc-Sablon',
'America/Montreal',
'America/Toronto',
'America/Nipigon',
'America/Thunder_Bay',
'America/Pangnirtung',
'America/Iqaluit',
'America/Atikokan',
'America/Rankin_Inlet',
'America/Winnipeg',
'America/Rainy_River',
'America/Cambridge_Bay',
'America/Regina',
'America/Swift_Current',
'America/Edmonton',
'America/Yellowknife',
'America/Inuvik',
'America/Dawson_Creek',
'America/Vancouver',
'America/Whitehorse',
'America/Dawson'],
'alpha-2-code': 'CA',
'alpha-3-code': 'CAN',
'continent': 'North America',
'name': 'Canada',
'capital': 'Ottawa'},
{'timezones': ['Africa/Kinshasa',
'Africa/Lubumbashi'],
'alpha-2-code': 'CD',
'alpha-3-code': 'COD',
'continent': 'Africa',
'name': 'Democratic Republic of the Congo',
'capital': 'Kinshasa'},
{'timezones': ['Africa/Brazzaville'],
'alpha-2-code': 'CG',
'alpha-3-code': 'COG',
'continent': 'Africa',
'name': 'Republic of the Congo',
'capital': 'Brazzaville'},
{'timezones': ['Africa/Abidjan'],
'alpha-2-code': 'CI',
'alpha-3-code': 'CIV',
'continent': 'Africa',
'name': "C\xc3\xb4te d'Ivoire",
'capital': 'Yamoussoukro'},
{'timezones': ['America/Santiago',
'Pacific/Easter'],
'alpha-2-code': 'CL',
'alpha-3-code': 'CHL',
'continent': 'South America',
'name': 'Chile',
'capital': 'Santiago'},
{'timezones': ['Africa/Douala'],
'alpha-2-code': 'CM',
'alpha-3-code': 'CMR',
'continent': 'Africa',
'name': 'Cameroon',
'capital': 'Yaound\xc3\xa9'},
{'timezones': ['Asia/Shanghai',
'Asia/Harbin',
'Asia/Chongqing',
'Asia/Urumqi',
'Asia/Kashgar'],
'alpha-2-code': 'CN',
'alpha-3-code': 'CHN',
'continent': 'Asia',
'name': "People's Republic of China",
'capital': 'Beijing'},
{'timezones': ['America/Bogota'],
'alpha-2-code': 'CO',
'alpha-3-code': 'COL',
'continent': 'South America',
'name': 'Colombia',
'capital': 'Bogot\xc3\xa1'},
{'timezones': ['America/Costa_Rica'],
'alpha-2-code': 'CR',
'alpha-3-code': 'CRI',
'continent': 'North America',
'name': 'Costa Rica',
'capital': 'San Jos\xc3\xa9'},
{'timezones': ['America/Havana'],
'alpha-2-code': 'CU',
'alpha-3-code': 'CUB',
'continent': 'North America',
'name': 'Cuba',
'capital': 'Havana'},
{'timezones': ['Atlantic/Cape_Verde'],
'alpha-2-code': 'CV',
'alpha-3-code': 'CPV',
'continent': 'Africa',
'name': 'Cape Verde',
'capital': 'Praia'},
{'timezones': ['Asia/Nicosia'],
'alpha-2-code': 'CY',
'alpha-3-code': 'CYP',
'continent': 'Asia',
'name': 'Cyprus',
'capital': 'Nicosia'},
{'timezones': ['Europe/Prague'],
'alpha-2-code': 'CZ',
'alpha-3-code': 'CZE',
'continent': 'Europe',
'name': 'Czech Republic',
'capital': 'Prague'},
{'timezones': ['Europe/Berlin'],
'alpha-2-code': 'DE',
'alpha-3-code': 'DEU',
'continent': 'Europe',
'name': 'Germany',
'capital': 'Berlin'},
{'timezones': ['Africa/Djibouti'],
'alpha-2-code': 'DJ',
'alpha-3-code': 'DJI',
'continent': 'Africa',
'name': 'Djibouti',
'capital': 'Djibouti City'},
{'timezones': ['Europe/Copenhagen'],
'alpha-2-code': 'DK',
'alpha-3-code': 'DNK',
'continent': 'Europe',
'name': 'Denmark',
'capital': 'Copenhagen'},
{'timezones': ['America/Dominica'],
'alpha-2-code': 'DM',
'alpha-3-code': 'DMA',
'continent': 'North America',
'name': 'Dominica',
'capital': 'Roseau'},
{'timezones': ['America/Santo_Domingo'],
'alpha-2-code': 'DO',
'alpha-3-code': 'DOM',
'continent': 'North America',
'name': 'Dominican Republic',
'capital': 'Santo Domingo'},
{'timezones': ['America/Guayaquil',
'Pacific/Galapagos'],
'alpha-2-code': 'EC',
'alpha-3-code': 'ECU',
'continent': 'South America',
'name': 'Ecuador',
'capital': 'Quito'},
{'timezones': ['Europe/Tallinn'],
'alpha-2-code': 'EE',
'alpha-3-code': 'EST',
'continent': 'Europe',
'name': 'Estonia',
'capital': 'Tallinn'},
{'timezones': ['Africa/Cairo'],
'alpha-2-code': 'EG',
'alpha-3-code': 'EGY',
'continent': 'Africa',
'name': 'Egypt',
'capital': 'Cairo'},
{'timezones': ['Africa/Asmera'],
'alpha-2-code': 'ER',
'alpha-3-code': 'ERI',
'continent': 'Africa',
'name': 'Eritrea',
'capital': 'Asmara'},
{'timezones': ['Africa/Addis_Ababa'],
'alpha-2-code': 'ET',
'alpha-3-code': 'ETH',
'continent': 'Africa',
'name': 'Ethiopia',
'capital': 'Addis Ababa'},
{'timezones': ['Europe/Helsinki'],
'alpha-2-code': 'FI',
'alpha-3-code': 'FIN',
'continent': 'Europe',
'name': 'Finland',
'capital': 'Helsinki'},
{'timezones': ['Pacific/Fiji'],
'alpha-2-code': 'FJ',
'alpha-3-code': 'FJI',
'continent': 'Oceania',
'name': 'Fiji',
'capital': 'Suva'},
{'timezones': ['Europe/Paris'],
'alpha-2-code': 'FR',
'alpha-3-code': 'FRA',
'continent': 'Europe',
'name': 'France',
'capital': 'Paris'},
{'timezones': ['Africa/Libreville'],
'alpha-2-code': 'GA',
'alpha-3-code': 'GAB',
'continent': 'Africa',
'name': 'Gabon',
'capital': 'Libreville'},
{'timezones': ['Asia/Tbilisi'],
'alpha-2-code': 'GE',
'alpha-3-code': 'GEO',
'continent': 'Asia',
'name': 'Georgia',
'capital': 'Tbilisi'},
{'timezones': ['Africa/Accra'],
'alpha-2-code': 'GH',
'alpha-3-code': 'GHA',
'continent': 'Africa',
'name': 'Ghana',
'capital': 'Accra'},
{'timezones': ['Africa/Banjul'],
'alpha-2-code': 'GM',
'alpha-3-code': 'GMB',
'continent': 'Africa',
'name': 'The Gambia',
'capital': 'Banjul'},
{'timezones': ['Africa/Conakry'],
'alpha-2-code': 'GN',
'alpha-3-code': 'GIN',
'continent': 'Africa',
'name': 'Guinea',
'capital': 'Conakry'},
{'timezones': ['Europe/Athens'],
'alpha-2-code': 'GR',
'alpha-3-code': 'GRC',
'continent': 'Europe',
'name': 'Greece',
'capital': 'Athens'},
{'timezones': ['America/Guatemala'],
'alpha-2-code': 'GT',
'alpha-3-code': 'GTM',
'continent': 'North America',
'name': 'Guatemala',
'capital': 'Guatemala City'},
{'timezones': ['America/Guatemala'],
'alpha-2-code': 'HT',
'alpha-3-code': 'HTI',
'continent': 'North America',
'name': 'Haiti',
'capital': 'Port-au-Prince'},
{'timezones': ['Africa/Bissau'],
'alpha-2-code': 'GW',
'alpha-3-code': 'GNB',
'continent': 'Africa',
'name': 'Guinea-Bissau',
'capital': 'Bissau'},
{'timezones': ['America/Guyana'],
'alpha-2-code': 'GY',
'alpha-3-code': 'GUY',
'continent': 'South America',
'name': 'Guyana',
'capital': 'Georgetown'},
{'timezones': ['America/Tegucigalpa'],
'alpha-2-code': 'HN',
'alpha-3-code': 'HND',
'continent': 'North America',
'name': 'Honduras',
'capital': 'Tegucigalpa'},
{'timezones': ['Europe/Budapest'],
'alpha-2-code': 'HU',
'alpha-3-code': 'HUN',
'continent': 'Europe',
'name': 'Hungary',
'capital': 'Budapest'},
{'timezones': ['Asia/Jakarta',
'Asia/Pontianak',
'Asia/Makassar',
'Asia/Jayapura'],
'alpha-2-code': 'ID',
'alpha-3-code': 'IDN',
'continent': 'Asia',
'name': 'Indonesia',
'capital': 'Jakarta'},
{'timezones': ['Europe/Dublin'],
'alpha-2-code': 'IE',
'alpha-3-code': 'IRL',
'continent': 'Europe',
'name': 'Republic of Ireland',
'capital': 'Dublin'},
{'timezones': ['Asia/Jerusalem'],
'alpha-2-code': 'IL',
'alpha-3-code': 'ISR',
'continent': 'Asia',
'name': 'Israel',
'capital': 'Jerusalem'},
{'timezones': ['Asia/Calcutta'],
'alpha-2-code': 'IN',
'alpha-3-code': 'IND',
'continent': 'Asia',
'name': 'India',
'capital': 'New Delhi'},
{'timezones': ['Asia/Baghdad'],
'alpha-2-code': 'IQ',
'alpha-3-code': 'IRQ',
'continent': 'Asia',
'name': 'Iraq',
'capital': 'Baghdad'},
{'timezones': ['Asia/Tehran'],
'alpha-2-code': 'IR',
'alpha-3-code': 'IRN',
'continent': 'Asia',
'name': 'Iran',
'capital': 'Tehran'},
{'timezones': ['Atlantic/Reykjavik'],
'alpha-2-code': 'IS',
'alpha-3-code': 'ISL',
'continent': 'Europe',
'name': 'Iceland',
'capital': 'Reykjav\xc3\xadk'},
{'timezones': ['Europe/Rome'],
'alpha-2-code': 'IT',
'alpha-3-code': 'ITA',
'continent': 'Europe',
'name': 'Italy',
'capital': 'Rome'},
{'timezones': ['America/Jamaica'],
'alpha-2-code': 'JM',
'alpha-3-code': 'JAM',
'continent': 'North America',
'name': 'Jamaica',
'capital': 'Kingston'},
{'timezones': ['Asia/Amman'],
'alpha-2-code': 'JO',
'alpha-3-code': 'JOR',
'continent': 'Asia',
'name': 'Jordan',
'capital': 'Amman'},
{'timezones': ['Asia/Tokyo'],
'alpha-2-code': 'JP',
'alpha-3-code': 'JPN',
'continent': 'Asia',
'name': 'Japan',
'capital': 'Tokyo'},
{'timezones': ['Africa/Nairobi'],
'alpha-2-code': 'KE',
'alpha-3-code': 'KEN',
'continent': 'Africa',
'name': 'Kenya',
'capital': 'Nairobi'},
{'timezones': ['Asia/Bishkek'],
'alpha-2-code': 'KG',
'alpha-3-code': 'KGZ',
'continent': 'Asia',
'name': 'Kyrgyzstan',
'capital': 'Bishkek'},
{'timezones': ['Pacific/Tarawa',
'Pacific/Enderbury',
'Pacific/Kiritimati'],
'alpha-2-code': 'KI',
'alpha-3-code': 'KIR',
'continent': 'Oceania',
'name': 'Kiribati',
'capital': 'Tarawa'},
{'timezones': ['Asia/Pyongyang'],
'alpha-2-code': 'KP',
'alpha-3-code': 'PRK',
'continent': 'Asia',
'name': 'North Korea',
'capital': 'Pyongyang'},
{'timezones': ['Asia/Seoul'],
'alpha-2-code': 'KR',
'alpha-3-code': 'KOR',
'continent': 'Asia',
'name': 'South Korea',
'capital': 'Seoul'},
{'timezones': ['Asia/Kuwait'],
'alpha-2-code': 'KW',
'alpha-3-code': 'KWT',
'continent': 'Asia',
'name': 'Kuwait',
'capital': 'Kuwait City'},
{'timezones': ['Asia/Beirut'],
'alpha-2-code': 'LB',
'alpha-3-code': 'LBN',
'continent': 'Asia',
'name': 'Lebanon',
'capital': 'Beirut'},
{'timezones': ['Europe/Vaduz'],
'alpha-2-code': 'LI',
'alpha-3-code': 'LIE',
'continent': 'Europe',
'name': 'Liechtenstein',
'capital': 'Vaduz'},
{'timezones': ['Africa/Monrovia'],
'alpha-2-code': 'LR',
'alpha-3-code': 'LBR',
'continent': 'Africa',
'name': 'Liberia',
'capital': 'Monrovia'},
{'timezones': ['Africa/Maseru'],
'alpha-2-code': 'LS',
'alpha-3-code': 'LSO',
'continent': 'Africa',
'name': 'Lesotho',
'capital': 'Maseru'},
{'timezones': ['Europe/Vilnius'],
'alpha-2-code': 'LT',
'alpha-3-code': 'LTU',
'continent': 'Europe',
'name': 'Lithuania',
'capital': 'Vilnius'},
{'timezones': ['Europe/Luxembourg'],
'alpha-2-code': 'LU',
'alpha-3-code': 'LUX',
'continent': 'Europe',
'name': 'Luxembourg',
'capital': 'Luxembourg City'},
{'timezones': ['Europe/Riga'],
'alpha-2-code': 'LV',
'alpha-3-code': 'LVA',
'continent': 'Europe',
'name': 'Latvia',
'capital': 'Riga'},
{'timezones': ['Africa/Tripoli'],
'alpha-2-code': 'LY',
'alpha-3-code': 'LBY',
'continent': 'Africa',
'name': 'Libya',
'capital': 'Tripoli'},
{'timezones': ['Indian/Antananarivo'],
'alpha-2-code': 'MG',
'alpha-3-code': 'MDG',
'continent': 'Africa',
'name': 'Madagascar',
'capital': 'Antananarivo'},
{'timezones': ['Pacific/Majuro',
'Pacific/Kwajalein'],
'alpha-2-code': 'MH',
'alpha-3-code': 'MHL',
'continent': 'Oceania',
'name': 'Marshall Islands',
'capital': 'Majuro'},
{'timezones': ['Europe/Skopje'],
'alpha-2-code': 'MK',
'alpha-3-code': 'MKD',
'continent': 'Europe',
'name': 'Macedonia',
'capital': 'Skopje'},
{'timezones': ['Africa/Bamako'],
'alpha-2-code': 'ML',
'alpha-3-code': 'MLI',
'continent': 'Africa',
'name': 'Mali',
'capital': 'Bamako'},
{'timezones': ['Asia/Rangoon'],
'alpha-2-code': 'MM',
'alpha-3-code': 'MMR',
'continent': 'Asia',
'name': 'Myanmar',
'capital': 'Naypyidaw'},
{'timezones': ['Asia/Ulaanbaatar',
'Asia/Hovd',
'Asia/Choibalsan'],
'alpha-2-code': 'MN',
'alpha-3-code': 'MNG',
'continent': 'Asia',
'name': 'Mongolia',
'capital': 'Ulaanbaatar'},
{'timezones': ['Africa/Nouakchott'],
'alpha-2-code': 'MR',
'alpha-3-code': 'MRT',
'continent': 'Africa',
'name': 'Mauritania',
'capital': 'Nouakchott'},
{'timezones': ['Europe/Malta'],
'alpha-2-code': 'MT',
'alpha-3-code': 'MLT',
'continent': 'Europe',
'name': 'Malta',
'capital': 'Valletta'},
{'timezones': ['Indian/Mauritius'],
'alpha-2-code': 'MU',
'alpha-3-code': 'MUS',
'continent': 'Africa',
'name': 'Mauritius',
'capital': 'Port Louis'},
{'timezones': ['Indian/Maldives'],
'alpha-2-code': 'MV',
'alpha-3-code': 'MDV',
'continent': 'Asia',
'name': 'Maldives',
'capital': 'Mal\xc3\xa9'},
{'timezones': ['Africa/Blantyre'],
'alpha-2-code': 'MW',
'alpha-3-code': 'MWI',
'continent': 'Africa',
'name': 'Malawi',
'capital': 'Lilongwe'},
{'timezones': ['America/Mexico_City',
'America/Cancun',
'America/Merida',
'America/Monterrey',
'America/Mazatlan',
'America/Chihuahua',
'America/Hermosillo',
'America/Tijuana'],
'alpha-2-code': 'MX',
'alpha-3-code': 'MEX',
'continent': 'North America',
'name': 'Mexico',
'capital': 'Mexico City'},
{'timezones': ['Asia/Kuala_Lumpur',
'Asia/Kuching'],
'alpha-2-code': 'MY',
'alpha-3-code': 'MYS',
'continent': 'Asia',
'name': 'Malaysia',
'capital': 'Kuala Lumpur'},
{'timezones': ['Africa/Maputo'],
'alpha-2-code': 'MZ',
'alpha-3-code': 'MOZ',
'continent': 'Africa',
'name': 'Mozambique',
'capital': 'Maputo'},
{'timezones': ['Africa/Windhoek'],
'alpha-2-code': 'NA',
'alpha-3-code': 'NAM',
'continent': 'Africa',
'name': 'Namibia',
'capital': 'Windhoek'},
{'timezones': ['Africa/Niamey'],
'alpha-2-code': 'NE',
'alpha-3-code': 'NER',
'continent': 'Africa',
'name': 'Niger',
'capital': 'Niamey'},
{'timezones': ['Africa/Lagos'],
'alpha-2-code': 'NG',
'alpha-3-code': 'NGA',
'continent': 'Africa',
'name': 'Nigeria',
'capital': 'Abuja'},
{'timezones': ['America/Managua'],
'alpha-2-code': 'NI',
'alpha-3-code': 'NIC',
'continent': 'North America',
'name': 'Nicaragua',
'capital': 'Managua'},
{'timezones': ['Europe/Amsterdam'],
'alpha-2-code': 'NL',
'alpha-3-code': 'NLD',
'continent': 'Europe',
'name': 'Kingdom of the Netherlands',
'capital': 'Amsterdam'},
{'timezones': ['Europe/Oslo'],
'alpha-2-code': 'NO',
'alpha-3-code': 'NOR',
'continent': 'Europe',
'name': 'Norway',
'capital': 'Oslo'},
{'timezones': ['Asia/Katmandu'],
'alpha-2-code': 'NP',
'alpha-3-code': 'NPL',
'continent': 'Asia',
'name': 'Nepal',
'capital': 'Kathmandu'},
{'timezones': ['Pacific/Nauru'],
'alpha-2-code': 'NR',
'alpha-3-code': 'NRU',
'continent': 'Oceania',
'name': 'Nauru',
'capital': 'Yaren'},
{'timezones': ['Pacific/Auckland',
'Pacific/Chatham'],
'alpha-2-code': 'NZ',
'alpha-3-code': 'NZL',
'continent': 'Oceania',
'name': 'New Zealand',
'capital': 'Wellington'},
{'timezones': ['Asia/Muscat'],
'alpha-2-code': 'OM',
'alpha-3-code': 'OMN',
'continent': 'Asia',
'name': 'Oman',
'capital': 'Muscat'},
{'timezones': ['America/Panama'],
'alpha-2-code': 'PA',
'alpha-3-code': 'PAN',
'continent': 'North America',
'name': 'Panama',
'capital': 'Panama City'},
{'timezones': ['America/Lima'],
'alpha-2-code': 'PE',
'alpha-3-code': 'PER',
'continent': 'South America',
'name': 'Peru',
'capital': 'Lima'},
{'timezones': ['Pacific/Port_Moresby'],
'alpha-2-code': 'PG',
'alpha-3-code': 'PNG',
'continent': 'Oceania',
'name': 'Papua New Guinea',
'capital': 'Port Moresby'},
{'timezones': ['Asia/Manila'],
'alpha-2-code': 'PH',
'alpha-3-code': 'PHL',
'continent': 'Asia',
'name': 'Philippines',
'capital': 'Manila'},
{'timezones': ['Asia/Karachi'],
'alpha-2-code': 'PK',
'alpha-3-code': 'PAK',
'continent': 'Asia',
'name': 'Pakistan',
'capital': 'Islamabad'},
{'timezones': ['Europe/Warsaw'],
'alpha-2-code': 'PL',
'alpha-3-code': 'POL',
'continent': 'Europe',
'name': 'Poland',
'capital': 'Warsaw'},
{'timezones': ['Europe/Lisbon',
'Atlantic/Madeira',
'Atlantic/Azores'],
'alpha-2-code': 'PT',
'alpha-3-code': 'PRT',
'continent': 'Europe',
'name': 'Portugal',
'capital': 'Lisbon'},
{'timezones': ['Pacific/Palau'],
'alpha-2-code': 'PW',
'alpha-3-code': 'PLW',
'continent': 'Oceania',
'name': 'Palau',
'capital': 'Ngerulmud'},
{'timezones': ['America/Asuncion'],
'alpha-2-code': 'PY',
'alpha-3-code': 'PRY',
'continent': 'South America',
'name': 'Paraguay',
'capital': 'Asunci\xc3\xb3n'},
{'timezones': ['Asia/Qatar'],
'alpha-2-code': 'QA',
'alpha-3-code': 'QAT',
'continent': 'Asia',
'name': 'Qatar',
'capital': 'Doha'},
{'timezones': ['Europe/Bucharest'],
'alpha-2-code': 'RO',
'alpha-3-code': 'ROU',
'continent': 'Europe',
'name': 'Romania',
'capital': 'Bucharest'},
{'timezones': ['Europe/Kaliningrad',
'Europe/Moscow',
'Europe/Volgograd',
'Europe/Samara',
'Asia/Yekaterinburg',
'Asia/Omsk',
'Asia/Novosibirsk',
'Asia/Krasnoyarsk',
'Asia/Irkutsk',
'Asia/Yakutsk',
'Asia/Vladivostok',
'Asia/Sakhalin',
'Asia/Magadan',
'Asia/Kamchatka',
'Asia/Anadyr'],
'alpha-2-code': 'RU',
'alpha-3-code': 'RUS',
'continent': 'Europe',
'name': 'Russia',
'capital': 'Moscow'},
{'timezones': ['Africa/Kigali'],
'alpha-2-code': 'RW',
'alpha-3-code': 'RWA',
'continent': 'Africa',
'name': 'Rwanda',
'capital': 'Kigali'},
{'timezones': ['Asia/Riyadh'],
'alpha-2-code': 'SA',
'alpha-3-code': 'SAU',
'continent': 'Asia',
'name': 'Saudi Arabia',
'capital': 'Riyadh'},
{'timezones': ['Pacific/Guadalcanal'],
'alpha-2-code': 'SB',
'alpha-3-code': 'SLB',
'continent': 'Oceania',
'name': 'Solomon Islands',
'capital': 'Honiara'},
{'timezones': ['Indian/Mahe'],
'alpha-2-code': 'SC',
'alpha-3-code': 'SYC',
'continent': 'Africa',
'name': 'Seychelles',
'capital': 'Victoria'},
{'timezones': ['Africa/Khartoum'],
'alpha-2-code': 'SD',
'alpha-3-code': 'SDN',
'continent': 'Africa',
'name': 'Sudan',
'capital': 'Khartoum'},
{'timezones': ['Europe/Stockholm'],
'alpha-2-code': 'SE',
'alpha-3-code': 'SWE',
'continent': 'Europe',
'name': 'Sweden',
'capital': 'Stockholm'},
{'timezones': ['Asia/Singapore'],
'alpha-2-code': 'SG',
'alpha-3-code': 'SGP',
'continent': 'Asia',
'name': 'Singapore',
'capital': 'Singapore'},
{'timezones': ['Europe/Ljubljana'],
'alpha-2-code': 'SI',
'alpha-3-code': 'SVN',
'continent': 'Europe',
'name': 'Slovenia',
'capital': 'Ljubljana'},
{'timezones': ['Europe/Bratislava'],
'alpha-2-code': 'SK',
'alpha-3-code': 'SVK',
'continent': 'Europe',
'name': 'Slovakia',
'capital': 'Bratislava'},
{'timezones': ['Africa/Freetown'],
'alpha-2-code': 'SL',
'alpha-3-code': 'SLE',
'continent': 'Africa',
'name': 'Sierra Leone',
'capital': 'Freetown'},
{'timezones': ['Europe/San_Marino'],
'alpha-2-code': 'SM',
'alpha-3-code': 'SMR',
'continent': 'Europe',
'name': 'San Marino',
'capital': 'San Marino'},
{'timezones': ['Africa/Dakar'],
'alpha-2-code': 'SN',
'alpha-3-code': 'SEN',
'continent': 'Africa',
'name': 'Senegal',
'capital': 'Dakar'},
{'timezones': ['Africa/Mogadishu'],
'alpha-2-code': 'SO',
'alpha-3-code': 'SOM',
'continent': 'Africa',
'name': 'Somalia',
'capital': 'Mogadishu'},
{'timezones': ['America/Paramaribo'],
'alpha-2-code': 'SR',
'alpha-3-code': 'SUR',
'continent': 'South America',
'name': 'Suriname',
'capital': 'Paramaribo'},
{'timezones': ['Africa/Sao_Tome'],
'alpha-2-code': 'ST',
'alpha-3-code': 'STP',
'continent': 'Africa',
'name': 'S\xc3\xa3o Tom\xc3\xa9 and Pr\xc3\xadncipe',
'capital': 'S\xc3\xa3o Tom\xc3\xa9'},
{'timezones': ['Asia/Damascus'],
'alpha-2-code': 'SY',
'alpha-3-code': 'SYR',
'continent': 'Asia',
'name': 'Syria',
'capital': 'Damascus'},
{'timezones': ['Africa/Lome'],
'alpha-2-code': 'TG',
'alpha-3-code': 'TGO',
'continent': 'Africa',
'name': 'Togo',
'capital': 'Lom\xc3\xa9'},
{'timezones': ['Asia/Bangkok'],
'alpha-2-code': 'TH',
'alpha-3-code': 'THA',
'continent': 'Asia',
'name': 'Thailand',
'capital': 'Bangkok'},
{'timezones': ['Asia/Dushanbe'],
'alpha-2-code': 'TJ',
'alpha-3-code': 'TJK',
'continent': 'Asia',
'name': 'Tajikistan',
'capital': 'Dushanbe'},
{'timezones': ['Asia/Ashgabat'],
'alpha-2-code': 'TM',
'alpha-3-code': 'TKM',
'continent': 'Asia',
'name': 'Turkmenistan',
'capital': 'Ashgabat'},
{'timezones': ['Africa/Tunis'],
'alpha-2-code': 'TN',
'alpha-3-code': 'TUN',
'continent': 'Africa',
'name': 'Tunisia',
'capital': 'Tunis'},
{'timezones': ['Pacific/Tongatapu'],
'alpha-2-code': 'TO',
'alpha-3-code': 'TON',
'continent': 'Oceania',
'name': 'Tonga',
'capital': 'Nuku\xca\xbbalofa'},
{'timezones': ['Europe/Istanbul'],
'alpha-2-code': 'TR',
'alpha-3-code': 'TUR',
'continent': 'Asia',
'name': 'Turkey',
'capital': 'Ankara'},
{'timezones': ['America/Port_of_Spain'],
'alpha-2-code': 'TT',
'alpha-3-code': 'TTO',
'continent': 'North America',
'name': 'Trinidad and Tobago',
'capital': 'Port of Spain'},
{'timezones': ['Pacific/Funafuti'],
'alpha-2-code': 'TV',
'alpha-3-code': 'TUV',
'continent': 'Oceania',
'name': 'Tuvalu',
'capital': 'Funafuti'},
{'timezones': ['Africa/Dar_es_Salaam'],
'alpha-2-code': 'TZ',
'alpha-3-code': 'TZA',
'continent': 'Africa',
'name': 'Tanzania',
'capital': 'Dodoma'},
{'timezones': ['Europe/Kiev',
'Europe/Uzhgorod',
'Europe/Zaporozhye',
'Europe/Simferopol'],
'alpha-2-code': 'UA',
'alpha-3-code': 'UKR',
'continent': 'Europe',
'name': 'Ukraine',
'capital': 'Kiev'},
{'timezones': ['Africa/Kampala'],
'alpha-2-code': 'UG',
'alpha-3-code': 'UGA',
'continent': 'Africa',
'name': 'Uganda',
'capital': 'Kampala'},
{'timezones': ['America/New_York',
'America/Detroit',
'America/Kentucky/Louisville',
'America/Kentucky/Monticello',
'America/Indiana/Indianapolis',
'America/Indiana/Marengo',
'America/Indiana/Knox',
'America/Indiana/Vevay',
'America/Chicago',
'America/Indiana/Vincennes',
'America/Indiana/Petersburg',
'America/Menominee',
'America/North_Dakota/Center',
'America/North_Dakota/New_Salem',
'America/Denver',
'America/Boise',
'America/Shiprock',
'America/Phoenix',
'America/Los_Angeles',
'America/Anchorage',
'America/Juneau',
'America/Yakutat',
'America/Nome',
'America/Adak',
'Pacific/Honolulu'],
'alpha-2-code': 'US',
'alpha-3-code': 'USA',
'continent': 'North America',
'name': 'United States',
'capital': 'Washington, D.C.'},
{'timezones': ['America/Montevideo'],
'alpha-2-code': 'UY',
'alpha-3-code': 'URY',
'continent': 'South America',
'name': 'Uruguay',
'capital': 'Montevideo'},
{'timezones': ['Asia/Samarkand',
'Asia/Tashkent'],
'alpha-2-code': 'UZ',
'alpha-3-code': 'UZB',
'continent': 'Asia',
'name': 'Uzbekistan',
'capital': 'Tashkent'},
{'timezones': ['Europe/Vatican'],
'alpha-2-code': 'VA',
'alpha-3-code': 'VAT',
'continent': 'Europe',
'name': 'Vatican City',
'capital': 'Vatican City'},
{'timezones': ['America/Caracas'],
'alpha-2-code': 'VE',
'alpha-3-code': 'VEN',
'continent': 'South America',
'name': 'Venezuela',
'capital': 'Caracas'},
{'timezones': ['Asia/Saigon'],
'alpha-2-code': 'VN',
'alpha-3-code': 'VNM',
'continent': 'Asia',
'name': 'Vietnam',
'capital': 'Hanoi'},
{'timezones': ['Pacific/Efate'],
'alpha-2-code': 'VU',
'alpha-3-code': 'VUT',
'continent': 'Oceania',
'name': 'Vanuatu',
'capital': 'Port Vila'},
{'timezones': ['Asia/Aden'],
'alpha-2-code': 'YE',
'alpha-3-code': 'YEM',
'continent': 'Asia',
'name': 'Yemen',
'capital': "Sana'a"},
{'timezones': ['Africa/Lusaka'],
'alpha-2-code': 'ZM',
'alpha-3-code': 'ZMB',
'continent': 'Africa',
'name': 'Zambia',
'capital': 'Lusaka'},
{'timezones': ['Africa/Harare'],
'alpha-2-code': 'ZW',
'alpha-3-code': 'ZWE',
'continent': 'Africa',
'name': 'Zimbabwe',
'capital': 'Harare'},
{'timezones': ['Africa/Algiers'],
'alpha-2-code': 'DZ',
'alpha-3-code': 'DZA',
'continent': 'Africa',
'name': 'Algeria',
'capital': 'Algiers'},
{'timezones': ['Europe/Sarajevo'],
'alpha-2-code': 'BA',
'alpha-3-code': 'BIH',
'continent': 'Europe',
'name': 'Bosnia and Herzegovina',
'capital': 'Sarajevo'},
{'timezones': ['Asia/Phnom_Penh'],
'alpha-2-code': 'KH',
'alpha-3-code': 'KHM',
'continent': 'Asia',
'name': 'Cambodia',
'capital': 'Phnom Penh'},
{'timezones': ['Africa/Bangui'],
'alpha-2-code': 'CF',
'alpha-3-code': 'CAF',
'continent': 'Africa',
'name': 'Central African Republic',
'capital': 'Bangui'},
{'timezones': ['Africa/Ndjamena'],
'alpha-2-code': 'TD',
'alpha-3-code': 'TCD',
'continent': 'Africa',
'name': 'Chad',
'capital': "N'Djamena"},
{'timezones': ['Indian/Comoro'],
'alpha-2-code': 'KM',
'alpha-3-code': 'COM',
'continent': 'Africa',
'name': 'Comoros',
'capital': 'Moroni'},
{'timezones': ['Europe/Zagreb'],
'alpha-2-code': 'HR',
'alpha-3-code': 'HRV',
'continent': 'Europe',
'name': 'Croatia',
'capital': 'Zagreb'},
{'timezones': ['Asia/Dili'],
'alpha-2-code': 'TL',
'alpha-3-code': 'TLS',
'continent': 'Asia',
'name': 'East Timor',
'capital': 'Dili'},
{'timezones': ['America/El_Salvador'],
'alpha-2-code': 'SV',
'alpha-3-code': 'SLV',
'continent': 'North America',
'name': 'El Salvador',
'capital': 'San Salvador'},
{'timezones': ['Africa/Malabo'],
'alpha-2-code': 'GQ',
'alpha-3-code': 'GNQ',
'continent': 'Africa',
'name': 'Equatorial Guinea',
'capital': 'Malabo'},
{'timezones': ['America/Grenada'],
'alpha-2-code': 'GD',
'alpha-3-code': 'GRD',
'continent': 'North America',
'name': 'Grenada',
'capital': "St. George's"},
{'timezones': ['Asia/Almaty',
'Asia/Qyzylorda',
'Asia/Aqtobe',
'Asia/Aqtau',
'Asia/Oral'],
'alpha-2-code': 'KZ',
'alpha-3-code': 'KAZ',
'continent': 'Asia',
'name': 'Kazakhstan',
'capital': 'Astana'},
{'timezones': ['Asia/Vientiane'],
'alpha-2-code': 'LA',
'alpha-3-code': 'LAO',
'continent': 'Asia',
'name': 'Laos',
'capital': 'Vientiane'},
{'timezones': ['Pacific/Truk',
'Pacific/Ponape',
'Pacific/Kosrae'],
'alpha-2-code': 'FM',
'alpha-3-code': 'FSM',
'continent': 'Oceania',
'name': 'Federated States of Micronesia',
'capital': 'Palikir'},
{'timezones': ['Europe/Chisinau'],
'alpha-2-code': 'MD',
'alpha-3-code': 'MDA',
'continent': 'Europe',
'name': 'Moldova',
'capital': 'Chi\xc5\x9fin\xc4\x83u'},
{'timezones': ['Europe/Monaco'],
'alpha-2-code': 'MC',
'alpha-3-code': 'MCO',
'continent': 'Europe',
'name': 'Monaco',
'capital': 'Monaco'},
{'timezones': ['Europe/Podgorica'],
'alpha-2-code': 'ME',
'alpha-3-code': 'MNE',
'continent': 'Europe',
'name': 'Montenegro',
'capital': 'Podgorica'},
{'timezones': ['Africa/Casablanca'],
'alpha-2-code': 'MA',
'alpha-3-code': 'MAR',
'continent': 'Africa',
'name': 'Morocco',
'capital': 'Rabat'},
{'timezones': ['America/St_Kitts'],
'alpha-2-code': 'KN',
'alpha-3-code': 'KNA',
'continent': 'North America',
'name': 'Saint Kitts and Nevis',
'capital': 'Basseterre'},
{'timezones': ['America/St_Lucia'],
'alpha-2-code': 'LC',
'alpha-3-code': 'LCA',
'continent': 'North America',
'name': 'Saint Lucia',
'capital': 'Castries'},
{'timezones': ['America/St_Vincent'],
'alpha-2-code': 'VC',
'alpha-3-code': 'VCT',
'continent': 'North America',
'name': 'Saint Vincent and the Grenadines',
'capital': 'Kingstown'},
{'timezones': ['Pacific/Apia'],
'alpha-2-code': 'WS',
'alpha-3-code': 'WSM',
'continent': 'Oceania',
'name': 'Samoa',
'capital': 'Apia'},
{'timezones': ['Europe/Belgrade'],
'alpha-2-code': 'RS',
'alpha-3-code': 'SRB',
'continent': 'Europe',
'name': 'Serbia',
'capital': 'Belgrade'},
{'timezones': ['Africa/Johannesburg'],
'alpha-2-code': 'ZA',
'alpha-3-code': 'ZAF',
'continent': 'Africa',
'name': 'South Africa',
'capital': 'Pretoria'},
{'timezones': ['Europe/Madrid',
'Africa/Ceuta',
'Atlantic/Canary'],
'alpha-2-code': 'ES',
'alpha-3-code': 'ESP',
'continent': 'Europe',
'name': 'Spain',
'capital': 'Madrid'},
{'timezones': ['Asia/Colombo'],
'alpha-2-code': 'LK',
'alpha-3-code': 'LKA',
'continent': 'Asia',
'name': 'Sri Lanka',
'capital': 'Sri Jayewardenepura Kotte'},
{'timezones': ['Africa/Mbabane'],
'alpha-2-code': 'SZ',
'alpha-3-code': 'SWZ',
'continent': 'Africa',
'name': 'Swaziland',
'capital': 'Mbabane'},
{'timezones': ['Europe/Zurich'],
'alpha-2-code': 'CH',
'alpha-3-code': 'CHE',
'continent': 'Europe',
'name': 'Switzerland',
'capital': 'Bern'},
{'timezones': ['Asia/Dubai'],
'alpha-2-code': 'AE',
'alpha-3-code': 'ARE',
'continent': 'Asia',
'name': 'United Arab Emirates',
'capital': 'Abu Dhabi'},
{'timezones': ['Europe/London'],
'alpha-2-code': 'GB',
'alpha-3-code': 'GBR',
'continent': 'Europe',
'name': 'United Kingdom',
'capital': 'London'},
]
regex = re.compile(timedelta_pattern)
def unix_time(self, end_datetime=None, start_datetime=None):
"""
Get a timestamp between January 1, 1970 and now, unless passed
explicit start_datetime or end_datetime values.
:example 1061306726
"""
start_datetime = self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
return self.generator.random.randint(start_datetime, end_datetime)
def time_delta(self, end_datetime=None):
"""
Get a timedelta object
"""
start_datetime = self._parse_start_datetime('now')
end_datetime = self._parse_end_datetime(end_datetime)
seconds = end_datetime - start_datetime
ts = self.generator.random.randint(*sorted([0, seconds]))
return timedelta(seconds=ts)
def date_time(self, tzinfo=None, end_datetime=None):
"""
Get a datetime object for a date between January 1, 1970 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2005-08-16 20:39:21')
:return datetime
"""
# NOTE: On windows, the lowest value you can get from windows is 86400
# on the first day. Known python issue:
# https://bugs.python.org/issue30684
return datetime(1970, 1, 1, tzinfo=tzinfo) + \
timedelta(seconds=self.unix_time(end_datetime=end_datetime))
def date_time_ad(self, tzinfo=None, end_datetime=None, start_datetime=None):
"""
        Get a datetime object for a date between January 1, 0001 and now
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1265-03-22 21:15:52')
:return datetime
"""
# 1970-01-01 00:00:00 UTC minus 62135596800 seconds is
# 0001-01-01 00:00:00 UTC. Since _parse_end_datetime() is used
# elsewhere where a default value of 0 is expected, we can't
# simply change that class method to use this magic number as a
# default value when None is provided.
start_time = -62135596800 if start_datetime is None else self._parse_start_datetime(start_datetime)
end_datetime = self._parse_end_datetime(end_datetime)
ts = self.generator.random.randint(start_time, end_datetime)
# NOTE: using datetime.fromtimestamp(ts) directly will raise
# a "ValueError: timestamp out of range for platform time_t"
# on some platforms due to system C functions;
# see http://stackoverflow.com/a/10588133/2315612
# NOTE: On windows, the lowest value you can get from windows is 86400
# on the first day. Known python issue:
# https://bugs.python.org/issue30684
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts)
def iso8601(self, tzinfo=None, end_datetime=None):
"""
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example '2003-10-21T16:05:52+0000'
"""
return self.date_time(tzinfo, end_datetime=end_datetime).isoformat()
def date(self, pattern='%Y-%m-%d', end_datetime=None):
"""
Get a date string between January 1, 1970 and now
:param pattern format
:example '2008-11-27'
"""
return self.date_time(end_datetime=end_datetime).strftime(pattern)
def date_object(self, end_datetime=None):
"""
Get a date object between January 1, 1970 and now
:example datetime.date(2016, 9, 20)
"""
return self.date_time(end_datetime=end_datetime).date()
def time(self, pattern='%H:%M:%S', end_datetime=None):
"""
Get a time string (24h format by default)
:param pattern format
:example '15:02:34'
"""
return self.date_time(
end_datetime=end_datetime).time().strftime(pattern)
def time_object(self, end_datetime=None):
"""
Get a time object
:example datetime.time(15, 56, 56, 772876)
"""
return self.date_time(end_datetime=end_datetime).time()
@classmethod
def _parse_start_datetime(cls, value):
if value is None:
return 0
return cls._parse_date_time(value)
@classmethod
def _parse_end_datetime(cls, value):
if value is None:
return datetime_to_timestamp(datetime.now())
return cls._parse_date_time(value)
@classmethod
def _parse_date_string(cls, value):
parts = cls.regex.match(value)
if not parts:
raise ParseError("Can't parse date string `{}`.".format(value))
parts = parts.groupdict()
time_params = {}
for (name_, param_) in parts.items():
if param_:
time_params[name_] = int(param_)
if 'years' in time_params:
if 'days' not in time_params:
time_params['days'] = 0
time_params['days'] += 365.24 * time_params.pop('years')
if 'months' in time_params:
if 'days' not in time_params:
time_params['days'] = 0
time_params['days'] += 30.42 * time_params.pop('months')
if not time_params:
raise ParseError("Can't parse date string `{}`.".format(value))
return time_params
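    # Illustrative behaviour (not part of the source): _parse_date_string('-30y')
    # returns roughly {'days': -10957.2}, since years fold into days via the
    # 365.24 factor above, and months fold into days via the 30.42 factor.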
@classmethod
def _parse_timedelta(cls, value):
if isinstance(value, timedelta):
return value.total_seconds()
if is_string(value):
time_params = cls._parse_date_string(value)
return timedelta(**time_params).total_seconds()
if isinstance(value, (int, float)):
return value
raise ParseError("Invalid format for timedelta '{}'".format(value))
@classmethod
def _parse_date_time(cls, value, tzinfo=None):
if isinstance(value, (datetime, date, real_datetime, real_date)):
return datetime_to_timestamp(value)
now = datetime.now(tzinfo)
if isinstance(value, timedelta):
return datetime_to_timestamp(now + value)
if is_string(value):
if value == 'now':
return datetime_to_timestamp(datetime.now(tzinfo))
time_params = cls._parse_date_string(value)
return datetime_to_timestamp(now + timedelta(**time_params))
if isinstance(value, int):
return datetime_to_timestamp(now + timedelta(value))
raise ParseError("Invalid format for date '{}'".format(value))
@classmethod
def _parse_date(cls, value):
if isinstance(value, (datetime, real_datetime)):
return value.date()
elif isinstance(value, (date, real_date)):
return value
today = date.today()
if isinstance(value, timedelta):
return today + value
if is_string(value):
if value in ('today', 'now'):
return today
time_params = cls._parse_date_string(value)
return today + timedelta(**time_params)
if isinstance(value, int):
return today + timedelta(value)
raise ParseError("Invalid format for date '{}'".format(value))
def date_time_between(self, start_date='-30y', end_date='now', tzinfo=None):
"""
Get a DateTime object based on a random date between two given dates.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to 30 years ago
:param end_date Defaults to "now"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
if end_date - start_date <= 1:
ts = start_date + self.generator.random.random()
else:
ts = self.generator.random.randint(start_date, end_date)
if tzinfo is None:
return datetime(1970, 1, 1, tzinfo=tzinfo) + timedelta(seconds=ts)
else:
return (
datetime(1970, 1, 1, tzinfo=tzutc()) + timedelta(seconds=ts)
).astimezone(tzinfo)
def date_between(self, start_date='-30y', end_date='today'):
"""
Get a Date object based on a random date between two given dates.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to 30 years ago
:param end_date Defaults to "today"
:example Date('1999-02-02')
:return Date
"""
start_date = self._parse_date(start_date)
end_date = self._parse_date(end_date)
return self.date_between_dates(date_start=start_date, date_end=end_date)
def future_datetime(self, end_date='+30d', tzinfo=None):
"""
        Get a DateTime object based on a random date between 1 second from now
and a given date.
Accepts date strings that can be recognized by strtotime().
:param end_date Defaults to "+30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
return self.date_time_between(
start_date='+1s', end_date=end_date, tzinfo=tzinfo,
)
def future_date(self, end_date='+30d', tzinfo=None):
"""
Get a Date object based on a random date between 1 day from now and a
given date.
Accepts date strings that can be recognized by strtotime().
:param end_date Defaults to "+30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
        :example Date('1999-02-02')
        :return Date
"""
return self.date_between(start_date='+1d', end_date=end_date)
def past_datetime(self, start_date='-30d', tzinfo=None):
"""
Get a DateTime object based on a random date between a given date and 1
second ago.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to "-30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
return self.date_time_between(
start_date=start_date, end_date='-1s', tzinfo=tzinfo,
)
def past_date(self, start_date='-30d', tzinfo=None):
"""
Get a Date object based on a random date between a given date and 1 day
ago.
Accepts date strings that can be recognized by strtotime().
:param start_date Defaults to "-30d"
:param tzinfo: timezone, instance of datetime.tzinfo subclass
        :example Date('1999-02-02')
        :return Date
"""
return self.date_between(start_date=start_date, end_date='-1d')
def date_time_between_dates(
self,
datetime_start=None,
datetime_end=None,
tzinfo=None):
"""
Takes two DateTime objects and returns a random datetime between the two
given datetimes.
Accepts DateTime objects.
:param datetime_start: DateTime
:param datetime_end: DateTime
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('1999-02-02 11:42:52')
:return DateTime
"""
if datetime_start is None:
datetime_start = datetime.now(tzinfo)
if datetime_end is None:
datetime_end = datetime.now(tzinfo)
timestamp = self.generator.random.randint(
datetime_to_timestamp(datetime_start),
datetime_to_timestamp(datetime_end),
)
try:
if tzinfo is None:
pick = datetime.fromtimestamp(timestamp, tzlocal())
pick = pick.astimezone(tzutc()).replace(tzinfo=None)
else:
pick = datetime.fromtimestamp(timestamp, tzinfo)
except OverflowError:
raise OverflowError(
"You specified an end date with a timestamp bigger than the maximum allowed on this"
" system. Please specify an earlier date.",
)
return pick
def date_between_dates(self, date_start=None, date_end=None):
"""
Takes two Date objects and returns a random date between the two given dates.
Accepts Date or Datetime objects
:param date_start: Date
:param date_end: Date
:return Date
"""
return self.date_time_between_dates(date_start, date_end).date()
def date_time_this_century(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the current century.
:param before_now: include days in current century before today
:param after_now: include days in current century after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_century_start = datetime(
now.year - (now.year % 100), 1, 1, tzinfo=tzinfo)
next_century_start = datetime(
min(this_century_start.year + 100, MAXYEAR), 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_century_start, next_century_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_century_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_century_start, now, tzinfo)
else:
return now
def date_time_this_decade(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
        Gets a DateTime object for the current decade.
:param before_now: include days in current decade before today
:param after_now: include days in current decade after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_decade_start = datetime(
now.year - (now.year % 10), 1, 1, tzinfo=tzinfo)
next_decade_start = datetime(
min(this_decade_start.year + 10, MAXYEAR), 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_decade_start, next_decade_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_decade_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_decade_start, now, tzinfo)
else:
return now
def date_time_this_year(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the current year.
:param before_now: include days in current year before today
:param after_now: include days in current year after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_year_start = now.replace(
month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
next_year_start = datetime(now.year + 1, 1, 1, tzinfo=tzinfo)
if before_now and after_now:
return self.date_time_between_dates(
this_year_start, next_year_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_year_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_year_start, now, tzinfo)
else:
return now
def date_time_this_month(
self,
before_now=True,
after_now=False,
tzinfo=None):
"""
Gets a DateTime object for the current month.
:param before_now: include days in current month before today
:param after_now: include days in current month after today
:param tzinfo: timezone, instance of datetime.tzinfo subclass
:example DateTime('2012-04-04 11:02:02')
:return DateTime
"""
now = datetime.now(tzinfo)
this_month_start = now.replace(
day=1, hour=0, minute=0, second=0, microsecond=0)
next_month_start = this_month_start + \
relativedelta.relativedelta(months=1)
if before_now and after_now:
return self.date_time_between_dates(
this_month_start, next_month_start, tzinfo)
elif not before_now and after_now:
return self.date_time_between_dates(now, next_month_start, tzinfo)
elif not after_now and before_now:
return self.date_time_between_dates(this_month_start, now, tzinfo)
else:
return now
def date_this_century(self, before_today=True, after_today=False):
"""
Gets a Date object for the current century.
:param before_today: include days in current century before today
:param after_today: include days in current century after today
:example Date('2012-04-04')
:return Date
"""
today = date.today()
this_century_start = date(today.year - (today.year % 100), 1, 1)
next_century_start = date(this_century_start.year + 100, 1, 1)
if before_today and after_today:
return self.date_between_dates(
this_century_start, next_century_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_century_start)
elif not after_today and before_today:
return self.date_between_dates(this_century_start, today)
else:
return today
def date_this_decade(self, before_today=True, after_today=False):
"""
        Gets a Date object for the current decade.
:param before_today: include days in current decade before today
:param after_today: include days in current decade after today
:example Date('2012-04-04')
:return Date
"""
today = date.today()
this_decade_start = date(today.year - (today.year % 10), 1, 1)
next_decade_start = date(this_decade_start.year + 10, 1, 1)
if before_today and after_today:
return self.date_between_dates(this_decade_start, next_decade_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_decade_start)
elif not after_today and before_today:
return self.date_between_dates(this_decade_start, today)
else:
return today
def date_this_year(self, before_today=True, after_today=False):
"""
Gets a Date object for the current year.
:param before_today: include days in current year before today
:param after_today: include days in current year after today
:example Date('2012-04-04')
:return Date
"""
today = date.today()
this_year_start = today.replace(month=1, day=1)
next_year_start = date(today.year + 1, 1, 1)
if before_today and after_today:
return self.date_between_dates(this_year_start, next_year_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_year_start)
elif not after_today and before_today:
return self.date_between_dates(this_year_start, today)
else:
return today
def date_this_month(self, before_today=True, after_today=False):
"""
Gets a Date object for the current month.
:param before_today: include days in current month before today
:param after_today: include days in current month after today
        :example Date('2012-04-04')
        :return Date
"""
today = date.today()
this_month_start = today.replace(day=1)
next_month_start = this_month_start + \
relativedelta.relativedelta(months=1)
if before_today and after_today:
return self.date_between_dates(this_month_start, next_month_start)
elif not before_today and after_today:
return self.date_between_dates(today, next_month_start)
elif not after_today and before_today:
return self.date_between_dates(this_month_start, today)
else:
return today
def time_series(
self,
start_date='-30d',
end_date='now',
precision=None,
distrib=None,
tzinfo=None):
"""
Returns a generator yielding tuples of ``(<datetime>, <value>)``.
The data points will start at ``start_date``, and be at every time interval specified by
``precision``.
``distrib`` is a callable that accepts ``<datetime>`` and returns ``<value>``
"""
start_date = self._parse_date_time(start_date, tzinfo=tzinfo)
end_date = self._parse_date_time(end_date, tzinfo=tzinfo)
if end_date < start_date:
raise ValueError("`end_date` must be greater than `start_date`.")
if precision is None:
precision = (end_date - start_date) / 30
precision = self._parse_timedelta(precision)
if distrib is None:
def distrib(dt): return self.generator.random.uniform(0, precision) # noqa
if not callable(distrib):
raise ValueError(
"`distrib` must be a callable. Got {} instead.".format(distrib))
datapoint = start_date
while datapoint < end_date:
dt = timestamp_to_datetime(datapoint, tzinfo)
datapoint += precision
yield (dt, distrib(dt))
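    # Usage sketch (illustrative): iterating time_series('-7d', precision=3600)
    # yields about 168 (datetime, value) tuples, one per hour, with values drawn
    # from the default uniform(0, precision) distribution unless `distrib` is given.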
def am_pm(self):
return self.date('%p')
def day_of_month(self):
return self.date('%d')
def day_of_week(self):
return self.date('%A')
def month(self):
return self.date('%m')
def month_name(self):
return self.date('%B')
def year(self):
return self.date('%Y')
def century(self):
"""
:example 'XVII'
"""
return self.random_element(self.centuries)
def timezone(self):
return self.generator.random.choice(
self.random_element(self.countries)['timezones'])
def date_of_birth(self, tzinfo=None, minimum_age=0, maximum_age=115):
"""
Generate a random date of birth represented as a Date object,
        constrained by optional minimum_age and maximum_age
parameters.
:param tzinfo Defaults to None.
:param minimum_age Defaults to 0.
:param maximum_age Defaults to 115.
:example Date('1979-02-02')
:return Date
"""
if not isinstance(minimum_age, int):
raise TypeError("minimum_age must be an integer.")
if not isinstance(maximum_age, int):
raise TypeError("maximum_age must be an integer.")
if (maximum_age < 0):
raise ValueError("maximum_age must be greater than or equal to zero.")
if (minimum_age < 0):
raise ValueError("minimum_age must be greater than or equal to zero.")
if (minimum_age > maximum_age):
raise ValueError("minimum_age must be less than or equal to maximum_age.")
# In order to return the full range of possible dates of birth, add one
# year to the potential age cap and subtract one day if we land on the
# boundary.
now = datetime.now(tzinfo).date()
start_date = now.replace(year=now.year - (maximum_age+1))
end_date = now.replace(year=now.year - minimum_age)
dob = self.date_time_ad(tzinfo=tzinfo, start_datetime=start_date, end_datetime=end_date).date()
return dob if dob != start_date else dob + timedelta(days=1)
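    # Usage sketch (illustrative; `fake` is a hypothetical Faker() instance):
    #   fake.date_of_birth(minimum_age=18, maximum_age=65)
    #   -> a datetime.date between roughly 18 and 65 years before today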
| [
"[email protected]"
]
| |
20d292d62891bea770e215fc0d6057a5f8d0e71c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03013/s862770643.py | 7ccbf8573c48d9c4b4925c320990f7e34dc43586 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | #import math
#import itertools
#import numpy as np
#from collections import deque
# sys.setrecursionlimit(10 ** 6)
MOD = 10 ** 9 + 7
#INF = 10 ** 9
#PI = 3.14159265358979323846
INT = lambda: int(input())
INTM = lambda: map(int,input().split())
STRM = lambda: map(str,input().split())
STR = lambda: str(input())
LIST = lambda: list(map(int,input().split()))
LISTS = lambda: list(map(str,input().split()))
def do():
n,m=INTM()
b_sts=[False]*n
for i in range(m):
b=INT()
b-=1
b_sts[b]=True
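    # DP: step[k] holds the number of ways to stand on stair k-1 using 1- or
    # 2-step moves; broken stairs keep step[k] == 0, so the answer is step[n+1].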
step=[0]*(n+2)
step[1]=1
for i in range(n):
if b_sts[i]:
continue
else:
step[i+2]=(step[i+1]+step[i])%MOD
print(step[-1]%MOD)
if __name__=='__main__':
do() | [
"[email protected]"
]
| |
1ceccd56e6f916af518465d5e03cdc7d5860afd4 | c26b3a7d446fbfd71ce11e04129358c17dd6239a | /tAPP/3/P4.py | e0a55f3cd27e899abf582eb63103c2b47d192dfc | [
"MIT"
]
| permissive | ArvinZJC/UofG_PGT_PSD_Python | 4c4c2430cb95309a37086503e7ee638a45d571ab | d90e9bb0b53b14c6b1d7e657c3c61e2792e0d9c4 | refs/heads/main | 2023-05-09T21:18:07.847277 | 2021-06-14T17:26:38 | 2021-06-14T17:26:38 | 329,307,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | '''
Description: Problem 4 (complete the code)
Version: 1.0.1.20210117
Author: Arvin Zhao
Date: 2021-01-16 00:19:34
Last Editors: Arvin Zhao
LastEditTime: 2021-01-17 12:13:41
'''
from tkinter import Tk, Label, Entry, Button
from tkinter.constants import END
def convert_to_km():
miles = textbox1.get()
    message = float(miles) * 1.6093  # 1 mile = 1.6093 km, so multiply
textbox2.delete(0, END)
textbox2.insert(END, message)
textbox2.insert(END, ' km')
def convert_to_miles():
km = textbox1.get()
    km = float(km)  # float rather than int so decimal inputs also work
message = km * 0.6214
textbox2.delete(0, END)
textbox2.insert(END, message)
textbox2.insert(END, ' miles')
if __name__ == '__main__':
window = Tk()
window.title('Distance')
window.geometry('260x200')
label1 = Label(text = 'Enter the value you want to convert: ')
label1.place(x = 30, y = 20)
    textbox1 = Entry()
textbox1.place(x = 30, y = 50, width = 200, height = 25)
textbox1['justify'] = 'center'
textbox1.focus()
convert1 = Button(text = 'Convert mile to km', command = convert_to_km)
convert1.place(x = 30, y = 80, width = 200, height = 25)
convert2 = Button(text = 'Convert km to mile', command = convert_to_miles)
convert2.place(x = 30, y = 110, width = 200, height = 25)
    textbox2 = Entry()
textbox2.place(x = 30, y = 140, width = 200, height = 25)
textbox2['justify'] = 'center'
window.mainloop() | [
"[email protected]"
]
| |
11384963eb0c43c775c81061bdd8d70f1ecc25fe | 4ad14e547695daaf4637c0790c21245a99fb6114 | /Fmfiles/Fmfiles/pipelines.py | be1185d41ce5ce649a20c8f991da0de3c39a83d3 | []
| no_license | gxl2632537/pachong | b8e1c26ef0e13504dc084b8e12975823126dac41 | 1ec30451e83a66768b4616ed9d736af8ee53deba | refs/heads/master | 2022-11-07T02:16:42.549654 | 2020-06-30T09:47:34 | 2020-06-30T09:47:34 | 272,400,120 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import scrapy
import os
from scrapy.pipelines.files import FilesPipeline
from Fmfiles.settings import FILES_STORE
from scrapy.exceptions import DropItem
class FmfilesPipeline(FilesPipeline):
    # Lazily capture the default parent directory ("full") that FilesPipeline
    # downloads into, kept as a class attribute
__file_dir = None
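    # FilesPipeline flow: get_media_requests() schedules each download,
    # item_completed() receives the [(ok, info), ...] results, and
    # close_spider() runs once after the crawl finishes.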
# def process_item(self, item, spider):
# return item
def get_media_requests(self, item, info):
file_url = item['file_url']
yield scrapy.Request(file_url)
def item_completed(self, results, item, info):
        # `results` is supplied by FilesPipeline as a list of (ok, info) tuples
        file_paths = [x['path'] for ok, x in results if ok]
        if not file_paths:
            raise DropItem("Item contains no files")
        if not self.__file_dir:
            self.__file_dir = file_paths[0].split('/')[0]
        # os.rename(src, dst) renames/moves a file or directory; here it moves
        # the downloaded file into its album directory under its real name
os.rename(FILES_STORE + file_paths[0],
FILES_STORE + item['file_album'] + '/' + item['file_name'])
return item
def close_spider(self, spider):
if self.__file_dir is None:
return None
ori_filepath = FILES_STORE + self.__file_dir
if os.path.exists(ori_filepath):
            # os.rmdir() removes the directory at the given path (it must be empty)
os.rmdir(ori_filepath)
| [
"l"
]
| l |
dc78ca14f957482b684aac9c5650a6871a3f4025 | c7329046b8c19cb55e41e2275993b32fbec7b7eb | /test/test_mirage/test_model/test_impl/test_singular_isothermal_sphere.py | 300b755948572277d34cde53d13dde974e235466 | [
"MIT"
]
| permissive | JordanKoeller/Mirage | 72af9b7e6e71f97574e67eff400eb20dfa0a340e | 5ed8dcf8934495bd18e534e54784e053475eb49e | refs/heads/main | 2023-08-09T06:57:40.111101 | 2023-08-05T21:21:08 | 2023-08-05T21:21:08 | 147,987,257 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | from unittest import TestCase
from astropy import units as u
from mirage.model.impl import SingularIsothermalSphereLens
from mirage.model import Quasar
from mirage.util import Vec2D, PolarVec
class TestPointLens(TestCase):
def get_lens(self):
return SingularIsothermalSphereLens(
quasar=Quasar(1.2, mass=u.Quantity(1e9, "solMass")),
redshift=0.7,
velocity_dispersion=u.Quantity(300, "km/s"),
star_fraction=0.8,
shear=PolarVec(u.Quantity(0.1, "rad"), u.Quantity(0.2, "rad")),
ellipticity=PolarVec(u.Quantity(0.1, "rad"), u.Quantity(0.3, "rad")),
)
def testEinsteinRadius_success(self):
lens = self.get_lens()
er = lens.einstein_radius
self.assertEqual(er.unit, u.arcsec)
def testMicrotracingParameters_success(self):
lens = self.get_lens()
tracing_params = lens.microtracing_parameters(Vec2D(2, 3, "arcsec"))
| [
"[email protected]"
]
| |
b57ac9bebc820d96dd6b1125d35da94fdce9ece2 | c59738ddfb08134af01d75255c4469071a1e135e | /001_Python_from_introduction_to_practice/001_plot_折线图_条形图(柱状图)_直方图_中文显示/006_直方图_统计不同电影时长电影数量分布.py | 7ebbb5c7f69fd15b0a432168e857db18a8547595 | []
| no_license | FelixZFB/Python_data_analysis | 371a8460da79e8fdb30b10c02b662419b62a5998 | 62f018d88d8454afe65980efd8d771ac8691956a | refs/heads/master | 2020-05-20T14:46:00.606684 | 2020-02-04T14:25:20 | 2020-02-04T14:25:20 | 185,629,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | # -*- coding: utf-8 -*-
# Import packages
import matplotlib.pyplot as plt
import numpy as np
# Make matplotlib render Chinese characters and the minus sign correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# Douban Top 250 movie runtimes: plot a histogram of the runtime distribution.
# X axis: runtime in minutes; y axis: number of movies in each runtime bin.
a=[131, 98, 125, 131, 124, 139, 131, 117, 128, 108, 135, 138, 131, 102, 107, 114, 119, 128, 121, 142, 127, 130, 124, 101, 110, 116, 117, 110, 128, 128, 115, 99, 136, 126, 134, 95, 138, 117, 111,78, 132, 124, 113, 150, 110, 117, 86, 95, 144, 105, 126, 130,126, 130, 126, 116, 123, 106, 112, 138, 123, 86, 101, 99, 136,123, 117, 119, 105, 137, 123, 128, 125, 104, 109, 134, 125, 127,105, 120, 107, 129, 116, 108, 132, 103, 136, 118, 102, 120, 114,105, 115, 132, 145, 119, 121, 112, 139, 125, 138, 109, 132, 134,156, 106, 117, 127, 144, 139, 139, 119, 140, 83, 110, 102,123,107, 143, 115, 136, 118, 139, 123, 112, 118, 125, 109, 119, 133,112, 114, 122, 109, 106, 123, 116, 131, 127, 115, 118, 112, 135,115, 146, 137, 116, 103, 144, 83, 123, 111, 110, 111, 100, 154,136, 100, 118, 119, 133, 134, 106, 129, 126, 110, 111, 109, 141,120, 117, 106, 149, 122, 122, 110, 118, 127, 121, 114, 125, 126,114, 140, 103, 130, 141, 117, 106, 114, 121, 114, 133, 137, 92,121, 112, 146, 97, 137, 105, 98, 117, 112, 81, 97, 139, 113,134, 106, 144, 110, 137, 137, 111, 104, 117, 100, 111, 101, 110,105, 129, 137, 112, 120, 113, 133, 112, 83, 94, 146, 133, 101,131, 116, 111, 84, 137, 115, 122, 106, 144, 109, 123, 116, 111,111, 133, 150]
# From the shortest and longest runtime, pick a suitable bin width for grouping.
# Choose a width d that divides (max - min) exactly; only then do the axis ticks
# and the bar edges stay uniformly aligned (easy to verify with the grid lines).
d = 6
num_bins = (max(a) - min(a)) // d
print(max(a))
print(min(a))
print(num_bins)
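# Worked check: max(a) = 156 and min(a) = 78, so with d = 6 the range splits
# into (156 - 78) // 6 = 13 equal bins, matching the xticks every 6 minutes below.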
# Create the figure window with the given resolution and size
plt.figure(dpi=128, figsize=(8, 6))
# Draw the histogram: given the list a and the bin count, plt.hist() bins the
# continuous x values and counts the movies falling into each bin automatically.
plt.hist(a, num_bins, color='orange', label='电影数量分布')
# Set the chart title and the axis labels
plt.title('豆瓣电影Top250电影时长分布图', fontsize=24)
plt.xlabel('电影时长(分钟)', fontsize=14)
plt.ylabel('电影数量', fontsize=14)
# Show grid lines and the legend; alpha is the transparency (note: the legend
# parameter for Chinese-capable fonts here is prop)
plt.grid(axis='both', color='grey', linestyle='-.', alpha=0.5)
plt.legend(loc='upper right')
# Set the x-axis tick positions and spacing to match the bin edges
plt.xticks(np.arange(78, 161, 6))
# y-axis ticks for the movie counts
plt.yticks(np.arange(0, 55, 5))
# Display the figure
plt.show() | [
"[email protected]"
]
| |
65d5a881a175b2753d0e88f4d19f3cd42c2452b9 | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/SendGrid/NewsletterAPI/ListsEmail/AddEmailToList.py | 5d18334ef39a88208f7a4b7f40dc69496a4969ba | []
| no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# AddEmailToList
# Add an email to an existing Recipient List.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class AddEmailToList(Choreography):
"""
Create a new instance of the AddEmailToList Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/SendGrid/NewsletterAPI/ListsEmail/AddEmailToList')
def new_input_set(self):
return AddEmailToListInputSet()
def _make_result_set(self, result, path):
return AddEmailToListResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return AddEmailToListChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the AddEmailToList
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class AddEmailToListInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API Key obtained from SendGrid.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the APIUser input for this choreography. ((required, string) The username registered with SendGrid.)
"""
def set_APIUser(self, value):
InputSet._set_input(self, 'APIUser', value)
"""
Set the value of the Data input for this choreography. ((required, string) The JSON string containing the name, email and additional fields to be added to the specified recipient lists. Example: {"email":"[email protected]","name":"name","other_field":"value"})
"""
def set_Data(self, value):
InputSet._set_input(self, 'Data', value)
"""
Set the value of the List input for this choreography. ((required, string) The recipient list to which emaill addresses are being added. The list must be already existing.)
"""
def set_List(self, value):
InputSet._set_input(self, 'List', value)
"""
Set the value of the ResponseFormat input for this choreography. ((optional, string) The format of the response from SendGrid, in either json, or xml. Default is set to json.)
"""
def set_ResponseFormat(self, value):
InputSet._set_input(self, 'ResponseFormat', value)
"""
Set the value of the VaultFile input for this choreography. ()
"""
"""
A ResultSet with methods tailored to the values returned by the AddEmailToList choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class AddEmailToListResultSet(ResultSet):
"""
Retrieve the value for the "Response" output from this choreography execution. (The response from SendGrid. The format corresponds to the ResponseFormat input. Default is json.)
"""
def get_Response(self):
return self._output.get('Response', None)
class AddEmailToListChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return AddEmailToListResultSet(response, path)
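# Usage sketch (illustrative; assumes a valid TembooSession and real SendGrid
# credentials -- every value below is a placeholder, and execute_with_results()
# is assumed to be the SDK's standard execution entry point):
#
#   choreo = AddEmailToList(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIUser('my_user')
#   inputs.set_APIKey('my_key')
#   inputs.set_List('my_existing_list')
#   inputs.set_Data('{"email":"[email protected]","name":"name"}')
#   print(choreo.execute_with_results(inputs).get_Response())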
| [
"[email protected]"
]
| |
076599354848efd0d14fdc3c76a3c7360e7ed706 | 70b339d0b2638a7914d0d56c5edf8a2637c9f4b0 | /thousandSeparator.py | 999ebd880f493406a097ca8fd4036f33990e4133 | []
| no_license | pflun/advancedAlgorithms | 9991da7514024e18ba08de8688966b9220e12571 | 5520dbcd26999b98e1229bf03c2f62dd690a2ddc | refs/heads/master | 2023-02-19T12:05:26.902535 | 2023-02-14T06:08:54 | 2023-02-14T06:08:54 | 189,055,701 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | class Solution(object):
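    # Quick check (illustrative), using the floor-division fix below:
    #   Solution().thousandSeparator(1234567) -> '1.234.567'
    #   Solution().thousandSeparator(51040)   -> '51.040'
    #   Solution().thousandSeparator(987)     -> '987'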
def thousandSeparator(self, n):
"""
:type n: int
:rtype: str
"""
res = []
        while n // 1000 > 0:  # floor division, so this also works on Python 3
remain = str(n % 1000)
if len(remain) == 1:
remain = '00' + remain
elif len(remain) == 2:
remain = '0' + remain
res = [remain] + res
            n //= 1000
res = [str(n % 1000)] + res
return '.'.join(res) | [
"[email protected]"
]
| |
4964d2f9abf377ed066bd2037665447f341785e3 | 8d54e547467ee9f91311dab51b6dd104e814f7d5 | /text_txt_lmfit.py | dbab90c88c6528b1b9d01fa2e077bdbb7052b923 | []
| no_license | alictg81/pyspeckit-tests | 9dca6e41b8b783ef72d05f7135652efe6eddef1e | 62153cb2dbff2cffda1a9c8f626ac4e1549f8532 | refs/heads/master | 2021-01-22T06:49:11.600689 | 2015-04-07T16:22:17 | 2015-04-07T16:22:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | from pyspeckit import spectrum
if 'interactive' not in globals():
interactive=False
if 'savedir' not in globals():
savedir = ''
sp = spectrum.Spectrum('simple_txt.txt')
print "Does it have an axis? ",sp.plotter.axis
sp.plotter()
print "How about now? ",sp.plotter.axis
print "FITTING GAUSSIAN"
sp.specfit(quiet=False)
sp.specfit.annotate(loc='lower right')
sp.plotter.figure.savefig(savedir+'txt_gaussfit.png')
print "Guesses: ", sp.specfit.guesses
print "Best fit: ", sp.specfit.modelpars
print "Datamax: ",sp.data.max()
if interactive: raw_input("Wait here a moment")
# Fit a baseline, excluding the velocities with data, and don't subtract it
print "FITTING BASELINE"
sp.baseline(exclude=[2.111,2.129],subtract=False)
# DEBUG added 1/16/2012 sp.baseline.highlight_fitregion()
sp.plotter.figure.savefig(savedir+'txt_baseline.png')
print "Baseline: ",sp.baseline.baselinepars
#print "Excludepix: ",sp.baseline.excludepix
#print "Excludevelo: ",sp.baseline.excludevelo
print "EQW: ",sp.specfit.EQW()
print "Datamax: ",sp.data.max()
print "NOK: ",sp.baseline.OKmask.sum()
print sp.data[sp.baseline.includemask]
print sp.baseline.basespec
if interactive: raw_input("Wait here a moment")
print "REFITTING GAUSSIAN"
sp.specfit(quiet=False)
sp.specfit.annotate(loc='lower right')
print "Guesses: ", sp.specfit.guesses
print "Best fit: ", sp.specfit.modelpars
print "Datamax: ",sp.data.max()
sp.plotter.figure.savefig(savedir+'txt_baseline_gaussfit.png')
if interactive: raw_input("Wait here a moment")
print "EQW: ",sp.specfit.EQW(plot=True,annotate=True)
sp.plotter.refresh()
sp.plotter.figure.savefig(savedir+'txt_EQW.png')
sp.specfit.add_sliders()
if interactive: raw_input('Press enter to print guesses and best fit and end code')
#from matplotlib import pyplot
| [
"[email protected]"
]
| |
225d064d759e8103f09dc0d0d525433e922a6c19 | f8ece22d9e9e12e2cbca56d72a6b2728ba9a275a | /setup.py | 28b7dbcd103df90c11e4c7e2ecd33447d0f296a8 | [
"MIT"
]
| permissive | pparan/polyaxon | 8c8912f9ba724e007357efcaefeab86fec2d5630 | 423199721e90431209b00c0f76caa6b4f9aa4b24 | refs/heads/master | 2021-04-15T07:15:19.701268 | 2018-03-21T11:59:12 | 2018-03-21T11:59:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,047 | py | #!/usr/bin/env python
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
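    # setuptools shim so that `python setup.py test` runs the pytest suite.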
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='polyaxon',
version='0.0.6',
description='Deep Learning library for TensorFlow for '
'building end to end models and experiments.',
maintainer='Mourad Mourafiq',
maintainer_email='[email protected]',
author='Mourad Mourafiq',
author_email='[email protected]',
url='https://github.com/polyaxon/polyaxon',
license='MIT',
platforms='any',
packages=find_packages(),
keywords=[
'polyaxon',
'tensorFlow',
'deep-learning',
'machine-learning',
'data-science',
'neural-networks',
'artificial-intelligence',
'ai',
'reinforcement-learning'
],
install_requires=[
"celery==4.1.0",
"Django==1.11.7",
"django-cors-headers==2.1.0",
"djangorestframework==3.7.0",
"djangorestframework-camel-case==0.2.0",
"docker==2.6.1",
"GitPython==2.1.7",
"Jinja2==2.9.6",
"pika==0.11.0",
"psycopg2==2.7.3.1",
"redis==2.10.6",
"sanic==0.6.0",
"six==1.11.0",
"Unipath==1.1",
"uWSGI==2.0.15",
"websockets==3.4",
],
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
tests_require=['pytest'],
cmdclass={'test': PyTest})
| [
"[email protected]"
]
| |
caf878602d26dd8f569da3d2ddf5ec00fd85e19f | 3c59fe3748fe0461d1327b9b751d8dc7c9405367 | /teus_back/users/migrations/0014_auto_20210505_0038.py | 47010d11a0dca65fd47a52b7741afa4401f6e016 | []
| no_license | RympeR/teus-django-vue | 8c69b5aaf38af091597065ab1306e33283846d16 | b6be2bace144a0f3102339295eae05579657205e | refs/heads/main | 2023-05-13T16:30:29.425392 | 2021-06-03T07:13:34 | 2021-06-03T07:13:34 | 342,588,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Generated by Django 3.1.7 on 2021-05-04 21:38
import datetime
from django.db import migrations, models
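# Note: the datetime default below was frozen when `makemigrations` ran;
# Django serializes a computed default as the literal value it had at that time.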
class Migration(migrations.Migration):
dependencies = [
('users', '0013_auto_20210430_0435'),
]
operations = [
migrations.AlterField(
model_name='phone',
name='expires_at',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 5, 0, 20), verbose_name='Expires at'),
),
]
| [
"[email protected]"
]
| |
931b73a7d655f1fb2521e8aeced917c19b27cb99 | 13eb4610eb9316c246768c7dffe94e24eb5ce3ed | /docs/conf.py | b320008b6bfca53accb03b2174ac8109ac31137d | [
"Apache-2.0"
]
| permissive | webclinic017/pyEX-zipline | e5141b70424b0c04f7ad59a34ab9bcd26b817ec5 | 3d0636af2686861cd08054447bcc60190d48aedd | refs/heads/master | 2022-07-22T08:38:08.060746 | 2020-05-19T19:32:16 | 2020-05-19T19:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,420 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pyEX.zipline documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 12 22:07:11 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys
import os
import os.path
import subprocess
import sphinx_rtd_theme
from recommonmark.transform import AutoStructify
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'recommonmark']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pyEX.zipline'
copyright = '2020, Tim Paine'
author = 'Tim Paine'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
_version_py = os.path.join('..', 'pyEX', 'zipline', '_version.py')
version_ns = {}
with open(_version_py, mode='r') as version_file:
exec(version_file.read(), version_ns)
# The short X.Y version.
version = '%i.%i' % version_ns['version_info'][:2]
# The full version, including alpha/beta/rc tags.
release = version_ns['__version__']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyEXziplinedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyEX.zipline.tex', 'pyEX.zipline Documentation',
'Tim Paine', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyEX.zipline', 'pyEX.zipline Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyEX.zipline', 'pyEX.zipline Documentation',
author, 'pyEX.zipline', 'One line description of project.',
'Miscellaneous'),
]
def run_copyreadme(_):
out = os.path.abspath(os.path.join(os.path.dirname(__file__), 'index.md'))
readme = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'README.md'))
api = os.path.abspath(os.path.join(os.path.dirname(__file__), 'api.md'))
with open(out, 'w') as fp1:
with open(readme, 'r') as fp2:
for line in fp2:
if 'src=' in line:
# <img>
fp1.write(line.replace("docs/", ""))
elif "](docs/" in line:
# md
fp1.write(line.replace("](docs/", "]("))
else:
fp1.write(line)
fp1.write("# API Documentation\n\n")
with open(api, 'r') as fp2:
fp1.write(fp2.read())
def run_apidoc(_):
out_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'api'))
pyEX_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'pyEX'))
cmd_path = 'sphinx-apidoc'
if hasattr(sys, 'real_prefix'): # Check to see if we are in a virtualenv
# If we are, assemble the path manually
cmd_path = os.path.abspath(os.path.join(sys.prefix, 'bin', 'sphinx-apidoc'))
subprocess.check_call([cmd_path,
'-E',
'-M',
'-o',
out_dir,
pyEX_dir,
'--force'])
def setup(app):
app.add_config_value('recommonmark_config', {
'auto_toc_tree_section': 'Contents',
}, True)
app.add_transform(AutoStructify)
app.connect('builder-inited', run_copyreadme)
app.connect('builder-inited', run_apidoc)
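    # Both hooks fire on 'builder-inited', i.e. before Sphinx reads any sources,
    # so index.md and the api/ stubs exist by the time the build starts.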
| [
"[email protected]"
]
| |
c1918f6020987d4268163b9fcd8cd562a0d239d6 | cb30200e8af3a27f3d4faf7e0ca31bdb96f2f85a | /neutron/db/uos_db.py | 97a8667d885eac45f0cd4a4a096228b44fc247b2 | [
"Apache-2.0"
]
| permissive | CingHu/neutron-ustack | ee7bf5a373dfc400c1ed86dedc7414c27185de4d | a1da17d0d63b3342a48c35da37984d6386ee1016 | refs/heads/master | 2016-08-13T02:02:48.179378 | 2015-11-27T02:46:46 | 2015-11-27T02:46:46 | 46,955,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 UnitedStack Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Yong Sheng Gong, UnitedStack, inc.
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.extensions import l3
from neutron.openstack.common import timeutils
def _uos_extend_timestamp(res, db):
res['created_at'] = timeutils.strtime(db['created_at'])
def _uos_extend_floatingip_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_router_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_network_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_port_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_subnet_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
def _uos_extend_sg_dict_binding(core_plugin, res, db):
_uos_extend_timestamp(res, db)
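# Wire the extenders into the core plugin so that every API response for these
# resources carries a created_at timestamp alongside its stock attributes.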
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.FLOATINGIPS, [_uos_extend_floatingip_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, [_uos_extend_router_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, [_uos_extend_network_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, [_uos_extend_port_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, [_uos_extend_subnet_dict_binding])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
'security_groups', [_uos_extend_sg_dict_binding])
| [
"[email protected]"
]
| |
aa12b468ee49bf3c91454e59e480bd4297a3b27e | 7b8d9cfc3c1881e00ead43b4f080e5b885b39f98 | /skipper/aws/host.py | 12931001399ecb522836720c8264bbdf87dae7f1 | [
"BSD-2-Clause"
]
| permissive | cameronmaske/skipper | ddfcc7ab4c5a672837aa7ac9de3824ae3d608f6c | 965a36f3ce985acba8b15dc4b086f4478c7b6358 | refs/heads/master | 2023-08-17T16:46:52.105912 | 2014-09-22T19:49:42 | 2014-09-22T19:49:42 | 18,116,398 | 1 | 0 | BSD-2-Clause | 2021-03-25T21:27:21 | 2014-03-25T20:59:44 | Python | UTF-8 | Python | false | false | 14,364 | py | from skipper.hosts import BaseHost
from skipper.instances import InstanceNotFound
from skipper.logger import log
from skipper.ssh import ssh_tunnel
from skipper.fleet import Fleet
from skipper.ports import parse_ports_list
from skipper.exceptions import NoSuchService
from ec2 import EC2, authorize_group, revoke_group
from groups import Group
from instances import Instance
from regions import REGIONS
from helpers import (
find_instance, find_machine_by_ip, outdated_rules)
from StringIO import StringIO
import click
from multiprocessing.dummy import Pool
class Host(BaseHost):
def __init__(self, creds=None, project=None):
self.creds = creds
self.project = project
self._ec2 = None
def ec2(self):
if not self._ec2:
self._ec2 = EC2(**self.get_creds())
return self._ec2
@property
def private_key(self):
keys_path = self.get_keys_paths()
with open(keys_path['private'], 'r') as f:
contents = StringIO(f.read())
return contents
@property
def public_key(self):
keys_path = self.get_keys_paths()
with open(keys_path['public'], 'r') as f:
contents = StringIO(f.read())
return contents
def make_group(self, name, **kwargs):
raw_ports = kwargs.pop("ports", {})
ports = {
"public": parse_ports_list(raw_ports.get("public", []))
}
return Group(
name=name,
ports=ports,
**kwargs
)
def get_creds(self):
"""
        Returns a dictionary with an access_key and secret_key. If they are not
        stored, prompts the user to enter them.
"""
access = self.creds.get('AWS', {}).get('ACCESS_KEY')
secret = self.creds.get('AWS', {}).get('SECRET_KEY')
if not access and not secret:
message = (
'\nAs this is your first time running skipper, we need to store'
' your some AWS Security Credentials.\nPlease visit '
'https://console.aws.amazon.com/iam/home?#security_credential\n'
'Under Access Keys, click Create New Access Key.'
)
elif not access or not secret:
message = (
'\nThe AWS Security Credentials stored are incomplete. Please '
're-enter them. \nYou can generate them on '
'https://console.aws.amazon.com/iam/home?#security_credential\n'
'Under Access Keys, click Create New Access Key.'
)
if not access or not secret:
click.echo(message)
self.creds['AWS']['ACCESS_KEY'] = click.prompt(
"Enter your Access Key ID")
self.creds['AWS']['SECRET_KEY'] = click.prompt(
"Enter your Secret Access Key")
self.creds.save()
click.echo('\n')
return {
"access_key": self.creds['AWS']['ACCESS_KEY'],
"secret_key": self.creds['AWS']['SECRET_KEY']
}
def get_keypair_name(self):
"""
        Returns the saved KeyPair name, or prompts the user to enter a desired
        name.
"""
name = self.creds.get('AWS', {}).get('KEY_NAME')
if not name:
click.echo(
"\nA KeyPair, consisting of an SSH public and private key is "
"required for access to the cluster."
)
name = click.prompt(
"Please enter the an existing KeyPair's name, or enter a "
"unique name for yourself"
)
self.creds['AWS']['KEY_NAME'] = name
self.creds.save()
click.echo('\n')
return name
def get_or_import_key(self, region, name):
"""
        Attempts to retrieve the key pair, otherwise imports the local public key.
"""
key = self.ec2().get_key(region=region, name=name)
if not key:
public_key = self.public_key
key = self.ec2().import_key(
region=region,
name=name,
public_key_material=public_key.read()
)
return key
def ps_instances(self):
"""
        Returns a lightweight dictionary representation of all instances.
"""
instances = self.all_instances()
print_instances = []
for instance in instances:
group, scale = instance.uuid.split("_", 1)
print_instances.append({
"uuid": instance.uuid,
"state": instance.boto_instance.state,
"ip": instance.public_ip,
"size": instance.boto_instance.instance_type,
"group": group,
"scale": scale,
"region": instance.region,
})
return print_instances
def __instances_by_region(self, region):
"""
        Private method that returns all instances associated with a region.
        Used in conjunction with a thread pool.
"""
instances = self.ec2().filter_instances(
region=region,
filters={
'tag:project': self.project.name
},
ignore=["terminated"]
)
return {
'region': region,
'instances': instances
}
def all_instances(self, **kwargs):
"""
Returns all instances associated with the project (including stopped)
"""
# Looks across all AWS regions, in parallel using a thread pool.
pool = Pool(8)
regions = kwargs.get('regions', REGIONS.keys())
results = pool.map(self.__instances_by_region, regions)
pool.close()
pool.join()
instances = []
for result in results:
for boto_instance in result['instances']:
uuid = boto_instance.tags['uuid']
instance = Instance(
host=self,
uuid=uuid,
project_name=self.project.name,
boto_instance=boto_instance,
size=boto_instance.instance_type,
region=result['region'],
)
instances.append(instance)
return instances
def get_or_create_instances(self, name, **kwargs):
"""
        Gets or creates instances.
"""
instances = []
scale = kwargs.pop('scale', 1)
region = kwargs.pop('region', "us-east-1")
for i in range(1, scale + 1):
uuid = "%s_%s" % (name, i)
try:
instance = self.get_instance(
uuid=uuid,
project_name=self.project.name,
region=region,
size=kwargs.get('size')
)
instance.update()
except InstanceNotFound:
instance = self.create_instance(
uuid=uuid,
project_name=self.project.name,
region=region,
size=kwargs.get('size')
)
instances.append(instance)
return instances
def get_instance(self, uuid, project_name, **kwargs):
size = kwargs.get('size', "t1.micro")
region = kwargs.get('region', "us-east-1")
log.info("Checking for instance [%s]." % uuid)
boto_instances = self.ec2().filter_instances(
region=region,
filters={
'tag:uuid': uuid,
'tag:project': project_name
})
try:
boto_instance = boto_instances[0]
instance = Instance(
host=self,
uuid=uuid,
project_name=project_name,
boto_instance=boto_instance,
size=size,
region=region,
)
log.info("Successfully found instance [%s]." % uuid)
return instance
except IndexError:
log.info("Failed to find instance [%s]." % uuid)
raise InstanceNotFound()
def create_instance(self, uuid, project_name, **kwargs):
size = kwargs.get('size', "t1.micro")
region = kwargs.get('region', "us-east-1")
group = self.ec2().get_or_create_group(
region=region,
name=project_name)[0]
keypair_name = self.get_keypair_name()
key = self.get_or_import_key(name=keypair_name, region=region)
authorize_group(group, 'tcp', 22, 22, to_ip='0.0.0.0/0')
authorize_group(group, 'tcp', 7001, 7001, to_group=group)
authorize_group(group, 'tcp', 4001, 4001, to_group=group)
log.info("Starting a new instance [%s]." % uuid)
config = """#cloud-config
coreos:
etcd:
# generate a new token for each unique cluster from https://discovery.etcd.io/new
discovery: {}
name: {}
# multi-region and multi-cloud deployments need to use $public_ipv4
addr: $private_ipv4:4001
peer-addr: $private_ipv4:7001
units:
- name: etcd.service
command: start
- name: fleet.socket
command: start
content: |
[Socket]
ListenStream=6001
Service=fleet.service
[Install]
WantedBy=sockets.target
- name: fleet.service
command: start
""".format(self.get_etcd_token(), uuid)
boto_instance = self.ec2().create_instance(
region=region,
instance_type=size,
key_name=key.name,
security_groups=[group.name],
user_data=config.strip()
)
boto_instance.add_tag('name', uuid)
boto_instance.add_tag('uuid', uuid)
boto_instance.add_tag('project', project_name)
instance = Instance(
host=self,
uuid=uuid,
project_name=project_name,
boto_instance=boto_instance,
size=size,
region=region,
)
log.info("Successfully created instance [%s]." % uuid)
return instance
def ps_services(self):
"""
Returns all services across a host.
"""
instances = self.all_instances()
with ssh_tunnel(instances[0].tunnel_params(port=6001)) as port:
fleet = Fleet(port=port)
machines = fleet.list_machines()
states = fleet.list_states()
return _ps_services(
machines=machines, states=states, instances=instances)
def run_service(self, instances, service, tag):
"""
Runs a service across multiple instances.
"""
instance = instances[0]
with ssh_tunnel(instance.tunnel_params(port=6001)) as port:
fleet = Fleet(port=port)
machines = fleet.list_machines()
for instance in instances:
machine_id = find_machine_by_ip(
machines, instance.private_ip)["id"]
for i in range(1, service.scale + 1):
scale = (instance.scale + i - 1)
name = "%s_%s" % (service.name, scale)
log.info(
'Checking for service [%s] on instance [%s].' %
(name, instance.uuid))
create = False
try:
existing = fleet.get_unit(name=name)
log.info(
'Found service [%s], checking is up to date...'
% name)
if existing['options'] != service.fleet_params(tag=tag, scale=scale):
log.info(
'Existing service [%s] is outdated, removing...'
% name)
fleet.delete_unit(name=name)
create = True
except fleet.NotFound:
log.info('No existing service [%s] found.' % name)
create = True
if create:
log.info('Creating service [%s].' % name)
fleet.create_unit(
machine_id=machine_id,
name=name,
options=service.fleet_params(tag=tag, scale=scale))
log.info('Successfully created service [%s].' % name)
def configure_group(self, instances, group):
"""
        Opens ports publicly for associated instances based on a group's
        settings.
"""
ports = group.ports['public']
if ports:
ec2_group = self.ec2().get_or_create_group(
region=group.region,
name=group.name)[0]
desired = []
for port in ports:
desired.append({
"ip_protocol": "tcp",
"from_port": str(port),
"to_port": str(port),
"to_ip": ['0.0.0.0/0']
})
outdated = outdated_rules(ec2_group.rules, desired)
for old in outdated:
revoke_group(ec2_group, **old)
for new in desired:
authorize_group(ec2_group, **new)
for instance in instances:
instance.add_group(ec2_group)
def remove_service(self, uuid):
"""
Stop a running service.
"""
instances = self.all_instances()
with ssh_tunnel(instances[0].tunnel_params(port=6001)) as port:
fleet = Fleet(port=port)
try:
fleet.get_unit(name=uuid)
fleet.delete_unit(name=uuid)
            except fleet.NotFound:
raise NoSuchService()
host = Host()
def _ps_services(instances, machines, states):
services = []
for state in states:
uuid, _ = state['name'].split('.')
service, scale = uuid.split('_')
_state = state['systemdSubState']
instance = find_instance(instances, machines, state['machineID'])
services.append({
'uuid': uuid,
'state': _state,
'instance': instance.uuid,
'ip': instance.public_ip,
'service': service,
'scale': scale
})
return services
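# --- Editor's sketch (not part of the original module) ------------------------
# Fleet state names carry a ".service" suffix (split off in _ps_services
# above), and run_service builds the "<service>_<scale>" part. A stand-alone
# round trip of that naming convention; the names here are illustrative only.
def _unit_name_sketch(service, scale):
    return "%s_%s.service" % (service, scale)

def _parse_unit_name_sketch(unit):
    uuid, _ = unit.split('.')
    service, scale = uuid.split('_')
    return uuid, service, scale

assert _parse_unit_name_sketch(_unit_name_sketch('web', 2)) == ('web_2', 'web', '2')
# -------------------------------------------------------------------------------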
| [
"[email protected]"
]
| |
2eb41a9f7840f3f1afa3331c5e071c1d40c62578 | d3c21f0051e5ca2f45d98381b0372b4cd916b213 | /cgi-bin/module/plugins/hoster/RarefileNet.py | 8339d40eb79d560f8e907c754e90918bee1bd306 | []
| no_license | f3l/shareacc | ca165272f4265180d9178b6a066c69a0b368f8dd | 615c71216317f7ac46b5217f5672cad0c71a1e49 | refs/heads/master | 2020-04-06T06:41:11.278718 | 2013-02-05T15:15:15 | 2013-02-05T15:15:15 | 4,640,503 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | # -*- coding: utf-8 -*-
import re
from module.plugins.hoster.XFileSharingPro import XFileSharingPro, create_getInfo
from module.utils import html_unescape
class RarefileNet(XFileSharingPro):
__name__ = "RarefileNet"
__type__ = "hoster"
__pattern__ = r"http://(?:\w*\.)*rarefile.net/\w{12}"
__version__ = "0.01"
__description__ = """Rarefile.net hoster plugin"""
__author_name__ = ("zoidberg")
__author_mail__ = ("[email protected]")
FILE_NAME_PATTERN = r'<td><font color="red">(?P<N>.*?)</font></td>'
FILE_SIZE_PATTERN = r'<td>Size : (?P<S>.+?) '
def handleCaptcha(self, inputs):
captcha_div = re.search(r'<b>Enter code.*?<div.*?>(.*?)</div>', self.html, re.S).group(1)
self.logDebug(captcha_div)
numerals = re.findall('<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div))
inputs['code'] = "".join([a[1] for a in sorted(numerals, key = lambda num: int(num[0]))])
self.logDebug("CAPTCHA", inputs['code'], numerals)
return 3
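# --- Editor's sketch (not part of the original plugin) -------------------------
# The captcha digits arrive as absolutely positioned <span> elements, and
# handleCaptcha above recovers the left-to-right reading order by sorting on
# each span's padding-left offset. A stand-alone illustration of that sort,
# with fabricated offsets:
def _order_captcha_digits(numerals):
    # numerals: (padding_left, digit) string pairs, as re.findall returns them.
    return "".join(d for _, d in sorted(numerals, key=lambda num: int(num[0])))

assert _order_captcha_digits([('40', '7'), ('10', '3'), ('25', '1')]) == '317'
# --------------------------------------------------------------------------------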
getInfo = create_getInfo(RarefileNet) | [
"[email protected]"
]
| |
fccd595463832be925deef76bf019e3c94723841 | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Keras_tensorflow/source/tensorflow/python/ops/gen_array_ops.py | fa9bd628a4e28dffc4e2154ad4f8dfb562d935cc | [
"MIT"
]
| permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 178,854 | py | """Python wrappers around Brain.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from google.protobuf import text_format as _text_format
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
_batch_matrix_band_part_outputs = ["band"]
def batch_matrix_band_part(input, num_lower, num_upper, name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`.
num_lower: A `Tensor` of type `int64`.
num_upper: A `Tensor` of type `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("BatchMatrixBandPart", input=input,
num_lower=num_lower, num_upper=num_upper,
name=name)
return result
_batch_matrix_diag_outputs = ["output"]
def batch_matrix_diag(diagonal, name=None):
r"""TODO: add doc.
Args:
diagonal: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `diagonal`.
"""
result = _op_def_lib.apply_op("BatchMatrixDiag", diagonal=diagonal,
name=name)
return result
_batch_matrix_diag_part_outputs = ["diagonal"]
def batch_matrix_diag_part(input, name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("BatchMatrixDiagPart", input=input, name=name)
return result
_batch_matrix_set_diag_outputs = ["output"]
def batch_matrix_set_diag(input, diagonal, name=None):
r"""TODO: add doc.
Args:
input: A `Tensor`.
diagonal: A `Tensor`. Must have the same type as `input`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("BatchMatrixSetDiag", input=input,
diagonal=diagonal, name=name)
return result
__batch_to_space_outputs = ["output"]
def _batch_to_space(input, crops, block_size, name=None):
r"""BatchToSpace for 4-D tensors of type T.
This is a legacy version of the more general BatchToSpaceND.
Rearranges (permutes) data from batch into blocks of spatial data, followed by
cropping. This is the reverse transformation of SpaceToBatch. More specifically,
this op outputs a copy of the input tensor where values from the `batch`
dimension are moved in spatial blocks to the `height` and `width` dimensions,
followed by cropping along the `height` and `width` dimensions.
Args:
input: A `Tensor`. 4-D tensor with shape
`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
depth]`. Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.
crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
how many elements to crop from the intermediate result across the spatial
dimensions as follows:
crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
block_size: An `int` that is `>= 2`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
4-D with shape `[batch, height, width, depth]`, where:
height = height_pad - crop_top - crop_bottom
width = width_pad - crop_left - crop_right
The attr `block_size` must be greater than one. It indicates the block size.
Some examples:
(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:
```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
The output tensor has shape `[1, 2, 2, 1]` and value:
```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```
(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```
The output tensor has shape `[1, 2, 2, 3]` and value:
```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:
```prettyprint
  x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
```prettyprint
  x = [[[[1],  [2],  [3],  [4]],
        [[5],  [6],  [7],  [8]],
        [[9],  [10], [11], [12]],
        [[13], [14], [15], [16]]]]
```
(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
```prettyprint
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
```prettyprint
  x = [[[[1],  [2],  [3],  [4]],
        [[5],  [6],  [7],  [8]]],
       [[[9],  [10], [11], [12]],
        [[13], [14], [15], [16]]]]
```
"""
result = _op_def_lib.apply_op("BatchToSpace", input=input, crops=crops,
block_size=block_size, name=name)
return result
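# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# A minimal pure-Python model of the BatchToSpace index mapping described
# above, for 4-D nested-list inputs and zero cropping. It reproduces example
# (1); the helper name is illustrative only.
def _batch_to_space_sketch(x, block_size):
    b, h, w = len(x), len(x[0]), len(x[0][0])
    n = b // (block_size * block_size)
    out = [[[None for _ in range(w * block_size)]
            for _ in range(h * block_size)] for _ in range(n)]
    for src in range(b):
        # The batch index decomposes as ((bi * block_size) + bj) * n + k.
        k = src % n
        bj = (src // n) % block_size
        bi = src // (n * block_size)
        for i in range(h):
            for j in range(w):
                out[k][i * block_size + bi][j * block_size + bj] = x[src][i][j]
    return out

assert _batch_to_space_sketch(
    [[[[1]]], [[[2]]], [[[3]]], [[[4]]]], 2) == [[[[1], [2]], [[3], [4]]]]
# -------------------------------------------------------------------------------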
_batch_to_space_nd_outputs = ["output"]
def batch_to_space_nd(input, block_shape, crops, name=None):
r"""BatchToSpace for N-D tensors of type T.
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
`block_shape + [batch]`, interleaves these blocks back into the grid defined by
the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
the input. The spatial dimensions of this intermediate result are then
optionally cropped according to `crops` to produce the output. This is the
reverse of SpaceToBatch. See below for a precise description.
Args:
input: A `Tensor`.
N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has M dimensions.
block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
1-D with shape `[M]`, all values must be >= 1.
crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D with shape `[M, 2]`, all values must be >= 0.
`crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
dimension `i + 1`, which corresponds to spatial dimension `i`. It is
required that
`crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.
This operation is equivalent to the following steps:
1. Reshape `input` to `reshaped` of shape:
[block_shape[0], ..., block_shape[M-1],
batch / prod(block_shape),
input_shape[1], ..., input_shape[N-1]]
2. Permute dimensions of `reshaped` to produce `permuted` of shape
[batch / prod(block_shape),
input_shape[1], block_shape[0],
...,
input_shape[M], block_shape[M-1],
input_shape[M+1], ..., input_shape[N-1]]
3. Reshape `permuted` to produce `reshaped_permuted` of shape
[batch / prod(block_shape),
input_shape[1] * block_shape[0],
...,
input_shape[M] * block_shape[M-1],
input_shape[M+1],
...,
input_shape[N-1]]
4. Crop the start and end of dimensions `[1, ..., M]` of
`reshaped_permuted` according to `crops` to produce the output of shape:
[batch / prod(block_shape),
input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
...,
input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],
input_shape[M+1], ..., input_shape[N-1]]
Some examples:
(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
The output tensor has shape `[1, 2, 2, 1]` and value:
```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```
(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```
The output tensor has shape `[1, 2, 2, 3]` and value:
```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [0, 0]]`:
```prettyprint
  x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
The output tensor has shape `[1, 4, 4, 1]` and value:
```prettyprint
  x = [[[[1],  [2],  [3],  [4]],
        [[5],  [6],  [7],  [8]],
        [[9],  [10], [11], [12]],
        [[13], [14], [15], [16]]]]
```
(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
`crops = [[0, 0], [2, 0]]`:
```prettyprint
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
[[[0], [2], [4]]], [[[0], [10], [12]]],
[[[0], [5], [7]]], [[[0], [13], [15]]],
[[[0], [6], [8]]], [[[0], [14], [16]]]]
```
The output tensor has shape `[2, 2, 4, 1]` and value:
```prettyprint
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("BatchToSpaceND", input=input,
block_shape=block_shape, crops=crops,
name=name)
return result
_bitcast_outputs = ["output"]
def bitcast(input, type, name=None):
r"""Bitcasts a tensor from one type to another without copying data.
Given a tensor `input`, this operation returns a tensor that has the same buffer
data as `input` with datatype `type`.
If the input datatype `T` is larger than the output datatype `type` then the
shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
If `T` is smaller than `type`, the operator requires that the rightmost
dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
[..., sizeof(`type`)/sizeof(`T`)] to [...].
*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
endian orderings will give different results.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
type: A `tf.DType` from: `tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.int16, tf.int8, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint32, tf.half`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `type`.
"""
result = _op_def_lib.apply_op("Bitcast", input=input, type=type, name=name)
return result
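# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# Bitcast reinterprets the underlying bytes rather than converting the value.
# A pure-Python analogue with the standard `struct` module: view the bits of a
# float32 as an int32 (little-endian assumed here).
import struct as _struct_sketch

def _bitcast_f32_to_i32_sketch(x):
    return _struct_sketch.unpack('<i', _struct_sketch.pack('<f', x))[0]

assert _bitcast_f32_to_i32_sketch(1.0) == 0x3F800000  # IEEE-754 bits of 1.0f
# -------------------------------------------------------------------------------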
__broadcast_args_outputs = ["r0"]
def _broadcast_args(s0, s1, name=None):
r"""Return the shape of s0 op s1 with broadcast.
Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
Args:
s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
s1: A `Tensor`. Must have the same type as `s0`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `s0`.
"""
result = _op_def_lib.apply_op("BroadcastArgs", s0=s0, s1=s1, name=name)
return result
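# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# The broadcasted shape right-aligns both shapes; each aligned pair of sizes
# must be equal or contain a 1. A minimal pure-Python version:
def _broadcast_shape_sketch(s0, s1):
    pad0 = [1] * (len(s1) - len(s0)) + list(s0)
    pad1 = [1] * (len(s0) - len(s1)) + list(s1)
    out = []
    for a, b in zip(pad0, pad1):
        if a != b and 1 not in (a, b):
            raise ValueError("incompatible shapes")
        out.append(max(a, b))
    return out

assert _broadcast_shape_sketch([2, 1, 5], [3, 1]) == [2, 3, 5]
# -------------------------------------------------------------------------------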
__broadcast_gradient_args_outputs = ["r0", "r1"]
_BroadcastGradientArgsOutput = _collections.namedtuple("BroadcastGradientArgs",
__broadcast_gradient_args_outputs)
def _broadcast_gradient_args(s0, s1, name=None):
r"""Return the reduction indices for computing gradients of s0 op s1 with broadcast.
This is typically used by gradient computations for a broadcasting operation.
Args:
s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
s1: A `Tensor`. Must have the same type as `s0`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (r0, r1).
r0: A `Tensor`. Has the same type as `s0`.
r1: A `Tensor`. Has the same type as `s0`.
"""
result = _op_def_lib.apply_op("BroadcastGradientArgs", s0=s0, s1=s1,
name=name)
return _BroadcastGradientArgsOutput._make(result)
_check_numerics_outputs = ["output"]
def check_numerics(tensor, message, name=None):
r"""Checks a tensor for NaN and Inf values.
When run, reports an `InvalidArgument` error if `tensor` has any values
that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
Args:
tensor: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
message: A `string`. Prefix of the error message.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = _op_def_lib.apply_op("CheckNumerics", tensor=tensor,
message=message, name=name)
return result
__concat_outputs = ["output"]
def _concat(concat_dim, values, name=None):
r"""Concatenates tensors along one dimension.
Args:
concat_dim: A `Tensor` of type `int32`.
0-D. The dimension along which to concatenate. Must be in the
range [0, rank(values)).
values: A list of at least 2 `Tensor` objects of the same type.
The `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`.
A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension. This tensor's shape matches that of `values` except
in `concat_dim` where it has the sum of the sizes.
"""
result = _op_def_lib.apply_op("Concat", concat_dim=concat_dim,
values=values, name=name)
return result
__concat_offset_outputs = ["offset"]
def _concat_offset(concat_dim, shape, name=None):
r"""Computes offsets of concat inputs within its output.
For example:
```prettyprint
# 'x' is [2, 2, 7]
# 'y' is [2, 3, 7]
# 'z' is [2, 5, 7]
  concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
```
Args:
concat_dim: A `Tensor` of type `int32`.
The dimension along which to concatenate.
shape: A list of at least 2 `Tensor` objects of type `int32`.
The `N` int32 vectors representing shape of tensors being concatenated.
name: A name for the operation (optional).
Returns:
A list with the same number of `Tensor` objects as `shape` of `Tensor` objects of type `int32`.
The `N` int32 vectors representing the starting offset
of input tensors within the concatenated output.
This is typically used by gradient computations for a concat operation.
"""
result = _op_def_lib.apply_op("ConcatOffset", concat_dim=concat_dim,
shape=shape, name=name)
return result
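# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# The offsets are simply a running sum along the concatenated dimension. A
# pure-Python equivalent reproducing the docstring example (concat over dim 1):
def _concat_offset_sketch(concat_dim, shapes):
    offsets, running = [], 0
    for shape in shapes:
        off = [0] * len(shape)
        off[concat_dim] = running
        offsets.append(off)
        running += shape[concat_dim]
    return offsets

assert _concat_offset_sketch(1, [[2, 2, 7], [2, 3, 7], [2, 5, 7]]) == [
    [0, 0, 0], [0, 2, 0], [0, 5, 0]]
# -------------------------------------------------------------------------------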
__concat_v2_outputs = ["output"]
def _concat_v2(values, axis, name=None):
r"""Concatenates tensors along one dimension.
Args:
values: A list of at least 2 `Tensor` objects of the same type.
List of `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
0-D. The dimension along which to concatenate. Must be in the
range [-rank(values), rank(values)).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`.
A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension. This tensor's shape matches that of `values` except
in `concat_dim` where it has the sum of the sizes.
"""
result = _op_def_lib.apply_op("ConcatV2", values=values, axis=axis,
name=name)
return result
__const_outputs = ["output"]
def _const(value, dtype, name=None):
r"""Returns a constant tensor.
Args:
value: . Attr `value` is the tensor to return.
dtype: A `tf.DType`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
result = _op_def_lib.apply_op("Const", value=value, dtype=dtype, name=name)
return result
_copy_outputs = ["output"]
def copy(input, tensor_name=None, name=None):
r"""Copy Op.
Performs CPU-to-CPU or GPU-to-GPU deep-copying of tensor, depending on the
device on which the tensor is allocated.
Unlike the CopyHost Op, this op does not have HostMemory constraint on its
input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
result = _op_def_lib.apply_op("Copy", input=input, tensor_name=tensor_name,
name=name)
return result
_copy_host_outputs = ["output"]
def copy_host(input, tensor_name=None, name=None):
r"""Copy Host Op.
Performs CPU-to-CPU deep-copying of tensor.
Unlike the Copy Op, this op has HostMemory constraint on its input or output.
Args:
input: A `Tensor`. Input tensor.
tensor_name: An optional `string`. Defaults to `""`.
The name of the input tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor, deep-copied from input.
"""
result = _op_def_lib.apply_op("CopyHost", input=input,
tensor_name=tensor_name, name=name)
return result
_debug_identity_outputs = ["output"]
def debug_identity(input, tensor_name=None, debug_urls=None, name=None):
r"""Debug Identity Op.
Provides an identity mapping of the non-Ref type input tensor for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Output tensor that equals the input tensor.
"""
result = _op_def_lib.apply_op("DebugIdentity", input=input,
tensor_name=tensor_name,
debug_urls=debug_urls, name=name)
return result
_debug_nan_count_outputs = ["output"]
def debug_nan_count(input, tensor_name=None, debug_urls=None, name=None):
r"""Debug NaN Value Counter Op
Counts number of NaNs in the input tensor, for debugging.
Args:
input: A `Tensor`. Input tensor, non-Reference type.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
An integer output tensor that is the number of NaNs in the input.
"""
result = _op_def_lib.apply_op("DebugNanCount", input=input,
tensor_name=tensor_name,
debug_urls=debug_urls, name=name)
return result
_debug_numeric_summary_outputs = ["output"]
def debug_numeric_summary(input, tensor_name=None, debug_urls=None,
name=None):
r"""Debug Numeric Summary Op.
Provide a basic summary of numeric value types, range and distribution.
Args:
input: A `Tensor`. Input tensor, non-Reference type, float or double.
tensor_name: An optional `string`. Defaults to `""`.
Name of the input tensor.
debug_urls: An optional list of `strings`. Defaults to `[]`.
List of URLs to debug targets, e.g.,
file:///foo/tfdbg_dump, grpc:://localhost:11011
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float64`.
A double tensor of shape [12], the elements of which are:
[0]: is initialized (1.0) or not (0.0).
[1]: total number of elements
[2]: -inf count
[3]: negative element count (excluding -inf)
[4]: zero element count
[5]: positive element count (excluding +inf)
[6]: +inf element count
[7]: NaN element count
Output elements [1:8] are all zero, if the tensor is uninitialized.
[8]: minimum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: +inf.
[9]: maximum of all non-inf and non-NaN elements.
If uninitialized or no such element exists: -inf.
[10]: mean of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
[11]: variance of all non-inf and non-NaN elements.
If uninitialized or no such element exists: NaN.
"""
result = _op_def_lib.apply_op("DebugNumericSummary", input=input,
tensor_name=tensor_name,
debug_urls=debug_urls, name=name)
return result
_depth_to_space_outputs = ["output"]
def depth_to_space(input, block_size, name=None):
r"""DepthToSpace for tensors of type T.
Rearranges data from depth into blocks of spatial data.
This is the reverse transformation of SpaceToDepth. More specifically,
this op outputs a copy of the input tensor where values from the `depth`
dimension are moved in spatial blocks to the `height` and `width` dimensions.
The attr `block_size` indicates the input block size and how the data is moved.
* Chunks of data of size `block_size * block_size` from depth are rearranged
into non-overlapping blocks of size `block_size x block_size`
  * The width of the output tensor is `input_width * block_size`, whereas the
    height is `input_height * block_size`.
* The depth of the input tensor must be divisible by
`block_size * block_size`.
That is, assuming the input is in the shape:
`[batch, height, width, depth]`,
the shape of the output will be:
`[batch, height*block_size, width*block_size, depth/(block_size*block_size)]`
This operation requires that the input tensor be of rank 4, and that
`block_size` be >=1 and that `block_size * block_size` be a divisor of the
input depth.
This operation is useful for resizing the activations between convolutions
(but keeping all data), e.g. instead of pooling. It is also useful for training
purely convolutional models.
For example, given this input of shape `[1, 1, 1, 4]`, and a block size of 2:
```prettyprint
x = [[[[1, 2, 3, 4]]]]
```
This operation will output a tensor of shape `[1, 2, 2, 1]`:
```prettyprint
[[[[1], [2]],
[[3], [4]]]]
```
Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
the corresponding output will have 2x2 elements and will have a depth of
1 channel (1 = `4 / (block_size * block_size)`).
The output element shape is `[2, 2, 1]`.
For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.
```prettyprint
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```
This operation, for block size of 2, will return the following tensor of shape
`[1, 2, 2, 3]`
```prettyprint
[[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:
```prettyprint
x = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
```
the operator will return the following tensor of shape `[1 4 4 1]`:
```prettyprint
  x = [[[ [1],  [2],  [5],  [6]],
        [ [3],  [4],  [7],  [8]],
        [ [9],  [10], [13], [14]],
        [ [11], [12], [15], [16]]]]
```
Args:
input: A `Tensor`.
block_size: An `int` that is `>= 2`.
The size of the spatial block, same as in Space2Depth.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("DepthToSpace", input=input,
block_size=block_size, name=name)
return result
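# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# A pure-Python model of the DepthToSpace channel-to-pixel mapping above for
# nested-list inputs [batch][h][w][depth]; reproduces the first example.
def _depth_to_space_sketch(x, block_size):
    b, h, w, d = len(x), len(x[0]), len(x[0][0]), len(x[0][0][0])
    dd = d // (block_size * block_size)
    out = [[[[None] * dd for _ in range(w * block_size)]
            for _ in range(h * block_size)] for _ in range(b)]
    for n in range(b):
        for i in range(h):
            for j in range(w):
                for c in range(d):
                    # Channel index decomposes as (bi * bs + bj) * dd + k.
                    k = c % dd
                    bj = (c // dd) % block_size
                    bi = c // (dd * block_size)
                    out[n][i * block_size + bi][j * block_size + bj][k] = \
                        x[n][i][j][c]
    return out

assert _depth_to_space_sketch([[[[1, 2, 3, 4]]]], 2) == [[[[1], [2]], [[3], [4]]]]
# -------------------------------------------------------------------------------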
_dequantize_outputs = ["output"]
def dequantize(input, min_range, max_range, mode=None, name=None):
r"""Dequantize the 'input' tensor into a float Tensor.
[min_range, max_range] are scalar floats that specify the range for
the 'input' data. The 'mode' attribute controls exactly which calculations are
used to convert the float values to their quantized equivalents.
In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
```
if T == qint8, in[i] += (range(T) + 1)/ 2.0
out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
*MIN_COMBINED Mode Example*
If the input comes from a QuantizedRelu6, the output type is
quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
0-6. The min_range and max_range values are therefore 0.0 and 6.0.
Dequantize on quint8 will take each value, cast to float, and multiply
by 6 / 255.
Note that if quantizedtype is qint8, the operation will additionally add
each value by 128 prior to casting.
If the mode is 'MIN_FIRST', then this approach is used:
```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / number_of_steps
  result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```
Args:
input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.
min_range: A `Tensor` of type `float32`.
The minimum scalar value possibly produced for the input.
max_range: A `Tensor` of type `float32`.
The maximum scalar value possibly produced for the input.
mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST"`. Defaults to `"MIN_COMBINED"`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
result = _op_def_lib.apply_op("Dequantize", input=input,
min_range=min_range, max_range=max_range,
mode=mode, name=name)
return result
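# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# MIN_COMBINED dequantization for quint8, as in the QuantizedRelu6 example
# above: each stored value scales by (max_range - min_range) / 255.
def _dequantize_quint8_sketch(q, min_range, max_range):
    t_range = 255.0  # numeric_limits<quint8>::max() - numeric_limits::min()
    return min_range + q * (max_range - min_range) / t_range

assert _dequantize_quint8_sketch(255, 0.0, 6.0) == 6.0
assert abs(_dequantize_quint8_sketch(128, 0.0, 6.0) - 128 * 6.0 / 255) < 1e-12
# -------------------------------------------------------------------------------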
_diag_outputs = ["output"]
def diag(diagonal, name=None):
r"""Returns a diagonal tensor with a given diagonal values.
Given a `diagonal`, this operation returns a tensor with the `diagonal` and
everything else padded with zeros. The diagonal is computed as follows:
Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:
`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.
For example:
```prettyprint
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
```
Args:
diagonal: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
Rank k tensor where k is at most 3.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `diagonal`.
"""
result = _op_def_lib.apply_op("Diag", diagonal=diagonal, name=name)
return result
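# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# For a rank-1 `diagonal`, Diag is just an identity-patterned scatter. A
# pure-Python version of the docstring example:
def _diag_sketch(diagonal):
    n = len(diagonal)
    return [[diagonal[i] if i == j else 0 for j in range(n)] for i in range(n)]

assert _diag_sketch([1, 2, 3, 4]) == [
    [1, 0, 0, 0], [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]]
# -------------------------------------------------------------------------------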
_diag_part_outputs = ["diagonal"]
def diag_part(input, name=None):
r"""Returns the diagonal part of the tensor.
This operation returns a tensor with the `diagonal` part
of the `input`. The `diagonal` part is computed as follows:
Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
tensor of rank `k` with dimensions `[D1,..., Dk]` where:
`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.
For example:
```prettyprint
# 'input' is [[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]]
tf.diag_part(input) ==> [1, 2, 3, 4]
```
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
Rank k tensor where k is 2, 4, or 6.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`. The extracted diagonal.
"""
result = _op_def_lib.apply_op("DiagPart", input=input, name=name)
return result
__edit_distance_outputs = ["output"]
def _edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape,
truth_indices, truth_values, truth_shape, normalize=None,
name=None):
r"""Computes the (possibly normalized) Levenshtein Edit Distance.
The inputs are variable-length sequences provided by SparseTensors
(hypothesis_indices, hypothesis_values, hypothesis_shape)
and
(truth_indices, truth_values, truth_shape).
The inputs are:
Args:
hypothesis_indices: A `Tensor` of type `int64`.
The indices of the hypothesis list SparseTensor.
This is an N x R int64 matrix.
hypothesis_values: A `Tensor`.
The values of the hypothesis list SparseTensor.
This is an N-length vector.
hypothesis_shape: A `Tensor` of type `int64`.
The shape of the hypothesis list SparseTensor.
This is an R-length vector.
truth_indices: A `Tensor` of type `int64`.
The indices of the truth list SparseTensor.
This is an M x R int64 matrix.
truth_values: A `Tensor`. Must have the same type as `hypothesis_values`.
The values of the truth list SparseTensor.
This is an M-length vector.
    truth_shape: A `Tensor` of type `int64`.
      The shape of the truth list SparseTensor.
      This is an R-length vector.
normalize: An optional `bool`. Defaults to `True`.
boolean (if true, edit distances are normalized by length of truth).
The output is:
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`. A dense float tensor with rank R - 1.
For the example input:
// hypothesis represents a 2x1 matrix with variable-length values:
// (0,0) = ["a"]
// (1,0) = ["b"]
hypothesis_indices = [[0, 0, 0],
[1, 0, 0]]
hypothesis_values = ["a", "b"]
hypothesis_shape = [2, 1, 1]
// truth represents a 2x2 matrix with variable-length values:
// (0,0) = []
// (0,1) = ["a"]
// (1,0) = ["b", "c"]
// (1,1) = ["a"]
truth_indices = [[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]]
truth_values = ["a", "b", "c", "a"]
truth_shape = [2, 2, 2]
normalize = true
The output will be:
// output is a 2x2 matrix with edit distances normalized by truth lengths.
output = [[inf, 1.0], // (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] // (1,0): addition, (1,1): no hypothesis
"""
result = _op_def_lib.apply_op("EditDistance",
hypothesis_indices=hypothesis_indices,
hypothesis_values=hypothesis_values,
hypothesis_shape=hypothesis_shape,
truth_indices=truth_indices,
truth_values=truth_values,
truth_shape=truth_shape, normalize=normalize,
name=name)
return result
__expand_dims_outputs = ["output"]
def _expand_dims(input, dim, name=None):
r"""Inserts a dimension of 1 into a tensor's shape.
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `dim` of `input`'s shape. The dimension index `dim` starts at
zero; if you specify a negative number for `dim` it is counted backward from
the end.
This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.
Other examples:
```prettyprint
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]
# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```
This operation requires that:
`-1-input.dims() <= dim <= input.dims()`
This operation is related to `squeeze()`, which removes dimensions of
size 1.
Args:
input: A `Tensor`.
dim: A `Tensor`. Must be one of the following types: `int32`, `int64`.
0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but its shape has an additional
dimension of size 1 added.
"""
result = _op_def_lib.apply_op("ExpandDims", input=input, dim=dim, name=name)
return result
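# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# ExpandDims only edits the shape; a pure-Python shape calculator reproducing
# the docstring examples, including the negative-`dim` convention:
def _expand_dims_shape_sketch(shape, dim):
    if dim < 0:
        dim += len(shape) + 1
    return shape[:dim] + [1] + shape[dim:]

assert _expand_dims_shape_sketch([2], 0) == [1, 2]
assert _expand_dims_shape_sketch([2], -1) == [2, 1]
assert _expand_dims_shape_sketch([2, 3, 5], 2) == [2, 3, 1, 5]
# -------------------------------------------------------------------------------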
_extract_image_patches_outputs = ["patches"]
def extract_image_patches(images, ksizes, strides, rates, padding, name=None):
r"""Extract `patches` from `images` and put them in the "depth" output dimension.
Args:
images: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `int64`, `uint8`, `int16`, `int8`, `uint16`, `half`.
4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.
ksizes: A list of `ints` that has length `>= 4`.
The size of the sliding window for each dimension of `images`.
strides: A list of `ints` that has length `>= 4`.
1-D of length 4. How far the centers of two consecutive patches are in
the images. Must be: `[1, stride_rows, stride_cols, 1]`.
rates: A list of `ints` that has length `>= 4`.
1-D of length 4. Must be: `[1, rate_rows, rate_cols, 1]`. This is the
input stride, specifying how far two consecutive patch samples are in the
input. Equivalent to extracting patches with
`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by
subsampling them spatially by a factor of `rates`.
padding: A `string` from: `"SAME", "VALID"`.
The type of padding algorithm to use.
We specify the size-related attributes as:
```python
ksizes = [1, ksize_rows, ksize_cols, 1]
strides = [1, strides_rows, strides_cols, 1]
rates = [1, rates_rows, rates_cols, 1]
```
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `images`.
4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
ksize_cols * depth]` containing image patches with size
`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension.
"""
result = _op_def_lib.apply_op("ExtractImagePatches", images=images,
ksizes=ksizes, strides=strides, rates=rates,
padding=padding, name=name)
return result
_fake_quant_with_min_max_args_outputs = ["outputs"]
def fake_quant_with_min_max_args(inputs, min=None, max=None, name=None):
r"""Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
Attributes [min; max] define the clamping range for the 'inputs' data. Op
divides this range into 255 steps (total of 256 values), then replaces each
'inputs' value with the closest of the quantized step values.
Quantization is called fake since the output is still in floating point.
Args:
inputs: A `Tensor` of type `float32`.
min: An optional `float`. Defaults to `-6`.
max: An optional `float`. Defaults to `6`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxArgs", inputs=inputs,
min=min, max=max, name=name)
return result
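# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# A simplified scalar model of the fake-quantization above: clamp to
# [min, max], snap to the nearest of 256 evenly spaced values, and map back to
# float. (The real op also nudges the quantization grid; omitted here.)
def _fake_quant_sketch(x, lo=-6.0, hi=6.0, steps=255):
    x = min(max(x, lo), hi)
    scale = (hi - lo) / steps
    return lo + round((x - lo) / scale) * scale

assert abs(_fake_quant_sketch(10.0) - 6.0) < 1e-9          # clamps to max
assert abs(_fake_quant_sketch(2.5) - 2.5) <= (12.0 / 255) / 2 + 1e-9
# -------------------------------------------------------------------------------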
_fake_quant_with_min_max_args_gradient_outputs = ["backprops"]
def fake_quant_with_min_max_args_gradient(gradients, inputs, min=None,
max=None, name=None):
r"""Compute gradients for a FakeQuantWithMinMaxArgs operation.
Args:
gradients: A `Tensor` of type `float32`.
Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.
inputs: A `Tensor` of type `float32`.
Values passed as inputs to the FakeQuantWithMinMaxArgs operation.
min: An optional `float`. Defaults to `-6`.
max: An optional `float`. Defaults to `6`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
`gradients * (inputs >= min && inputs <= max)`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxArgsGradient",
gradients=gradients, inputs=inputs, min=min,
max=max, name=name)
return result
_fake_quant_with_min_max_vars_outputs = ["outputs"]
def fake_quant_with_min_max_vars(inputs, min, max, name=None):
r"""Fake-quantize the 'inputs' tensor of type float and shape `[b, h, w, d]` via
global float scalars `min` and `max` to 'outputs' tensor of same shape as
`inputs`.
[min; max] is the clamping range for the 'inputs' data. Op divides this range
into 255 steps (total of 256 values), then replaces each 'inputs' value with the
closest of the quantized step values.
This operation has a gradient and thus allows for training `min` and `max` values.
Args:
inputs: A `Tensor` of type `float32`.
min: A `Tensor` of type `float32`.
max: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxVars", inputs=inputs,
min=min, max=max, name=name)
return result
_fake_quant_with_min_max_vars_gradient_outputs = ["backprops_wrt_input",
"backprop_wrt_min",
"backprop_wrt_max"]
_FakeQuantWithMinMaxVarsGradientOutput = _collections.namedtuple("FakeQuantWithMinMaxVarsGradient",
_fake_quant_with_min_max_vars_gradient_outputs)
def fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max,
name=None):
r"""Compute gradients for a FakeQuantWithMinMaxVars operation.
Args:
gradients: A `Tensor` of type `float32`.
Backpropagated gradients above the FakeQuantWithMinMaxVars operation.
inputs: A `Tensor` of type `float32`.
Values passed as inputs to the FakeQuantWithMinMaxVars operation.
min, max: Quantization interval, scalar floats.
min: A `Tensor` of type `float32`.
max: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
backprops_wrt_input: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs:
`gradients * (inputs >= min && inputs <= max)`.
backprop_wrt_min: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter:
`sum(gradients * (inputs < min))`.
backprop_wrt_max: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter:
`sum(gradients * (inputs > max))`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxVarsGradient",
gradients=gradients, inputs=inputs, min=min,
max=max, name=name)
return _FakeQuantWithMinMaxVarsGradientOutput._make(result)
_fake_quant_with_min_max_vars_per_channel_outputs = ["outputs"]
def fake_quant_with_min_max_vars_per_channel(inputs, min, max, name=None):
r"""Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`,
`[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]`
to 'outputs' tensor of same shape as `inputs`.
[min; max] is the clamping range for the 'inputs' data in the corresponding
depth channel. Op divides this range into 255 steps (total of 256 values), then
replaces each 'inputs' value with the closest of the quantized step values.
This operation has a gradient and thus allows for training `min` and `max` values.
Args:
inputs: A `Tensor` of type `float32`.
min: A `Tensor` of type `float32`.
max: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxVarsPerChannel",
inputs=inputs, min=min, max=max, name=name)
return result
_fake_quant_with_min_max_vars_per_channel_gradient_outputs = ["backprops_wrt_input",
"backprop_wrt_min",
"backprop_wrt_max"]
_FakeQuantWithMinMaxVarsPerChannelGradientOutput = _collections.namedtuple("FakeQuantWithMinMaxVarsPerChannelGradient",
_fake_quant_with_min_max_vars_per_channel_gradient_outputs)
def fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min,
max, name=None):
r"""Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation.
Args:
gradients: A `Tensor` of type `float32`.
Backpropagated gradients above the FakeQuantWithMinMaxVars operation,
shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`.
inputs: A `Tensor` of type `float32`.
Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape
same as `gradients`.
min, max: Quantization interval, floats of shape `[d]`.
min: A `Tensor` of type `float32`.
max: A `Tensor` of type `float32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max).
backprops_wrt_input: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. inputs, shape same as
`inputs`:
`gradients * (inputs >= min && inputs <= max)`.
backprop_wrt_min: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. min parameter, shape `[d]`:
`sum_per_d(gradients * (inputs < min))`.
backprop_wrt_max: A `Tensor` of type `float32`. Backpropagated gradients w.r.t. max parameter, shape `[d]`:
`sum_per_d(gradients * (inputs > max))`.
"""
result = _op_def_lib.apply_op("FakeQuantWithMinMaxVarsPerChannelGradient",
gradients=gradients, inputs=inputs, min=min,
max=max, name=name)
return _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(result)
_fill_outputs = ["output"]
def fill(dims, value, name=None):
r"""Creates a tensor filled with a scalar value.
This operation creates a tensor of shape `dims` and fills it with `value`.
For example:
```prettyprint
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
[9, 9, 9]]
```
Args:
dims: A `Tensor` of type `int32`.
1-D. Represents the shape of the output tensor.
value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor.
@compatibility(numpy)
Equivalent to np.full
@end_compatibility
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `value`.
"""
result = _op_def_lib.apply_op("Fill", dims=dims, value=value, name=name)
return result
_gather_outputs = ["output"]
def gather(params, indices, validate_indices=None, name=None):
r"""Gather slices from `params` according to `indices`.
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
```python
# Scalar indices
output[:, ..., :] = params[indices, :, ... :]
# Vector indices
output[i, :, ..., :] = params[indices[i], :, ... :]
# Higher rank indices
output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
```
If `indices` is a permutation and `len(indices) == params.shape[0]` then
this operation will permute `params` accordingly.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="../../images/Gather.png" alt>
</div>
Args:
params: A `Tensor`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
"""
result = _op_def_lib.apply_op("Gather", params=params, indices=indices,
validate_indices=validate_indices, name=name)
return result
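# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# For vector indices, Gather is plain fancy indexing along axis 0:
def _gather_sketch(params, indices):
    return [params[i] for i in indices]

assert _gather_sketch(['a', 'b', 'c', 'd'], [2, 0, 2]) == ['c', 'a', 'c']
# -------------------------------------------------------------------------------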
_gather_nd_outputs = ["output"]
def gather_nd(params, indices, name=None):
r"""Gather values or slices from `params` according to `indices`.
`params` is a Tensor of rank `P` and `indices` is a Tensor of rank `Q`.
  `indices` must be an integer tensor containing indices into `params`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `params`.
Produces an output tensor with shape
```
[d_0, ..., d_{Q-2}, params.shape[K], ..., params.shape[P-1]].
```
Some examples below.
Simple indexing into a matrix:
```python
indices = [[0, 0], [1, 1]]
params = [['a', 'b'], ['c', 'd']]
output = ['a', 'd']
```
Slice indexing into a matrix:
```python
indices = [[1], [0]]
params = [['a', 'b'], ['c', 'd']]
output = [['c', 'd'], ['a', 'b']]
```
Indexing into a 3-tensor:
```python
indices = [[1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['a1', 'b1'], ['c1', 'd1']]]
indices = [[0, 1], [1, 0]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['c0', 'd0'], ['a1', 'b1']]
indices = [[0, 0, 1], [1, 0, 1]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = ['b0', 'b1']
```
Batched indexing into a matrix:
```python
indices = [[[0, 0]], [[0, 1]]]
params = [['a', 'b'], ['c', 'd']]
output = [['a'], ['b']]
```
Batched slice indexing into a matrix:
```python
indices = [[[1]], [[0]]]
params = [['a', 'b'], ['c', 'd']]
output = [[['c', 'd']], [['a', 'b']]]
```
Batched indexing into a 3-tensor:
```python
indices = [[[1]], [[0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[[['a1', 'b1'], ['c1', 'd1']]],
[[['a0', 'b0'], ['c0', 'd0']]]]
indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [[['c0', 'd0'], ['a1', 'b1']],
[['a0', 'b0'], ['c1', 'd1']]]
indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
params = [[['a0', 'b0'], ['c0', 'd0']],
[['a1', 'b1'], ['c1', 'd1']]]
output = [['b0', 'b1'], ['d0', 'c1']]
```
Args:
params: A `Tensor`. `P-D`. The tensor from which to gather values.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
`Q-D`. Index tensor having shape `[d_0, ..., d_{Q-2}, K]`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `params`.
`(P+Q-K-1)-D`. Values from `params` gathered from indices given by
`indices`.
"""
result = _op_def_lib.apply_op("GatherNd", params=params, indices=indices,
name=name)
return result
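# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# Each index vector in GatherNd walks K leading dimensions of `params`,
# selecting an element (K == rank) or a slice (K < rank). A pure-Python
# resolver reproducing the first two docstring examples:
def _gather_nd_one_sketch(params, index):
    out = params
    for i in index:
        out = out[i]
    return out

_params = [['a', 'b'], ['c', 'd']]
assert [_gather_nd_one_sketch(_params, ix) for ix in [[0, 0], [1, 1]]] == ['a', 'd']
assert [_gather_nd_one_sketch(_params, ix) for ix in [[1], [0]]] == [
    ['c', 'd'], ['a', 'b']]
# -------------------------------------------------------------------------------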
_identity_outputs = ["output"]
def identity(input, name=None):
r"""Return a tensor with the same shape and contents as the input tensor or value.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("Identity", input=input, name=name)
return result
_immutable_const_outputs = ["tensor"]
def immutable_const(dtype, shape, memory_region_name, name=None):
r"""Returns immutable tensor from memory region.
The current implementation memmaps the tensor from a file.
Args:
dtype: A `tf.DType`. Type of the returned tensor.
shape: A `tf.TensorShape` or list of `ints`. Shape of the returned tensor.
memory_region_name: A `string`.
Name of readonly memory region used by the tensor, see
NewReadOnlyMemoryRegionFromFile in tensorflow::Env.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
"""
result = _op_def_lib.apply_op("ImmutableConst", dtype=dtype, shape=shape,
memory_region_name=memory_region_name,
name=name)
return result
_invert_permutation_outputs = ["y"]
def invert_permutation(x, name=None):
r"""Computes the inverse permutation of a tensor.
This operation computes the inverse of an index permutation. It takes a 1-D
integer tensor `x`, which represents the indices of a zero-based array, and
swaps each value with its index position. In other words, for an output tensor
`y` and an input tensor `x`, this operation computes the following:
`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
The values must include 0. There can be no duplicate values or negative values.
For example:
```prettyprint
# tensor `x` is [3, 4, 0, 2, 1]
invert_permutation(x) ==> [2, 4, 3, 0, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`. 1-D.
"""
result = _op_def_lib.apply_op("InvertPermutation", x=x, name=name)
return result
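# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# The defining identity y[x[i]] = i, written out directly:
def _invert_permutation_sketch(x):
    y = [0] * len(x)
    for i, v in enumerate(x):
        y[v] = i
    return y

assert _invert_permutation_sketch([3, 4, 0, 2, 1]) == [2, 4, 3, 0, 1]
# -------------------------------------------------------------------------------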
__list_diff_outputs = ["out", "idx"]
_ListDiffOutput = _collections.namedtuple("ListDiff", __list_diff_outputs)
def _list_diff(x, y, out_idx=None, name=None):
r"""Computes the difference between two lists of numbers or strings.
Given a list `x` and a list `y`, this operation returns a list `out` that
represents all values that are in `x` but not in `y`. The returned list `out`
is sorted in the same order that the numbers appear in `x` (duplicates are
preserved). This operation also returns a list `idx` that represents the
position of each `out` element in `x`. In other words:
`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
For example, given this input:
```prettyprint
x = [1, 2, 3, 4, 5, 6]
y = [1, 3, 5]
```
This operation would return:
```prettyprint
out ==> [2, 4, 6]
idx ==> [1, 3, 5]
```
Args:
x: A `Tensor`. 1-D. Values to keep.
y: A `Tensor`. Must have the same type as `x`. 1-D. Values to remove.
out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (out, idx).
out: A `Tensor`. Has the same type as `x`. 1-D. Values present in `x` but not in `y`.
idx: A `Tensor` of type `out_idx`. 1-D. Positions of `x` values preserved in `out`.
"""
result = _op_def_lib.apply_op("ListDiff", x=x, y=y, out_idx=out_idx,
name=name)
return _ListDiffOutput._make(result)
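# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# ListDiff keeps x's order and reports positions; a set-based pure-Python
# version reproducing the docstring example:
def _list_diff_sketch(x, y):
    drop = set(y)
    pairs = [(v, i) for i, v in enumerate(x) if v not in drop]
    return [v for v, _ in pairs], [i for _, i in pairs]

assert _list_diff_sketch([1, 2, 3, 4, 5, 6], [1, 3, 5]) == ([2, 4, 6], [1, 3, 5])
# -------------------------------------------------------------------------------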
_matrix_band_part_outputs = ["band"]
def matrix_band_part(input, num_lower, num_upper, name=None):
r"""Copy a tensor setting everything outside a central band in each innermost matrix
to zero.
The `band` part is computed as follows:
Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
tensor with the same shape where
`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
The indicator function
  `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
                   (num_upper < 0 || (n-m) <= num_upper)`.
For example:
```prettyprint
# if 'input' is [[ 0, 1, 2, 3]
[-1, 0, 1, 2]
[-2, -1, 0, 1]
[-3, -2, -1, 0]],
tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3]
[-1, 0, 1, 2]
[ 0, -1, 0, 1]
[ 0, 0, -1, 0]],
tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0]
[-1, 0, 1, 0]
[-2, -1, 0, 1]
[ 0, -2, -1, 0]]
```
Useful special cases:
```prettyprint
tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
tf.matrix_band_part(input, 0, 0) ==> Diagonal.
```
Args:
input: A `Tensor`. Rank `k` tensor.
num_lower: A `Tensor` of type `int64`.
0-D tensor. Number of subdiagonals to keep. If negative, keep entire
lower triangle.
num_upper: A `Tensor` of type `int64`.
0-D tensor. Number of superdiagonals to keep. If negative, keep
entire upper triangle.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Rank `k` tensor of the same shape as input. The extracted banded tensor.
"""
result = _op_def_lib.apply_op("MatrixBandPart", input=input,
num_lower=num_lower, num_upper=num_upper,
name=name)
return result
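# --- Editor's sketch (not part of the machine-generated wrappers) -------------
# The in_band indicator from the docstring, applied elementwise to a plain 2-D
# list; reproduces the (num_lower=1, num_upper=-1) example above.
def _in_band_sketch(m, n, num_lower, num_upper):
    return ((num_lower < 0 or (m - n) <= num_lower) and
            (num_upper < 0 or (n - m) <= num_upper))

def _matrix_band_part_sketch(mat, num_lower, num_upper):
    return [[v if _in_band_sketch(i, j, num_lower, num_upper) else 0
             for j, v in enumerate(row)] for i, row in enumerate(mat)]

_mat = [[0, 1, 2, 3], [-1, 0, 1, 2], [-2, -1, 0, 1], [-3, -2, -1, 0]]
assert _matrix_band_part_sketch(_mat, 1, -1) == [
    [0, 1, 2, 3], [-1, 0, 1, 2], [0, -1, 0, 1], [0, 0, -1, 0]]
# -------------------------------------------------------------------------------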
_matrix_diag_outputs = ["output"]
def matrix_diag(diagonal, name=None):
r"""Returns a batched diagonal tensor with a given batched diagonal values.
Given a `diagonal`, this operation returns a tensor with the `diagonal` and
everything else padded with zeros. The diagonal is computed as follows:
Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
  tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
For example:
```prettyprint
# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
and diagonal.shape = (2, 4)
tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]],
[[5, 0, 0, 0]
[0, 6, 0, 0]
[0, 0, 7, 0]
[0, 0, 0, 8]]]
which has shape (2, 4, 4)
```
Args:
diagonal: A `Tensor`. Rank `k`, where `k >= 1`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `diagonal`.
Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.
"""
result = _op_def_lib.apply_op("MatrixDiag", diagonal=diagonal, name=name)
return result
_matrix_diag_part_outputs = ["diagonal"]
def matrix_diag_part(input, name=None):
r"""Returns the batched diagonal part of a batched tensor.
This operation returns a tensor with the `diagonal` part
of the batched `input`. The `diagonal` part is computed as follows:
Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where:
`diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`.
The input must be at least a matrix.
For example:
```prettyprint
# 'input' is [[[1, 0, 0, 0]
[0, 2, 0, 0]
[0, 0, 3, 0]
[0, 0, 0, 4]],
[[5, 0, 0, 0]
[0, 6, 0, 0]
[0, 0, 7, 0]
[0, 0, 0, 8]]]
and input.shape = (2, 4, 4)
tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]]
which has shape (2, 4)
```
Args:
input: A `Tensor`. Rank `k` tensor where `k >= 2`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The extracted diagonal(s) having shape
`diagonal.shape = input.shape[:-2] + [min(input.shape[-2:])]`.
"""
result = _op_def_lib.apply_op("MatrixDiagPart", input=input, name=name)
return result
_matrix_set_diag_outputs = ["output"]
def matrix_set_diag(input, diagonal, name=None):
r"""Returns a batched matrix tensor with new batched diagonal values.
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the main diagonal of the
innermost matrices. These will be overwritten by the values in `diagonal`.
The output is computed as follows:
Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
`k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a
tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
* `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
* `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
Args:
input: A `Tensor`. Rank `k+1`, where `k >= 1`.
diagonal: A `Tensor`. Must have the same type as `input`.
Rank `k`, where `k >= 1`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Rank `k+1`, with `output.shape = input.shape`.
"""
result = _op_def_lib.apply_op("MatrixSetDiag", input=input,
diagonal=diagonal, name=name)
return result
__mirror_pad_outputs = ["output"]
def _mirror_pad(input, paddings, mode, name=None):
r"""Pads a tensor with mirrored values.
This operation pads a `input` with mirrored values according to the `paddings`
you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
how many values to add before the contents of `input` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of `input`
in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
than `input.dim_size(D)` in `SYMMETRIC` mode (where the border is copied) or
`input.dim_size(D) - 1` in `REFLECT` mode (where it is not).
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```prettyprint
# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
[2, 1, 1, 2, 3, 3, 2]
[5, 4, 4, 5, 6, 6, 5]
[5, 4, 4, 5, 6, 6, 5]]
```
Args:
input: A `Tensor`. The input tensor to be padded.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions
do not include the borders, while in symmetric mode the padded regions
do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings`
is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and
it is `[1, 2, 3, 3, 2]` in symmetric mode.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`. The padded tensor.
"""
result = _op_def_lib.apply_op("MirrorPad", input=input, paddings=paddings,
mode=mode, name=name)
return result
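# Illustrative usage (a sketch): the public `tf.pad` wrapper reaches this op
# when `mode` is "REFLECT" or "SYMMETRIC".
#
#   import tensorflow as tf
#   t = tf.constant([[1, 2, 3], [4, 5, 6]])
#   padded = tf.pad(t, [[1, 1], [2, 2]], mode="SYMMETRIC")
#   # => the 4 x 7 result shown in the docstring example above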
__mirror_pad_grad_outputs = ["output"]
def _mirror_pad_grad(input, paddings, mode, name=None):
r"""Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
This operation folds the padded areas of `input` by `MirrorPad` according to the
`paddings` you specify. `paddings` must be the same as `paddings` argument
given to the corresponding `MirrorPad` op.
The folded size of each dimension D of the output is:
`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
For example:
```prettyprint
# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
# 'mode' is SYMMETRIC.
# rank of 't' is 2.
pad(t, paddings) ==> [[ 1, 5]
[11, 28]]
```
Args:
input: A `Tensor`. The input tensor to be folded.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A two-column matrix specifying the padding sizes. The number of
rows must be the same as the rank of `input`.
mode: A `string` from: `"REFLECT", "SYMMETRIC"`.
The mode used in the `MirrorPad` op.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`. The folded tensor.
"""
result = _op_def_lib.apply_op("MirrorPadGrad", input=input,
paddings=paddings, mode=mode, name=name)
return result
__one_hot_outputs = ["output"]
def _one_hot(indices, depth, on_value, off_value, axis=None, name=None):
r"""Returns a one-hot tensor.
The locations represented by indices in `indices` take value `on_value`,
while all other locations take value `off_value`.
If the input `indices` is rank `N`, the output will have rank `N+1`.
The new axis is created at dimension `axis` (default: the new axis is
appended at the end).
If `indices` is a scalar the output shape will be a vector of length `depth`.
If `indices` is a vector of length `features`, the output shape will be:
```
features x depth if axis == -1
depth x features if axis == 0
```
If `indices` is a matrix (batch) with shape `[batch, features]`,
the output shape will be:
```
batch x features x depth if axis == -1
batch x depth x features if axis == 1
depth x batch x features if axis == 0
```
Examples
=========
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 5.0
off_value = 0.0
axis = -1
```
Then output is `[4 x 3]`:
```output =
[5.0 0.0 0.0] // one_hot(0)
[0.0 0.0 5.0] // one_hot(2)
[0.0 0.0 0.0] // one_hot(-1)
[0.0 5.0 0.0] // one_hot(1)
```
Suppose that
```
indices = [0, 2, -1, 1]
depth = 3
on_value = 0.0
off_value = 3.0
axis = 0
```
Then output is `[3 x 4]`:
```output =
[0.0 3.0 3.0 3.0]
[3.0 3.0 3.0 0.0]
[3.0 0.0 3.0 3.0]
// ^ one_hot(0)
// ^ one_hot(2)
// ^ one_hot(-1)
// ^ one_hot(1)
```
Suppose that
```
indices = [[0, 2], [1, -1]]
depth = 3
on_value = 1.0
off_value = 0.0
axis = -1
```
Then output is `[2 x 2 x 3]`:
```output =
[
[1.0, 0.0, 0.0] // one_hot(0)
[0.0, 0.0, 1.0] // one_hot(2)
][
[0.0, 1.0, 0.0] // one_hot(1)
[0.0, 0.0, 0.0] // one_hot(-1)
]```
Args:
indices: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`.
A tensor of indices.
depth: A `Tensor` of type `int32`.
A scalar defining the depth of the one hot dimension.
on_value: A `Tensor`.
A scalar defining the value to fill in output when `indices[j] = i`.
off_value: A `Tensor`. Must have the same type as `on_value`.
A scalar defining the value to fill in output when `indices[j] != i`.
axis: An optional `int`. Defaults to `-1`.
The axis to fill (default: -1, a new inner-most axis).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `on_value`. The one-hot tensor.
"""
result = _op_def_lib.apply_op("OneHot", indices=indices, depth=depth,
on_value=on_value, off_value=off_value,
axis=axis, name=name)
return result
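# Illustrative usage (a sketch; `tf.one_hot` is the public wrapper):
#
#   import tensorflow as tf
#   onehot = tf.one_hot([0, 2, -1, 1], depth=3, on_value=5.0, off_value=0.0)
#   # => [[5., 0., 0.], [0., 0., 5.], [0., 0., 0.], [0., 5., 0.]]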
__pack_outputs = ["output"]
def _pack(values, axis=None, name=None):
r"""Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
Packs the `N` tensors in `values` into a tensor with rank one higher than each
tensor in `values`, by packing them along the `axis` dimension.
Given a list of tensors of shape `(A, B, C)`;
if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
Etc.
For example:
```prettyprint
# 'x' is [1, 4]
# 'y' is [2, 5]
# 'z' is [3, 6]
pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
```
This is the opposite of `unpack`.
Args:
values: A list of at least 1 `Tensor` objects of the same type.
Must be of same shape and type.
axis: An optional `int`. Defaults to `0`.
Dimension along which to pack. Negative values wrap around, so the
valid range is `[-(R+1), R+1)`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`. The packed tensor.
"""
result = _op_def_lib.apply_op("Pack", values=values, axis=axis, name=name)
return result
__pad_outputs = ["output"]
def _pad(input, paddings, name=None):
r"""Pads a tensor with zeros.
This operation pads a `input` with zeros according to the `paddings` you
specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
how many zeros to add before the contents of `input` in that dimension, and
`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
in that dimension.
The padded size of each dimension D of the output is:
`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
For example:
```prettyprint
# 't' is [[1, 1], [2, 2]]
# 'paddings' is [[1, 1], [2, 2]]
# rank of 't' is 2
pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
[0, 0, 1, 1, 0, 0]
[0, 0, 2, 2, 0, 0]
[0, 0, 0, 0, 0, 0]]
```
Args:
input: A `Tensor`.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("Pad", input=input, paddings=paddings,
name=name)
return result
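# Illustrative usage (a sketch; `tf.pad` with the default "CONSTANT" mode
# dispatches here):
#
#   import tensorflow as tf
#   t = tf.constant([[1, 1], [2, 2]])
#   padded = tf.pad(t, [[1, 1], [2, 2]])
#   # => the 4 x 6 zero-padded result shown in the docstring example above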
__parallel_concat_outputs = ["output"]
def _parallel_concat(values, shape, name=None):
r"""Concatenates a list of `N` tensors along the first dimension.
The input tensors are all required to have size 1 in the first dimension.
For example:
```prettyprint
# 'x' is [[1, 4]]
# 'y' is [[2, 5]]
# 'z' is [[3, 6]]
parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim.
```
The difference between concat and parallel_concat is that concat requires all
of the inputs be computed before the operation will begin but doesn't require
that the input shapes be known during graph construction. Parallel concat
will copy pieces of the input into the output as they become available; in
some situations this can provide a performance benefit.
Args:
values: A list of at least 1 `Tensor` objects of the same type.
Tensors to be concatenated. All must have size 1 in the first dimension
and same shape.
shape: A `tf.TensorShape` or list of `ints`.
the final shape of the result; should be equal to the shapes of any input
but with the number of input values in the first dimension.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`. The concatenated tensor.
"""
result = _op_def_lib.apply_op("ParallelConcat", values=values, shape=shape,
name=name)
return result
__placeholder_outputs = ["output"]
def _placeholder(dtype, shape=None, name=None):
r"""A placeholder op for a value that will be fed into the computation.
N.B. This operation will fail with an error if it is executed. It is
intended as a way to represent a value that will always be fed, and to
provide attrs that enable the fed value to be checked at runtime.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `[]`.
(Optional) The shape of the tensor. If the shape has 0 dimensions, the
shape is unconstrained.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A placeholder tensor that must be replaced using the feed mechanism.
"""
result = _op_def_lib.apply_op("Placeholder", dtype=dtype, shape=shape,
name=name)
return result
_placeholder_v2_outputs = ["output"]
def placeholder_v2(dtype, shape, name=None):
r"""A placeholder op for a value that will be fed into the computation.
N.B. This operation will fail with an error if it is executed. It is
intended as a way to represent a value that will always be fed, and to
provide attrs that enable the fed value to be checked at runtime.
Args:
dtype: A `tf.DType`. The type of elements in the tensor.
shape: A `tf.TensorShape` or list of `ints`.
The shape of the tensor. The shape can be any partially-specified
shape. To be unconstrained, pass in a shape with unknown rank.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `dtype`.
A placeholder tensor that must be replaced using the feed mechanism.
"""
result = _op_def_lib.apply_op("PlaceholderV2", dtype=dtype, shape=shape,
name=name)
return result
_placeholder_with_default_outputs = ["output"]
def placeholder_with_default(input, shape, name=None):
r"""A placeholder op that passes through `input` when its output is not fed.
Args:
input: A `Tensor`. The default value to produce when `output` is not fed.
shape: A `tf.TensorShape` or list of `ints`.
The (possibly partial) shape of the tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
A placeholder tensor that defaults to `input` if it is not fed.
"""
result = _op_def_lib.apply_op("PlaceholderWithDefault", input=input,
shape=shape, name=name)
return result
_prevent_gradient_outputs = ["output"]
def prevent_gradient(input, name=None):
r"""An identity op that triggers an error if a gradient is requested.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, the TensorFlow gradient system
will return an error when trying to lookup the gradient of this op,
because no gradient must ever be registered for this function. This
op exists to prevent subtle bugs from silently returning unimplemented
gradients in some corner cases.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("PreventGradient", input=input, name=name)
return result
_quantize_and_dequantize_outputs = ["output"]
def quantize_and_dequantize(input, signed_input=None, num_bits=None,
range_given=None, input_min=None, input_max=None,
name=None):
r"""Quantizes then dequantizes a tensor.
This op simulates the precision loss from the quantized forward pass by:
1. Quantizing the tensor to fixed point numbers, which should match the target
quantization method when it is used in inference.
2. Dequantizing it back to floating point numbers for the following ops, most
likely matmul.
There are different ways to quantize. This version does not use the full range
of the output type, choosing to elide the lowest possible value for symmetry
(e.g., output range is -127 to 127, not -128 to 127 for signed 8 bit
quantization), so that 0.0 maps to 0.
To perform this op, we first find the range of values in our tensor. The range
we use is always centered on 0, so we find m such that
1. m = max(abs(input_min), abs(input_max)) if range_given is true,
2. m = max(abs(min_elem(input)), abs(max_elem(input))) otherwise.
Our input tensor range is then [-m, m].
Next, we choose our fixed-point quantization buckets, [min_fixed, max_fixed].
If signed_input is true, this is
[min_fixed, max_fixed] =
[-((1 << (num_bits - 1)) - 1), (1 << (num_bits - 1)) - 1].
Otherwise, if signed_input is false, the fixed-point range is
[min_fixed, max_fixed] = [0, (1 << num_bits) - 1].
From this we compute our scaling factor, s:
s = (max_fixed - min_fixed) / (2 * m).
Now we can quantize and dequantize the elements of our tensor. An element e
is transformed into e':
e' = (e * s).round_to_nearest() / s.
Note that we have a different number of buckets in the signed vs. unsigned
cases. For example, if num_bits == 8, we get 254 buckets in the signed case
vs. 255 in the unsigned case.
For example, suppose num_bits = 8 and m = 1. Then
[min_fixed, max_fixed] = [-127, 127], and
s = (127 + 127) / 2 = 127.
Given the vector {-1, -0.5, 0, 0.3}, this is quantized to
{-127, -63, 0, 38}, and dequantized to {-1, -63.0/127, 0, 38.0/127}.
Args:
input: A `Tensor`. Must be one of the following types: `float32`, `float64`.
Tensor to quantize and then dequantize.
signed_input: An optional `bool`. Defaults to `True`.
If the quantization is signed or unsigned.
num_bits: An optional `int`. Defaults to `8`.
The bitwidth of the quantization.
range_given: An optional `bool`. Defaults to `False`.
If the range is given or should be computed from the tensor.
input_min: An optional `float`. Defaults to `0`.
If range is given, this is the min of the range.
input_max: An optional `float`. Defaults to `0`.
If range is given, this is the max of the range.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("QuantizeAndDequantize", input=input,
signed_input=signed_input, num_bits=num_bits,
range_given=range_given, input_min=input_min,
input_max=input_max, name=name)
return result
_quantize_v2_outputs = ["output", "output_min", "output_max"]
_QuantizeV2Output = _collections.namedtuple("QuantizeV2",
_quantize_v2_outputs)
def quantize_v2(input, min_range, max_range, T, mode=None, name=None):
r"""Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
[min_range, max_range] are scalar floats that specify the range for
the 'input' data. The 'mode' attribute controls exactly which calculations are
used to convert the float values to their quantized equivalents.
In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
```
out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
if T == qint8, out[i] -= (range(T) + 1) / 2.0
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`
*MIN_COMBINED Mode Example*
Assume the input is type float and has a possible range of [0.0, 6.0] and the
output type is quint8 ([0, 255]). The min_range and max_range values should be
specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
value of the input by 255/6 and cast to quint8.
If the output type was qint8 ([-128, 127]), the operation will additionally
subtract each value by 128 prior to casting, so that the range of values aligns
with the range of qint8.
If the mode is 'MIN_FIRST', then this approach is used:
```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = number_of_steps / range
quantized = round(input * range_scale) - round(range_min * range_scale) +
numeric_limits<T>::min()
quantized = max(quantized, numeric_limits<T>::min())
quantized = min(quantized, numeric_limits<T>::max())
```
The biggest difference between this and MIN_COMBINED is that the minimum range
is rounded first, before it's subtracted from the rounded value. With
MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
and dequantizing will introduce a larger and larger error.
One thing to watch out for is that the operator may choose to adjust the
requested minimum and maximum values slightly during the quantization process,
so you should always use the output ports as the range for further calculations.
For example, if the requested minimum and maximum values are close to equal,
they will be separated by a small epsilon value to prevent ill-formed quantized
buffers from being created. Otherwise, you can end up with buffers where all the
quantized values map to the same float value, which causes problems for
operations that have to perform further calculations on them.
Args:
input: A `Tensor` of type `float32`.
min_range: A `Tensor` of type `float32`.
The minimum scalar value possibly produced for the input.
max_range: A `Tensor` of type `float32`.
The maximum scalar value possibly produced for the input.
T: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.
mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST"`. Defaults to `"MIN_COMBINED"`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, output_min, output_max).
output: A `Tensor` of type `T`. The quantized data produced from the float input.
output_min: A `Tensor` of type `float32`. The actual minimum scalar value used for the output.
output_max: A `Tensor` of type `float32`. The actual maximum scalar value used for the output.
"""
result = _op_def_lib.apply_op("QuantizeV2", input=input,
min_range=min_range, max_range=max_range, T=T,
mode=mode, name=name)
return _QuantizeV2Output._make(result)
_quantized_concat_outputs = ["output", "output_min", "output_max"]
_QuantizedConcatOutput = _collections.namedtuple("QuantizedConcat",
_quantized_concat_outputs)
def quantized_concat(concat_dim, values, input_mins, input_maxes, name=None):
r"""Concatenates quantized tensors along one dimension.
Args:
concat_dim: A `Tensor` of type `int32`.
0-D. The dimension along which to concatenate. Must be in the
range [0, rank(values)).
values: A list of at least 2 `Tensor` objects of the same type.
The `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.
input_mins: A list with the same number of `Tensor` objects as `values` of `Tensor` objects of type `float32`.
The minimum scalar values for each of the input tensors.
input_maxes: A list with the same number of `Tensor` objects as `values` of `Tensor` objects of type `float32`.
The maximum scalar values for each of the input tensors.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, output_min, output_max).
output: A `Tensor`. Has the same type as `values`. A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension. This tensor's shape matches that of `values` except
in `concat_dim` where it has the sum of the sizes.
output_min: A `Tensor` of type `float32`. The float value that the minimum quantized output value represents.
output_max: A `Tensor` of type `float32`. The float value that the maximum quantized output value represents.
"""
result = _op_def_lib.apply_op("QuantizedConcat", concat_dim=concat_dim,
values=values, input_mins=input_mins,
input_maxes=input_maxes, name=name)
return _QuantizedConcatOutput._make(result)
_quantized_instance_norm_outputs = ["y", "y_min", "y_max"]
_QuantizedInstanceNormOutput = _collections.namedtuple("QuantizedInstanceNorm",
_quantized_instance_norm_outputs)
def quantized_instance_norm(x, x_min, x_max, output_range_given=None,
given_y_min=None, given_y_max=None,
variance_epsilon=None, min_separation=None,
name=None):
r"""Quantized Instance normalization.
Args:
x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.
A 4D input Tensor.
x_min: A `Tensor` of type `float32`.
The value represented by the lowest quantized input.
x_max: A `Tensor` of type `float32`.
The value represented by the highest quantized input.
output_range_given: An optional `bool`. Defaults to `False`.
If True, `given_y_min` and `given_y_max`
are used as the output range. Otherwise,
the implementation computes the output range.
given_y_min: An optional `float`. Defaults to `0`.
Output in `y_min` if `output_range_given` is True.
given_y_max: An optional `float`. Defaults to `0`.
Output in `y_max` if `output_range_given` is True.
variance_epsilon: An optional `float`. Defaults to `1e-05`.
A small float number to avoid dividing by 0.
min_separation: An optional `float`. Defaults to `0.001`.
Minimum value of `y_max - y_min`
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, y_min, y_max).
y: A `Tensor`. Has the same type as `x`. A 4D Tensor.
y_min: A `Tensor` of type `float32`. The value represented by the lowest quantized output.
y_max: A `Tensor` of type `float32`. The value represented by the highest quantized output.
"""
result = _op_def_lib.apply_op("QuantizedInstanceNorm", x=x, x_min=x_min,
x_max=x_max,
output_range_given=output_range_given,
given_y_min=given_y_min,
given_y_max=given_y_max,
variance_epsilon=variance_epsilon,
min_separation=min_separation, name=name)
return _QuantizedInstanceNormOutput._make(result)
_quantized_reshape_outputs = ["output", "output_min", "output_max"]
_QuantizedReshapeOutput = _collections.namedtuple("QuantizedReshape",
_quantized_reshape_outputs)
def quantized_reshape(tensor, shape, input_min, input_max, name=None):
r"""Reshapes a quantized tensor as per the Reshape op.
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
input_min: A `Tensor` of type `float32`. The minimum value of the input.
input_max: A `Tensor` of type `float32`. The maximum value of the input.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (output, output_min, output_max).
output: A `Tensor`. Has the same type as `tensor`.
output_min: A `Tensor` of type `float32`. This value is copied from input_min.
output_max: A `Tensor` of type `float32`. This value is copied from input_max.
"""
result = _op_def_lib.apply_op("QuantizedReshape", tensor=tensor,
shape=shape, input_min=input_min,
input_max=input_max, name=name)
return _QuantizedReshapeOutput._make(result)
_rank_outputs = ["output"]
def rank(input, name=None):
r"""Returns the rank of a tensor.
This operation returns an integer representing the rank of `input`.
For example:
```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
# shape of tensor 't' is [2, 2, 3]
rank(t) ==> 3
```
**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
of a tensor is the number of indices required to uniquely select each element
of the tensor. Rank is also known as "order", "degree", or "ndims."
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
result = _op_def_lib.apply_op("Rank", input=input, name=name)
return result
__ref_identity_outputs = ["output"]
def _ref_identity(input, name=None):
r"""Return the same ref tensor as the input ref tensor.
Args:
input: A mutable `Tensor`.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("RefIdentity", input=input, name=name)
return result
_reshape_outputs = ["output"]
def reshape(tensor, shape, name=None):
r"""Reshapes a tensor.
Given `tensor`, this operation returns a tensor that has the same values
as `tensor` with shape `shape`.
If one component of `shape` is the special value -1, the size of that dimension
is computed so that the total size remains constant. In particular, a `shape`
of `[-1]` flattens into 1-D. At most one component of `shape` can be -1.
If `shape` is 1-D or higher, then the operation returns a tensor with shape
`shape` filled with the values of `tensor`. In this case, the number of elements
implied by `shape` must be the same as the number of elements in `tensor`.
For example:
```prettyprint
# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
# tensor 't' has shape [9]
reshape(t, [3, 3]) ==> [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
# tensor 't' is [[[1, 1], [2, 2]],
# [[3, 3], [4, 4]]]
# tensor 't' has shape [2, 2, 2]
reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
[3, 3, 4, 4]]
# tensor 't' is [[[1, 1, 1],
# [2, 2, 2]],
# [[3, 3, 3],
# [4, 4, 4]],
# [[5, 5, 5],
# [6, 6, 6]]]
# tensor 't' has shape [3, 2, 3]
# pass '[-1]' to flatten 't'
reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
# -1 can also be used to infer the shape
# -1 is inferred to be 9:
reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 2:
reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
[4, 4, 4, 5, 5, 5, 6, 6, 6]]
# -1 is inferred to be 3:
reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
[2, 2, 2],
[3, 3, 3]],
[[4, 4, 4],
[5, 5, 5],
[6, 6, 6]]]
# tensor 't' is [7]
# shape `[]` reshapes to a scalar
reshape(t, []) ==> 7
```
Args:
tensor: A `Tensor`.
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
Defines the shape of the output tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`.
"""
result = _op_def_lib.apply_op("Reshape", tensor=tensor, shape=shape,
name=name)
return result
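# Illustrative usage (a sketch; `tf.reshape` is the public wrapper):
#
#   import tensorflow as tf
#   t = tf.constant([1, 2, 3, 4, 5, 6])
#   tf.reshape(t, [2, 3])   # => [[1, 2, 3], [4, 5, 6]]
#   tf.reshape(t, [-1, 2])  # the -1 is inferred to be 3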
__reverse_outputs = ["output"]
def _reverse(tensor, dims, name=None):
r"""Reverses specific dimensions of a tensor.
Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
of `tensor`, this operation reverses each dimension i of `tensor` where
`dims[i]` is `True`.
`tensor` can have up to 8 dimensions. The number of dimensions
of `tensor` must equal the number of elements in `dims`. In other words:
`rank(tensor) = size(dims)`
For example:
```prettyprint
# tensor 't' is [[[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11]],
# [[12, 13, 14, 15],
# [16, 17, 18, 19],
# [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]
# 'dims' is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
[ 7, 6, 5, 4],
[ 11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]]]
# 'dims' is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]]]
# 'dims' is [False, False, True, False]
reverse(t, dims) ==> [[[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]]]
```
Args:
tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`.
Up to 8-D.
dims: A `Tensor` of type `bool`. 1-D. The dimensions to reverse.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`. The same shape as `tensor`.
"""
result = _op_def_lib.apply_op("Reverse", tensor=tensor, dims=dims,
name=name)
return result
_reverse_sequence_outputs = ["output"]
def reverse_sequence(input, seq_lengths, seq_dim, batch_dim=None, name=None):
r"""Reverses variable length slices.
This op first slices `input` along the dimension `batch_dim`, and for each
slice `i`, reverses the first `seq_lengths[i]` elements along
the dimension `seq_dim`.
The elements of `seq_lengths` must obey `seq_lengths[i] < input.dims[seq_dim]`,
and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
The output slice `i` along dimension `batch_dim` is then given by input
slice `i`, with the first `seq_lengths[i]` slices along dimension
`seq_dim` reversed.
For example:
```prettyprint
# Given this:
batch_dim = 0
seq_dim = 1
input.dims = (4, 8, ...)
seq_lengths = [7, 2, 3, 5]
# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
# while entries past seq_lens are copied through:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
```
In contrast, if:
```prettyprint
# Given this:
batch_dim = 2
seq_dim = 0
input.dims = (8, ?, 4, ...)
seq_lengths = [7, 2, 3, 5]
# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
# while entries past seq_lens are copied through:
output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
```
Args:
input: A `Tensor`. The input to reverse.
seq_lengths: A `Tensor`. Must be one of the following types: `int32`, `int64`.
1-D with length `input.dims(batch_dim)` and
`max(seq_lengths) < input.dims(seq_dim)`
seq_dim: An `int`. The dimension which is partially reversed.
batch_dim: An optional `int`. Defaults to `0`.
The dimension along which reversal is performed.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
The partially reversed input. It has the same shape as `input`.
"""
result = _op_def_lib.apply_op("ReverseSequence", input=input,
seq_lengths=seq_lengths, seq_dim=seq_dim,
batch_dim=batch_dim, name=name)
return result
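# Illustrative usage (a sketch; `tf.reverse_sequence` is the public wrapper):
#
#   import tensorflow as tf
#   t = tf.constant([[1, 2, 3, 0], [4, 5, 0, 0]])
#   r = tf.reverse_sequence(t, seq_lengths=[3, 2], seq_dim=1, batch_dim=0)
#   # => [[3, 2, 1, 0], [5, 4, 0, 0]]; only the first seq_lengths[i]
#   #    entries of each row are reversed, the rest pass through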
_reverse_v2_outputs = ["output"]
def reverse_v2(tensor, axis, name=None):
r"""Reverses specific dimensions of a tensor.
NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
Given a `tensor` and an `int32` tensor `axis` representing the set of
dimensions of `tensor` to reverse, this operation reverses each dimension
`i` for which there exists `j` s.t. `axis[j] == i`.
`tensor` can have up to 8 dimensions, and `axis` may contain zero or more
entries. If an index is specified more than once, an InvalidArgument error is
raised.
For example:
```prettyprint
# tensor 't' is [[[[ 0, 1, 2, 3],
# [ 4, 5, 6, 7],
# [ 8, 9, 10, 11]],
# [[12, 13, 14, 15],
# [16, 17, 18, 19],
# [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]
# 'dims' is [3] or 'dims' is -1
reverse(t, dims) ==> [[[[ 3, 2, 1, 0],
[ 7, 6, 5, 4],
[ 11, 10, 9, 8]],
[[15, 14, 13, 12],
[19, 18, 17, 16],
[23, 22, 21, 20]]]]
# 'dims' is '[1]' (or 'dims' is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]]]]
# 'dims' is '[2]' (or 'dims' is '[-2]')
reverse(t, dims) ==> [[[[8, 9, 10, 11],
[4, 5, 6, 7],
[0, 1, 2, 3]],
[[20, 21, 22, 23],
[16, 17, 18, 19],
[12, 13, 14, 15]]]]
```
Args:
tensor: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int32`, `int64`, `bool`, `half`, `float32`, `float64`, `complex64`, `complex128`.
Up to 8-D.
axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
1-D. The indices of the dimensions to reverse.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `tensor`. The same shape as `tensor`.
"""
result = _op_def_lib.apply_op("ReverseV2", tensor=tensor, axis=axis,
name=name)
return result
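# Illustrative usage (a sketch): in TF 1.x, `tf.reverse` takes an `axis` list
# and dispatches to this op.
#
#   import tensorflow as tf
#   t = tf.constant([[1, 2, 3], [4, 5, 6]])
#   tf.reverse(t, axis=[1])  # => [[3, 2, 1], [6, 5, 4]]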
_scatter_nd_outputs = ["output"]
def scatter_nd(indices, updates, shape, name=None):
r"""Creates a new tensor by applying sparse `updates` to individual
values or slices within a zero tensor of the given `shape` tensor according to
indices. This operator is the inverse of the [tf.gather_nd](#gather_nd)
operator which extracts values or slices from a given tensor.
`shape` is a `TensorShape` with rank `P` and `indices` is a `Tensor` of rank
`Q`.
`indices` must be integer tensor, containing indices into `shape`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `shape`.
`updates` is Tensor of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, shape[K], ..., shape[P-1]].
```
The simplest form of scatter is to insert individual elements in a tensor by
index. For example, say we want to insert 4 scattered elements in a rank-1
tensor with 8 elements.
(Figure ../../images/ScatterNd1.png: scattering four individual elements into
a rank-1 tensor.)
In Python, this scatter operation would look like this:
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
shape = tf.constant([8])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
print(sess.run(scatter))
The resulting tensor would look like this:
[0, 11, 0, 10, 9, 0, 0, 12]
We can also, insert entire slices of a higher rank tensor all at once. For
example, if we wanted to insert two slices in the first dimension of a
rank-3 tensor with two matrices of new values.
(Figure ../../images/ScatterNd2.png: inserting two slices into the first
dimension of a rank-3 tensor.)
In Python, this scatter operation would look like this:
indices = tf.constant([[0], [2]])
updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
[7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6],
[7, 7, 7, 7], [8, 8, 8, 8]]])
shape = tf.constant([4, 4, 4])
scatter = tf.scatter_nd(indices, updates, shape)
with tf.Session() as sess:
print(sess.run(scatter))
The resulting tensor would look like this:
[[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
Args:
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the output tensor.
updates: A `Tensor`. A tensor of updated values to scatter into the
output tensor.
shape: A `Tensor`. Must have the same type as `indices`.
A vector. The shape of the resulting tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `updates`.
A new tensor with the given shape and updates applied according
to the indices.
"""
result = _op_def_lib.apply_op("ScatterNd", indices=indices, updates=updates,
shape=shape, name=name)
return result
_shape_outputs = ["output"]
def shape(input, out_type=None, name=None):
r"""Returns the shape of a tensor.
This operation returns a 1-D integer tensor representing the shape of `input`.
For example:
```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
Args:
input: A `Tensor`.
out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
result = _op_def_lib.apply_op("Shape", input=input, out_type=out_type,
name=name)
return result
_shape_n_outputs = ["output"]
def shape_n(input, out_type=None, name=None):
r"""Returns shape of tensors.
This operation returns `N` 1-D integer tensors representing the shapes of each `input[i]`.
Args:
input: A list of at least 1 `Tensor` objects of the same type.
out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A list with the same number of `Tensor` objects as `input` of `Tensor` objects of type out_type.
"""
result = _op_def_lib.apply_op("ShapeN", input=input, out_type=out_type,
name=name)
return result
_size_outputs = ["output"]
def size(input, out_type=None, name=None):
r"""Returns the size of a tensor.
This operation returns an integer representing the number of elements in
`input`.
For example:
```prettyprint
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
Args:
input: A `Tensor`.
out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `out_type`.
"""
result = _op_def_lib.apply_op("Size", input=input, out_type=out_type,
name=name)
return result
__slice_outputs = ["output"]
def _slice(input, begin, size, name=None):
r"""Return a slice from 'input'.
The output tensor is a tensor with dimensions described by 'size'
whose values are extracted from 'input' starting at the offsets in
'begin'.
*Requirements*:
0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n)
Args:
input: A `Tensor`.
begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
begin[i] specifies the offset into the 'i'th dimension of
'input' to slice from.
size: A `Tensor`. Must have the same type as `begin`.
size[i] specifies the number of elements of the 'i'th dimension
of 'input' to slice. If size[i] is -1, all remaining elements in dimension
i are included in the slice (i.e. this is equivalent to setting
size[i] = input.dim_size(i) - begin[i]).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("Slice", input=input, begin=begin, size=size,
name=name)
return result
__space_to_batch_outputs = ["output"]
def _space_to_batch(input, paddings, block_size, name=None):
r"""SpaceToBatch for 4-D tensors of type T.
This is a legacy version of the more general SpaceToBatchND.
Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
More specifically, this op outputs a copy of the input tensor where values from
the `height` and `width` dimensions are moved to the `batch` dimension. After
the zero-padding, both `height` and `width` of the input must be divisible by the
block size.
Args:
input: A `Tensor`. 4-D with shape `[batch, height, width, depth]`.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
the padding of the input with zeros across the spatial dimensions as follows:
paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
The effective spatial dimensions of the zero-padded input tensor will be:
height_pad = pad_top + height + pad_bottom
width_pad = pad_left + width + pad_right
The attr `block_size` must be greater than one. It indicates the block size.
* Non-overlapping blocks of size `block_size x block_size` in the height and
width dimensions are rearranged into the batch dimension at each location.
* The batch of the output tensor is `batch * block_size * block_size`.
* Both height_pad and width_pad must be divisible by block_size.
The shape of the output will be:
[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
depth]
Some examples:
(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```
The output tensor has shape `[4, 1, 1, 1]` and value:
```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
The output tensor has shape `[4, 1, 1, 3]` and value:
```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```
(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
```prettyprint
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
The output tensor has shape `[4, 2, 2, 1]` and value:
```prettyprint
x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
```prettyprint
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
The output tensor has shape `[8, 1, 2, 1]` and value:
```prettyprint
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
[[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```
Among others, this operation is useful for reducing atrous convolution into
regular convolution.
block_size: An `int` that is `>= 2`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("SpaceToBatch", input=input,
paddings=paddings, block_size=block_size,
name=name)
return result
_space_to_batch_nd_outputs = ["output"]
def space_to_batch_nd(input, block_shape, paddings, name=None):
r"""SpaceToBatch for N-D tensors of type T.
This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
grid of blocks of shape `block_shape`, and interleaves these blocks with the
"batch" dimension (0) such that in the output, the spatial dimensions
`[1, ..., M]` correspond to the position within the grid, and the batch
dimension combines both the position within a spatial block and the original
batch position. Prior to division into blocks, the spatial dimensions of the
input are optionally zero padded according to `paddings`. See below for a
precise description.
Args:
input: A `Tensor`.
N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has `M` dimensions.
block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
1-D with shape `[M]`, all values must be >= 1.
paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`.
2-D with shape `[M, 2]`, all values must be >= 0.
`paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
`i + 1`, which corresponds to spatial dimension `i`. It is required that
`block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
This operation is equivalent to the following steps:
1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
input according to `paddings` to produce `padded` of shape `padded_shape`.
2. Reshape `padded` to `reshaped_padded` of shape:
[batch] +
[padded_shape[1] / block_shape[0],
block_shape[0],
...,
padded_shape[M] / block_shape[M-1],
block_shape[M-1]] +
remaining_shape
3. Permute dimensions of `reshaped_padded` to produce
`permuted_reshaped_padded` of shape:
block_shape +
[batch] +
[padded_shape[1] / block_shape[0],
...,
padded_shape[M] / block_shape[M-1]] +
remaining_shape
4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
dimension, producing an output tensor of shape:
[batch * prod(block_shape)] +
[padded_shape[1] / block_shape[0],
...,
padded_shape[M] / block_shape[M-1]] +
remaining_shape
Some examples:
(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
```prettyprint
x = [[[[1], [2]], [[3], [4]]]]
```
The output tensor has shape `[4, 1, 1, 1]` and value:
```prettyprint
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```
(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
The output tensor has shape `[4, 1, 1, 3]` and value:
```prettyprint
[[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
```
(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
`paddings = [[0, 0], [0, 0]]`:
```prettyprint
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]],
[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
The output tensor has shape `[4, 2, 2, 1]` and value:
```prettyprint
x = [[[[1], [3]], [[9], [11]]],
[[[2], [4]], [[10], [12]]],
[[[5], [7]], [[13], [15]]],
[[[6], [8]], [[14], [16]]]]
```
(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
paddings = `[[0, 0], [2, 0]]`:
```prettyprint
x = [[[[1], [2], [3], [4]],
[[5], [6], [7], [8]]],
[[[9], [10], [11], [12]],
[[13], [14], [15], [16]]]]
```
The output tensor has shape `[8, 1, 3, 1]` and value:
```prettyprint
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
[[[0], [2], [4]]], [[[0], [10], [12]]],
[[[0], [5], [7]]], [[[0], [13], [15]]],
[[[0], [6], [8]]], [[[0], [14], [16]]]]
```
Among others, this operation is useful for reducing atrous convolution into
regular convolution.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("SpaceToBatchND", input=input,
block_shape=block_shape, paddings=paddings,
name=name)
return result
_space_to_depth_outputs = ["output"]
def space_to_depth(input, block_size, name=None):
r"""SpaceToDepth for tensors of type T.
Rearranges blocks of spatial data, into depth. More specifically,
this op outputs a copy of the input tensor where values from the `height`
and `width` dimensions are moved to the `depth` dimension.
The attr `block_size` indicates the input block size and how the data is moved.
* Non-overlapping blocks of size `block_size x block_size` are rearranged
into depth at each location.
* The depth of the output tensor is `input_depth * block_size * block_size`.
* The input tensor's height and width must be divisible by block_size.
That is, assuming the input is in the shape:
`[batch, height, width, depth]`,
the shape of the output will be:
`[batch, height/block_size, width/block_size, depth*block_size*block_size]`
This operation requires that the input tensor be of rank 4, and that
`block_size` be >= 2 and a divisor of both the input `height` and `width`.
This operation is useful for resizing the activations between convolutions
(but keeping all data), e.g. instead of pooling. It is also useful for training
purely convolutional models.
For example, given this input of shape `[1, 2, 2, 1]`, and block_size of 2:
```prettyprint
x = [[[[1], [2]],
[[3], [4]]]]
```
This operation will output a tensor of shape `[1, 1, 1, 4]`:
```prettyprint
[[[[1, 2, 3, 4]]]]
```
Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
the corresponding output will have a single element (i.e. width and height are
both 1) and will have a depth of 4 channels (1 * block_size * block_size).
The output element shape is `[1, 1, 4]`.
For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
```prettyprint
x = [[[[1, 2, 3], [4, 5, 6]],
[[7, 8, 9], [10, 11, 12]]]]
```
This operation, for block_size of 2, will return the following tensor of shape
`[1, 1, 1, 12]`
```prettyprint
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```
Similarly, for the following input of shape `[1, 4, 4, 1]`, and a block size of 2:
```prettyprint
x = [[[[1], [2], [5], [6]],
[[3], [4], [7], [8]],
[[9], [10], [13], [14]],
[[11], [12], [15], [16]]]]
```
the operator will return the following tensor of shape `[1, 2, 2, 4]`:
```prettyprint
x = [[[[1, 2, 3, 4],
[5, 6, 7, 8]],
[[9, 10, 11, 12],
[13, 14, 15, 16]]]]
```
Args:
input: A `Tensor`.
block_size: An `int` that is `>= 2`. The size of the spatial block.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("SpaceToDepth", input=input,
block_size=block_size, name=name)
return result
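# Illustrative usage (a sketch; `tf.space_to_depth` is the public wrapper):
#
#   import tensorflow as tf
#   x = tf.constant([[[[1], [2]], [[3], [4]]]])  # shape [1, 2, 2, 1]
#   tf.space_to_depth(x, block_size=2)           # => [[[[1, 2, 3, 4]]]]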
__split_outputs = ["output"]
def _split(split_dim, value, num_split, name=None):
r"""Splits a tensor into `num_split` tensors along one dimension.
Args:
split_dim: A `Tensor` of type `int32`.
0-D. The dimension along which to split. Must be in the range
`[0, rank(value))`.
value: A `Tensor`. The tensor to split.
num_split: An `int` that is `>= 1`.
The number of ways to split. Must evenly divide
`value.shape[split_dim]`.
name: A name for the operation (optional).
Returns:
A list of `num_split` `Tensor` objects of the same type as value.
They are identically shaped tensors, whose shape matches that of `value`
except along `split_dim`, where their sizes are
`value.shape[split_dim] / num_split`.
"""
result = _op_def_lib.apply_op("Split", split_dim=split_dim, value=value,
num_split=num_split, name=name)
return result
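# Illustrative usage (a sketch): the public `tf.split` wrapper routes here
# for an even split count; in TF 1.x its signature is
# tf.split(value, num_or_size_splits, axis).
#
#   import tensorflow as tf
#   value = tf.zeros([5, 30])
#   s0, s1, s2 = tf.split(value, 3, axis=1)  # each piece has shape [5, 10]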
__split_v_outputs = ["output"]
def _split_v(value, size_splits, split_dim, num_split, name=None):
r"""Splits a tensor into `num_split` tensors along one dimension.
Args:
value: A `Tensor`. The tensor to split.
size_splits: A `Tensor`. Must be one of the following types: `int32`, `int64`.
list containing the sizes of each output tensor along the split
dimension. Must sum to the dimension of value along split_dim.
Can contain one -1 indicating that dimension is to be inferred.
split_dim: A `Tensor` of type `int32`.
0-D. The dimension along which to split. Must be in the range
`[0, rank(value))`.
num_split: An `int` that is `>= 1`.
name: A name for the operation (optional).
Returns:
A list of `num_split` `Tensor` objects of the same type as value.
Tensors whose shape matches that of `value`
except along `split_dim`, where their sizes are
`size_splits[i]`.
"""
result = _op_def_lib.apply_op("SplitV", value=value,
size_splits=size_splits, split_dim=split_dim,
num_split=num_split, name=name)
return result
__squeeze_outputs = ["output"]
def _squeeze(input, squeeze_dims=None, name=None):
r"""Removes dimensions of size 1 from the shape of a tensor.
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`squeeze_dims`.
For example:
```prettyprint
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```
Or, to remove specific size 1 dimensions:
```prettyprint
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
Args:
input: A `Tensor`. The `input` to squeeze.
squeeze_dims: An optional list of `ints`. Defaults to `[]`.
If specified, only squeezes the dimensions listed. The dimension
index starts at 0. It is an error to squeeze a dimension that is not 1.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
Contains the same data as `input`, but has one or more dimensions of
size 1 removed.
"""
result = _op_def_lib.apply_op("Squeeze", input=input,
squeeze_dims=squeeze_dims, name=name)
return result
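# Illustrative usage (a sketch; `tf.squeeze` is the public wrapper):
#
#   import tensorflow as tf
#   t = tf.zeros([1, 2, 1, 3, 1, 1])
#   tf.squeeze(t)          # shape [2, 3]
#   tf.squeeze(t, [2, 4])  # shape [1, 2, 3, 1]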
_stop_gradient_outputs = ["output"]
def stop_gradient(input, name=None):
r"""Stops gradient computation.
When executed in a graph, this op outputs its input tensor as-is.
When building ops to compute gradients, this op prevents the contribution of
its inputs to be taken into account. Normally, the gradient generator adds ops
to a graph to compute the derivatives of a specified 'loss' by recursively
finding out inputs that contributed to its computation. If you insert this op
in the graph, its inputs are masked from the gradient generator. They are not
taken into account for computing gradients.
This is useful any time you want to compute a value with TensorFlow but need
to pretend that the value was a constant. Some examples include:
* The *EM* algorithm where the *M-step* should not involve backpropagation
through the output of the *E-step*.
* Contrastive divergence training of Boltzmann machines where, when
differentiating the energy function, the training must not backpropagate
through the graph that generated the samples from the model.
* Adversarial training, where no backprop should happen through the adversarial
example generation process.
Args:
input: A `Tensor`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("StopGradient", input=input, name=name)
return result
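# Illustrative usage (a sketch): treat a computed value as a constant during
# backprop, e.g. a bootstrapped regression target. `compute_target` is a
# hypothetical helper, not part of this module.
#
#   import tensorflow as tf
#   target = tf.stop_gradient(compute_target(x))
#   loss = tf.reduce_mean(tf.square(prediction - target))
#   # gradients flow into `prediction` but not into compute_target(x)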
_strided_slice_outputs = ["output"]
def strided_slice(input, begin, end, strides, begin_mask=None, end_mask=None,
ellipsis_mask=None, new_axis_mask=None,
shrink_axis_mask=None, name=None):
r"""Return a strided slice from `input`.
Note, most python users will want to use the Python `Tensor.__getitem__`
or `Variable.__getitem__` rather than this op directly.
The goal of this op is to produce a new tensor with a subset of
the elements from the `n` dimensional `input` tensor. The subset is chosen using
a sequence of `m` sparse range specifications encoded into the arguments
of this function. Note, in some cases
`m` could be equal to `n`, but this need not be the case. Each
range specification entry can be one of the following:
- An ellipsis (...). Ellipses are used to imply zero or more
dimensions of full-dimension selection and are produced using
`ellipsis_mask`. For example, `foo[...]` is the identity slice.
- A new axis. This is used to insert a new shape=1 dimension and is
produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where
`foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.
- A range `begin:end:stride`. This is used to specify how much to choose from
a given dimension. `stride` can be any integer but 0. `begin` is an integer
which represents the index of the first value to select while `end` represents
the index of the last value to select. The number of values selected in each
dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
`begin` and `end` can be negative where `-1` is the last element, `-2` is
the second to last. `begin_mask` controls whether to replace the explicitly
given `begin` with an implicit effective value of `0` if `stride > 0` and
`-1` if `stride < 0`. `end_mask` is analogous but produces the number
required to create the largest open interval. For example, given a shape
`(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
and `end` of `0` and `2`. Another example is `foo[:-3:-1]` which reverses the
first dimension of a tensor while dropping the first two (in the original
order) elements. For example `foo = [1,2,3,4]; foo[:-3:-1]` is `[4,3]`.
- A single index. This is used to keep only elements that have a given
index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
shape `(6,)` tensor. This is encoded in `begin` and `end` and
`shrink_axis_mask`.
Each conceptual range specification is encoded in the op's arguments. This
encoding is best understood by considering a non-trivial example. In
particular,
`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as
```prettyprint
begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
end = [2, 4, x, x, -3, x]
strides = [1, 1, x, x, -1, 1]
begin_mask = 1<<4 | 1 << 5 = 48
end_mask = 1<<5 = 32
ellipsis_mask = 1<<3 = 8
new_axis_mask = 1<<2 = 4
shrink_axis_mask = 1<<0 = 1
```
In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
the slice becomes (2, 1, 5, 5, 2, 5).
Let us walk step by step through each argument specification.
1. The first argument in the example slice is turned into `begin = 1` and
`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
also set the appropriate bit in `shrink_axis_mask`.
2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
zero bits contributed.
3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1
dimension in the final shape. Dummy values are contributed to begin,
end and stride, while the new_axis_mask bit is set.
4. `...` grabs the full ranges from as many dimensions as needed to
fully specify a slice for every dimension of the input shape.
5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
with a dimension that has shape `s` is converted to a positive index
`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
is done internally so begin, end and strides receive x, -3, and -1.
The appropriate begin_mask bit is set to indicate the start range is the
full range (ignoring the x).
6. `:` indicates that the entire contents of the corresponding dimension
is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
`end_mask` are also set.
*Requirements*:
`0 != strides[i] for i in [0, m)`
`ellipsis_mask must be a power of two (only one ellipsis)`
Args:
input: A `Tensor`.
begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
`begin[k]` specifies the offset into the `k`th range specification.
The exact dimension this corresponds to will be determined by context.
Out-of-bounds values will be silently clamped. If the `k`th bit of
`begin_mask` is set, then `begin[k]` is ignored and the full range of the
appropriate dimension is used instead. Negative values cause indexing
to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.
end: A `Tensor`. Must have the same type as `begin`.
`end[i]` is like `begin` with the exception that `end_mask` is
used to determine full ranges.
strides: A `Tensor`. Must have the same type as `begin`.
`strides[i]` specifies the increment in the `i`th specification
after extracting a given element. Negative indices will reverse
the original order. Out-of-range values are
clamped to `[0, dim[i])` if `slice[i] > 0` or `[-1, dim[i]-1]` if `slice[i] < 0`.
begin_mask: An optional `int`. Defaults to `0`.
a bitmask where a bit i being 1 means to ignore the begin
value and instead use the largest interval possible. At runtime
begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or
`[-1, n-1]` if `stride[i] < 0`
end_mask: An optional `int`. Defaults to `0`. analogous to `begin_mask`
ellipsis_mask: An optional `int`. Defaults to `0`.
a bitmask where bit `i` being 1 means the `i`th
position is actually an ellipsis. One bit at most can be 1.
If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)`
is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis
implicitly creates as many range specifications as necessary to fully
specify the sliced range for every dimension. For example for a 4-dimensional
tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`.
new_axis_mask: An optional `int`. Defaults to `0`.
a bitmask where bit `i` being 1 means the `i`th
specification creates a new shape 1 dimension. For example
`foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor.
shrink_axis_mask: An optional `int`. Defaults to `0`.
a bitmask where bit `i` implies that the `i`th
specification should shrink the dimensionality. begin and end
must imply a slice of size 1 in the dimension. For example in
python one might do `foo[:, 3, :]` which would result in
`shrink_axis_mask` being 2.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("StridedSlice", input=input, begin=begin,
end=end, strides=strides,
begin_mask=begin_mask, end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask, name=name)
return result
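# A hedged sketch of the worked mask-encoding example above, written with
# Python indexing (which lowers to this op in TF 1.x); the helper name is
# hypothetical.
def _example_strided_slice():
  import tensorflow as tf
  foo = tf.zeros([5, 5, 5, 5, 5, 5])
  sliced = foo[1, 2:4, None, ..., :-3:-1, :]
  # Per the docstring's walk-through, `sliced` has shape (2, 1, 5, 5, 2, 5).
  return sliced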
_strided_slice_assign_outputs = ["output_ref"]
def strided_slice_assign(ref, begin, end, strides, value, begin_mask=None,
end_mask=None, ellipsis_mask=None,
new_axis_mask=None, shrink_axis_mask=None,
name=None):
r"""Assign `value` to the sliced l-value reference of `ref`.
The values of `value` are assigned to the positions in the variable
`ref` that are selected by the slice parameters. The slice parameters
`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
NOTE this op currently does not support broadcasting and so `value`'s
shape must be exactly the shape produced by the slice of `ref`.
Args:
ref: A mutable `Tensor`.
begin: A `Tensor`. Must be one of the following types: `int32`, `int64`.
end: A `Tensor`. Must have the same type as `begin`.
strides: A `Tensor`. Must have the same type as `begin`.
value: A `Tensor`. Must have the same type as `ref`.
begin_mask: An optional `int`. Defaults to `0`.
end_mask: An optional `int`. Defaults to `0`.
ellipsis_mask: An optional `int`. Defaults to `0`.
new_axis_mask: An optional `int`. Defaults to `0`.
shrink_axis_mask: An optional `int`. Defaults to `0`.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
result = _op_def_lib.apply_op("StridedSliceAssign", ref=ref, begin=begin,
end=end, strides=strides, value=value,
begin_mask=begin_mask, end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask, name=name)
return result
_strided_slice_grad_outputs = ["output"]
def strided_slice_grad(shape, begin, end, strides, dy, begin_mask=None,
end_mask=None, ellipsis_mask=None, new_axis_mask=None,
shrink_axis_mask=None, name=None):
r"""Returns the gradient of `StridedSlice`.
Since `StridedSlice` cuts out pieces of its `input` which is size
`shape`, its gradient will have the same shape (which is passed here
as `shape`). The gradient will be zero in any element that the slice
does not select.
Arguments are the same as `StridedSlice` with the exception that
`dy` is the input gradient to be propagated and `shape` is the
shape of `StridedSlice`'s `input`.
Args:
shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
begin: A `Tensor`. Must have the same type as `shape`.
end: A `Tensor`. Must have the same type as `shape`.
strides: A `Tensor`. Must have the same type as `shape`.
dy: A `Tensor`.
begin_mask: An optional `int`. Defaults to `0`.
end_mask: An optional `int`. Defaults to `0`.
ellipsis_mask: An optional `int`. Defaults to `0`.
new_axis_mask: An optional `int`. Defaults to `0`.
shrink_axis_mask: An optional `int`. Defaults to `0`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `dy`.
"""
result = _op_def_lib.apply_op("StridedSliceGrad", shape=shape, begin=begin,
end=end, strides=strides, dy=dy,
begin_mask=begin_mask, end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask, name=name)
return result
_tile_outputs = ["output"]
def tile(input, multiples, name=None):
r"""Constructs a tensor by tiling a given tensor.
This operation creates a new tensor by replicating `input` `multiples` times.
The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
and the values of `input` are replicated `multiples[i]` times along the 'i'th
dimension. For example, tiling `[a b c d]` by `[2]` produces
`[a b c d a b c d]`.
Args:
input: A `Tensor`. 1-D or higher.
multiples: A `Tensor`. Must be one of the following types: `int32`, `int64`.
1-D. Length must be the same as the number of dimensions in `input`
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("Tile", input=input, multiples=multiples,
name=name)
return result
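# Minimal hedged sketch of the docstring's tiling example (`[a b c d]` tiled
# by `[2]`), via the public `tf.tile` wrapper; the helper name is hypothetical.
def _example_tile():
  import tensorflow as tf
  t = tf.constant([1, 2, 3, 4])
  return tf.tile(t, [2])  # [1, 2, 3, 4, 1, 2, 3, 4]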
__tile_grad_outputs = ["output"]
def _tile_grad(input, multiples, name=None):
r"""Returns the gradient of `Tile`.
Since `Tile` takes an input and repeats the input `multiples` times
along each dimension, `TileGrad` takes in `multiples` and aggregates
each repeated tile of `input` into `output`.
Args:
input: A `Tensor`.
multiples: A `Tensor` of type `int32`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `input`.
"""
result = _op_def_lib.apply_op("TileGrad", input=input, multiples=multiples,
name=name)
return result
_transpose_outputs = ["y"]
def transpose(x, perm, name=None):
r"""Shuffle dimensions of x according to a permutation.
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
`y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
Args:
x: A `Tensor`.
perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
result = _op_def_lib.apply_op("Transpose", x=x, perm=perm, name=name)
return result
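# Hedged sketch of the permutation contract `y.shape[i] == x.shape[perm[i]]`,
# via the public `tf.transpose` wrapper; the helper name is hypothetical.
def _example_transpose():
  import tensorflow as tf
  x = tf.zeros([2, 3, 4])
  return tf.transpose(x, perm=[1, 0, 2])  # shape (3, 2, 4)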
_unique_outputs = ["y", "idx"]
_UniqueOutput = _collections.namedtuple("Unique", _unique_outputs)
def unique(x, out_idx=None, name=None):
r"""Finds unique elements in a 1-D tensor.
This operation returns a tensor `y` containing all of the unique elements of `x`
sorted in the same order that they occur in `x`. This operation also returns a
tensor `idx` the same size as `x` that contains the index of each value of `x`
in the unique output `y`. In other words:
`y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
For example:
```prettyprint
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```
Args:
x: A `Tensor`. 1-D.
out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, idx).
y: A `Tensor`. Has the same type as `x`. 1-D.
idx: A `Tensor` of type `out_idx`. 1-D.
"""
result = _op_def_lib.apply_op("Unique", x=x, out_idx=out_idx, name=name)
return _UniqueOutput._make(result)
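# The docstring's example reproduced as a hedged sketch with the public
# `tf.unique` wrapper; the helper name is hypothetical.
def _example_unique():
  import tensorflow as tf
  x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8])
  y, idx = tf.unique(x)
  # y   ==> [1, 2, 4, 7, 8]
  # idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
  return y, idx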
_unique_with_counts_outputs = ["y", "idx", "count"]
_UniqueWithCountsOutput = _collections.namedtuple("UniqueWithCounts",
_unique_with_counts_outputs)
def unique_with_counts(x, out_idx=None, name=None):
r"""Finds unique elements in a 1-D tensor.
This operation returns a tensor `y` containing all of the unique elements of `x`
sorted in the same order that they occur in `x`. This operation also returns a
tensor `idx` the same size as `x` that contains the index of each value of `x`
in the unique output `y`. Finally, it returns a third tensor `count` that
contains the count of each element of `y` in `x`. In other words:
`y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`
For example:
```prettyprint
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx, count = unique_with_counts(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
count ==> [2, 1, 3, 1, 2]
```
Args:
x: A `Tensor`. 1-D.
out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (y, idx, count).
y: A `Tensor`. Has the same type as `x`. 1-D.
idx: A `Tensor` of type `out_idx`. 1-D.
count: A `Tensor` of type `out_idx`. 1-D.
"""
result = _op_def_lib.apply_op("UniqueWithCounts", x=x, out_idx=out_idx,
name=name)
return _UniqueWithCountsOutput._make(result)
__unpack_outputs = ["output"]
def _unpack(value, num, axis=None, name=None):
r"""Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
For example, given a tensor of shape `(A, B, C, D)`;
If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
and each tensor in `output` will have shape `(B, C, D)`. (Note that the
dimension unpacked along is gone, unlike `split`).
If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
and each tensor in `output` will have shape `(A, C, D)`.
Etc.
This is the opposite of `pack`.
Args:
value: A `Tensor`.
1-D or higher, with `axis` dimension size equal to `num`.
num: An `int` that is `>= 0`.
axis: An optional `int`. Defaults to `0`.
Dimension along which to unpack. Negative values wrap around, so the
valid range is `[-R, R)`.
name: A name for the operation (optional).
Returns:
A list of `num` `Tensor` objects of the same type as value.
The list of tensors unpacked from `value`.
"""
result = _op_def_lib.apply_op("Unpack", value=value, num=num, axis=axis,
name=name)
return result
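# Hedged sketch of unpacking along axis 0; the public TF 1.x spelling is
# `tf.unstack`, and the helper name is hypothetical.
def _example_unpack():
  import tensorflow as tf
  value = tf.zeros([3, 4, 5])
  return tf.unstack(value, num=3, axis=0)  # three tensors of shape (4, 5)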
_where_outputs = ["index"]
def where(input, name=None):
r"""Returns locations of true values in a boolean tensor.
This operation returns the coordinates of true elements in `input`. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
represents the number of true elements, and the second dimension (columns)
represents the coordinates of the true elements. Keep in mind, the shape of
the output tensor can vary depending on how many true values there are in
`input`. Indices are output in row-major order.
For example:
```prettyprint
# 'input' tensor is [[True, False]
# [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
[1, 0]]
# `input` tensor is [[[True, False]
# [True, False]]
# [[False, True]
# [False, True]]
# [[False, False]
# [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
[0, 1, 0],
[1, 0, 1],
[1, 1, 1],
[2, 1, 1]]
```
Args:
input: A `Tensor` of type `bool`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`.
"""
result = _op_def_lib.apply_op("Where", input=input, name=name)
return result
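# The 2-D example from the docstring as a hedged sketch using the public
# `tf.where` wrapper; the helper name is hypothetical.
def _example_where():
  import tensorflow as tf
  mask = tf.constant([[True, False], [True, False]])
  return tf.where(mask)  # [[0, 0], [1, 0]]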
__zeros_like_outputs = ["y"]
def _zeros_like(x, name=None):
r"""Returns a tensor of zeros with the same shape and type as x.
Args:
x: A `Tensor`. a tensor of type T.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
a tensor of the same shape and type as x but filled with zeros.
"""
result = _op_def_lib.apply_op("ZerosLike", x=x, name=name)
return result
def _InitOpDefLibrary():
op_list = _op_def_pb2.OpList()
_text_format.Merge(_InitOpDefLibrary.op_list_ascii, op_list)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
_InitOpDefLibrary.op_list_ascii = """op {
name: "BatchMatrixBandPart"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "num_lower"
type: DT_INT64
}
input_arg {
name: "num_upper"
type: DT_INT64
}
output_arg {
name: "band"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
deprecation {
version: 14
explanation: "Use MatrixBandPart"
}
}
op {
name: "BatchMatrixDiag"
input_arg {
name: "diagonal"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
deprecation {
version: 14
explanation: "Use MatrixDiag"
}
}
op {
name: "BatchMatrixDiagPart"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "diagonal"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
deprecation {
version: 14
explanation: "Use MatrixDiagPart"
}
}
op {
name: "BatchMatrixSetDiag"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "diagonal"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
deprecation {
version: 14
explanation: "Use MatrixSetDiag"
}
}
op {
name: "BatchToSpace"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "crops"
type_attr: "Tidx"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "block_size"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "Tidx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "BatchToSpaceND"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "block_shape"
type_attr: "Tblock_shape"
}
input_arg {
name: "crops"
type_attr: "Tcrops"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tblock_shape"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "Tcrops"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Bitcast"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "type"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
attr {
name: "type"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT64
type: DT_INT32
type: DT_UINT8
type: DT_UINT16
type: DT_INT16
type: DT_INT8
type: DT_COMPLEX64
type: DT_COMPLEX128
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT32
type: DT_HALF
}
}
}
}
op {
name: "BroadcastArgs"
input_arg {
name: "s0"
type_attr: "T"
}
input_arg {
name: "s1"
type_attr: "T"
}
output_arg {
name: "r0"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "BroadcastGradientArgs"
input_arg {
name: "s0"
type_attr: "T"
}
input_arg {
name: "s1"
type_attr: "T"
}
output_arg {
name: "r0"
type_attr: "T"
}
output_arg {
name: "r1"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "CheckNumerics"
input_arg {
name: "tensor"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
attr {
name: "message"
type: "string"
}
}
op {
name: "Concat"
input_arg {
name: "concat_dim"
type: DT_INT32
}
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "T"
type: "type"
}
}
op {
name: "ConcatOffset"
input_arg {
name: "concat_dim"
type: DT_INT32
}
input_arg {
name: "shape"
type: DT_INT32
number_attr: "N"
}
output_arg {
name: "offset"
type: DT_INT32
number_attr: "N"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
}
op {
name: "ConcatV2"
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
input_arg {
name: "axis"
type_attr: "Tidx"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tidx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Const"
output_arg {
name: "output"
type_attr: "dtype"
}
attr {
name: "value"
type: "tensor"
}
attr {
name: "dtype"
type: "type"
}
}
op {
name: "Copy"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "tensor_name"
type: "string"
default_value {
s: ""
}
}
allows_uninitialized_input: true
}
op {
name: "CopyHost"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "tensor_name"
type: "string"
default_value {
s: ""
}
}
allows_uninitialized_input: true
}
op {
name: "DebugIdentity"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "tensor_name"
type: "string"
default_value {
s: ""
}
}
attr {
name: "debug_urls"
type: "list(string)"
default_value {
list {
}
}
}
allows_uninitialized_input: true
}
op {
name: "DebugNanCount"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type: DT_INT64
}
attr {
name: "T"
type: "type"
}
attr {
name: "tensor_name"
type: "string"
default_value {
s: ""
}
}
attr {
name: "debug_urls"
type: "list(string)"
default_value {
list {
}
}
}
allows_uninitialized_input: true
}
op {
name: "DebugNumericSummary"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type: DT_DOUBLE
}
attr {
name: "T"
type: "type"
}
attr {
name: "tensor_name"
type: "string"
default_value {
s: ""
}
}
attr {
name: "debug_urls"
type: "list(string)"
default_value {
list {
}
}
}
allows_uninitialized_input: true
}
op {
name: "DepthToSpace"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "block_size"
type: "int"
has_minimum: true
minimum: 2
}
}
op {
name: "Dequantize"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "min_range"
type: DT_FLOAT
}
input_arg {
name: "max_range"
type: DT_FLOAT
}
output_arg {
name: "output"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT16
type: DT_QUINT16
type: DT_QINT32
}
}
}
attr {
name: "mode"
type: "string"
default_value {
s: "MIN_COMBINED"
}
allowed_values {
list {
s: "MIN_COMBINED"
s: "MIN_FIRST"
}
}
}
}
op {
name: "Diag"
input_arg {
name: "diagonal"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_COMPLEX64
type: DT_COMPLEX128
}
}
}
}
op {
name: "DiagPart"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "diagonal"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_COMPLEX64
type: DT_COMPLEX128
}
}
}
}
op {
name: "EditDistance"
input_arg {
name: "hypothesis_indices"
type: DT_INT64
}
input_arg {
name: "hypothesis_values"
type_attr: "T"
}
input_arg {
name: "hypothesis_shape"
type: DT_INT64
}
input_arg {
name: "truth_indices"
type: DT_INT64
}
input_arg {
name: "truth_values"
type_attr: "T"
}
input_arg {
name: "truth_shape"
type: DT_INT64
}
output_arg {
name: "output"
type: DT_FLOAT
}
attr {
name: "normalize"
type: "bool"
default_value {
b: true
}
}
attr {
name: "T"
type: "type"
}
}
op {
name: "ExpandDims"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "dim"
type_attr: "Tdim"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tdim"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ExtractImagePatches"
input_arg {
name: "images"
type_attr: "T"
}
output_arg {
name: "patches"
type_attr: "T"
}
attr {
name: "ksizes"
type: "list(int)"
has_minimum: true
minimum: 4
}
attr {
name: "strides"
type: "list(int)"
has_minimum: true
minimum: 4
}
attr {
name: "rates"
type: "list(int)"
has_minimum: true
minimum: 4
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
type: DT_INT32
type: DT_INT64
type: DT_UINT8
type: DT_INT16
type: DT_INT8
type: DT_UINT16
type: DT_HALF
}
}
}
attr {
name: "padding"
type: "string"
allowed_values {
list {
s: "SAME"
s: "VALID"
}
}
}
}
op {
name: "FakeQuantWithMinMaxArgs"
input_arg {
name: "inputs"
type: DT_FLOAT
}
output_arg {
name: "outputs"
type: DT_FLOAT
}
attr {
name: "min"
type: "float"
default_value {
f: -6
}
}
attr {
name: "max"
type: "float"
default_value {
f: 6
}
}
}
op {
name: "FakeQuantWithMinMaxArgsGradient"
input_arg {
name: "gradients"
type: DT_FLOAT
}
input_arg {
name: "inputs"
type: DT_FLOAT
}
output_arg {
name: "backprops"
type: DT_FLOAT
}
attr {
name: "min"
type: "float"
default_value {
f: -6
}
}
attr {
name: "max"
type: "float"
default_value {
f: 6
}
}
}
op {
name: "FakeQuantWithMinMaxVars"
input_arg {
name: "inputs"
type: DT_FLOAT
}
input_arg {
name: "min"
type: DT_FLOAT
}
input_arg {
name: "max"
type: DT_FLOAT
}
output_arg {
name: "outputs"
type: DT_FLOAT
}
}
op {
name: "FakeQuantWithMinMaxVarsGradient"
input_arg {
name: "gradients"
type: DT_FLOAT
}
input_arg {
name: "inputs"
type: DT_FLOAT
}
input_arg {
name: "min"
type: DT_FLOAT
}
input_arg {
name: "max"
type: DT_FLOAT
}
output_arg {
name: "backprops_wrt_input"
type: DT_FLOAT
}
output_arg {
name: "backprop_wrt_min"
type: DT_FLOAT
}
output_arg {
name: "backprop_wrt_max"
type: DT_FLOAT
}
}
op {
name: "FakeQuantWithMinMaxVarsPerChannel"
input_arg {
name: "inputs"
type: DT_FLOAT
}
input_arg {
name: "min"
type: DT_FLOAT
}
input_arg {
name: "max"
type: DT_FLOAT
}
output_arg {
name: "outputs"
type: DT_FLOAT
}
}
op {
name: "FakeQuantWithMinMaxVarsPerChannelGradient"
input_arg {
name: "gradients"
type: DT_FLOAT
}
input_arg {
name: "inputs"
type: DT_FLOAT
}
input_arg {
name: "min"
type: DT_FLOAT
}
input_arg {
name: "max"
type: DT_FLOAT
}
output_arg {
name: "backprops_wrt_input"
type: DT_FLOAT
}
output_arg {
name: "backprop_wrt_min"
type: DT_FLOAT
}
output_arg {
name: "backprop_wrt_max"
type: DT_FLOAT
}
}
op {
name: "Fill"
input_arg {
name: "dims"
type: DT_INT32
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "Gather"
input_arg {
name: "params"
type_attr: "Tparams"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "output"
type_attr: "Tparams"
}
attr {
name: "validate_indices"
type: "bool"
default_value {
b: true
}
}
attr {
name: "Tparams"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "GatherNd"
input_arg {
name: "params"
type_attr: "Tparams"
}
input_arg {
name: "indices"
type_attr: "Tindices"
}
output_arg {
name: "output"
type_attr: "Tparams"
}
attr {
name: "Tparams"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Identity"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "ImmutableConst"
output_arg {
name: "tensor"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "shape"
type: "shape"
}
attr {
name: "memory_region_name"
type: "string"
}
}
op {
name: "InvertPermutation"
input_arg {
name: "x"
type_attr: "T"
}
output_arg {
name: "y"
type_attr: "T"
}
attr {
name: "T"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ListDiff"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "y"
type_attr: "T"
}
output_arg {
name: "out"
type_attr: "T"
}
output_arg {
name: "idx"
type_attr: "out_idx"
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_idx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "MatrixBandPart"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "num_lower"
type: DT_INT64
}
input_arg {
name: "num_upper"
type: DT_INT64
}
output_arg {
name: "band"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "MatrixDiag"
input_arg {
name: "diagonal"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "MatrixDiagPart"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "diagonal"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "MatrixSetDiag"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "diagonal"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "MirrorPad"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "paddings"
type_attr: "Tpaddings"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tpaddings"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "mode"
type: "string"
allowed_values {
list {
s: "REFLECT"
s: "SYMMETRIC"
}
}
}
}
op {
name: "MirrorPadGrad"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "paddings"
type_attr: "Tpaddings"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tpaddings"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "mode"
type: "string"
allowed_values {
list {
s: "REFLECT"
s: "SYMMETRIC"
}
}
}
}
op {
name: "OneHot"
input_arg {
name: "indices"
type_attr: "TI"
}
input_arg {
name: "depth"
type: DT_INT32
}
input_arg {
name: "on_value"
type_attr: "T"
}
input_arg {
name: "off_value"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "axis"
type: "int"
default_value {
i: -1
}
}
attr {
name: "T"
type: "type"
}
attr {
name: "TI"
type: "type"
default_value {
type: DT_INT64
}
allowed_values {
list {
type: DT_UINT8
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Pack"
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
attr {
name: "axis"
type: "int"
default_value {
i: 0
}
}
}
op {
name: "Pad"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "paddings"
type_attr: "Tpaddings"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tpaddings"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ParallelConcat"
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
attr {
name: "shape"
type: "shape"
}
}
op {
name: "Placeholder"
output_arg {
name: "output"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "shape"
type: "shape"
default_value {
shape {
}
}
}
}
op {
name: "PlaceholderV2"
output_arg {
name: "output"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "shape"
type: "shape"
}
}
op {
name: "PlaceholderWithDefault"
input_arg {
name: "input"
type_attr: "dtype"
}
output_arg {
name: "output"
type_attr: "dtype"
}
attr {
name: "dtype"
type: "type"
}
attr {
name: "shape"
type: "shape"
}
}
op {
name: "PreventGradient"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "QuantizeAndDequantize"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "signed_input"
type: "bool"
default_value {
b: true
}
}
attr {
name: "num_bits"
type: "int"
default_value {
i: 8
}
}
attr {
name: "range_given"
type: "bool"
default_value {
b: false
}
}
attr {
name: "input_min"
type: "float"
default_value {
f: 0
}
}
attr {
name: "input_max"
type: "float"
default_value {
f: 0
}
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_FLOAT
type: DT_DOUBLE
}
}
}
}
op {
name: "QuantizeV2"
input_arg {
name: "input"
type: DT_FLOAT
}
input_arg {
name: "min_range"
type: DT_FLOAT
}
input_arg {
name: "max_range"
type: DT_FLOAT
}
output_arg {
name: "output"
type_attr: "T"
}
output_arg {
name: "output_min"
type: DT_FLOAT
}
output_arg {
name: "output_max"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT16
type: DT_QUINT16
type: DT_QINT32
}
}
}
attr {
name: "mode"
type: "string"
default_value {
s: "MIN_COMBINED"
}
allowed_values {
list {
s: "MIN_COMBINED"
s: "MIN_FIRST"
}
}
}
}
op {
name: "QuantizedConcat"
input_arg {
name: "concat_dim"
type: DT_INT32
}
input_arg {
name: "values"
type_attr: "T"
number_attr: "N"
}
input_arg {
name: "input_mins"
type: DT_FLOAT
number_attr: "N"
}
input_arg {
name: "input_maxes"
type: DT_FLOAT
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "T"
}
output_arg {
name: "output_min"
type: DT_FLOAT
}
output_arg {
name: "output_max"
type: DT_FLOAT
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 2
}
attr {
name: "T"
type: "type"
}
}
op {
name: "QuantizedInstanceNorm"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "x_min"
type: DT_FLOAT
}
input_arg {
name: "x_max"
type: DT_FLOAT
}
output_arg {
name: "y"
type_attr: "T"
}
output_arg {
name: "y_min"
type: DT_FLOAT
}
output_arg {
name: "y_max"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_QINT8
type: DT_QUINT8
type: DT_QINT16
type: DT_QUINT16
type: DT_QINT32
}
}
}
attr {
name: "output_range_given"
type: "bool"
default_value {
b: false
}
}
attr {
name: "given_y_min"
type: "float"
default_value {
f: 0
}
}
attr {
name: "given_y_max"
type: "float"
default_value {
f: 0
}
}
attr {
name: "variance_epsilon"
type: "float"
default_value {
f: 1e-05
}
}
attr {
name: "min_separation"
type: "float"
default_value {
f: 0.001
}
}
}
op {
name: "QuantizedReshape"
input_arg {
name: "tensor"
type_attr: "T"
}
input_arg {
name: "shape"
type_attr: "Tshape"
}
input_arg {
name: "input_min"
type: DT_FLOAT
}
input_arg {
name: "input_max"
type: DT_FLOAT
}
output_arg {
name: "output"
type_attr: "T"
}
output_arg {
name: "output_min"
type: DT_FLOAT
}
output_arg {
name: "output_max"
type: DT_FLOAT
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tshape"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Rank"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type: DT_INT32
}
attr {
name: "T"
type: "type"
}
}
op {
name: "RefIdentity"
input_arg {
name: "input"
type_attr: "T"
is_ref: true
}
output_arg {
name: "output"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
allows_uninitialized_input: true
}
op {
name: "Reshape"
input_arg {
name: "tensor"
type_attr: "T"
}
input_arg {
name: "shape"
type_attr: "Tshape"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tshape"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Reverse"
input_arg {
name: "tensor"
type_attr: "T"
}
input_arg {
name: "dims"
type: DT_BOOL
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT32
type: DT_INT64
type: DT_BOOL
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
type: DT_COMPLEX64
type: DT_COMPLEX128
}
}
}
}
op {
name: "ReverseSequence"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "seq_lengths"
type_attr: "Tlen"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "seq_dim"
type: "int"
}
attr {
name: "batch_dim"
type: "int"
default_value {
i: 0
}
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tlen"
type: "type"
default_value {
type: DT_INT64
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ReverseV2"
input_arg {
name: "tensor"
type_attr: "T"
}
input_arg {
name: "axis"
type_attr: "Tidx"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "Tidx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "T"
type: "type"
allowed_values {
list {
type: DT_UINT8
type: DT_INT8
type: DT_INT32
type: DT_INT64
type: DT_BOOL
type: DT_HALF
type: DT_FLOAT
type: DT_DOUBLE
type: DT_COMPLEX64
type: DT_COMPLEX128
}
}
}
}
op {
name: "ScatterNd"
input_arg {
name: "indices"
type_attr: "Tindices"
}
input_arg {
name: "updates"
type_attr: "T"
}
input_arg {
name: "shape"
type_attr: "Tindices"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tindices"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Shape"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "out_type"
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_type"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "ShapeN"
input_arg {
name: "input"
type_attr: "T"
number_attr: "N"
}
output_arg {
name: "output"
type_attr: "out_type"
number_attr: "N"
}
attr {
name: "N"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_type"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Size"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "out_type"
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_type"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Slice"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "begin"
type_attr: "Index"
}
input_arg {
name: "size"
type_attr: "Index"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Index"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "SpaceToBatch"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "paddings"
type_attr: "Tpaddings"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tpaddings"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "block_size"
type: "int"
has_minimum: true
minimum: 2
}
}
op {
name: "SpaceToBatchND"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "block_shape"
type_attr: "Tblock_shape"
}
input_arg {
name: "paddings"
type_attr: "Tpaddings"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tblock_shape"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "Tpaddings"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "SpaceToDepth"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "block_size"
type: "int"
has_minimum: true
minimum: 2
}
}
op {
name: "Split"
input_arg {
name: "split_dim"
type: DT_INT32
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
number_attr: "num_split"
}
attr {
name: "num_split"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
}
op {
name: "SplitV"
input_arg {
name: "value"
type_attr: "T"
}
input_arg {
name: "size_splits"
type_attr: "Tlen"
}
input_arg {
name: "split_dim"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
number_attr: "num_split"
}
attr {
name: "num_split"
type: "int"
has_minimum: true
minimum: 1
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tlen"
type: "type"
default_value {
type: DT_INT64
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Squeeze"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "squeeze_dims"
type: "list(int)"
default_value {
list {
}
}
has_minimum: true
}
}
op {
name: "StopGradient"
input_arg {
name: "input"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
op {
name: "StridedSlice"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "begin"
type_attr: "Index"
}
input_arg {
name: "end"
type_attr: "Index"
}
input_arg {
name: "strides"
type_attr: "Index"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Index"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "begin_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "end_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "ellipsis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "new_axis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "shrink_axis_mask"
type: "int"
default_value {
i: 0
}
}
}
op {
name: "StridedSliceAssign"
input_arg {
name: "ref"
type_attr: "T"
is_ref: true
}
input_arg {
name: "begin"
type_attr: "Index"
}
input_arg {
name: "end"
type_attr: "Index"
}
input_arg {
name: "strides"
type_attr: "Index"
}
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output_ref"
type_attr: "T"
is_ref: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "Index"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "begin_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "end_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "ellipsis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "new_axis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "shrink_axis_mask"
type: "int"
default_value {
i: 0
}
}
}
op {
name: "StridedSliceGrad"
input_arg {
name: "shape"
type_attr: "Index"
}
input_arg {
name: "begin"
type_attr: "Index"
}
input_arg {
name: "end"
type_attr: "Index"
}
input_arg {
name: "strides"
type_attr: "Index"
}
input_arg {
name: "dy"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Index"
type: "type"
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
attr {
name: "begin_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "end_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "ellipsis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "new_axis_mask"
type: "int"
default_value {
i: 0
}
}
attr {
name: "shrink_axis_mask"
type: "int"
default_value {
i: 0
}
}
}
op {
name: "Tile"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "multiples"
type_attr: "Tmultiples"
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tmultiples"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "TileGrad"
input_arg {
name: "input"
type_attr: "T"
}
input_arg {
name: "multiples"
type: DT_INT32
}
output_arg {
name: "output"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
deprecation {
version: 3
explanation: "TileGrad has been replaced with reduce_sum"
}
}
op {
name: "Transpose"
input_arg {
name: "x"
type_attr: "T"
}
input_arg {
name: "perm"
type_attr: "Tperm"
}
output_arg {
name: "y"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
attr {
name: "Tperm"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Unique"
input_arg {
name: "x"
type_attr: "T"
}
output_arg {
name: "y"
type_attr: "T"
}
output_arg {
name: "idx"
type_attr: "out_idx"
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_idx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "UniqueWithCounts"
input_arg {
name: "x"
type_attr: "T"
}
output_arg {
name: "y"
type_attr: "T"
}
output_arg {
name: "idx"
type_attr: "out_idx"
}
output_arg {
name: "count"
type_attr: "out_idx"
}
attr {
name: "T"
type: "type"
}
attr {
name: "out_idx"
type: "type"
default_value {
type: DT_INT32
}
allowed_values {
list {
type: DT_INT32
type: DT_INT64
}
}
}
}
op {
name: "Unpack"
input_arg {
name: "value"
type_attr: "T"
}
output_arg {
name: "output"
type_attr: "T"
number_attr: "num"
}
attr {
name: "num"
type: "int"
has_minimum: true
}
attr {
name: "T"
type: "type"
}
attr {
name: "axis"
type: "int"
default_value {
i: 0
}
}
}
op {
name: "Where"
input_arg {
name: "input"
type: DT_BOOL
}
output_arg {
name: "index"
type: DT_INT64
}
}
op {
name: "ZerosLike"
input_arg {
name: "x"
type_attr: "T"
}
output_arg {
name: "y"
type_attr: "T"
}
attr {
name: "T"
type: "type"
}
}
"""
_op_def_lib = _InitOpDefLibrary()
| [
"[email protected]"
]
| |
a9749c69fb2f2bd8734986e55dce50c7af2f6123 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-outboundbot/aliyunsdkoutboundbot/request/v20191226/ListDialogueFlowsRequest.py | 5d55b4950f49b839ff202310f382ed0eb02fd0c3 | [
"Apache-2.0"
]
| permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,628 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class ListDialogueFlowsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'ListDialogueFlows','outboundbot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ScriptId(self):
return self.get_query_params().get('ScriptId')
def set_ScriptId(self,ScriptId):
self.add_query_param('ScriptId',ScriptId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) | [
"[email protected]"
]
| |
284d78f1fcbbd919d674c1a864e74677d41bcf25 | 8a8438419c211d1c3f3333d9215d501ee5aa745a | /annotation/annot_express.py | 58959152c5c2b2d83da9965f1eaa45af7241adbb | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | WhiteLab/RNAseq | fb6478f0c8df3ee816d13b7e8998e4440a0ce7a3 | ea34f2d0b886076007fa382a87d37cd41e8417bb | refs/heads/master | 2021-01-25T15:56:56.386057 | 2018-04-19T17:03:40 | 2018-04-19T17:03:40 | 33,335,010 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,728 | py | #!/usr/bin/env python3
import sys
def make_index(index_ref):
ind = {}
index = open(index_ref, 'r')
next(index)
for line in index:
ann = line.rstrip('\n').split('\t')
ind[ann[0]] = {}
ind[ann[0]]['name'] = ann[1]
ind[ann[0]]['type'] = ann[2]
index.close()
return ind
def annot_express(index_ref, sample):
ind = make_index(index_ref)
table = open(sample + '.express_quantification.txt', 'r')
head = next(table)
out_fn = sample + '.express_quantification_annotated.txt'
out_fh = open(out_fn, 'w')
out_fh.write('name\ttype\t' + head)
for line in table:
info = line.split('\t')
if float(info[3]) > 0:
            out_fh.write(ind[info[1]]['name'] + '\t' + ind[info[1]]['type'] + '\t' + line)
# skipping this part, ends up being a fat log generator
# else:
# sys.stderr.write('Skipped ' + ind[info[1]]['name'] + ' ' + ind[info[1]]['type'] + ' ' + info[1]
# + ' no reads!\n')
table.close()
return 0
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Annotate eXpress quantification tables with gene names and types')
    parser.add_argument('-i', '--index', action='store', dest='index', help='Reference file with RNA gene names,'
                        ' type and transcript ids')
parser.add_argument('-sa', '--sample', action='store', dest='sample', help='Sample name prefix')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
inputs = parser.parse_args()
index = inputs.index
sample = inputs.sample
annot_express(index, sample)
| [
"[email protected]"
]
| |
c70cf1d502370239756e3024f3a99e04f5a385ee | b5cba88ce8c86740c8c3453134610fd5bafbb8c4 | /Leetcode/1295. Find Numbers with Even Number of Digits/solution.py | 49f4d0d266abbbea8614c1133f3ae970bf4a6cf3 | []
| no_license | EduardoSantos7/Algorithms4fun | 55fcf9d515ea3b70b93298ac96a58d2ae68dee11 | 6ff182ed596b6322322b087f29e6ad98baec3f97 | refs/heads/master | 2023-07-23T01:38:08.216313 | 2023-07-23T01:35:58 | 2023-07-23T01:35:58 | 227,448,848 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        result = 0
        seen = {}  # cache: number -> digit count, so repeated values are counted once
        for num in nums:
            if num in seen:
                if not seen[num] % 2:
                    result += 1
                continue
            count = 0
            n = num  # work on a copy so the original value can key the cache
            while n > 0:
                n //= 10
                count += 1
            seen[num] = count
            if not count % 2:
                result += 1
        return result
| [
"[email protected]"
]
| |
b32a32aa16e78d200314b7da8479cb85659f5783 | d74913eda69ee1799c887a645c574fa5a4da8fba | /code/download/download_soils.py | 225e8cea96a34dccc9d990085c4111021268eeee | [
"Apache-2.0"
]
| permissive | Fweek/pyMETRIC | efd6fe8c6ea74f5c87d19ecbb6653549fb3ba943 | 0e7eec57fedd33b81e6e7efe58290f50ebbebfab | refs/heads/master | 2021-05-03T10:23:15.066106 | 2018-02-06T19:32:36 | 2018-02-06T19:32:36 | 120,534,046 | 1 | 0 | null | 2018-02-06T23:00:49 | 2018-02-06T23:00:48 | null | UTF-8 | Python | false | false | 2,953 | py | #--------------------------------
# Name: download_soils.py
# Purpose: Download soil AWC raster
# Python: 2.7, 3.5, 3.6
#--------------------------------
import argparse
import datetime as dt
import logging
import os
import sys
from python_common import url_download
def main(output_folder, overwrite_flag=False):
"""Download soil Available Water Capacity (AWC) raster
Args:
output_folder (str): folder path where files will be saved
overwrite_flag (bool): If True, overwrite existing files
Returns:
None
"""
# Composite SSURGO/STATSGO
download_url = 'https://storage.googleapis.com/openet/ssurgo/AWC_WTA_0to10cm_composite.tif'
# STATSGO Only
# download_url = 'https://storage.googleapis.com/openet/statsgo/AWC_WTA_0to10cm_statsgo.tif'
output_name = download_url.split('/')[-1]
output_path = os.path.join(output_folder, output_name)
if not os.path.isdir(output_folder):
os.makedirs(output_folder)
if not os.path.isfile(output_path) or overwrite_flag:
logging.info('\nDownloading AWC')
logging.info(' {}'.format(download_url))
logging.info(' {}'.format(output_path))
url_download(download_url, output_path)
else:
logging.debug('\nAWC raster already downloaded')
def arg_parse():
"""Base all default folders from script location
scripts: ./pyMETRIC/code/download
code: ./pyMETRIC/code
output: ./pyMETRIC/soils
"""
script_folder = sys.path[0]
code_folder = os.path.dirname(script_folder)
project_folder = os.path.dirname(code_folder)
output_folder = os.path.join(project_folder, 'soils')
parser = argparse.ArgumentParser(
        description='Download Soil Available Water Capacity (AWC)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--output', help='Output folder', metavar='FOLDER',
default=os.path.join(project_folder, 'soils'))
parser.add_argument(
'-o', '--overwrite', default=None, action="store_true",
help='Force overwrite of existing files')
parser.add_argument(
'-d', '--debug', default=logging.INFO, const=logging.DEBUG,
help='Debug level logging', action="store_const", dest="loglevel")
args = parser.parse_args()
# Convert output folder to an absolute path
if args.output and os.path.isdir(os.path.abspath(args.output)):
args.output = os.path.abspath(args.output)
return args
if __name__ == '__main__':
args = arg_parse()
logging.basicConfig(level=args.loglevel, format='%(message)s')
logging.info('\n{}'.format('#' * 80))
log_f = '{:<20s} {}'
logging.info(log_f.format('Run Time Stamp:', dt.datetime.now().isoformat(' ')))
logging.info(log_f.format('Script:', os.path.basename(sys.argv[0])))
main(output_folder=args.output, overwrite_flag=args.overwrite)
| [
"[email protected]"
]
| |
9bb59c34c03b8d36943b1d47bfe4981e476e6a7a | dc19e59cac871b172eb357499159b92fca81e5ca | /docs/toHTML.py | 2fa74dd72b05a9428d6b26b2a2bd5657a4b5a5b1 | [
"MIT"
]
| permissive | numython-rd/moro | ed3fe76b35ce190244503c40a445fbbd42143278 | 8d6e15ba21a0de7ec354ccbc23c38123a570904a | refs/heads/master | 2023-03-26T18:48:07.588409 | 2021-03-18T01:50:14 | 2021-03-18T01:50:14 | 300,115,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | # -*- coding: utf-8 -*-
import os
source = "source"
build = "."
instr = "sphinx-build -b html "+source+" "+build
os.system(instr)
os.startfile(build + r"\index.html")  # open the built docs (os.startfile is Windows-only)
| [
"[email protected]"
]
| |
9b69bc22bdfcfa2d1afe985f945375682c176ad1 | 57fab09b925d5ad305c7d3768c06e82e0b867d47 | /bag/reorder_po.py | e821d44e2e5d57009c4748129ada5ee0dee22408 | [
"MIT"
]
| permissive | nandoflorestan/bag | f6b042341cdf96812ae34320879a6b67fb6884c9 | 63f6fbd3e768bf55d79ac96964aa3bf7702f3f9a | refs/heads/master | 2023-08-04T23:43:27.961604 | 2023-07-31T15:12:44 | 2023-07-31T15:12:44 | 9,621,828 | 24 | 6 | null | 2015-07-17T05:10:23 | 2013-04-23T11:46:23 | Python | UTF-8 | Python | false | false | 1,266 | py | """Reorder the translations inside a .po file.
This script was written because transifex is messy and when you pull
translations from transifex, the order of the strings completely changes and
when you do a ``git diff`` you cannot make sense of the alterations.
It is even hard to see whether any translations have been lost.
But if you always reorder the .po after pulling from transifex, then
the diff will be readable and the version history will make sense.
"""
from argh import ArghParser, arg
from pathlib import Path
from polib import pofile
@arg("path", help=".po file to be sorted, or a directory containing .po files")
@arg("-e", "--encoding", default="utf-8", help=".po file encoding")
def reorder_po(path, encoding="utf-8"):
p = Path(path)
if p.is_dir():
        for po_path in p.glob("**/*.po"):  # recurse into subdirectories
            _reorder_one(str(po_path), encoding=encoding)
else:
_reorder_one(str(path), encoding=encoding)
def _reorder_one(path, encoding="utf-8"):
po = pofile(path, encoding=encoding)
po.sort()
po.save(path)
def _command():
# http://argh.readthedocs.org/en/latest/
parser = ArghParser(description=__doc__)
parser.set_default_command(reorder_po)
parser.dispatch()
if __name__ == "__main__":
_command()
| [
"[email protected]"
]
| |
7aa043d24cb175a85f93b7c12cd0cba2de709b61 | 5da2c116d3d0dc4f3811cec144c9f8b5a74afede | /lncrawl/sources/fanfiction.py | c29ccaba499a13167460d62aad29c76ad932190c | [
"Apache-2.0"
]
| permissive | NNTin/lightnovel-crawler | a08bd252f2e72f41f931f0b2165f906b64d33692 | 451e816ab03c8466be90f6f0b3eaa52d799140ce | refs/heads/master | 2021-06-23T12:07:43.668329 | 2021-04-25T01:51:26 | 2021-04-25T01:51:26 | 361,695,538 | 2 | 0 | Apache-2.0 | 2021-04-26T16:48:21 | 2021-04-26T09:40:46 | null | UTF-8 | Python | false | false | 3,425 | py | # -*- coding: utf-8 -*-
import logging
import re
from urllib.parse import urlparse
from ..utils.crawler import Crawler
logger = logging.getLogger(__name__)
chapter_url = 'https://www.fanfiction.net/s/%s/%s'
search_url = 'https://www.fanfiction.net/search/?keywords=%s&type=story&match=title&ready=1&categoryid=202'
class FanFictionCrawler(Crawler):
base_url = 'https://www.fanfiction.net/'
def search_novel(self, query):
query = query.lower().replace(' ', '+')
soup = self.get_soup(search_url % query)
results = []
for div in soup.select('#content_wrapper .z-list')[:25]:
a = div.select_one('a.stitle')
a.select_one('img').decompose()
info = div.select_one('.xgray').text.strip()
chapters = re.findall(r'Chapters: \d+', info)[0]
origin_book = re.findall(r'^.+Rated:', info)[0][:-9]
writer = div.select_one('a[href*="/u/"]').text.strip()
results.append({
'title': a.text.strip(),
'url': self.absolute_url(a['href']),
'info': '%s | %s | By, %s' % (origin_book, chapters, writer)
})
# end for
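        # Each entry is shaped like (illustrative values only):
        # {'title': '...', 'url': 'https://www.fanfiction.net/s/123/1/',
        #  'info': 'Origin Book | Chapters: 12 | By, AuthorName'}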
return results
# end def
def read_novel_info(self):
        '''Get novel title, author, cover, etc.'''
logger.debug('Visiting %s', self.novel_url)
soup = self.get_soup(self.novel_url)
self.novel_title = soup.select_one(
'#profile_top b.xcontrast_txt, #content b').text.strip()
logger.info('Novel title: %s', self.novel_title)
possible_image = soup.select_one('#profile_top img.cimage')
if possible_image:
self.novel_cover = self.absolute_url(possible_image['src'])
# end if
logger.info('Novel cover: %s', self.novel_cover)
self.novel_author = soup.select_one(
'#profile_top, #content').select_one('a[href*="/u/"]').text.strip()
logger.info('Novel author: %s', self.novel_author)
self.novel_id = urlparse(self.novel_url).path.split('/')[2]
logger.info('Novel id: %s', self.novel_id)
if soup.select_one('#pre_story_links'):
origin_book = soup.select('#pre_story_links a')[-1]
self.volumes.append({
'id': 1,
'title': origin_book.text.strip(),
})
else:
self.volumes.append({'id': 1})
# end if
chapter_select = soup.select_one('#chap_select, select#jump')
if chapter_select:
for option in chapter_select.select('option'):
self.chapters.append({
'volume': 1,
'id': int(option['value']),
'title': option.text.strip(),
'url': chapter_url % (self.novel_id, option['value']),
})
# end for
else:
self.chapters.append({
'id': 1,
'volume': 1,
'url': self.novel_url,
})
# end if
# end def
def download_chapter_body(self, chapter):
'''Download body of a single chapter and return as clean html format.'''
logger.info('Downloading %s', chapter['url'])
soup = self.get_soup(chapter['url'])
contents = soup.select_one('#storytext, #storycontent')
return str(contents)
# end def
# end class
| [
"[email protected]"
]
| |
02f21d9e9666ff708866722614bcd616ca5d00fc | 9ecb6a1d3a71e7f87f3784af6b808f23a2abe348 | /spinningup/spinup/algos/td3/core.py | b9e2b6ad2e3dcfd388dc765ca58d71b8d7a6a6b4 | [
"MIT"
]
| permissive | HumanCompatibleAI/interactive-behaviour-design | 13ae305b39d29595e8fd5907f8d9e9fa6c2efc16 | 226db7a55d64ce15edfb8d7b3352c7bf7b81b533 | refs/heads/master | 2020-05-02T16:54:02.232639 | 2019-08-08T14:29:11 | 2019-08-08T14:29:11 | 178,082,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import numpy as np
import tensorflow as tf
def placeholder(dim=None):
return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))
def placeholders(*args):
return [placeholder(dim) for dim in args]
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation, kernel_initializer=tf.initializers.orthogonal(gain=0.01))
def get_vars(scope):
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
"""
Actor-Critics
"""
def mlp_actor_critic(x, a, hidden_sizes=(400,300), activation=tf.nn.relu,
output_activation=tf.tanh, action_space=None):
act_dim = a.shape.as_list()[-1]
act_limit = action_space.high[0]
with tf.variable_scope('pi'):
pi = act_limit * mlp(x, list(hidden_sizes)+[act_dim], activation, output_activation)
with tf.variable_scope('q1'):
q1 = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
with tf.variable_scope('q2'):
q2 = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
with tf.variable_scope('q1', reuse=True):
q1_pi = tf.squeeze(mlp(tf.concat([x,pi], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
return pi, q1, q2, q1_pi | [
"[email protected]"
]
| |
f29c1c8d352d400edd2c54e09c41c0ff74480335 | 2761a7b1b89c3bc250d5848c0fb2bde23a46989c | /costom_tfp_model_/bayesian/train.py | 732910532341d2d516189d906694cc89e4632fb7 | []
| no_license | mahdisharifloo/tensorflow_lite_projects | 9daa051297395903f788ed2841c7b7d3406c284e | 0f178b484ed667581e5edc882339c4699a5d0378 | refs/heads/master | 2022-11-29T05:34:29.525744 | 2020-08-12T20:26:40 | 2020-08-12T20:26:40 | 284,647,945 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,877 | py | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import itertools
import tensorflow_probability as tfp
import tensorflow as tf
import tensorflow.keras as keras
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from tensorflow.keras.models import Sequential
from tensorflow.keras import optimizers
from tensorflow.keras.preprocessing import image
from tensorflow.keras.layers import Dropout, Flatten, Dense
from tensorflow.keras import applications
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math
import datetime
import time
# load the bottleneck features saved earlier
train_data = np.load('bottleneck_features_train.npy')
test_data = np.load('bottleneck_features_test.npy')
# get the class labels for the training data, in the original order
train_labels = generator_top.classes
test_labels = val_generator_top.classes
# convert the training labels to categorical vectors
train_labels = to_categorical(train_labels, num_classes=num_classes)
test_labels = to_categorical(test_labels, num_classes=num_classes)
start = datetime.datetime.now()
kernel_divergence_fn=lambda q, p, _: tfp.distributions.kl_divergence(q, p) / (train_data.shape[0] *1.0)
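# Flipout layers add their weight-posterior KL term to the model losses;
# dividing by the number of training examples puts the KL on a per-example
# scale, so the total loss approximates the negative ELBO over the dataset.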
model_vi = Sequential()
# model_vi.add(Flatten(input_shape=train_data.shape[1:]))
model_vi.add(tf.keras.layers.InputLayer(input_shape=train_data.shape[1:], name="input"))
model_vi.add(tfp.layers.Convolution2DFlipout(8,kernel_size=(3,3),padding="same", activation = 'relu', kernel_divergence_fn=kernel_divergence_fn,input_shape=(32,32,3)))
model_vi.add(tfp.layers.Convolution2DFlipout(8,kernel_size=(3,3),padding="same", activation = 'relu', kernel_divergence_fn=kernel_divergence_fn))
model_vi.add(tf.keras.layers.MaxPooling2D((2,2)))
model_vi.add(tfp.layers.Convolution2DFlipout(16,kernel_size=(3,3),padding="same", activation = 'relu', kernel_divergence_fn=kernel_divergence_fn))
model_vi.add(tfp.layers.Convolution2DFlipout(16,kernel_size=(3,3),padding="same", activation = 'relu', kernel_divergence_fn=kernel_divergence_fn))
model_vi.add(tf.keras.layers.MaxPooling2D((2,2)))
model_vi.add(tf.keras.layers.Flatten())
model_vi.add(tfp.layers.DenseFlipout(100, activation = 'relu', kernel_divergence_fn=kernel_divergence_fn))
model_vi.add(tfp.layers.DenseFlipout(100, activation = 'relu', kernel_divergence_fn=kernel_divergence_fn))
model_vi.add(tfp.layers.DenseFlipout(num_classes, activation = 'softmax', kernel_divergence_fn=kernel_divergence_fn))
model_vi.compile(loss='categorical_crossentropy', optimizer=optimizers.RMSprop(lr=1e-4), metrics=['accuracy'])
model_vi.summary()
history = model_vi.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(test_data, test_labels))
(eval_loss, eval_accuracy) = model_vi.evaluate(test_data, test_labels, batch_size=batch_size, verbose=1)
print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
print("[INFO] Loss: {}".format(eval_loss))
end= datetime.datetime.now()
elapsed= end-start
print ("Time: ", elapsed)
model_vi.save_weights(top_model_weights_path)
#Graphing our training and validation
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
| [
"[email protected]"
]
| |
98b6a0fc1ecd8d42cca81d839946e53a7f267439 | 8668830f34ce260565217ea3b49e090778780b44 | /coupon/admin.py | e13087784a9298c5d7e7cbf896f700f48fef0e77 | []
| no_license | wcirillo/ten | 72baf94da958b2ee6f34940c1fc3116660436762 | a780ccdc3350d4b5c7990c65d1af8d71060c62cc | refs/heads/master | 2016-09-06T13:39:03.966370 | 2015-07-02T12:37:36 | 2015-07-02T12:37:36 | 15,700,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,009 | py | """ Admin config for coupon app. """
#pylint: disable=W0612,W0613,R0201
from django import forms
from django.contrib import admin
from django.db import connection
from advertiser.models import Location
from common.custom_cleaning import AdminFormClean
from coupon.models import (Action, Coupon, CouponCode, CouponType, Offer,
Flyer, FlyerCoupon, FlyerSubdivision, FlyerPlacement,
FlyerPlacementSubdivision, FlyerSubject, Slot, SlotTimeFrame)
from market.models import Site
def make_approved(modeladmin, request, queryset):
""" Set is_approved for each instance in queryset. """
queryset.update(is_approved=True)
make_approved.short_description = "Mark these items approved"
def get_data(coupons):
""" Raw sql pull to populate the coupon admin interface. """
coupons = tuple(coupons.values_list('id', flat=True))
if len(coupons) == 0:
return None
cursor = connection.cursor()
cursor.execute("""
SELECT c.id,
COALESCE(c_a1.count, 0) AS view,
COALESCE(c_a2.count, 0) AS clicks,
COALESCE(c_a3.count, 0) AS prints,
COALESCE(c_a4.count, 0) AS texts,
COALESCE(c_a11.count, 0) AS blasted
FROM coupon_coupon c
LEFT JOIN coupon_couponaction c_a1
ON c_a1.coupon_id = c.id
AND c_a1.action_id = 1
LEFT JOIN coupon_couponaction c_a2
ON c_a2.coupon_id = c.id
AND c_a2.action_id = 2
LEFT JOIN coupon_couponaction c_a3
ON c_a3.coupon_id = c.id
AND c_a3.action_id = 3
LEFT JOIN coupon_couponaction c_a4
ON c_a4.coupon_id = c.id
AND c_a4.action_id = 4
LEFT JOIN coupon_couponaction c_a11
ON c_a11.coupon_id = c.id
AND c_a11.action_id = 11
WHERE c.id IN %s
GROUP BY c.id, c_a1.count, c_a2.count, c_a3.count, c_a4.count, c_a11.count
""", [coupons])
query = cursor.fetchall()
data = {}
for row in query:
data[row[0]] = row[1:]
return data
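# get_data returns e.g. {42: (10, 3, 1, 0, 5)} -- coupon id mapped to its
# (views, clicks, prints, texts, blasts) counts per the action ids joined
# above (illustrative values).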
class OfferAdmin(admin.ModelAdmin):
""" Admin config of Offer model. """
list_display = ('business', 'headline')
search_fields = ['headline', 'business__business_name',
'business__advertiser__site__name']
raw_id_fields = ('business',)
form = AdminFormClean
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "business__advertiser__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(OfferAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(OfferAdmin, self).queryset(request)
qs = Offer.objects.select_related().filter(id__in=qs
).defer('business__advertiser__site__envelope',
'business__advertiser__site__geom',
'business__advertiser__site__point')
return qs
class CouponForm(AdminFormClean):
""" Coupon Form in the admin. """
def __init__(self, *args, **kwargs):
"""
Customize widgets to show related objects only.
Better usability, much better performance.
"""
super(CouponForm, self).__init__(*args, **kwargs)
try:
offers = Offer.objects.filter(
business=self.instance.offer.business
)
except AttributeError:
offers = []
self.fields['offer'].widget.choices = [
(choice.id, choice.headline) for choice in offers
]
try:
locations = Location.objects.filter(
business=self.instance.offer.business
)
except AttributeError:
locations = []
self.fields['location'].widget.choices = [
(choice.id, choice) for choice in locations
]
class CouponAdmin(admin.ModelAdmin):
""" Admin config for coupon model. """
actions = [make_approved]
date_hierarchy = 'start_date'
filter_horizontal = ('location', 'redemption_method',
'default_restrictions',)
form = CouponForm
list_display = ('offer', 'business', 'coupon_type', 'advertiser_site',
'start_date', 'expiration_date', 'approved', 'metrics')
list_filter = ('start_date', 'coupon_type', 'is_approved')
search_fields = ['id', 'offer__headline', 'offer__business__business_name',
'offer__business__advertiser__site__name']
save_on_top = True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "offer__business__advertiser__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(CouponAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
coupons = super(CouponAdmin, self).queryset(request)
self.data = get_data(coupons)
return coupons.select_related('offer', 'offer__business', 'coupon_type',
'offer__business__advertiser__site__name'
).defer('offer__business__advertiser__site__geom',
'offer__business__advertiser__site__envelope',
'offer__business__advertiser__site__point')
def approved(self, obj):
""" Long way to get a short description. """
return bool(obj.is_approved)
approved.boolean = True
approved.short_description = u'Ap?'
def business(self, obj):
""" The business this coupon is for. """
business = obj.offer.business
return ("%s" % (business))
business.admin_order_field = 'offer__business__business_name'
def advertiser_site(self, obj):
""" The site the advertiser is related to. """
advertiser_site = obj.offer.business.advertiser.site.name
return ("%s" % (advertiser_site))
advertiser_site.admin_order_field = 'offer__business__advertiser__site__name'
def metrics(self, obj):
""" Display various coupon action datapoints. """
        if obj.coupon_type.id in (1, 7):
return ""
try:
row = self.data[obj.id]
except KeyError:
row = ('', '', '', '', '')
return ("%s / %s / %s / %s / %s" %
(row[0], row[1], row[2], row[3], row[4]))
metrics.short_description = u'View/Click/Print/SMS/Blast'
class CouponCodeAdmin(admin.ModelAdmin):
""" Admin config for CouponCode model. """
list_display = ('code', 'coupon',)
raw_id_fields = ('coupon',)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "coupon__offer__business__advertiser__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(CouponCodeAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(CouponCodeAdmin, self).queryset(request)
qs = CouponCode.objects.select_related().filter(id__in=qs
).defer('coupon__offer__business__advertiser__site__envelope',
'coupon__offer__business__advertiser__site__geom',
'coupon__offer__business__advertiser__site__point')
return qs
class FlyerCouponInline(admin.StackedInline):
""" Inline for relating coupons to flyers. """
model = FlyerCoupon
extra = 0
max_num = 10
raw_id_fields = ("coupon",)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "flyer__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(FlyerCouponInline, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(FlyerCouponInline, self).queryset(request)
qs = FlyerCoupon.objects.select_related().filter(id__in=qs).defer(
'flyer__site__envelope', 'flyer__site__geom', 'flyer__site__point')
return qs
class FlyerAdmin(admin.ModelAdmin):
""" Admin config of Flyer model. """
actions = [make_approved]
date_hierarchy = 'send_date'
inlines = [FlyerCouponInline,]
list_display = ('__unicode__', 'send_date', 'send_status', 'is_approved',
'num_consumers')
list_filter = ('send_date', 'send_status', 'is_approved',
'site__name')
list_select_related = True
save_on_top = True
form = AdminFormClean
def created(self, obj):
""" Pretty date. """
return obj.create_datetime.strftime('%b %d %Y %H:%M')
created.admin_order_field = 'create_datetime'
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(FlyerAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(FlyerAdmin, self).queryset(request)
self.data = Flyer.objects.select_related().filter(id__in=qs).defer(
'site__geom', 'site__envelope', 'site__point')
return self.data
class FlyerSubjectAdmin(admin.ModelAdmin):
""" Admin config for FlyerSubject model. """
date_hierarchy = 'send_date'
list_display = ('week', 'send_date', 'title')
list_select_related = True
form = AdminFormClean
class FlyerSubdivisionAdmin(admin.ModelAdmin):
""" Admin config for FlyerSubdivision model. """
list_display = ('id', 'flyer', 'geolocation_object')
list_select_related = True
readonly_fields = ('geolocation_object',)
class FlyerPlacementSubdivisionAdmin(admin.ModelAdmin):
""" Admin config for FlyerPlacementSubdivision model. """
list_display = ('id', 'flyer_placement', 'geolocation_object')
list_select_related = True
readonly_fields = ('geolocation_object',)
class FlyerPlacementSubdivisionInline(admin.StackedInline):
""" Inline for relating coupons to flyers. """
model = FlyerPlacementSubdivision
extra = 1
readonly_fields = ('geolocation_object',)
class FlyerPlacementAdmin(admin.ModelAdmin):
""" Admin config for FlyerPlacement model. """
inlines = [FlyerPlacementSubdivisionInline,]
list_display = ('site', 'send_date', 'slot')
list_filter = ('site__name', 'send_date')
class SlotTimeFrameInline(admin.StackedInline):
""" Slot Inline class for the Slot admin"""
model = SlotTimeFrame
extra = 0
raw_id_fields = ("coupon",)
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "slot__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(SlotTimeFrameInline, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(SlotTimeFrameInline, self).queryset(request)
qs = SlotTimeFrame.objects.select_related().filter(id__in=qs
).defer('slot__site__envelope',
'slot__site__geom', 'slot__site__point')
return qs
class SlotTimeFrameForm(forms.ModelForm):
""" SlotTimeFrame change form in the admin. """
def __init__(self, *args, **kwargs):
super(SlotTimeFrameForm, self).__init__(*args, **kwargs)
try:
coupons = Coupon.objects.filter(
offer__business=self.instance.slot.business
).defer('offer__business__advertiser__site__envelope',
'offer__business__advertiser__site__geom',
'offer__business__advertiser__site__point')
except AttributeError:
coupons = []
except Slot.DoesNotExist:
coupons = Coupon.objects.all().defer(
'offer__business__advertiser__site__envelope',
'offer__business__advertiser__site__geom',
'offer__business__advertiser__site__point')
self.fields['coupon'].widget.choices = [
(choice.id, choice.offer) for choice in coupons
]
class SlotTimeFrameAdmin(admin.ModelAdmin):
""" Admin config for SlotTimeFrame. """
date_hierarchy = 'start_datetime'
form = SlotTimeFrameForm
list_display = ('id', 'slot_site', 'slot_business', 'coupon_offer',
'start_datetime', 'end_datetime')
list_filter = ('slot__site__name',)
raw_id_fields = ("coupon", "slot")
search_fields = ['id', 'coupon__offer__headline',
'coupon__offer__business__business_name', 'slot__site__name']
def slot_site(self, obj):
""" The site the slot is related to. """
slot_site = obj.slot.site.name
return ("%s" % (slot_site))
slot_site.admin_order_field = 'slot__site__name'
def slot_business(self, obj):
""" The business the slot is related to. """
slot_business = obj.slot.business.business_name
return ("%s" % (slot_business))
slot_business.admin_order_field = 'slot__business__business_name'
def coupon_offer(self, obj):
""" The offer the slot timeframe is related to. """
coupon_offer = obj.coupon.offer.headline
return ("%s" % (coupon_offer))
coupon_offer.admin_order_field = 'coupon__offer__headline'
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "slot__site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(SlotTimeFrameAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(SlotTimeFrameAdmin, self).queryset(request)
self.data = SlotTimeFrame.objects.select_related().filter(id__in=qs
).defer('slot__site__envelope',
'slot__site__geom', 'slot__site__point')
return self.data
class SlotAdmin(admin.ModelAdmin):
""" Admin config for Slot model. """
date_hierarchy = 'start_date'
inlines = [SlotTimeFrameInline,]
list_display = ('id', 'site', 'business', 'renewal_rate', 'is_autorenew',
'start_date', 'end_date')
list_filter = ('site__name',)
search_fields = ['id', 'business__business_name', 'site__name']
raw_id_fields = ('business', 'parent_slot')
form = AdminFormClean
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "site":
kwargs["queryset"] = Site.objects.defer('envelope', 'geom', 'point')
return super(SlotAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
def queryset(self, request):
""" Customizes the queryset for improved performance. """
qs = super(SlotAdmin, self).queryset(request)
self.data = Slot.objects.select_related().filter(id__in=qs).defer(
'site__envelope', 'site__geom', 'site__point')
return self.data
admin.site.register(Action)
admin.site.register(Coupon, CouponAdmin)
admin.site.register(CouponCode, CouponCodeAdmin)
admin.site.register(CouponType)
admin.site.register(Offer, OfferAdmin)
admin.site.register(Flyer, FlyerAdmin)
admin.site.register(FlyerSubject, FlyerSubjectAdmin)
admin.site.register(FlyerSubdivision, FlyerSubdivisionAdmin)
admin.site.register(FlyerPlacement, FlyerPlacementAdmin)
admin.site.register(FlyerPlacementSubdivision, FlyerPlacementSubdivisionAdmin)
admin.site.register(Slot, SlotAdmin)
admin.site.register(SlotTimeFrame, SlotTimeFrameAdmin)
| [
"[email protected]"
]
| |
f7060334136865396f4d86572f5cd846a5112c05 | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/euler29.py | d9b80faeb8ae6bc4abadea9e824c11439db0308a | []
| no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | def euler29():
"""
    Consider all integer combinations of a^b for 2 <= a <= 5 and 2 <= b <= 5:
    2^2=4, 2^3=8, 2^4=16, 2^5=32
    3^2=9, 3^3=27, 3^4=81, 3^5=243
    4^2=16, 4^3=64, 4^4=256, 4^5=1024
    5^2=25, 5^3=125, 5^4=625, 5^5=3125
    If they are then placed in numerical order, with any repeats removed, we
    get the following sequence of 15 distinct terms:
    4, 8, 9, 16, 25, 27, 32, 64, 81, 125, 243, 256, 625, 1024, 3125
    How many distinct terms are in the sequence generated by a^b for
    2 <= a <= 100 and 2 <= b <= 100?
"""
pows = []
for a in range(2, 101):
for b in range(2, 101):
pows.append(a**b)
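    # duplicates such as 2**4 == 4**2 == 16 collapse when building the set below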
return len(set(pows))
if __name__ == "__main__":
print euler29()
| [
"[email protected]"
]
| |
21ac3eeda637128a5f315d8c4bcc6ebe54e963fd | b06b6ac432961322f5a55a4cf8ad9bba27622042 | /pages/migrations/0002_auto__add_field_page_header_image.py | 6e136301447e3fe351716caa165da6d8bfacd5d2 | [
"BSD-2-Clause"
]
| permissive | jpic/pescator | 97a4e2ba62485d3fa537c060876b7fd04d3286e4 | 0d3140ac4c8b07940c2f4753c6f8cb5ac95d8292 | refs/heads/master | 2021-01-01T15:45:16.610383 | 2013-11-29T19:03:20 | 2013-11-29T19:03:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,847 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Page.header_image'
db.add_column(u'pages_page', 'header_image',
self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Page.header_image'
db.delete_column(u'pages_page', 'header_image')
models = {
u'pages.block': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Block'},
'body': ('redactor.fields.RedactorField', [], {}),
'body_en': ('redactor.fields.RedactorField', [], {'null': 'True', 'blank': 'True'}),
'body_fr': ('redactor.fields.RedactorField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "u'pages/block.html'", 'max_length': '50'})
},
u'pages.page': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Page'},
'blocks': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['pages.Block']", 'symmetrical': 'False', 'blank': 'True'}),
'body': ('redactor.fields.RedactorField', [], {}),
'body_en': ('redactor.fields.RedactorField', [], {'null': 'True', 'blank': 'True'}),
'body_fr': ('redactor.fields.RedactorField', [], {'null': 'True', 'blank': 'True'}),
'header_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'photos': ('sortedm2m.fields.SortedManyToManyField', [], {'to': u"orm['pages.Photo']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'slug_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "u'pages/page_detail.html'", 'max_length': '50'})
},
u'pages.photo': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'Photo'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['pages'] | [
"[email protected]"
]
| |
aca6dc26331eb079509dee9b0cf54cd9b1ad2064 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_30302.py | a906fda52cb8473eaa1157dbc383627b70b68076 | []
| no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,837 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((352.476, 638.724, 582.291), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((360.245, 594.001, 530.72), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((362.02, 534.376, 474.55), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((260.763, 536.832, 570.55), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((400.919, 419.15, 323.421), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((362.689, 603.98, 553.31), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((362.902, 604.679, 554.71), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((385.236, 588.414, 559.155), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((408.55, 575.977, 568.593), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((416.28, 548.811, 568.149), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((414.417, 547.303, 596.059), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((408.453, 558.875, 620.91), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((361.849, 632.084, 558.034), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((460.895, 488.424, 680.522), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((478.101, 417.206, 492.149), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((478.101, 417.206, 492.149), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((467.648, 442.76, 481.622), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((455.318, 467.279, 472.073), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((442.374, 493.261, 474.971), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((432.524, 517.25, 487.733), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((421.465, 539.433, 502.011), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((405.747, 559.374, 515.768), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((478.264, 339.255, 627.619), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((322.506, 787.84, 421.064), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((401.17, 560.276, 479.165), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((401.17, 560.276, 479.165), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((372.799, 566.276, 475.294), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((343.894, 561.912, 472.368), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((329.697, 538.982, 484.141), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((298.217, 618.506, 574.727), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((356.832, 450.051, 399.963), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((333.861, 595.647, 541.972), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((333.281, 595.691, 542.131), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((337.683, 623.482, 541.292), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((363.552, 634.226, 538.396), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((382.69, 632.189, 559.26), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((398.411, 618.019, 578.407), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((414.815, 599.742, 592.786), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((429.518, 578.341, 604.747), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((431.783, 615.262, 527.931), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((425.852, 536.885, 682.177), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((402.265, 622.66, 493.199), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((381.687, 606.291, 495.733), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((337.092, 570.587, 503.818), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((292.665, 534.69, 508.972), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((253.962, 602.149, 531.025), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((261.609, 435.337, 499.567), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((385.955, 642.442, 513.305), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((390.52, 642.011, 485.528), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((382.032, 627.715, 462.808), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((362.185, 610.29, 452.993), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((347.864, 586.499, 448.623), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((341.113, 563.407, 433.793), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((356.516, 603.72, 502.695), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((325.684, 522.843, 364.971), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
]
| |
98d31e2bc1edb6dbcb1566b0a6c71cd60d65e3f1 | 922227eabe29f0a6779bd28ae037a46acbe06d6a | /apps/fsq/views.py | d44303731e3604fb369584f6849601bcbcb8116d | []
| no_license | rootart/geojam | 2acf6f0289c372e482998732faa31bee222b065a | 1ac99f5428ddae5c5e7f10c685075cf8a52843c8 | refs/heads/master | 2020-05-18T07:44:06.126567 | 2011-08-19T19:49:16 | 2011-08-19T19:49:16 | 2,235,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | from django.http import (
HttpResponseRedirect,
HttpResponse,
HttpResponseForbidden
)
from django.conf import settings
from django.contrib.auth import login, authenticate, logout
from django.core.urlresolvers import reverse
from django.contrib.gis.geos import *
import oauth2 as oauth
import urllib
try:
import simplejson as json
except ImportError:
import json
from core.models import Checkin
SERVER = getattr(settings, 'OAUTH_SERVER', 'foursquare.com')
AUTHENTICATE_URL = getattr(settings, 'OAUTH_REQUEST_TOKEN_URL', 'http://%s/oauth2/authenticate' % SERVER)
ACCESS_TOKEN_URL = getattr(settings, 'OAUTH_ACCESS_TOKEN_URL', 'http://%s/oauth2/access_token' % SERVER)
API_URL= "https://api.foursquare.com/v2/"
FOURSQUARE_CONSUMER_KEY = getattr(settings, 'FOURSQUARE_CONSUMER_KEY', 'YOUR_KEY')
FOURSQUARE_CONSUMER_SECRET = getattr(settings, 'FOURSQUARE_CONSUMER_SECRET', 'YOUR_SECRET')
def auth(request):
params = {
'response_type': 'code',
'client_id': FOURSQUARE_CONSUMER_KEY,
'redirect_uri': 'http://geojam.djangostars.com/auth/return/'
}
url = AUTHENTICATE_URL + "?" + urllib.urlencode(params)
return HttpResponseRedirect(url)
def logout_(request):
request.session.clear()
logout(request)
return HttpResponseRedirect(reverse('dashboard'))
def fetch_data(api, token):
link = API_URL + api + "?oauth_token=" + token
try:
f = urllib.urlopen(link)
except:
pass
else:
return json.loads(f.read())
def fetch_checkins(token):
api = 'users/self/checkins'
link = API_URL + api + "?oauth_token=" + token
try:
f = urllib.urlopen(link)
    except Exception:
        return  # API unreachable; leave existing checkins untouched
data = json.loads(f.read())
for item in data['response']['checkins']['items']:
geodata = Point(
item['venue']['location']['lng'],
item['venue']['location']['lat'])
# import pdb
# pdb.set_trace()
checkin = Checkin(
type = 2,
title = item['venue']['name'],
geodata = geodata
)
checkin.save()
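# OAuth2 authorization-code callback: exchange the ?code=... parameter for an
# access token, fetch the Foursquare profile via the API, log the Django user
# in, then import their recent checkins.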
def return_(request):
code = request.GET.get('code', None)
if code:
params = {
'client_id': FOURSQUARE_CONSUMER_KEY,
'client_secret': FOURSQUARE_CONSUMER_SECRET,
'grant_type': 'authorization_code',
'redirect_uri': 'http://geojam.djangostars.com/auth/return/',
'code': code,
}
params = urllib.urlencode(params)
f = urllib.urlopen(ACCESS_TOKEN_URL+"?"+params)
token = f.read()
token = json.loads(token)['access_token']
# Get user information using api request
data = fetch_data('users/self', token)
foursquare_user_data = data['response']['user']
user = authenticate(foursquare_user_data=foursquare_user_data,
access_token=token)
login(request, user)
request.session['token_%s' % user.username] = token
# Fetch last 100 checkins from foursquare
fetch_checkins(token)
return HttpResponseRedirect(reverse('dashboard'))
else:
return HttpResponseForbidden("Hei, Go away!")
| [
"[email protected]"
]
| |
8b5625755b9e41df88d4a4fa0ad12b81868a1b34 | 35a6f993b84afdd2c0ade094e0312b3aaa53294f | /src/tag_server/mit-cf/tag/views.py | c82f2d03e392253686f1b08a315d5390d5336484 | []
| no_license | MIT-repository/tag_server | 3266875ac8efee255b324eead419a62a0320f511 | 488b0582b1341d0a1d372681d57aa456b7d5c9d6 | refs/heads/master | 2022-12-09T00:21:41.601852 | 2020-08-19T12:24:01 | 2020-08-19T12:24:01 | 288,722,644 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,410 | py | from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
import os, glob, boto3
from .models import Song
from .DeepAudioClassification.main import predict
# Create your views here.
service_name = os.environ.get('service_name')
endpoint_url = os.environ.get('s3_url')
region_name = os.environ.get('reg')
access_key = os.environ.get('s3_key')
secret_key = os.environ.get('secret_key')
s3 = boto3.client(service_name, endpoint_url=endpoint_url, aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
base = os.path.dirname( os.path.abspath( __file__ ) )
master_path = base+'/DeepAudioClassification/Prediction/'
#master_path = '/'
def tag(requests, bucket,folder, name):
down_name = folder+'/'+name
local_file_path = master_path + 'Raw/{}'.format(name)
# try:
# song = Song.objects.get(name=name,bucket=bucket)
print("download :", local_file_path)
#except Exception as err:
#print(err)
s3.download_file(bucket, down_name, local_file_path)
#song = Song.objects.create(name=name, bucket=bucket, genre='test')
genre = predict()
os.remove(local_file_path)
    # os.remove does not expand wildcards; delete the generated slices one by one
    for slice_path in glob.glob(master_path + 'Slices/predict/*'):
        os.remove(slice_path)
return JsonResponse(
{
"bucket": bucket,
"name": name,
"genre": genre
},
safe=False, json_dumps_params={'ensure_ascii': False}
)
| [
"[email protected]"
]
| |
392292d26e820beee08966256dc3b7e7deabcff6 | 13b46582bb6bbfe08a2e24127198ded24e6c0ad3 | /server/security/migrations/0002_auto_20210103_2232.py | 6e941810356d692616f510085d017f85a27449ad | []
| no_license | dmetrosoft/seo-audits-toolkit | 9b12735d8345ef5075e87e6ea09440e01e32746f | c3e95fc4bf51d72e61c0507c14bd384d2368f475 | refs/heads/master | 2023-08-25T06:11:54.055464 | 2021-04-08T15:51:23 | 2021-04-08T15:51:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | # Generated by Django 3.1.4 on 2021-01-03 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('security', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='security_result',
old_name='pwa_score',
new_name='result',
),
migrations.AddField(
model_name='security',
name='score',
field=models.CharField(max_length=20, null=True),
),
migrations.AddField(
model_name='security_result',
name='score',
field=models.CharField(max_length=20, null=True),
),
]
| [
"[email protected]"
]
| |
0254a942224af44e67171f739c52b55757946efb | 84688ba401bb0d82f5345d7b2c1c91c1b82b8c41 | /notifications/migrations/0002_auto_20190726_2030.py | 88f0133b4bf298f0baf1018c1867724b1a9f4611 | []
| no_license | AbrahamMayowa/edupam | 14878df433f356b60c0bda700457123c6db8c734 | 97908dc56913d0a06fa0340c5985496935b9a097 | refs/heads/master | 2023-05-10T23:42:37.043219 | 2022-10-11T09:19:58 | 2022-10-11T09:19:58 | 167,404,444 | 0 | 0 | null | 2023-04-30T07:46:03 | 2019-01-24T17:02:44 | JavaScript | UTF-8 | Python | false | false | 1,124 | py | # Generated by Django 2.1.3 on 2019-07-26 20:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='thumpednotification',
name='is_comment_thump',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='thumpednotification',
name='created',
field=models.DateTimeField(auto_now_add=True, db_index=True),
),
migrations.AlterField(
model_name='thumpednotification',
name='target_id',
field=models.PositiveIntegerField(blank=True, db_index=True, null=True),
),
migrations.AlterField(
model_name='thumpednotification',
name='thumped_target',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='thumped_up_target', to='contenttypes.ContentType'),
),
]
| [
"[email protected]"
]
| |
6d0ad61beb6bbe67d3f27b262343a23f62135b59 | d9307c79b8f3bbe87e2589985ef2c2452b1329c1 | /trustonic/ida/scripts/tlApi.py | c7fd41e68bb7eb805b28942d80a930e833cbc629 | [
"MIT"
]
| permissive | H0K5/tee_research | 29a69efc635819137ba5e1beb806898be17bc1a7 | eab012c6e9df268a4471117a93025b90e4034e78 | refs/heads/master | 2022-02-17T21:08:41.983727 | 2019-09-29T15:54:19 | 2019-09-29T15:54:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,411 | py | #!/usr/bin/env python
#TlApi Func Auto-Renamer (c) B.Kerler 2019
#Licensed under MIT license
#IDA Pro plugin. Use MCLF Loader, let autoanalysis finish, then run this script for fun
tlapilist = {
0x1:"tlApiGetVersion",
0x2:"tlApiGetMobicoreVersion",
0x4:"tlApiExit",
0x6:"tlApiWaitNotification",
0x5:"tlApiLogvPrintf",
0x7:"tlApiNotify",
0x8:"tlApi_callDriver",
0x9:"tlApiWrapObjectExt",
0xA:"tlApiUnwrapObjectExt",
0xB:"tlApiGetSuid",
0xC:"tlApi_callDriverEx",
0xD:"tlApiCrAbort",
0xE:"tlApiRandomGenerateData",
0xF:"tlApiGenerateKeyPair",
0x10:"tlApiCipherInitWithData",
0x11:"tlApiCipherUpdate",
0x12:"tlApiCipherDoFinal",
0x13:"tlApiSignatureInitWithData",
0x14:"tlApiSignatureUpdate",
0x15:"tlApiSignatureSign",
0x16:"tlApiSignatureVerify",
0x17:"tlApiMessageDigestInitWithData",
0x18:"tlApiMessageDigestUpdate",
0x19:"tlApiMessageDigestDoFinal",
0x1A:"tlApiGetVirtMemType",
0x1B:"tlApiDeriveKey",
0x1C:"tlApiMalloc",
0x1D:"tlApiRealloc",
0x1E:"tlApiFree",
0x55:"tlApiEndorse",
0x56:"tlApiTuiGetScreenInfo",
0x57:"tlApiTuiOpenSession",
0x58:"tlApiTuiCloseSession",
0x59:"tlApiTuiSetImage",
0x5A:"tlApiTuiGetTouchEvent",
0x5D:"tlApiDrmOpenSession",
0x5E:"tlApiDrmCloseSession",
0x5F:"tlApiDrmCheckLink",
0x62:"tlApiGetSecureTimestamp",
0x64:"tlApiAddEntropy",
0x65:"tlApiCreateHeap",
0x68:"TEE_TBase_CloseSessions",
0x69:"TEE_TBase_TakeIdentity",
0x6A:"TEE_TBase_RevertIdentity",
0x6B:"TEE_TBase_DeleteAllObjects"
}
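# Each key is the immediate loaded (via "movs") just before a stub enters the
# tlApi dispatcher; get_tlapi_num() below recovers that immediate so the stub
# can be renamed to the matching tlApi function.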
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def search_ea(sig, segment="", callback=None):
eas=[]
if segment!="":
seg = idaapi.get_segm_by_name(segment)
if not seg:
return
ea, maxea = seg.startEA, seg.endEA
count = 0
while ea != idaapi.BADADDR:
ea = idaapi.find_binary(ea, maxea, sig, 16, idaapi.SEARCH_DOWN)
if ea != idaapi.BADADDR:
count = count + 1
if callback!=None:
callback(ea)
else:
eas.append(ea)
ea += 2
else:
for seg in Segments():
ea=SegStart(seg)
maxea=SegEnd(ea)
count = 0
while ea != idaapi.BADADDR:
ea = idaapi.find_binary(ea, maxea, sig, 16, idaapi.SEARCH_DOWN)
if ea != idaapi.BADADDR:
count = count + 1
if callback!=None:
callback(ea)
else:
eas.append(ea)
ea += 2
return eas
def generatefunc(ea,name):
idc.MakeCode(ea)
idc.MakeFunction(ea)
idc.MakeNameEx(ea, name, idc.SN_NOWARN)
logger.debug("Rename %x:%s" % (ea,name))
def addr_to_bhex(ea):
    ea = '%08x' % ea  # zero-pad to 8 hex digits so the byte slices below stay aligned
return ea[6:8]+" "+ea[4:6]+" "+ea[2:4]+" "+ea[0:2]
def get_tlapi_num(addr):
i=addr
while (i<addr+100):
op = GetMnem(i).lower()
if op in ("bx", "blx", "ret"):
return -1
if op=="movs":
opnd=GetOpnd(i, 1)
return int(opnd[1:],16)
i = idc.NextHead(i)
return -1
def getcaller(ea):
pea=[]
for addr in XrefsTo(ea, flags=0):
pea.append(GetFunctionAttr(addr.frm, idc.FUNCATTR_START))
return pea
def get_string(addr):
out = ""
while True:
if Byte(addr) != 0:
out += chr(Byte(addr))
else:
break
addr += 1
return out
def tlapicallback(ea):
val=get_tlapi_num(ea)
if val in tlapilist:
name=tlapilist[val]
paddr = idc.PrevHead(ea)
pop=GetMnem(paddr).lower()
if not pop in ("bx","blx","pop","ret"):
ea = GetFunctionAttr(ea, idc.FUNCATTR_START)
print("[TlApi Call] %08X:%s" % (ea, name))
generatefunc(ea,name)
if val==1:
generatefunc(getcaller(ea)[0],"tlMain")
elif val==5:
pea=getcaller(ea)[0]
generatefunc(pea,"tlApiLogPrintf")
for sea in XrefsTo(pea, flags=0):
mea=sea.frm
for pos in range(0,0x100):
mea = idc.PrevHead(mea)
ppop=GetMnem(mea).lower()
if ppop in ("mov","movw","adr"):
fopnd=GetOpnd(mea,0).lower()
found=False
if fopnd=="r0":
ropnd=GetOpnd(mea,1).lower()
#print(ropnd)
if ropnd=="#0":
continue
if "#0x" in ropnd:
op_plain_offset(mea, 1, 0)
ropnd=GetOpnd(mea,1).lower()
found=True
if "#a" in ropnd:
opv=GetOperandValue(mea,1)
pstring=get_string(opv)
#print(pstring)
#MTK here be lions
if ("[" in pstring) and ("]" in pstring):
idx=pstring.index("[")
if idx==0:
funcname=pstring[idx+1:]
funcname=funcname[:funcname.index("]")]
if not funcname.lower() in ("key","key1","ki"):
if not " " in funcname:
fea=GetFunctionAttr(mea,idc.FUNCATTR_START)
                                                if fea != 0xFFFFFFFF:  # GetFunctionAttr returns BADADDR on failure
print("0x%08x:%s" % (fea,funcname))
generatefunc(fea,funcname)
if ropnd[0]=="a":
opv=GetOperandValue(mea,1)
#pstring=get_string(opv) #Uncomment to see printf for samsung
#print("%s:0x%08X" % (pstring,mea)) #Uncomment me as well
                        if found:
break
return 0
for opcode in ("2D E9 FF","2D E9 F0","F0 B5","00 B5","30 B5"): #Make sure undefined funcs are defined
funcs=search_ea(opcode,"",None)
for ea in funcs:
idc.MakeCode(ea)
idc.MakeFunction(ea)
for addr in XrefsTo(0x108C, flags=0): #References to tlApi
tlapicallback(addr.frm)
#Here be Samsungs (s6, s7)
kprintfs=search_ea("0F B4 1C B5 0C 00 07 AA 00 90 01 D0","",None) #log_I
for ea in kprintfs:
generatefunc(ea,"LOG_I")
for addr in XrefsTo(ea, flags=0):
subea=addr.frm
break
break
#Here be Samsungs (s6, s7)
strlens=search_ea("42 1C 02 E0 10 F8 01 1B 69 B1","",None) #strlen
for ea in strlens:
generatefunc(ea,"strlen")
for addr in XrefsTo(ea, flags=0):
subea=addr.frm
nea=subea
pea=subea
for i in range(0,20):
nea = idc.NextHead(nea)
ppop=GetMnem(nea).lower()
if ppop in ("ldr"):
fopnd=GetOpnd(nea,0).lower()
if fopnd=="r1":
nnd=GetOpnd(nea,1).lower()
if nnd[:2]=="=a":
opv=GetOperandValue(nea,1)
sopv=Dword(opv)
#print(hex(sopv))
pstring=get_string(sopv)
#print("0x%08X:%s" % (nea,pstring)) #Here we see all command references from Samsung
for x in range(0,20):
pea=idc.PrevHead(pea)
ppop=GetMnem(pea).lower()
if ppop in ("bl","blx"):
sop=GetOperandValue(pea,0)
if "(" in pstring and ")" in pstring:
funcname=pstring[:pstring.index("(")]
print("[Samsung Func Ref] 0x%08X:%s" % (sop,funcname))
generatefunc(sop,funcname)
break
break
break
| [
"[email protected]"
]
| |
2d7d3ae843ecbf8f13ec880f6cb812646381f7ff | 1c0509a06cec726735048f00f63d2529f5e43ce6 | /code_physicians_france/data_acquisition/build_ameli_db/build_df_gyneco_75_2014.py | 0bc4cb069807b6f16c706b917534cc019da36132 | []
| no_license | etiennecha/master_code | e99c62e93aa052a66d4cdd3f3e3aa25a3aec4880 | 48821f6c854a1c6aa05cf81b653b3b757212b6f8 | refs/heads/master | 2021-01-23T14:35:45.904595 | 2018-03-11T18:57:38 | 2018-03-11T18:57:38 | 16,312,906 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,925 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import add_to_path
from add_to_path import path_data
from generic_ameli import *
import re
import time
from collections import Counter
import pprint
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels as sm
import statsmodels.formula.api as smf
path_source_2014 = os.path.join(path_data,
u'data_ameli',
u'data_source',
u'ameli_2014')
path_built = os.path.join(path_data,
u'data_ameli',
'data_built')
path_built_csv = os.path.join(path_built, 'data_csv')
path_built_json = os.path.join(path_built, 'data_json')
# LOAD DATA
file_extension = u'gynecologue-medical_75'
dict_physicians = dec_json(os.path.join(path_source_2014,
u'dict_%s' %file_extension))
# Content for each physician: [ls_address_name, ls_places, ls_infos, ls_actes]
# Explore field contents (all are lists, None are not counted)
ls_dict_counters = [{} for i in dict_physicians[dict_physicians.keys()[0]]]
for id_physician, ls_physician in dict_physicians.items():
for i, field in enumerate(ls_physician):
if field:
ls_dict_counters[i].setdefault(len(field), []).append(id_physician)
print u'\nComponents by diversity of contents'
for i, dict_counters in enumerate(ls_dict_counters):
print 'field', i, ':', [(k, len(v)) for k,v in dict_counters.items()]
# Clean (0) name-address-phone component and (2) info component (checked always 3 same items)
ls_formatted = []
for id_physician, ls_physician in dict_physicians.items():
# e.g. u"M\xe9decin g\xe9n\xe9raliste \u2013 Acupuncteur en mode d'exercice exclusif"
ind_name = None # breaks if following does not match
for i, elt in enumerate(ls_physician[0]):
if re.match(u'Médecin gynécologue', elt):
ind_name = i
ls_name = ls_physician[0][:ind_name]
ls_address_phone = ls_physician[0][ind_name + 1:]
if re.match(u'0[0-9]{9}$', ls_address_phone[-1]):
phone = ls_address_phone[-1]
ls_address = ls_address_phone[:-1]
else:
ls_address = ls_address_phone
phone = None
# specific to paris for now!
if not re.match(u'75[0-9]{3}.*', ls_address[-1]):
print 'Problem end of address, no zip:', ls_address
# todo: further improve (not ok for geocoding now)
if not re.match(u'[0-9]+', ls_address[-2]):
print 'Want to keep more than last two items?', ls_address
ls_address = ls_address [-3:]
else:
ls_address = ls_address[-2:]
dict_physicians[id_physician][0] = [ls_name, phone, ls_address]
dict_physicians[id_physician][2] = [x for ls_x in ls_physician[2] for x in ls_x]
# Check addresses with more than 2 components
# Standard extraction: ls_address[0] => street, ls_address[-1] => zip_city
# Fix if would yield bad result
for k,v in dict_physicians.items():
if len(v[0][2]) != 2:
print v[0][2]
# Clean (1) multiple location component
ls_nb_other_locations = [len(v[1]) if v[1] else 0 for k,v in dict_physicians.items()]
print u'\nPhysicians with multiple locations'
print pd.Series(ls_nb_other_locations).value_counts()
# build mapping between id_physicians: how to work with it?
dict_other_locations = {k:[re.search('autre-lieu-(.*?)-', x[0]).group(1) for x in v[1]]\
if v[1] else [] for k,v in dict_physicians.items()}
# check those not available i.e. a priori other location(s?) not in paris
dict_not_paris = {}
for k,v in dict_other_locations.items():
for x in v:
if x not in dict_other_locations.keys():
dict_not_paris.setdefault(k, []).append(x)
# Can NOT safely keep only first component as name of services i.e. service[0][0][0]
ls_secondary_title = []
ls_all_physician_services = []
for id_physician, ls_physician in dict_physicians.items():
ls_physician_services = []
if ls_physician[3]:
for service in ls_physician[3]:
# designed to break if not sure
title, ind_title_service = None, None
if service[0][0] == ['Consultation']:
title = u'Consultation'
else:
for i, x in enumerate(service[0][0]):
if (x == u'En savoir plus') |\
(x == u"Qu'est-ce que c'est?"):
ind_title_service = i
break
title = ' '.join(service[0][0][:ind_title_service])
rembt = service[0][1]
ls_secondary_title.append(service[0][2])
ls_tarifs = zip(*[[' '.join(ls_elts) for ls_elts in ls_elt] for ls_elt in service[1]])
ls_physician_services.append([title, rembt, ls_tarifs])
dict_physicians[id_physician][3] = ls_physician_services
# Clean (3) services/prices component
ls_unique_services = []
for id_physician, ls_physician in dict_physicians.items():
if ls_physician[3]:
for service in ls_physician[3]:
ls_unique_services.append(service[0])
ls_unique_services = list(set(ls_unique_services))
# ls_unique_services = [service[0] for service in ls_unique_services]
# Seems ok: stats desc about service available and price (vs. sec 1. sec.2...)
ls_consultations = []
ls_consultations_prices = []
for id_physician, ls_physician in dict_physicians.items():
avg_price = None
for service in ls_physician[3]:
if service[0] == u'Consultation' and service[2]:
            ls_consultations.append((id_physician, service[2])) # was (i, ...): 'i' is a stale index from an earlier loop
if len(service[2]) == 1:
# ls_prices = re.findall(u'[0-9]{2,3}\u20ac', service[2][0][1])
ls_prices = re.findall(u'[0-9]{2,4}(?:,[0-9]{0,2})?\u20ac', service[2][0][1])
else:
# ls_prices = re.findall(u'[0-9]{2,3}\u20ac', service[2][1][1])
ls_prices = re.findall(u'[0-9]{2,4}(?:,[0-9]{0,2})?\u20ac', service[2][1][1])
ls_consultations_prices.append(ls_prices)
avg_price = np.mean(map(lambda x: float(x.rstrip(u'\u20ac').replace(u',', u'.')), ls_prices))
ls_consultations_avg = [np.mean(map(lambda x:\
float(x.rstrip(u'\u20ac').replace(u',', u'.')),
ls_prices))\
for ls_prices in ls_consultations_prices]
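# (added illustration) the price regex used above matches amounts of 2-4
# digits with an optional decimal part followed by a euro sign, e.g.:
assert re.findall(u'[0-9]{2,4}(?:,[0-9]{0,2})?\u20ac',
                  u'Tarif: 70,50\u20ac ou 85\u20ac') == [u'70,50\u20ac', u'85\u20ac']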
# BUILD DF PHYSICIANS
ls_rows_physicians = []
for id_physician, ls_physician in dict_physicians.items():
ls_physician_info = [id_physician] +\
ls_physician[0][0] +\
ls_physician[0][2][0:1]+\
ls_physician[0][2][-1:] +\
ls_physician[2]
# prices
ls_physician_prices = [None for i in ls_unique_services]
for service in ls_physician[3]:
service_ind = ls_unique_services.index(service[0])
if service[2]:
if len(service[2]) == 1:
# ls_service_prices = re.findall(u'[0-9]{2,3}\u20ac', service[2][0][1])
ls_service_prices = re.findall(u'[0-9]{2,4}(?:,[0-9]{0,2})?\u20ac', service[2][0][1])
else:
# ls_service_prices = re.findall(u'[0-9]{2,3}\u20ac', service[2][1][1])
ls_service_prices = re.findall(u'[0-9]{2,4}(?:,[0-9]{0,2})?\u20ac', service[2][1][1])
avg_price = np.mean(map(lambda x: float(x.rstrip(u'\u20ac').replace(u',', u'.')),
ls_service_prices))
ls_physician_prices[service_ind] = avg_price
ls_rows_physicians.append(ls_physician_info + ls_physician_prices)
columns = ['id_physician', 'gender', 'name', 'surname', 'street',
'zip_city', 'convention', 'carte_vitale', 'status'] + ls_unique_services
df_physicians = pd.DataFrame(ls_rows_physicians, columns = columns)
df_physicians.set_index('id_physician', inplace= True)
print u'\nPhysician dataframe'
print df_physicians.info() # note: info() seems to display the column count only when an index is set
#print df_physicians[['gender', 'name', 'surname', 'street', 'zip_city',
# 'convention', 'carte_vitale', 'status', 'Consultation']].to_string()
# CLEAN DF TO IMPROVE DISPLAY
# overview of fields to be standardized
for field in ['gender', 'zip_city', 'convention', 'carte_vitale', 'status']:
print u'\n', field
print df_physicians[field].value_counts()
# convention
dict_convention_rep = {u"Conventionné secteur 2" : u'2',
u"Conventionné secteur 1" : u'1',
u"Conventionné secteur 2 avec contrat d'accès aux soins" : u'2 AS',
u"Conventionné secteur 1 avec droit permanent à dépassement" : u'1 DPD',
u"Conventionné secteur 1 avec contrat d'accès aux soins" : u'1 AS',
u"Conventionn\xe9 secteur 1 avec droit \xe0 d\xe9passement permanent"+\
u"et contrat d'acc\xe8s aux soins" : u"1 DPD AS",
u"Non conventionné" : u'NC'}
df_physicians['convention'] =\
df_physicians['convention'].apply(
lambda x: dict_convention_rep.get(x, None))
# carte_vitale
dict_carte_vitale_rep = {u"Carte Vitale : Oui" : u'oui',
u"Carte Vitale : Non" : u'non'}
df_physicians['carte_vitale'] =\
df_physicians['carte_vitale'].apply(
lambda x: dict_carte_vitale_rep.get(x, None))
# status
dict_status_rep =\
{u"Temps partagé entre activité libérale et activité hospitalière" : u'Mixte H',
u"Libéral intégral": u"Liberal",
u"Temps partagé entre activité libérale et activité salariée (hors hôpital)" : u'Mixte NH',
u"Hospitalier avec activité libérale au sein de l'hôpital" : u"Hopital L"}
df_physicians['status'] =\
df_physicians['status'].apply(
lambda x: dict_status_rep.get(x, None))
# Some standardization of service columns
df_physicians.rename(\
columns = {u'Consultation' : 'consultation',
u"Pose d'un stérilet" : "sterilet"}, inplace = True)
# zip_city
def standardize_string(dict_search_replace, str_to_std):
for k,v in dict_search_replace.items():
if re.search(k, str_to_std):
return v
return str_to_std
# not sure if bad in case of geocoding to replace 75116 by 75016: check
dict_zip_city_rep = {u'CEDEX 14' : u'75014 PARIS',
u'CEDEX 12' : u'75012 PARIS',
u'PARIS CEDEX 20': u'75020 PARIS',
u'PARIS CEDEX 10' : u'75010 PARIS',
u'75116 PARIS' : '75016 PARIS'}
df_physicians['zip_city'] =\
df_physicians['zip_city'].apply(
lambda x: standardize_string(dict_zip_city_rep, x))
# df_physicians = df_physicians[df_physicians['zip_city'] != '75175 ROUEN CEDEX'].copy()
df_physicians['zip'] = df_physicians['zip_city'].str.slice(stop = 5)
df_physicians['city'] = df_physicians['zip_city'].str.slice(start = 5).str.strip()
df_physicians['CODGEO'] = df_physicians['zip'].apply(\
lambda x: re.sub(u'750([0-9]{2})', u'751\\1', x))
df_physicians.drop(['zip_city'], axis = 1, inplace = True)
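# (added illustration) the re.sub above turns a Paris postal code into the
# corresponding INSEE commune code by changing the middle digit:
assert re.sub(u'750([0-9]{2})', u'751\\1', u'75014') == u'75114'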
# DISPLAY
ls_disp_base_1 = ['gender','name', 'surname', 'street', 'zip', 'city',
'convention', 'carte_vitale', 'status']
ls_disp_base_2 = ['gender','name', 'surname', 'zip', 'city',
'convention', 'carte_vitale', 'status']
ls_disp_services = ['consultation', 'sterilet']
print '\nOverview of physicians'
print df_physicians[ls_disp_base_1].to_string()
print '\nOverview of physician prices'
print df_physicians[ls_disp_base_2 + ls_disp_services].to_string()
# DEAL WITH DUPLICATES I.E. PHYSICIANS WITH SEVERAL LOCATIONS
# locations to be kept: several lines per physician are fine, but they must be identified (propagate tarifs?)
# #######
# OUTPUT
# #######
file_extension = 'gyneco_75_2014'
# CSV
df_physicians[ls_disp_base_1 + ls_disp_services].\
to_csv(os.path.join(path_built_csv, 'df_{:s}.csv'.format(file_extension)),
encoding = u'utf-8',
float_format = u'%.1f')
## JSON
#df_physicians = df_physicians[ls_disp_base_1 + ls_disp_services].copy()
#df_physicians.reset_index(inplace = True)
#ls_ls_physicians = [list(x) for x in df_physicians.values]
#enc_json(ls_ls_physicians, os.path.join(path_built_json,
# 'ls_{:s}.json'.format(file_extension)))
| [
"[email protected]"
]
| |
2c6f7329cf5f3fe94f8c1b248451d1d8a25c4bf2 | 5d8fbd0ae055a830be62de0191113f6e03f91046 | /practice4v.py | 9c6f45f76ba2a4b62ff72c2bb4abf339ca89b233 | []
| no_license | harmansehmbi/Project4 | ba892d921c6b1a9b595a0aa1781afcd79fee5269 | baf7e75199d99b60431ba191fbb53fbcd25116cb | refs/heads/master | 2020-06-01T16:02:57.311321 | 2019-06-08T04:01:07 | 2019-06-08T04:01:07 | 190,843,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | dishes = {"dish1":100, "dish2":200, "dish3":300, "dish4":150}
print()
keys = list(dishes.keys())
values = list(dishes.values())
print(keys, type(keys))
print()
print(values, type(values))
print()
print(max(dishes.keys()))
print()
print(max(dishes.values())) | [
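# note (added): max() on dishes.keys() compares the key strings
# lexicographically (here 'dish4'), while max() on dishes.values()
# compares the numeric prices (here 300).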
"[email protected]"
]
| |
1e3e7d87108b4bf2008a6bcf63f8e833328da5e4 | 73b73f15adefd4c8d1fc2888406fc7f6b567c9f0 | /blogapp/urls.py | 2a66e3ab5b4a63a0add5b35819cd5fcf89b51d67 | []
| no_license | codenotespy/www.codenotes.site_dj | ed4b736814b783b32d41ee1add70bda4eb9522dc | e1cd0dcb2e6fbdfec4b7ebdf7b19e2338a297e48 | refs/heads/main | 2023-06-09T22:51:46.291207 | 2021-06-29T03:56:14 | 2021-06-29T03:56:14 | 381,232,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,505 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.blog_menu, name='blog_menu'),
path('contact', views.contact, name='contact'),
path('upload_blog', views.upload_blog, name='upload_blog'),
path('blog/<int:id>/', views.blog, name='blog'),
path('blog_like/<int:id>/', views.blog_like, name='blog_like'),
path('blog_up/<int:id>/', views.blog_up, name='blog_up'),
path('blog_down/<int:id>/', views.blog_down, name='blog_down'),
path('priorityform/<int:id>/', views.priorityform, name='priorityform'),
path('delete_blog/<int:pk>/', views.delete_blog, name='delete_blog'),
path('update_blog/<int:id>/', views.update_blog, name='update_blog'),
path('update_blog_cover/<int:pk>/', views.update_blog_cover, name='update_blog_cover'),
path('bio_update', views.bio_update, name='bio_update'),
path('mybio', views.mybio, name='mybio'),
path('tags/<slug:tag_slug>/', views.TagIndexBlog, name='posts_by_tag'),
path('blogsforuser/<int:id>/', views.blogsforuser, name='blogsforuser'),
path('bioforuser/<int:id>/', views.bioforuser, name='bioforuser'),
path('about_update', views.about_update, name='about_update'),
path('site_about', views.site_about, name='site_about'),
path('authormenu', views.authormenu, name='authormenu'),
]
from django.conf import settings
from django.conf.urls.static import static
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
dbe56d846912e13b4c72c12ef311a8562813ec85 | ab5cdf8f2de94c327e4679da84f941b1f3c04db4 | /kubernetes/test/test_v1_glusterfs_volume_source.py | 39a802f1d91d71050744ebdb7a46060e9740cc45 | [
"Apache-2.0"
]
| permissive | diannaowa/client-python | a4a92a125178db26004eaef5062f9b1b581b49a8 | 5e268fb0b6f21a535a14a7f968b84ed4486f6774 | refs/heads/master | 2020-12-02T22:06:03.687696 | 2017-06-30T21:42:50 | 2017-06-30T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_glusterfs_volume_source import V1GlusterfsVolumeSource
class TestV1GlusterfsVolumeSource(unittest.TestCase):
""" V1GlusterfsVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1GlusterfsVolumeSource(self):
"""
Test V1GlusterfsVolumeSource
"""
model = kubernetes.client.models.v1_glusterfs_volume_source.V1GlusterfsVolumeSource()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
c1895e6e58c9a7846f94b1a64c77145bf2a32ad3 | 3f69ffa4e42526c50100dc0f7e7ad88502a664e1 | /pyforms_gui/controls/control_event_timeline/events/pointer.py | 18b837dbd20aa023d08cb86c2a9135e181aa04f9 | []
| no_license | UmSenhorQualquer/pyforms-gui | d09854839ff20d26703034e596658e28730362ec | a25bcfe3229e9eaab94fdd84dd749c5de071d836 | refs/heads/v4 | 2022-05-05T11:20:38.432276 | 2021-06-03T16:13:59 | 2021-06-03T16:13:59 | 134,008,541 | 121 | 25 | null | 2022-03-24T16:23:47 | 2018-05-18T22:09:20 | Python | UTF-8 | Python | false | false | 2,379 | py | # !/usr/bin/python
# -*- coding: utf-8 -*-
from AnyQt.QtGui import QColor
from AnyQt import QtCore
class Pointer(object):
def __init__(self, position, parent):
"""
:param position:
:param parent:
"""
self._position = position
self._parent = parent
def draw(self, painter, showvalues=False, highlight=False):
"""
:param painter:
:param showvalues:
:return:
"""
if highlight:
painter.setPen(QColor(0, 150, 150))
painter.setBrush(QColor(0, 150, 150))
else:
painter.setPen(QColor(0, 255, 0))
painter.setBrush(QColor(0, 255, 0))
painter.drawLine(
self.xposition, 8, self.xposition, self._parent.height())
painter.drawEllipse(QtCore.QPoint(self.xposition, 8), 5, 5)
painter.drawText(self.xposition + 8, 8 + 4, str(self._position))
##########################################################################
#### HELPERS/FUNCTIONS ###################################################
##########################################################################
def moveEvent(self):
"""
:return:
"""
pass
def collide(self, x, y):
"""
:param x:
:param y:
:return:
"""
return self.begin <= x <= self.end and 0 <= y <= self._parent.TOPTRACK_HEIGHT
def can_slide_begin(self, x, y):
"""
:param x:
:param y:
:return:
"""
return False
def can_slide_end(self, x, y):
"""
:param x:
:param y:
:return:
"""
return False
def move(self, x, y):
"""
:param x:
:param y:
:return:
"""
x = int(round(x / self._parent._scale))
if (self._position - x) >= 0 and (self._position - x) <= (self._parent.width() / self._parent._scale):
self._position += x
self.moveEvent()
##########################################################################
#### PROPERTIES ##########################################################
##########################################################################
@property
def xposition(self):
return self._parent.frame2x(self.position)
@property
def position(self):
return self._position
@position.setter
def position(self, value):
self._position = value
self.moveEvent()
@property
def frame(self): return self._position
@property
def begin(self):
return self._position * self._parent.scale - 5
@property
def end(self):
return self._position * self._parent.scale + 5
| [
"[email protected]"
]
| |
53ba26d148765218e395e84938a2a3ffeb9919db | 9b32771b7d1513ee37bc62dd347675abcfc1bfc9 | /example_snippets/multimenus_snippets/Snippets/Python/Lists/Sort two lists at the same time.py | b4c74625d9ae14493dd58f86cee4a739b2617268 | [
"BSD-3-Clause"
]
| permissive | listar0810/jupyterlab-snippets-multimenus | 44087ef1aeb030a3074862a337508b57d50072c6 | 477f51cfdbad7409eab45abe53cf774cd70f380c | refs/heads/master | 2022-12-12T18:19:25.221083 | 2020-09-08T01:11:01 | 2020-09-08T01:11:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | x, y = [list(tmp) for tmp in zip(*sorted(zip(x,y), key=lambda pair: pair[0]))] | [
"[email protected]"
]
| |
5c875bc14088390b5c80f58390c190466aa572b3 | 31b9c04fd1edc1401721e9010a171cdb0da8e77f | /mysite/main/migrations/0002_post_mainphoto.py | c2907525a31e84b2a70cdab6c5c5c4b1a5a153c0 | []
| no_license | ChoiKangM/replyWithDjango | 2b0685bf1f9934d0dcb48ac7525e8a6f41c46d2d | 2bdafabddfaca48b1b5b91cb09a3b7404e4472e7 | refs/heads/master | 2023-08-08T03:32:46.305187 | 2019-05-14T21:08:39 | 2019-05-14T21:08:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.1.8 on 2019-05-07 01:41
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='post',
name='mainphoto',
field=models.ImageField(blank=True, null=True, upload_to=''),
),
]
| [
"[email protected]"
]
| |
97eb9f25753cf83fffb0064435595d2456eb9fce | c839961aeab22795200d9edef9ba043fe42eeb9c | /data/script824.py | 8654697f896dc73e1e710145b6819c9ce67f61df | []
| no_license | StevenLOL/kaggleScape | ad2bb1e2ed31794f1ae3c4310713ead1482ffd52 | 18bede8420ab8d2e4e7c1eaf6f63280e20cccb97 | refs/heads/master | 2020-03-17T05:12:13.459603 | 2018-05-02T19:35:55 | 2018-05-02T19:35:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,902 | py |
# coding: utf-8
# ##Exploring several ways to understand the data##
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import matplotlib.pyplot as plt # plotting
import seaborn as sns
import itertools
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set(style="whitegrid")
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# In[ ]:
import kagglegym
env = kagglegym.make()
# get the reference to the api
observation = env.reset()
# In[ ]:
with pd.HDFStore('../input/train.h5') as train:
df = train.get('train')
# In[ ]:
train = df.copy(deep = True)
ID = "id"
TSTAMP = "timestamp"
TARGET = "y"
print("There are {} rows and {} columns in the dataset".format(*train.shape))
# **Let us divide the columns according to the prefixes**
# In[ ]:
def findMatchedColumnsUsingPrefix(prefix, df):
columns = df.columns[df.columns.str.startswith(prefix)]
return list(columns.values)
# In[ ]:
derived_columns = findMatchedColumnsUsingPrefix("derived", train)
fundamental_columns = findMatchedColumnsUsingPrefix("fundamental", train)
technical_columns = findMatchedColumnsUsingPrefix("technical", train)
print("There are {} derived columns".format(len(derived_columns)))
print("There are {} fundamental columns".format(len(fundamental_columns)))
print("There are {} technical columns".format(len(technical_columns)))
# In[ ]:
ids = train[ID].unique()
tstamps = train[TSTAMP].unique()
# **This function calculates the number of missing records within an asset for the group of columns passes as a reference**
# In[ ]:
def findMissingEntriesForIds(columns, train, identifier):
indexes = []
# prepare the header
rows = {}
rows['id'] = []
for column in columns:
rows[column] = []
# count number of missing entries in a column for an id group
for id, group in train.groupby(identifier):
rows['id'].append(id)
for column in columns:
rows[column].append(pd.isnull(group[column]).sum())
df = pd.DataFrame(rows)
#df.columns = pd.MultiIndex.from_tuples([tuple(c.split('_')) for c in df.columns])
#df = df.stack(0).reset_index(1)
#df.sort_index()
return df
# **Calculates count of missing records for derived columns group by assets**
# In[ ]:
derived = findMissingEntriesForIds(derived_columns, train, ID)
derived.head(2)
# **Calculates count of missing records for fundamental columns group by assets**
# In[ ]:
fundamental = findMissingEntriesForIds(fundamental_columns, train, ID)
fundamental.head(2)
# **Calculates count of missing records for technical columns group by assets**
# In[ ]:
technical = findMissingEntriesForIds(technical_columns, train, ID)
technical.head(2)
# **Find total count of missing entries for each column**
# In[ ]:
def calculateColumnSum(res, columns):
names = []
values = []
for column in columns:
names.append(column)
values.append(res[column].sum())
data = pd.DataFrame({'columns' : names, 'counts' : values})
data = data.sort_values(by=['counts'], ascending = False)
return data
# In[ ]:
def createHorizontalBarPlot(labels, missing, plot_width, plot_height, width, title):
N = len(labels)
ind = np.arange(N)
fig, ax = plt.subplots(figsize = (plot_width, plot_height))
rects = ax.barh(ind, missing, width)
ax.set_yticks(ind + width / 2)
ax.set_yticklabels(labels)
ax.set_title(title)
plt.show()
plt.close()
# In[ ]:
result = calculateColumnSum(derived, derived_columns)
print("The columns with minimum null is {}".format(result.iloc[-1]['columns']))
print("The columns with maximum null is {}".format(result.iloc[0]['columns']))
createHorizontalBarPlot(result['columns'], result['counts'], 7, 6, 0.45, 'Missing counts by columns')
# In[ ]:
result = calculateColumnSum(fundamental, fundamental_columns)
print("The columns with minimum null is {}".format(result.iloc[-1]['columns']))
print("The columns with maximum null is {}".format(result.iloc[0]['columns']))
createHorizontalBarPlot(result['columns'], result['counts'], 10, 15, 0.65, 'Missing counts by columns')
# In[ ]:
result = calculateColumnSum(technical, technical_columns)
print("The columns with minimum null is {}".format(result.iloc[-1]['columns']))
print("The columns with maximum null is {}".format(result.iloc[0]['columns']))
createHorizontalBarPlot(result['columns'], result['counts'], 10, 15, 0.45, 'Missing counts for technicals')
# **Join the frames of missing records counts for all columns**
# In[ ]:
merged = derived.merge(fundamental, how = 'inner', on = 'id')
merged = merged.merge(technical, how = 'inner', on = 'id')
merged.head(2)
# **Add a new column sum which calculates count of missing records for an asset across all columns**
# In[ ]:
copy = merged.copy(deep = True)
copy['sum'] = copy.drop('id', axis=1).sum(axis=1) # exclude the id column from the per-asset missing-record total
copy = copy.sort_values(by='sum', ascending = True)
result = copy[['id', 'sum']]
result.head(2)
# In[ ]:
print("The asset with minimum missing records for all columns = {}".format(result.iloc[0][ID]))
print("The asset with maximum missing records for all columns = {}".format(result.iloc[-1][ID]))
# **This chart prints the top x assets with minimum missing records**
# In[ ]:
plot_df = result[:5]
createHorizontalBarPlot(plot_df['id'], plot_df['sum'], 5, 4, 0.75, 'Missing by assets')
# **This chart prints the top x assets with maximum missing records**
# In[ ]:
plot_df = result.tail(5)
createHorizontalBarPlot(plot_df['id'], plot_df['sum'], 5, 4, 0.75, 'Missing by assets')
# **This function calculates the timestamps where data is missing for an asset**
# In[ ]:
def getTstampForMissingRecordsInColumn(df, columns, identifier):
records = []
for id, group in df.groupby(identifier):
group_index_dict = {}
group_index_dict[identifier] = id
for col in columns:
group_index_dict[col] = list(group[pd.isnull(group[col])][TSTAMP])
records.append(group_index_dict)
return records
# **Execute the above function for derived columns**
# In[ ]:
missing_tstamps = getTstampForMissingRecordsInColumn(train, derived_columns, ID)
missing_tstamps[:1]
# **From the result of the above function we calculate the difference between the maximum and the minimum timestamp to determine if they are consecutive in nature**
# In[ ]:
def findTstampDiffForMissingRecordsByID(tstamp_indices):
rows = []
for item in tstamp_indices:
row_dict = {}
for key, value in item.items():
if key == 'id':
row_dict[key] = value
else:
row_dict[key] = int((value[-1] - value[0] + 1) / len(value)) if len(value) > 1 else len(value)
rows.append(row_dict)
return pd.DataFrame(rows)
# In[ ]:
tstamp_diff_df = findTstampDiffForMissingRecordsByID(missing_tstamps)
tstamp_diff_df.head(2)
# ***1* - The timestamps for the missing entries are in succession.<br/>**
#
# ***0* - There are no missing records.<br/>**
#
# ***Anything besides 1 and 0 means that the missing timestamps are not in succession: some present entries fall between the missing values.***
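# **Worked example (added): if a column's missing timestamps for an asset are [5, 6, 7], the statistic is int((7 - 5 + 1) / 3) = 1, i.e. consecutive; for [5, 9] it is int((9 - 5 + 1) / 2) = 2, flagging a gap between the missing entries.**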
# **This function brings out those columns with non-consecutive missing timestamps.**
# In[ ]:
def findColumnsWithNonConsecutiveMissingTstampDiff(columns, tstamp_diff_dataframe):
columns_with_discrete_missing_tstamps = {}
for column in columns:
unique_arr = list(tstamp_diff_dataframe[column].unique())
temp = [i for i in unique_arr if i not in [1, 0]]
if len(temp) > 0:
columns_with_discrete_missing_tstamps[column] = temp
return columns_with_discrete_missing_tstamps
# In[ ]:
tstamp_diff_dict = findColumnsWithNonConsecutiveMissingTstampDiff(derived_columns, tstamp_diff_df)
tstamp_diff_dict
# **Determine the identifiers where the missing timestamps are not in succession**
# In[ ]:
def findAssetsWithNonConsecutiveMissingTstamp(discrete_missing_tstamp_diff_dict, tstamp_diff_data):
assets_with_discrete_missing_tstamps = []
for key, values in discrete_missing_tstamp_diff_dict.items():
data = tstamp_diff_data.ix[tstamp_diff_data[key].isin(values)]
assets_with_discrete_missing_tstamps += list(data[ID].values)
return list(set(assets_with_discrete_missing_tstamps))
# In[ ]:
id_with_discrete_missing_tstamp = findAssetsWithNonConsecutiveMissingTstamp(tstamp_diff_dict, tstamp_diff_df)
id_with_discrete_missing_tstamp
# In[ ]:
missing_tstamps = getTstampForMissingRecordsInColumn(train, fundamental_columns, ID)
tstamp_diff_df = findTstampDiffForMissingRecordsByID(missing_tstamps)
tstamp_diff_dict = findColumnsWithNonConsecutiveMissingTstampDiff(fundamental_columns, tstamp_diff_df)
id_with_discrete_missing_tstamp += findAssetsWithNonConsecutiveMissingTstamp(tstamp_diff_dict, tstamp_diff_df)
id_with_discrete_missing_tstamp = list(set(id_with_discrete_missing_tstamp))
id_with_discrete_missing_tstamp
# In[ ]:
missing_indices = getTstampForMissingRecordsInColumn(train, technical_columns, ID)
tstamp_diff_df = findTstampDiffForMissingRecordsByID(missing_indices)
tstamp_diff_dict = findColumnsWithNonConsecutiveMissingTstampDiff(technical_columns, tstamp_diff_df)
id_with_discrete_missing_tstamp += findAssetsWithNonConsecutiveMissingTstamp(tstamp_diff_dict, tstamp_diff_df)
id_with_discrete_missing_tstamp = list(set(id_with_discrete_missing_tstamp))
id_with_discrete_missing_tstamp
# In[ ]:
print("The assets with missing entries that are not consecutive is {}".format(id_with_discrete_missing_tstamp))
# In[ ]:
def barplot(res, width, index, column):
N = int(res.shape[0])
fig, ax = plt.subplots(figsize = (2, 1))
ind = np.arange(N)
rects = plt.bar(res['diff'], res['size'])
ax.set_title(column)
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(res['diff'])
plt.show()
# ## Calculate statistics ##
# In[ ]:
def calculate_statistics(data, columns, identifier, excludes):
rows = {}
rows[identifier] = []
min = {}
max = {}
sum = {}
mean = {}
median = {}
std = {}
total = {}
null_counts = {}
non_null_counts = {}
for column in columns:
if column in excludes:
continue
min[column + '-' + 'min'] = []
max[column + '-' + 'max'] = []
sum[column + '-' + 'sum'] = []
mean[column + '-' + 'mean'] = []
median[column + '-' + 'median'] = []
std[column + '-' + 'std'] = []
total[column + '-' + 'total'] = []
non_null_counts[column+ '-' + 'non_null'] = []
    for id, group in data.groupby(identifier): # use the data argument rather than the global train
rows[identifier].append(id)
for column in columns:
if column in excludes:
continue
min[column + '-' + 'min'].append(group[column].dropna().min())
max[column + '-' + 'max'].append(group[column].max())
sum[column + '-' + 'sum'].append(group[column].sum())
mean[column + '-' + 'mean'].append(group[column].mean())
median[column + '-' + 'median'].append(group[column].median())
std[column + '-' + 'std'].append(group[column].std())
total[column + '-' + 'total'].append(group[column].shape[0])
non_null_counts[column+ '-' + 'non_null'].append(pd.notnull(group[column]).sum())
records = {}
records['id'] = rows['id']
for feature in [min, max, mean, median, sum, std, total, non_null_counts]:
for key, values in feature.items():
records[key] = values
stats_df = pd.DataFrame(records)
stats_df.columns = pd.MultiIndex.from_tuples([tuple(c.split('-')) for c in stats_df.columns])
stats_df = stats_df.set_index('id')
stats_df = stats_df.stack(0).reset_index(1)
return stats_df
# In[ ]:
stats_df_der = calculate_statistics(train, derived_columns , 'id', ['id, timestamp', 'y'])
stats_df_der.head(1)
# In[ ]:
stats_df_fund = calculate_statistics(train, fundamental_columns , 'id', ['id, timestamp', 'y'])
stats_df_fund.head(1)
# In[ ]:
stats_df_tech = calculate_statistics(train, technical_columns , 'id', ['id, timestamp', 'y'])
stats_df_tech.head(1)
# **Find the total percentage of records present in each column**
# In[ ]:
def get_total_count_of_occurring_records(column_name, column_values, data, order_dict):
header_dict = {}
header_dict[column_name] = []
header_dict['total_present'] = []
header_dict['total'] = []
for value in column_values:
df = data.loc[data[column_name] == value]
header_dict[column_name].append(value)
non_nulls = len(list(df.loc[df['non_null'] > 0].index))
header_dict['total_present'].append(non_nulls)
header_dict['total'].append(len(df.index))
result = pd.DataFrame(header_dict)
ordered_column = next(iter(order_dict.keys()))
order = next(iter(order_dict.values()))
result = result.sort_values(by = ordered_column, ascending = order)
return result
# **Execute the above function for derived columns**
# In[ ]:
percentage_df = get_total_count_of_occurring_records('level_1', derived_columns, stats_df_der, {'total_present' : False})
percentage_df['percentage'] = (percentage_df['total_present'] / percentage_df['total']) * 100
percentage_df
# In[ ]:
percentage_df = get_total_count_of_occurring_records('level_1', fundamental_columns, stats_df_fund, {'total_present' : False})
percentage_df['percentage'] = (percentage_df['total_present'] / percentage_df['total']) * 100
percentage_df.head(2)
# In[ ]:
percentage_df = get_total_count_of_occurring_records('level_1', technical_columns, stats_df_tech, {'total_present' : False})
percentage_df['percentage'] = (percentage_df['total_present'] / percentage_df['total']) * 100
percentage_df.head(2)
# ## Calculate the assets with missing values ##
# In[ ]:
def get_assets_with_missing(columns, stats_df):
result = {}
for column in columns:
missing_ids_df = stats_df.loc[stats_df['level_1'] == column]
missing_ids_df = missing_ids_df.loc[missing_ids_df['non_null'] == 0]
result[column] = list(missing_ids_df.index)
return result
# In[ ]:
der_missing_assets = get_assets_with_missing(derived_columns, stats_df_der)
fun_missing_assets = get_assets_with_missing(fundamental_columns, stats_df_fund)
tech_missing_assets = get_assets_with_missing(technical_columns, stats_df_tech)
# **Calculate maximum and minimum asset and its value from the statistics dataframe**
# In[ ]:
def calculate_maxmin_id_and_value_from_stats(dataframe, field_name, field_values, feature_values):
data_copy = dataframe.copy(deep = True)
headers = list(itertools.product(field_values, feature_values))
headers = [":".join(item) for item in headers]
headers = list(itertools.product(headers, ['high', 'low', 'highid', 'lowid']))
headers = ["-".join(item) for item in headers]
columns_dict = {}
for item in headers:
columns_dict[item] = []
for key in columns_dict:
stats_column = key.split('-')[0]
feature_to_find = key.split('-')[1]
original_column = stats_column.split(':')[0]
stats_feature = stats_column.split(':')[1]
temp = data_copy.loc[data_copy[field_name] == original_column]
temp = temp.sort_values(by=stats_feature, ascending = False)
temp = temp[[stats_feature]]
temp = temp.reset_index()
if feature_to_find == 'high':
columns_dict[key].append(temp.head(1)[stats_feature][0])
if feature_to_find == 'highid':
columns_dict[key].append(temp.head(1)['id'][0])
if feature_to_find == 'low':
columns_dict[key].append(temp.iloc[-1][stats_feature])
if feature_to_find == 'lowid':
columns_dict[key].append(temp.iloc[-1]['id'])
result = pd.DataFrame(columns_dict)
result.columns = pd.MultiIndex.from_tuples([tuple(c.split('-')) for c in result.columns])
result = result.stack(0).reset_index(1)
#data_copy = data_copy.sort_values(by=max_column, ascending = False)
#data_copy = data_copy[[max_column]]
#data_copy = data_copy.reset_index()
#return data_copy
return result
# In[ ]:
columns = list(stats_df_der.columns)
columns.remove('level_1')
result = calculate_maxmin_id_and_value_from_stats(stats_df_der, 'level_1', derived_columns, columns)
result.head(2)
# In[ ]:
IDS_MAX = list(result['highid'].unique())
IDS_MIN = list(result['lowid'].unique())
print(IDS_MAX[:2])
# ## Understand the distribution of the target variable ##
# In[ ]:
grouped = train.groupby(ID)
IDS = train[ID]
# In[ ]:
def get_random_id():
return random.randint(0, len(IDS) - 1)
# In[ ]:
sns.distplot(train['y'], kde = False, bins = 20)
# **We can infer from the above plot that the target is approximately normally distributed, though the histogram is somewhat ragged.**
# In[ ]:
first_asset_df = grouped.get_group(IDS[0])
first_asset_df.head(2)
# In[ ]:
sns.distplot(first_asset_df['y'])
# In[ ]:
random_df = grouped.get_group(1749)
random_df.head(3)
| [
"[email protected]"
]
| |
f7ffd0f4812bee9b9bd47aa0f720b0e299eb38f3 | 14483184f0eabafab14f75a052a69e363f973f99 | /101.symmetric-tree.py | 80f9454d04af06824f3e4018d4db3b084a9ef349 | []
| no_license | DizzyYunxuan/Leetcode_answers | d38679935214597bae973a1fef03334005802819 | 9605165b684bc5a1299a418bb274dc0d3be117a6 | refs/heads/master | 2021-06-19T15:01:34.991783 | 2021-03-31T12:10:52 | 2021-03-31T12:10:52 | 196,497,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #
# @lc app=leetcode id=101 lang=python3
#
# [101] Symmetric Tree
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
return self.isSameTree(root.left, root.right) if root else True
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if not p and not q:
return True
if (not p or not q):
return False
elif p.val != q.val:
return False
left = self.isSameTree(p.left, q.right)
right = self.isSameTree(p.right, q.left)
return left and right
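# Illustrative usage (added; assumes the TreeNode class above is uncommented):
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(2)
# root.left.right, root.right.left = TreeNode(4), TreeNode(4)
# Solution().isSymmetric(root)  # -> True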
# ✔ Accepted
# ✔ 195/195 cases passed (44 ms)
# ✔ Your runtime beats 40.99 % of python3 submissions
# ✔ Your memory usage beats 5.17 % of python3 submissions (14 MB)
| [
"[email protected]"
]
| |
00f589526d0fa2d4c1641fdcd926f14682be16c1 | 24a9c8f2fac4e2b20f731387336ec4e22d5fd2c7 | /司法题1/词云.py | d4b2be329075aba70d6f82c1eede21a6a482937d | []
| no_license | yunli45/pycharmProjectHome | 94833822e3036bf2baf8700c4493132e63177d4c | 9a382c060963eb801a3da07423e84a4132257b02 | refs/heads/master | 2020-05-23T23:35:20.476973 | 2019-05-16T09:45:58 | 2019-05-16T09:49:16 | 186,986,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,770 | py | # -*- coding: utf-8 -*-
"""
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
            ┃   May the divine beast     ┣┓
            ┃   keep this code bug-free! ┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
"""
Minimal Example
===============
使用默认参数生成方形的词云
"""
from wordcloud import WordCloud
from os import path
d = path.dirname(__file__)
# 读取整个文本
text = open(path.join(d, "Alice.txt")).read()
# 生成词云图像
# wordcloud = WordCloud.generate(text)
# matplotlib的方式展示生成的词云图像
import matplotlib.pyplot as plt
# plt.imshow(wordcloud, interpolation="bilinear") # 展示
# plt.axis("off")
#max_font_size设定生成词云中的文字最大大小
#width,height,margin可以设置图片属性
# generate 可以对全部文本进行自动分词,但是他对中文支持不好
wordcloud = WordCloud(max_font_size=66).generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
#保存到本地
wordcloud.to_file(path.join(d, "alice.png"))
# pil方式展示生成的词云图像(如果你没有matplotlib)
# image = wordcloud.to_image()
# image.show()
| [
"[email protected]"
]
| |
4396e7581798f07422042200815e7f3b1af1ea69 | b32300bf27d391fd9441cfbd9c86cd2943617e42 | /py_osm_cluster/util/coords.py | 8fedcd390dc44d775b94e294fe802641438b7c96 | []
| no_license | jakubwida/py_osm_cluster_bachelors | 71668552e5895ea7e0622d9fa54eb13ad264c31c | ea151268b184a38a6371c8df919fec7ef10cffaa | refs/heads/master | 2021-01-25T07:49:09.313509 | 2017-07-04T09:40:24 | 2017-07-04T09:40:24 | 93,666,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py |
class Coords:
def __init__(self):
self.labels = []
self.coords = []
self.c_number = 0
self.c_positions = []
def assimilate(self,other_coords):
		# merge another Coords object into this one; shift the incoming labels
		# past the current maximum so cluster ids do not collide
		self.coords = self.coords+other_coords.coords
		self.c_number = self.c_number+other_coords.c_number
		self.c_positions = self.c_positions+other_coords.c_positions
		offset = max(self.labels) + 1 if self.labels else 0
		self.labels = self.labels + [x+offset for x in other_coords.labels]
def write_file(self,filename):
		f = open(filename,'w')
		f.write(str(self))
		f.close()
"""
f.write('coords\n')
for element in self.coords:
f.write(str(element[0])+' '+str(element[1])+'\n')
f.write('labels\n')
for element in self.labels:
f.write(str(element)+'\n')
f.write('c_number\n')
f.write(str(self.c_number)+'\n')
f.write('c_positions\n')
for element in self.c_positions:
f.write(str(element[0])+' '+str(element[1])+'\n')
"""
def read_file(self,filename):
f = open(filename,'r')
modes =["coords","labels","c_number","c_positions"]
mode = None
for line in f:
line = line.strip("\n")
#print(repr(line))
if line in modes:
mode = line
else:
line = [float(x) for x in line.split(" ")]
if mode == "coords":
self.coords.append(line)
elif mode == "c_positions":
self.c_positions.append(line)
elif mode == "c_number":
self.c_number = int(line[0])
elif mode == "labels":
self.labels.append(int(line[0]))
def __str__(self):
out =""
out = out + 'coords\n'
for element in self.coords:
out = out + str(element[0])+' '+str(element[1])+'\n'
out = out + 'labels\n'
for element in self.labels:
out = out + str(element)+'\n'
out = out + 'c_number\n'
out = out + str(self.c_number)+'\n'
out = out + 'c_positions\n'
for element in self.c_positions:
out = out + str(element[0])+' '+str(element[1])+'\n'
return out
""" returns a dictionary where keys = label values, values = lists of coords under these labels"""
def clusters_into_lists_dict(self):
out = {}
for num,i in enumerate(self.coords):
label = self.labels[num]
if label in out:
out[label].append(i)
else:
out[label] = [i]
return out
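# Minimal usage sketch (added; the file name is illustrative):
# c = Coords()
# c.coords = [[0.0, 0.0], [1.0, 1.0]]
# c.labels = [0, 1]
# c.c_number = 2
# c.c_positions = [[0.0, 0.0], [1.0, 1.0]]
# c.write_file('clusters.txt')
# c2 = Coords(); c2.read_file('clusters.txt')
# assert c2.clusters_into_lists_dict() == {0: [[0.0, 0.0]], 1: [[1.0, 1.0]]}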
| [
"[email protected]"
]
| |
b7db7b09def0133568d7d7375d0082f32737f6db | 36d7b748cb0452a03eb83822e8d5e4e8437b676a | /code/weinberg.py | f74f1baf4e9a6db8110cbaf211f340cd931f3326 | [
"BSD-3-Clause"
]
| permissive | glouppe/paper-avo | d5726b0a700de0ee735a9a51a5e2e29c8e889bb3 | 1d86349e6da165273918a182e9d061810395d373 | refs/heads/master | 2021-03-19T18:51:43.816577 | 2018-12-17T20:09:22 | 2018-12-17T20:09:22 | 94,195,189 | 14 | 6 | BSD-3-Clause | 2018-12-17T20:09:23 | 2017-06-13T09:26:42 | TeX | UTF-8 | Python | false | false | 8,983 | py | import autograd as ag
import autograd.numpy as np
import matplotlib.pyplot as plt
import copy
from nn import glorot_uniform
from nn import relu
from nn import AdamOptimizer
from proposals import make_gaussian_proposal
from proposals import gaussian_draw
from proposals import grad_gaussian_logpdf
from proposals import grad_gaussian_entropy
from sklearn.utils import check_random_state
from scipy.spatial.distance import mahalanobis
# Global params
seed = 777
rng = check_random_state(seed)
batch_size = 64
n_epochs = 300+1
lambda_gp = 0.0025
true_theta = np.array([(42.0-40)/(50-40),
(0.9 - 0.5) / (1.5-0.5)])
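# (added note) true_theta is expressed in unit-scaled coordinates; mapping it
# back with theta[0]*(50-40)+40 and theta[1]*(1.5-0.5)+0.5 recovers the
# physical values sqrtshalf = 42 and gf = 0.9 used in the plots below.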
make_plots = True
plt.rcParams["figure.dpi"] = 200
# Simulator
def a_fb(sqrtshalf, gf):
MZ = 90
GFNom = 1.0
sqrts = sqrtshalf * 2.
A_FB_EN = np.tanh((sqrts - MZ) / MZ * 10)
A_FB_GF = gf / GFNom
return 2 * A_FB_EN*A_FB_GF
def diffxsec(costheta, sqrtshalf, gf):
norm = 2. * (1. + 1. / 3.)
return ((1 + costheta ** 2) + a_fb(sqrtshalf, gf) * costheta) / norm
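# (added sanity note) diffxsec is a proper density over costheta in [-1, 1]:
# the odd a_fb term integrates to zero and (1 + c**2) integrates to 8/3,
# which is exactly `norm`. Quick numerical check with illustrative values:
_x_chk = np.linspace(-1, 1, 10001)
assert abs(np.mean(diffxsec(_x_chk, 45.0, 1.0)) * 2.0 - 1.0) < 1e-2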
def rej_sample_costheta(n_samples, theta, rng):
sqrtshalf = theta[0] * (50-40) + 40
gf = theta[1] * (1.5 - 0.5) + 0.5
ntrials = 0
samples = []
x = np.linspace(-1, 1, num=1000)
maxval = np.max(diffxsec(x, sqrtshalf, gf))
while len(samples) < n_samples:
ntrials = ntrials+1
xprop = rng.uniform(-1, 1)
ycut = rng.rand()
yprop = diffxsec(xprop, sqrtshalf, gf)/maxval
        if yprop < ycut:  # yprop is already normalised by maxval; dividing by maxval again biased the sampler
continue
samples.append(xprop)
return np.array(samples)
def simulator(theta, n_samples, random_state=None):
rng = check_random_state(random_state)
samples = rej_sample_costheta(n_samples, theta, rng)
return samples.reshape(-1, 1)
X_obs = simulator(true_theta, 50000, random_state=rng)
n_params = len(true_theta)
n_features = X_obs.shape[1]
# Critic
gammas = [0.0, 5.0]
colors = ["C1", "C2"]
def make_critic(n_features, n_hidden, random_state=None):
rng = check_random_state(random_state)
params = {"W": [glorot_uniform(n_hidden, n_features, rng),
glorot_uniform(n_hidden, n_hidden, rng),
glorot_uniform(n_hidden, 0, rng)],
"b": [np.zeros(n_hidden),
np.zeros(n_hidden),
np.zeros(1)]}
return params
def predict(X, params):
h = X
h = relu(np.dot(params["W"][0], h.T).T + params["b"][0], alpha=0.1)
h = relu(np.dot(params["W"][1], h.T).T + params["b"][1], alpha=0.1)
h = np.dot(params["W"][2], h.T).T + params["b"][2]
return h
grad_predict_critic = ag.elementwise_grad(predict)
history = [{"gamma": gammas[i],
"color": colors[i],
"loss_d": [],
"params_proposal": make_gaussian_proposal(n_params,
mu=0.5,
log_sigma=np.log(0.1)),
"params_critic": make_critic(n_features, 10, random_state=rng)}
for i in range(len(gammas))]
# Global loop over gammas
for state in history:
# WGAN + GP
def loss_critic(params_critic, i, lambda_gp=lambda_gp,
batch_size=batch_size):
y_critic = np.zeros(batch_size)
# y_critic[:batch_size // 2] = 0.0 # 0 == fake
y_critic[batch_size // 2:] = 1.0
rng = check_random_state(i)
# WGAN loss
thetas = gaussian_draw(state["params_proposal"],
batch_size // 2, random_state=rng)
_X_gen = np.zeros((batch_size // 2, n_features))
for j, theta in enumerate(thetas):
_X_gen[j, :] = simulator(theta, 1, random_state=rng).ravel()
indices = rng.permutation(len(X_obs))
_X_obs = X_obs[indices[:batch_size // 2]]
X = np.vstack([_X_gen, _X_obs])
y_pred = predict(X, params_critic)
l_wgan = np.mean(-y_critic * y_pred + (1. - y_critic) * y_pred)
# Gradient penalty
eps = rng.rand(batch_size // 2, 1)
_X_hat = eps * _X_obs + (1. - eps) * _X_gen
grad_Dx = grad_predict_critic(_X_hat, params_critic)
norms = np.sum(grad_Dx ** 2, axis=1) ** 0.5
l_gp = np.mean((norms - 1.0) ** 2.0)
return l_wgan + lambda_gp * l_gp
grad_loss_critic = ag.grad(loss_critic)
# grad_psi E_theta~q_psi, z~p_z(theta) [ d(g(z, theta) ]
def approx_grad_u(params_proposal, i):
rng = check_random_state(i)
grad_u = {k: np.zeros(len(params_proposal[k]))
for k in params_proposal}
grad_ent = {k: np.zeros(len(params_proposal[k]))
for k in params_proposal}
thetas = gaussian_draw(params_proposal, batch_size, random_state=rng)
for theta in thetas:
x = simulator(theta, 1, random_state=rng)
dx = predict(x, state["params_critic"]).ravel()
grad_q = grad_gaussian_logpdf(params_proposal, theta)
for k, v in grad_q.items():
grad_u[k] += -dx * v
grad_entropy = grad_gaussian_entropy(params_proposal)
for k, v in grad_entropy.items():
grad_ent[k] += v
M = len(thetas)
for k in grad_u:
grad_u[k] = 1. / M * grad_u[k] + state["gamma"] * grad_ent[k]
return grad_u
# Training loop
init_critic = copy.copy(state["params_critic"])
opt_critic = AdamOptimizer(grad_loss_critic, state["params_critic"],
step_size=0.01)
opt_proposal = AdamOptimizer(approx_grad_u, state["params_proposal"],
step_size=0.01)
opt_critic.step(1000)
opt_critic.move_to(state["params_critic"])
for i in range(n_epochs):
# fit simulator
opt_proposal.step(1)
opt_proposal.move_to(state["params_proposal"])
# fit critic
opt_critic.reset() # reset moments
opt_critic.step(100)
opt_critic.move_to(state["params_critic"])
state["loss_d"].append(-loss_critic(state["params_critic"], i,
batch_size=10000))
print(i, state["gamma"], state["params_proposal"], state["loss_d"][-1])
# Plot
if make_plots:
fig = plt.figure(figsize=(6, 6))
# proposal
ax1 = plt.subplot2grid((2, 2), (0, 0))
offset = 0.15
for state in history:
x = np.linspace(true_theta[0]-offset, true_theta[0]+offset, num=300)
y = np.linspace(true_theta[1]-offset, true_theta[1]+offset, num=300)
X, Y = np.meshgrid(x, y)
mu = state["params_proposal"]["mu"]
sigma = np.diag(np.exp(state["params_proposal"]["log_sigma"])) ** 2.0
sigma_inv = np.linalg.inv(sigma)
Z = [mahalanobis(theta, mu, sigma_inv)
for theta in np.hstack([X.reshape(-1, 1), Y.reshape(-1, 1)])]
Z = np.array(Z).reshape(X.shape)
CS = plt.contour(X*(50-40)+40, Y*(1.5-0.5)+0.5, Z, [1.0, 2.0, 3.0], colors=state["color"])
fmt = {l:s for l, s in zip(CS.levels, [r"$1\sigma$", r"$2\sigma$", r"$3\sigma$"])}
plt.clabel(CS, fmt=fmt)
plt.scatter(mu[0]*(50-40)+40, mu[1]*(1.5-0.5)+0.5, c=state["color"], marker="+")
plt.plot([-999], [-999],
label=r"$q(\theta|\psi)\ \gamma=%d$" % state["gamma"],
color=state["color"])
plt.scatter(true_theta[0]*(50-40)+40, true_theta[1]*(1.5-0.5)+0.5,
c="C0",
label=r"$\theta^* = (%d, %.1f)$" % (true_theta[0]*(50-40)+40,
true_theta[1]*(1.5-0.5)+0.5))
plt.xlabel(r"$E_{beam}$")
plt.ylabel(r"$G_f$")
plt.xlim((true_theta[0]-offset)*(50-40)+40, (true_theta[0]+offset)*(50-40)+40)
plt.ylim((true_theta[1]-offset)*(1.5-0.5)+0.5, (true_theta[1]+offset)*(1.5-0.5)+0.5)
plt.legend(loc="lower right")
# histograms
ax2 = plt.subplot2grid((2, 2), (0, 1))
plt.xlim(-1, 1)
Xs = [X_obs]
for state in history:
thetas = gaussian_draw(state["params_proposal"], 50000, random_state=rng)
X_ = np.zeros((len(thetas), 1))
for j, theta in enumerate(thetas):
X_[j, :] = simulator(theta, 1).ravel()
Xs.append(X_)
plt.hist(Xs, histtype="bar",
label=[r"$x \sim p_r(x)$"] +
[r"$x \sim p(x|\psi)\ \gamma=%d$" % state["gamma"] for state in history],
color=["C0"] + [state["color"] for state in history],
range=(-1, 1), bins=15, normed=1)
plt.legend(loc="upper right")
# U_g
ax3 = plt.subplot2grid((2, 2), (1, 0), colspan=2)
xs = np.arange(n_epochs)
for state in history:
plt.plot(xs,
state["loss_d"],
label=r"$-U_d\ \gamma=%d$" % state["gamma"],
color=state["color"])
plt.xlim(0, n_epochs-1)
plt.legend(loc="upper right")
plt.tight_layout()
plt.savefig("figs/weinberg-%d.pdf" % seed)
plt.close()
| [
"[email protected]"
]
| |
cc0502e3cab1bd6a7d5f807163d274ebcf5b5a7d | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/storsimple/get_manager_extended_info.py | f1ef7dd5ffdca94ce009fca602b31b92407d3c83 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,305 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetManagerExtendedInfoResult',
'AwaitableGetManagerExtendedInfoResult',
'get_manager_extended_info',
]
@pulumi.output_type
class GetManagerExtendedInfoResult:
"""
The extended info of the manager.
"""
def __init__(__self__, algorithm=None, encryption_key=None, encryption_key_thumbprint=None, etag=None, id=None, integrity_key=None, kind=None, name=None, portal_certificate_thumbprint=None, type=None, version=None):
if algorithm and not isinstance(algorithm, str):
raise TypeError("Expected argument 'algorithm' to be a str")
pulumi.set(__self__, "algorithm", algorithm)
if encryption_key and not isinstance(encryption_key, str):
raise TypeError("Expected argument 'encryption_key' to be a str")
pulumi.set(__self__, "encryption_key", encryption_key)
if encryption_key_thumbprint and not isinstance(encryption_key_thumbprint, str):
raise TypeError("Expected argument 'encryption_key_thumbprint' to be a str")
pulumi.set(__self__, "encryption_key_thumbprint", encryption_key_thumbprint)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if integrity_key and not isinstance(integrity_key, str):
raise TypeError("Expected argument 'integrity_key' to be a str")
pulumi.set(__self__, "integrity_key", integrity_key)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if portal_certificate_thumbprint and not isinstance(portal_certificate_thumbprint, str):
raise TypeError("Expected argument 'portal_certificate_thumbprint' to be a str")
pulumi.set(__self__, "portal_certificate_thumbprint", portal_certificate_thumbprint)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if version and not isinstance(version, str):
raise TypeError("Expected argument 'version' to be a str")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def algorithm(self) -> str:
"""
Represents the encryption algorithm used to encrypt the keys. None - if Key is saved in plain text format. Algorithm name - if key is encrypted
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="encryptionKey")
def encryption_key(self) -> Optional[str]:
"""
Represents the CEK of the resource.
"""
return pulumi.get(self, "encryption_key")
@property
@pulumi.getter(name="encryptionKeyThumbprint")
def encryption_key_thumbprint(self) -> Optional[str]:
"""
Represents the Cert thumbprint that was used to encrypt the CEK.
"""
return pulumi.get(self, "encryption_key_thumbprint")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
The etag of the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The path ID that uniquely identifies the object.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="integrityKey")
def integrity_key(self) -> str:
"""
Represents the CIK of the resource.
"""
return pulumi.get(self, "integrity_key")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
The Kind of the object. Currently only Series8000 is supported
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the object.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="portalCertificateThumbprint")
def portal_certificate_thumbprint(self) -> Optional[str]:
"""
Represents the portal thumbprint which can be used optionally to encrypt the entire data before storing it.
"""
return pulumi.get(self, "portal_certificate_thumbprint")
@property
@pulumi.getter
def type(self) -> str:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
The version of the extended info being persisted.
"""
return pulumi.get(self, "version")
class AwaitableGetManagerExtendedInfoResult(GetManagerExtendedInfoResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagerExtendedInfoResult(
algorithm=self.algorithm,
encryption_key=self.encryption_key,
encryption_key_thumbprint=self.encryption_key_thumbprint,
etag=self.etag,
id=self.id,
integrity_key=self.integrity_key,
kind=self.kind,
name=self.name,
portal_certificate_thumbprint=self.portal_certificate_thumbprint,
type=self.type,
version=self.version)
def get_manager_extended_info(manager_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagerExtendedInfoResult:
"""
The extended info of the manager.
API Version: 2017-06-01.
:param str manager_name: The manager name
:param str resource_group_name: The resource group name
"""
__args__ = dict()
__args__['managerName'] = manager_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:storsimple:getManagerExtendedInfo', __args__, opts=opts, typ=GetManagerExtendedInfoResult).value
return AwaitableGetManagerExtendedInfoResult(
algorithm=__ret__.algorithm,
encryption_key=__ret__.encryption_key,
encryption_key_thumbprint=__ret__.encryption_key_thumbprint,
etag=__ret__.etag,
id=__ret__.id,
integrity_key=__ret__.integrity_key,
kind=__ret__.kind,
name=__ret__.name,
portal_certificate_thumbprint=__ret__.portal_certificate_thumbprint,
type=__ret__.type,
version=__ret__.version)
| [
"[email protected]"
]
| |
2429afb1390e940a0538dd6d4cebf7258e4c4ab6 | f0181afd2eea9b086ce9487fb8d7fd949282140a | /perl/alignment/local_alignment.py | db92f56beb54b2cb64580db945895b78d465058e | [
"MIT"
]
| permissive | linsalrob/EdwardsLab | 4a571676859c8b7238e733a0d3ad98ceb2e83c63 | 3c466acc07f1a56b575860ad26c92f900b272a53 | refs/heads/master | 2023-08-20T17:13:35.466103 | 2023-08-17T09:17:36 | 2023-08-17T09:17:36 | 25,702,093 | 36 | 25 | MIT | 2020-09-23T12:44:44 | 2014-10-24T18:27:16 | Python | UTF-8 | Python | false | false | 2,920 | py | __author__ = 'redwards'
import os
from array import array
from matrices import blosum62
def score(a, b):
blosum = blosum62()
if a in blosum and b in blosum[a]:
return blosum[a][b]
elif b in blosum and a in blosum[b]:
return blosum[b][a]
else:
sys.stderr.write("Can not score amino acids " + a + " and " + b + "\n")
return -8
def local_alignment(seq1, seq2, gap_open=11, gap_extn=1):
"""
Perform a ungapped local alignment. This approach uses 6 matrices.
:param seq1: The first sequence
:param seq2: The second sequence
:param gap_open: The gap opening penalty (default = 11)
:param gap_extn: The gap extention penalty (default = 1)
:return: The score, and the two sequences with gaps in them
"""
################################################
# Create Score Matrix and Helper Matrices
################################################
scorematrix = [array('i', [0 for x in xrange(0, len(seq1) + 1)]) for t in xrange(0, len(seq2) + 1)]
maxhigheri = [array('i', [0 for x in xrange(0, len(seq1) + 1)]) for t in xrange(0, len(seq2) + 1)]
maxhigherj = [array('i', [0 for x in xrange(0, len(seq1) + 1)]) for t in xrange(0, len(seq2) + 1)]
helpermatrix = [array('i', [0 for x in xrange(0, len(seq1) + 1)]) for t in xrange(0, len(seq2) + 1)]
endi = 0
endj = 0
bestscore = -1
for i in xrange(0, len(seq2)):
for j in xrange(0, len(seq1)):
match = scorematrix[i][j] + score(seq2[i],seq1[j])
maxhigheri[i+1][j+1] = max([maxhigheri[i+1][j] - gap_extn, scorematrix[i+1][j] - gap_open])
maxhigherj[i+1][j+1] = max([maxhigherj[i][j+1] - gap_extn, scorematrix[i][j+1] - gap_open])
maxv = max([match,maxhigheri[i+1][j+1],maxhigherj[i+1][j+1],0])
scorematrix[i+1][j+1] = maxv
# how did we get here?
if maxv <= 0:
helpermatrix[i+1][j+1] = 4
elif maxv == match:
helpermatrix[i+1][j+1] = 1
elif maxv == maxhigherj[i+1][j+1]:
helpermatrix[i+1][j+1] = 2
elif maxv == maxhigheri[i+1][j+1]:
helpermatrix[i+1][j+1] = 3
else:
helpermatrix[i+1][j+1] = 4
newscore = scorematrix[i+1][j+1]
if newscore > bestscore:
bestscore = newscore
endi = i+1
endj = j+1
i = endi
j = endj
hm = helpermatrix[i][j]
while i * j != 0 and hm != 4:
if hm == 1:
i = i - 1
j = j - 1
elif hm == 2:
i = i - 1
elif hm == 3:
j = j - 1
hm = helpermatrix[i][j]
print(bestscore)
print(seq1[j:endj])
print(seq2[i:endi])
if __name__ == "__main__":
s1 = 'ATGLVRRLGSFLVEDFSRYKLL'
s2 = 'GLMRRSGSPLVESRYKLL'
local_alignment(s1, s2)
| [
"[email protected]"
]
| |
9222dbde7d346213b91034145a348a5cff4cbbf6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03161/s980102866.py | deba467f47fe65240ad40d992abb931cd5df9b26 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 319 | py | if __name__ == '__main__':
n,k = map(int,input().split())
h = list(map(int,input().split()))
    # note: this O(n*k) DP gets TLE under CPython; it was intended for PyPy
INF = 10**9
dp = [INF] * (n+1)
dp[0] = 0
k = min(k,n)
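    # (added note) dp[i] = minimum total cost to reach stone i; the recurrence
    # tries every jump length j in 1..k:  dp[i] = min_j dp[i-j] + |h[i]-h[i-j]|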
    for i in range(1,n):
        for j in range(1,k+1):
            if i - j < 0:  # guard: avoid wrapping to dp[-1]/h[-1] via negative indexing
                break
            dp[i] = min(dp[i],dp[i-j]+abs(h[i]-h[i-j]))
print(dp[n-1])
| [
"[email protected]"
]
| |
b0458242d96796db2944d822cfd3c245a1bd89f5 | 48616fb53c96dafed001ab264f819bad38769371 | /pydesica/src/plot_hist_death.py | 095c91df24e3a2541de58093361d3756ab945c71 | []
| no_license | mdekauwe/SE_AUS_drought_risk_paper | 1ee650ba9ef553dc062223c77277f66b5cb8721b | a908e82844ea5ecf927c1039dd0059a50027be9f | refs/heads/master | 2020-09-17T08:57:07.200626 | 2020-06-01T23:51:35 | 2020-06-01T23:51:35 | 224,061,720 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,100 | py | #!/usr/bin/env python
# coding: utf-8
"""
Plot the pixel output
That's all folks.
"""
__author__ = "Martin De Kauwe"
__version__ = "1.0 (12.03.2018)"
__email__ = "[email protected]"
import pandas as pd
import sys
import numpy as np
import matplotlib.pyplot as plt
import os
import seaborn as sns
rf = pd.read_csv("outputs/rf_trait_sensitivity_all.csv")
wsf = pd.read_csv("outputs/wsf_trait_sensitivity_all.csv")
dsf = pd.read_csv("outputs/dsf_trait_sensitivity_all.csv")
grw = pd.read_csv("outputs/grw_trait_sensitivity_all.csv")
saw = pd.read_csv("outputs/saw_trait_sensitivity_all.csv")
#rf = pd.read_csv("outputs/rf_trait_sens_OAT.csv")
#wsf = pd.read_csv("outputs/wsf_trait_sens_OAT.csv")
#dsf = pd.read_csv("outputs/dsf_trait_sens_OAT.csv")
#grw = pd.read_csv("outputs/grw_trait_sens_OAT.csv")
#saw = pd.read_csv("outputs/saw_trait_sens_OAT.csv")
rf = rf[rf.day_of_death > 0]
wsf = wsf[wsf.day_of_death > 0]
dsf = dsf[dsf.day_of_death > 0]
grw = grw[grw.day_of_death > 0]
saw = saw[saw.day_of_death > 0]
width = 9
height = 6
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.13)
fig.subplots_adjust(wspace=0.13)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
colours = sns.color_palette("Set2", 8)
ax = fig.add_subplot(111)
sns.distplot(rf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "RF"}, kde=True, color=colours[0])
sns.distplot(wsf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "WSF"}, kde=True, color=colours[1])
sns.distplot(dsf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "DSF"}, kde=True, color=colours[2])
ax.tick_params(direction='in', length=4)
ax.set_xlabel("Day of death")
ax.set_ylabel("Probability density")
ax.legend(numpoints=1, ncol=1, loc="best", frameon=False)
ofdir = "/Users/mdekauwe/Desktop"
ofname = "day_of_death_RF_WSF_DSF.png"
fig.savefig(os.path.join(ofdir, ofname),
bbox_inches='tight', pad_inches=0.1, dpi=300)
width = 9
height = 6
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.13)
fig.subplots_adjust(wspace=0.13)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
colours = sns.color_palette("Set2", 8)
ax = fig.add_subplot(111)
sns.distplot(grw.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "GRW"}, kde=True, color=colours[3])
sns.distplot(saw.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "SAW"}, kde=True, color=colours[4])
ax.set_xlim(0, 600)
ax.tick_params(direction='in', length=4)
ax.set_xlabel("Day of death")
ax.set_ylabel("Probability density")
ax.legend(numpoints=1, ncol=1, loc="best", frameon=False)
ofdir = "/Users/mdekauwe/Desktop"
ofname = "day_of_death_GRW_SAW.png"
fig.savefig(os.path.join(ofdir, ofname),
bbox_inches='tight', pad_inches=0.1, dpi=300)
width = 9
height = 6
fig = plt.figure(figsize=(width, height))
fig.subplots_adjust(hspace=0.13)
fig.subplots_adjust(wspace=0.13)
plt.rcParams['text.usetex'] = False
plt.rcParams['font.family'] = "sans-serif"
plt.rcParams['font.sans-serif'] = "Helvetica"
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['font.size'] = 16
plt.rcParams['legend.fontsize'] = 12
plt.rcParams['xtick.labelsize'] = 16
plt.rcParams['ytick.labelsize'] = 16
colours = sns.color_palette("Set2", 8)
ax = fig.add_subplot(111)
sns.distplot(rf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "RF"}, kde=True, color=colours[0])
sns.distplot(wsf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "WSF"}, kde=True, color=colours[1])
sns.distplot(dsf.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "DSF"}, kde=True, color=colours[2])
sns.distplot(grw.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "GRW"}, kde=True, color=colours[3])
sns.distplot(saw.day_of_death, ax=ax, rug=False, norm_hist=True,
kde_kws={"label": "SAW"}, kde=True, color=colours[4])
#ax.set_xlim(100, 600)
#ax.set_ylim(0.0, 0.005)
#plt.xticks([], [])
#splt.setp(ax.get_xticklabels(), visible=False)
ax.tick_params(direction='in', length=4)
ax.set_xlabel("Day of hydraulic failure ($\Psi$$_{\mathrm{crit}}$)", labelpad=10)
ax.set_ylabel("Probability density")
ax.legend(numpoints=1, ncol=1, loc="best", frameon=False)
#ofdir = "/Users/mdekauwe/Desktop"
ofdir = "/Users/mdekauwe/Dropbox/Drought_risk_paper/figures/figs"
ofname = "day_of_death_all.pdf"
fig.savefig(os.path.join(ofdir, ofname),
bbox_inches='tight', pad_inches=0.1)
| [
"[email protected]"
]
| |
0ab75df4472e4622b1a049811be92712ca4fc93a | d3170930a453d7f4ab62c4637f21e8e1d74ac971 | /axf/urls_order_apis.py | 9833c9dbe2456599f9cc0cf30e0d0f25e7c0eb66 | []
| no_license | whoareyou0401/1901axf_vue | 97fd03439ef919024832348f61ea3c1f5cf2469c | 4bb0fcf2000a624be294bbcfdbfc52895fedd36e | refs/heads/master | 2020-05-29T19:45:36.536110 | 2019-05-31T02:47:41 | 2019-05-31T02:47:41 | 189,338,601 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | from django.conf.urls import url
from .order_apis import *
urlpatterns = [
url(r"^order$", OrderAPI.as_view()),
url(r"^confirm$", ConfirmOrder.as_view({"post":"confirm"})),
] | [
"[email protected]"
]
| |
3fc13f2b5432026514c79268d90a33390a6b94bb | f949cf76d051abb911be3e3adbfe777aa6f9fc95 | /code1/analysis/loadFactor_vs_collision_test_speed.py | 2764cb51d46d32ff4500dd5c9ad782dc7db8ac44 | []
| no_license | xeniaqian94/11711-Algorithm-for-NLP | ae8137849654ccdd77b2da059a955eeecc71e76f | f63743ecb937231835ea394a8ad64d876346f802 | refs/heads/master | 2021-01-23T23:12:21.073989 | 2017-11-12T21:31:17 | 2017-11-12T21:31:17 | 102,955,520 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,259 | py | import sys
import re
import matplotlib.pyplot as plt
# python loadFactor_vs_collision_test_speed.py ../loadFactor_log ../plot/loadFactor_vs_collision_test_speed
f=open(sys.argv[1],"r")
loadFactor=[]
testTime=[]
ratio_bigram=[]
ratio_trigram=[]
prev_line=""
for line in f.readlines():
if (re.match(r"^load factor ([\.0-9]+)",line)):
loadFactor+=[float(re.match(r"^load factor ([\.0-9]+)",line).group(1))]
if (re.match(r"^Decoding took ([\.0-9]+)s",line)): # Building took 160.537s
testTime+=[float(re.match(r"^Decoding took ([\.0-9]+)s",line).group(1))]
# class edu.berkeley.nlp.assignments.assign1.student.longIntOpenHashMapBigram num_collision 266264368 num_access 332076077 ratio 0.8018173739145925
if (re.match(r".*longIntOpenHashMapBigram .*ratio ([\.0-9]+)",line)):
ratio_bigram+=[float(re.match(r".*longIntOpenHashMapBigram.*ratio ([\.0-9]+)",line).group(1))]
if (re.match(r".*longIntOpenHashMap .*ratio ([\.0-9]+)",line)):
ratio_trigram+=[float(re.match(r".*longIntOpenHashMap .*ratio ([\.0-9]+)",line).group(1))]
print(len(loadFactor), len(testTime), len(ratio_bigram), len(ratio_trigram))
assert len(loadFactor)==len(testTime)==len(ratio_bigram)==len(ratio_trigram)
testTime=[x for _,x in sorted(zip(loadFactor,testTime))]
ratio_bigram=[x for _,x in sorted(zip(loadFactor,ratio_bigram))]
ratio_trigram=[x for _,x in sorted(zip(loadFactor,ratio_trigram))]
loadFactor=sorted(loadFactor)  # sort the x-values last so every series stays aligned
fig, ax1 = plt.subplots()
ax1.plot(loadFactor, testTime, 'b.-')
ax1.set_xlabel('Load Factor')
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel('LM Decode Time (s)',color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twinx()
ax2.plot(loadFactor,ratio_bigram, 'r^--',label="Bigram")
ax2.plot(loadFactor,ratio_trigram,'m*-',label="Trigram")
ax2.plot(loadFactor,loadFactor,"g-")
ax2.set_ylabel('Ratio = # collision / # access',color='r')
ax2.tick_params('y', colors='r')
ax1.grid()
ax2.grid()
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(handles, labels)
fig.tight_layout()
plt.savefig(sys.argv[2])
plt.show()
# fig=plt.figure()
# ax=fig.add_subplot(111)
# ax.set_xlabel("training size")
# ax.set_ylabel("perplexity")
# ax.grid()
# ax.plot(training_size,perplexity,"b.-")
# plt.savefig(sys.argv[2])
# plt.show()
| [
"[email protected]"
]
| |
aacab90637ea6957ea3d6e2c1ea9b511bf075c02 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/2510.py | 337278eb13b6bb47614cb3a25617d675dc147176 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | class TicTac():
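    # 4x4 Code Jam tic-tac-toe board; 'T' squares count for either player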
blen=range(4)
def __init__(self,inBoardStr):
self.board=[]
for (crow,cline) in zip(self.blen,inBoardStr.split('\n')):
cbline=[clet.upper() for (ccol,clet) in zip(self.blen,cline)]
self.board+=[cbline]
def checkwin(self):
if self.checkvic('X'): return 'X won'
if self.checkvic('O'): return 'O won'
if self.notfinished(): return 'Game has not completed'
else: return 'Draw'
def notfinished(self):
isEmpty=lambda x: x=='.'
return any(map(lambda cline: any(map(isEmpty,cline)),self.board))
def checkvic(self,val):
if any(map(lambda i: self.checkvicr(val,i),self.blen)): return True
if any(map(lambda i: self.checkvicc(val,i),self.blen)): return True
if any(self.checkvicd(val)): return True
return False
def checkvicr(self,val,i):
isFull=lambda x: (x==val) | (x=='T')
oVals=map(isFull,self.board[i])
print ('r',val,i,oVals)
return all(oVals)
def checkvicc(self,val,i):
isFull=lambda x: (x==val) | (x=='T')
oVals=map(lambda cline: isFull(cline[i]),self.board)
print ('c',val,i,oVals)
return all(oVals)
def checkvicd(self,val):
isFull=lambda x: (x==val) | (x=='T')
diagA=zip(self.blen,self.blen)
diagB=zip(self.blen[::-1],self.blen)
checkVals=lambda diag: all(map(lambda cc: isFull(self.board[cc[0]][cc[1]]),diag))
oVals=map(checkVals,[diagA,diagB])
print (val,oVals)
return oVals
def __str__(self):
return str(self.board)
f=open('/mnt/sdcard/Download/A-large.in','r')
bds=int(f.readline())
oList=[]
for i in range(bds):
cboard=map(lambda x: f.readline(),range(4))
f.readline()
oList+=[TicTac(''.join(cboard))]
f.close()
winList=map(lambda x: x.checkwin(),oList)
f=open('/mnt/sdcard/Download/tictacout5.txt','w')
for (caseN,status) in enumerate(winList): f.write('Case #%i: %s\n' % (caseN+1,status))
f.close()
#b=TicTac('..XO\nX.TX\n.O.T\nOXXO')
#print b.checkwin()
| [
"[email protected]"
]
| |
91f6d94527fcfade170591c20976d3428b4037ca | 4e4caa3a259893ecb639a3cac8658f99e51cec22 | /allel/stats/fst.py | c3fff74a319c9c99626a4344b017b1bc4b571969 | [
"MIT"
]
| permissive | oxpeter/scikit-allel | e9fcc39deee7ec755cf8693d652358438ea8e73d | 566eab62bc026c7f91cf202853c6a2ef5fd8df54 | refs/heads/master | 2021-01-24T01:14:19.906571 | 2016-02-01T17:04:25 | 2016-02-01T17:04:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,594 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import logging
import itertools
import numpy as np
from allel.util import asarray_ndim, check_dim0_aligned, ensure_dim1_aligned
from allel.model.ndarray import GenotypeArray
from allel.stats.window import windowed_statistic, moving_statistic
from allel.stats.diversity import mean_pairwise_difference, \
mean_pairwise_difference_between
from allel.stats.misc import jackknife
from allel.chunked import get_blen_array
logger = logging.getLogger(__name__)
debug = logger.debug
def weir_cockerham_fst(g, subpops, max_allele=None, blen=None):
"""Compute the variance components from the analyses of variance of
allele frequencies according to Weir and Cockerham (1984).
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
max_allele : int, optional
The highest allele index to consider.
blen : int, optional
Block length to use for chunked computation.
Returns
-------
a : ndarray, float, shape (n_variants, n_alleles)
Component of variance between populations.
b : ndarray, float, shape (n_variants, n_alleles)
Component of variance between individuals within populations.
c : ndarray, float, shape (n_variants, n_alleles)
Component of variance between gametes within individuals.
Examples
--------
Calculate variance components from some genotype data::
>>> import allel
>>> g = [[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 0], [0, 0], [0, 0], [0, 0]],
... [[0, 1], [1, 2], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [0, 1], [-1, -1]]]
>>> subpops = [[0, 1], [2, 3]]
>>> a, b, c = allel.stats.weir_cockerham_fst(g, subpops)
>>> a
array([[ 0.5 , 0.5 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , -0.125, -0.125],
[-0.375, -0.375, 0. ]])
>>> b
array([[ 0. , 0. , 0. ],
[-0.25 , -0.25 , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0.125 , 0.25 ],
[ 0.41666667, 0.41666667, 0. ]])
>>> c
array([[ 0. , 0. , 0. ],
[ 0.5 , 0.5 , 0. ],
[ 0. , 0. , 0. ],
[ 0.125 , 0.25 , 0.125 ],
[ 0.16666667, 0.16666667, 0. ]])
Estimate the parameter theta (a.k.a., Fst) for each variant
and each allele individually::
>>> fst = a / (a + b + c)
>>> fst
array([[ 1. , 1. , nan],
[ 0. , 0. , nan],
[ nan, nan, nan],
[ 0. , -0.5, -0.5],
[-1.8, -1.8, nan]])
Estimate Fst for each variant individually (averaging over alleles)::
>>> fst = (np.sum(a, axis=1) /
... (np.sum(a, axis=1) + np.sum(b, axis=1) + np.sum(c, axis=1)))
>>> fst
array([ 1. , 0. , nan, -0.4, -1.8])
Estimate Fst averaging over all variants and alleles::
>>> fst = np.sum(a) / (np.sum(a) + np.sum(b) + np.sum(c))
>>> fst
-4.3680905886891398e-17
Note that estimated Fst values may be negative.
"""
# check inputs
if not hasattr(g, 'shape') or not hasattr(g, 'ndim'):
g = GenotypeArray(g, copy=False)
if g.ndim != 3:
raise ValueError('g must have three dimensions')
if g.shape[2] != 2:
raise NotImplementedError('only diploid genotypes are supported')
# determine highest allele index
if max_allele is None:
max_allele = g.max()
# compute in chunks to avoid loading big arrays into memory
blen = get_blen_array(g, blen)
n_variants = g.shape[0]
shape = (n_variants, max_allele + 1)
a = np.zeros(shape, dtype='f8')
b = np.zeros(shape, dtype='f8')
c = np.zeros(shape, dtype='f8')
for i in range(0, n_variants, blen):
j = min(n_variants, i+blen)
gb = g[i:j]
ab, bb, cb = _weir_cockerham_fst(gb, subpops, max_allele)
a[i:j] = ab
b[i:j] = bb
c[i:j] = cb
return a, b, c
# noinspection PyPep8Naming
def _weir_cockerham_fst(g, subpops, max_allele):
# check inputs
g = GenotypeArray(g, copy=False)
n_variants, n_samples, ploidy = g.shape
n_alleles = max_allele + 1
# number of populations sampled
r = len(subpops)
n_populations = r
debug('r: %r', r)
# count alleles within each subpopulation
ac = [g.count_alleles(subpop=s, max_allele=max_allele) for s in subpops]
# stack allele counts from each sub-population into a single array
ac = np.dstack(ac)
assert ac.shape == (n_variants, n_alleles, n_populations)
debug('ac: %s, %r', ac.shape, ac)
# count number of alleles called within each population by summing
# allele counts along the alleles dimension
an = np.sum(ac, axis=1)
assert an.shape == (n_variants, n_populations)
debug('an: %s, %r', an.shape, an)
# compute number of individuals sampled from each population
n = an // 2
assert n.shape == (n_variants, n_populations)
debug('n: %s, %r', n.shape, n)
# compute the total number of individuals sampled across all populations
n_total = np.sum(n, axis=1)
assert n_total.shape == (n_variants,)
debug('n_total: %s, %r', n_total.shape, n_total)
# compute the average sample size across populations
n_bar = np.mean(n, axis=1)
assert n_bar.shape == (n_variants,)
debug('n_bar: %s, %r', n_bar.shape, n_bar)
# compute the term n sub C incorporating the coefficient of variation in
# sample sizes
n_C = (n_total - (np.sum(n**2, axis=1) / n_total)) / (r - 1)
assert n_C.shape == (n_variants,)
debug('n_C: %s, %r', n_C.shape, n_C)
# compute allele frequencies within each population
p = ac / an[:, np.newaxis, :]
assert p.shape == (n_variants, n_alleles, n_populations)
debug('p: %s, %r', p.shape, p)
# compute the average sample frequency of each allele
ac_total = np.sum(ac, axis=2)
an_total = np.sum(an, axis=1)
p_bar = ac_total / an_total[:, np.newaxis]
assert p_bar.shape == (n_variants, n_alleles)
debug('p_bar: %s, %r', p_bar.shape, p_bar)
# add in some extra dimensions to enable broadcasting
n_bar = n_bar[:, np.newaxis]
n_C = n_C[:, np.newaxis]
n = n[:, np.newaxis, :]
p_bar = p_bar[:, :, np.newaxis]
# compute the sample variance of allele frequencies over populations
s_squared = (
np.sum(n * ((p - p_bar) ** 2),
axis=2) /
(n_bar * (r - 1))
)
assert s_squared.shape == (n_variants, n_alleles)
debug('s_squared: %s, %r', s_squared.shape, s_squared)
# remove extra dimensions for correct broadcasting
p_bar = p_bar[:, :, 0]
# compute the average heterozygosity over all populations
# N.B., take only samples in subpops of interest
gs = g.take(list(itertools.chain(*subpops)), axis=1)
h_bar = [gs.count_het(allele=allele, axis=1) / n_total
for allele in range(n_alleles)]
h_bar = np.column_stack(h_bar)
assert h_bar.shape == (n_variants, n_alleles)
debug('h_bar: %s, %r', h_bar.shape, h_bar)
# now comes the tricky bit...
# component of variance between populations
a = ((n_bar / n_C) *
(s_squared -
((1 / (n_bar - 1)) *
((p_bar * (1 - p_bar)) -
((r - 1) * s_squared / r) -
(h_bar / 4)))))
assert a.shape == (n_variants, n_alleles)
# component of variance between individuals within populations
b = ((n_bar / (n_bar - 1)) *
((p_bar * (1 - p_bar)) -
((r - 1) * s_squared / r) -
(((2 * n_bar) - 1) * h_bar / (4 * n_bar))))
assert b.shape == (n_variants, n_alleles)
# component of variance between gametes within individuals
c = h_bar / 2
assert c.shape == (n_variants, n_alleles)
return a, b, c
def hudson_fst(ac1, ac2, fill=np.nan):
"""Calculate the numerator and denominator for Fst estimation using the
method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
fill : float
Use this value where there are no pairs to compare (e.g.,
all allele calls are missing).
Returns
-------
num : ndarray, float, shape (n_variants,)
Divergence between the two populations minus average
of diversity within each population.
den : ndarray, float, shape (n_variants,)
Divergence between the two populations.
Examples
--------
Calculate numerator and denominator for Fst estimation::
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 0], [1, 1], [1, 1]],
... [[0, 1], [0, 1], [0, 1], [0, 1]],
... [[0, 0], [0, 0], [0, 0], [0, 0]],
... [[0, 1], [1, 2], [1, 1], [2, 2]],
... [[0, 0], [1, 1], [0, 1], [-1, -1]]])
>>> subpops = [[0, 1], [2, 3]]
>>> ac1 = g.count_alleles(subpop=subpops[0])
>>> ac2 = g.count_alleles(subpop=subpops[1])
>>> num, den = allel.stats.hudson_fst(ac1, ac2)
>>> num
array([ 1. , -0.16666667, 0. , -0.125 , -0.33333333])
>>> den
array([ 1. , 0.5 , 0. , 0.625, 0.5 ])
Estimate Fst for each variant individually::
>>> fst = num / den
>>> fst
array([ 1. , -0.33333333, nan, -0.2 , -0.66666667])
Estimate Fst averaging over variants::
>>> fst = np.sum(num) / np.sum(den)
>>> fst
0.1428571428571429
""" # flake8: noqa
# check inputs
ac1 = asarray_ndim(ac1, 2)
ac2 = asarray_ndim(ac2, 2)
check_dim0_aligned(ac1, ac2)
ac1, ac2 = ensure_dim1_aligned(ac1, ac2)
# calculate these once only
an1 = np.sum(ac1, axis=1)
an2 = np.sum(ac2, axis=1)
# calculate average diversity (a.k.a. heterozygosity) within each
# population
within = (mean_pairwise_difference(ac1, an1, fill=fill) +
mean_pairwise_difference(ac2, an2, fill=fill)) / 2
# calculate divergence (a.k.a. heterozygosity) between each population
between = mean_pairwise_difference_between(ac1, ac2, an1, an2, fill=fill)
# define numerator and denominator for Fst calculations
num = between - within
den = between
return num, den
def patterson_fst(aca, acb):
"""Estimator of differentiation between populations A and B based on the
F2 parameter.
Parameters
----------
aca : array_like, int, shape (n_variants, 2)
Allele counts for population A.
acb : array_like, int, shape (n_variants, 2)
Allele counts for population B.
Returns
-------
num : ndarray, shape (n_variants,), float
Numerator.
den : ndarray, shape (n_variants,), float
Denominator.
Notes
-----
See Patterson (2012), Appendix A.
TODO check if this is numerically equivalent to Hudson's estimator.
"""
from allel.stats.admixture import patterson_f2, h_hat
num = patterson_f2(aca, acb)
den = num + h_hat(aca) + h_hat(acb)
return num, den
def windowed_weir_cockerham_fst(pos, g, subpops, size=None, start=None,
stop=None, step=None, windows=None,
fill=np.nan, max_allele=None):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Weir and Cockerham (1984).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
size : int
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
# compute values per-variant
a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# define the statistic to compute within each window
def average_fst(wa, wb, wc):
return np.nansum(wa) / (np.nansum(wa) + np.nansum(wb) + np.nansum(wc))
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(a, b, c),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def windowed_hudson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
step=None, windows=None, fill=np.nan):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Hudson (1992) elaborated by Bhatia et al. (2013).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
    # compute values per-variant
num, den = hudson_fst(ac1, ac2)
# define the statistic to compute within each window
def average_fst(wn, wd):
return np.nansum(wn) / np.nansum(wd)
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(num, den),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def windowed_patterson_fst(pos, ac1, ac2, size=None, start=None, stop=None,
step=None, windows=None, fill=np.nan):
"""Estimate average Fst in windows over a single chromosome/contig,
following the method of Patterson (2012).
Parameters
----------
pos : array_like, int, shape (n_items,)
Variant positions, using 1-based coordinates, in ascending order.
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
size : int, optional
The window size (number of bases).
start : int, optional
The position at which to start (1-based).
stop : int, optional
The position at which to stop (1-based).
step : int, optional
The distance between start positions of windows. If not given,
defaults to the window size, i.e., non-overlapping windows.
windows : array_like, int, shape (n_windows, 2), optional
Manually specify the windows to use as a sequence of (window_start,
window_stop) positions, using 1-based coordinates. Overrides the
size/start/stop/step parameters.
fill : object, optional
The value to use where there are no variants within a window.
Returns
-------
fst : ndarray, float, shape (n_windows,)
Average Fst in each window.
windows : ndarray, int, shape (n_windows, 2)
The windows used, as an array of (window_start, window_stop) positions,
using 1-based coordinates.
counts : ndarray, int, shape (n_windows,)
Number of variants in each window.
"""
    # compute values per-variant
num, den = patterson_fst(ac1, ac2)
# define the statistic to compute within each window
def average_fst(wn, wd):
return np.nansum(wn) / np.nansum(wd)
# calculate average Fst in windows
fst, windows, counts = windowed_statistic(pos, values=(num, den),
statistic=average_fst,
size=size, start=start,
stop=stop, step=step,
windows=windows, fill=fill)
return fst, windows, counts
def blockwise_weir_cockerham_fst(g, subpops, blen, max_allele=None):
"""Estimate average Fst and standard error using the block-jackknife.
Parameters
----------
g : array_like, int, shape (n_variants, n_samples, ploidy)
Genotype array.
subpops : sequence of sequences of ints
Sample indices for each subpopulation.
blen : int
Block size (number of variants).
max_allele : int, optional
The highest allele index to consider.
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
a, b, c = weir_cockerham_fst(g, subpops, max_allele=max_allele)
# calculate overall estimate
a_sum = np.nansum(a)
b_sum = np.nansum(b)
c_sum = np.nansum(c)
fst = a_sum / (a_sum + b_sum + c_sum)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(a, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(a + b + c, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
def blockwise_hudson_fst(ac1, ac2, blen):
"""Estimate average Fst between two populations and standard error using
the block-jackknife.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
blen : int
Block size (number of variants).
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
num, den = hudson_fst(ac1, ac2, fill=np.nan)
# calculate overall estimate
fst = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
def blockwise_patterson_fst(ac1, ac2, blen):
"""Estimate average Fst between two populations and standard error using
the block-jackknife.
Parameters
----------
ac1 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the first population.
ac2 : array_like, int, shape (n_variants, n_alleles)
Allele counts array from the second population.
blen : int
Block size (number of variants).
Returns
-------
fst : float
Estimated value of the statistic using all data.
se : float
Estimated standard error.
vb : ndarray, float, shape (n_blocks,)
Value of the statistic in each block.
vj : ndarray, float, shape (n_blocks,)
Values of the statistic from block-jackknife resampling.
"""
# calculate per-variant values
num, den = patterson_fst(ac1, ac2)
# calculate overall estimate
fst = np.nansum(num) / np.nansum(den)
# compute the numerator and denominator within each block
num_bsum = moving_statistic(num, statistic=np.nansum, size=blen)
den_bsum = moving_statistic(den, statistic=np.nansum, size=blen)
# calculate the statistic values in each block
vb = num_bsum / den_bsum
# estimate standard error
_, se, vj = jackknife((num_bsum, den_bsum),
statistic=lambda n, d: np.sum(n) / np.sum(d))
return fst, se, vb, vj
| [
"[email protected]"
]
| |
35fa7fe8cb27b270e1efe832507fc58db96eee12 | 29dcd6c3475acde11bfc8dd7b6826ffddb40e1ab | /Week7Python/Week7/randomAverageSumMaxMin.py | 8c0c8ac3b260f0158a0d860e0aaeec63863337ca | []
| no_license | quangdbui9999/CS-171 | 24ca7f7f27847c7b729d74b2c85abc0464cd8c1a | 963b2e83e6939703ca2b7617941e5fc04d374570 | refs/heads/master | 2022-09-18T13:11:38.619101 | 2020-06-07T22:51:51 | 2020-06-07T22:51:51 | 270,453,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | def main():
fileName = "randomNumber.txt"
file = open(fileName, "r")
getSum = 0
average = 0
    maxNumber = float('-inf')
    minNumber = float('inf')
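    # scan the 1000 values, tracking the running total and both extremes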
for i in range(0, 1000):
line = file.readline()
number = int(line)
getSum += number
if(number < minNumber):
minNumber = number
if(number > maxNumber):
maxNumber = number
average = float(getSum / 1000)
print("Average is: " + str(average))
print("Max number is : " + str(maxNumber))
print("Min number is : " + str(minNumber))
file.close()
main() | [
"[email protected]"
]
| |
dab23a2f681f7e8d2b14073564382513078fd1e1 | 4577d8169613b1620d70e3c2f50b6f36e6c46993 | /students/1808084/homework01/program02.py | 649e76a862e1916832e6b105fcdaf485735f8fd4 | []
| no_license | Fondamenti18/fondamenti-di-programmazione | cbaf31810a17b5bd2afaa430c4bf85d05b597bf0 | 031ec9761acb1a425fcc4a18b07884b45154516b | refs/heads/master | 2020-03-24T03:25:58.222060 | 2018-08-01T17:52:06 | 2018-08-01T17:52:06 | 142,419,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 17:55:21 2017
@author: Lorenzo
"""
''' Sometimes we need to write numbers out in words, for example when
filling in a cheque, and some numbers can raise doubts about their spelling.
The doubts arise mostly when writing numbers built with 1 and 8.
All numbers such as venti, trenta, quaranta, cinquanta, etc. drop their
final vowel (the "i" for 20, the "a" for all the others), merging it with
the initial vowel of the following number; so we write ventuno, ventotto,
trentotto, cinquantuno, and so on.
The number cento does not behave this way when forming numbers with uno
and otto; "cento" and all the hundreds (duecento, trecento, etc.) keep
their final vowel. So we do not write centuno or trecentotto, but rather
centouno, trecentootto, and so on.
Numbers built from the hundreds plus the ten "ottanta", however, drop the
final vowel again; so we write centottanta, duecentottanta, etc., not
centoottanta, duecentoottanta, ...
The number "mille" never drops its final vowel in compound numbers; so we
write milleuno, milleotto, milleottanta, etc.
Other examples are listed in the file grade02.txt

Write a function conv(n) that takes an integer n, with 0<n<1000000000000,
and returns a string with the number written out in words.

WARNING: DO NOT USE ACCENTED LETTERS.
WARNING: if the grader does not finish within 30 seconds, the score for the
exercise is zero.
'''
def conv(j):
if j==0:
return ''
elif j<=19:
return ('uno', 'due', 'tre', 'quattro', 'cinque', 'sei', 'sette', 'otto', 'nove', 'dieci', 'undici', 'dodici', 'tredici', 'quattordici', 'quindici', 'sedici', 'diciassette', 'diciotto', 'diciannove')[j-1]
elif j<=99:
decine=('venti', 'trenta','quaranta','cinquanta','sessanta','settanta','ottanta','novanta')
letter=decine[int(j/10)-2]
z=j%10
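        # the tens drop their final vowel before "uno" and "otto"
        # (venti -> ventuno, ventotto)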
if z==1 or z==8:
letter=letter[:-1]
return letter + conv(j%10)
    elif j<=199:
        # "cento" keeps its final vowel except before "ottanta" (centottanta)
        letter='cent' if int((j%100)/10)==8 else 'cento'
        return letter+conv(j%100)
elif j<=999:
m=j%100
m=int(m/10)
letter='cent'
if m!=8:
letter=letter+'o'
return conv(int(j/100))+ letter+ conv(j%100)
elif j<=1999:
return 'mille'+conv(j%1000)
elif j<=999999:
return conv(int(j/1000))+'mila'+conv(j%1000)
elif j<=1999999:
return 'unmilione'+conv(int(j%1000000))
elif j<=999999999:
return conv(int(j/1000000))+'milioni'+conv(j%1000000)
elif j<=1999999999:
return 'unmiliardo'+ conv(j%1000000000)
else:
return conv(int(j/1000000000))+'miliardi'+conv(j%1000000000)
| [
"[email protected]"
]
| |
67c4fcf63b42d176656a5364095de76ee9f01326 | 69bcc45028038351a7f891025df1f8e7d4b855f1 | /pipeline/0x02-databases/33-schools_by_topic.py | 17fb419f2a03fb8de363b332cb2bff9883340c20 | []
| no_license | linkjavier/holbertonschool-machine_learning | 6db799844821d450fed2a33a8819cb8df0fef911 | c7b6ea4c37b7c5dc41e63cdb8142b3cdfb3e1d23 | refs/heads/main | 2023-08-17T21:00:24.182003 | 2021-09-09T05:47:06 | 2021-09-09T05:47:06 | 304,503,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/env python3
""" Where can I learn Python? """
def schools_by_topic(mongo_collection, topic):
""" Function that returns the list of school having a specific topic """
return list(mongo_collection.find({"topics": {"$all": [topic]}}))
| [
"[email protected]"
]
| |
17c40c0786dd78ceab94a787c68a02c570d12f07 | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/E/elie222/fantasy_football_-_premierleaguecom_cleaner_versio.py | 7f72eb5eacdfb0f99c528de816d9ca166b44d636 | []
| no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,216 | py | import scraperwiki
import json
from datetime import date
for i in range(1,1000):
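    # walk player ids upward until the API stops returning valid data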
try:
jsonPage = scraperwiki.scrape('http://fantasy.premierleague.com/web/api/elements/%d/'%(i))
playerObj = json.loads(jsonPage)
data = {}
data["code"] = playerObj["code"]
data["transfers_balance"] = playerObj["transfers_balance"]
data["news_updated"] = playerObj["news_updated"]
data["news_added"] = playerObj["news_added"]
data["web_name"] = playerObj["web_name"]
data["in_dreamteam"] = playerObj["in_dreamteam"]
data["id"] = playerObj["id"]
data["first_name"] = playerObj["first_name"]
data["transfers_out_event"] = playerObj["transfers_out_event"]
data["selected"] = playerObj["selected"]
data["total_points"] = playerObj["total_points"]
data["type_name"] = playerObj["type_name"]
data["team_name"] = playerObj["team_name"]
data["status"] = playerObj["status"]
data["added"] = playerObj["added"]
data["now_cost"] = playerObj["now_cost"]
data["transfers_in"] = playerObj["transfers_in"]
data["news"] = playerObj["news"]
data["news_return"] = playerObj["news_return"]
data["transfers_in_event"] = playerObj["transfers_in_event"]
data["selected_by"] = playerObj["selected_by"]
data["second_name"] = playerObj["second_name"]
data["date_downloaded"] = date.today()
scraperwiki.sqlite.save(unique_keys=["id", "date_downloaded"], data=data)
except:
print 'Stopped at %d'%(i)
        break | [
"[email protected]"
]
| |
a49f0908a19d55d7b3eee998f7ebec8be51c1ee0 | 37a92a9c4b03c9a8d485413ea0ca804c3860e4b2 | /multi-label-cls/util.py | 44259f234cdee61a0f420ee0d9681720d64aa6d2 | [
"Apache-2.0"
]
| permissive | JulianYu123456/icnn | 0502950f281fdf6fdee3ed430bbbcf6250bc299c | 0aaf4b5cd13d71d98b0d05f367e1f71657ea6eb8 | refs/heads/master | 2021-04-17T08:39:18.704713 | 2020-04-05T08:45:59 | 2020-04-05T08:45:59 | 249,430,014 | 0 | 0 | Apache-2.0 | 2020-03-23T12:52:24 | 2020-03-23T12:52:24 | null | UTF-8 | Python | false | false | 441 | py | import numpy as np
from sklearn.metrics import f1_score
def macroF1(trueY, predY):
# trueY and predY should be (nExamples, nLabels)
    predY_bin = (predY >= 0.5).astype(int)
    trueY_bin = trueY.astype(int)
# The transpose is here because sklearn's f1_score expects multi-label
# data to be formatted as (nLabels, nExamples).
return f1_score(trueY_bin.T, predY_bin.T, average='macro', pos_label=None)
| [
"[email protected]"
]
| |
53c0b78d84a3165bf0d2d3d584bc9787c6f5e5f8 | ad4c2aa0398406ccb7e70562560e75fa283ffa1a | /inorder-successor-in-bst/inorder-successor-in-bst.py | c32fe6518bfadb60f56d65c472aab78b0a7dea4b | [
"Apache-2.0"
]
| permissive | kmgowda/kmg-leetcode-python | 427d58f1750735618dfd51936d33240df5ba9ace | 4d32e110ac33563a8bde3fd3200d5804db354d95 | refs/heads/main | 2023-08-22T06:59:43.141131 | 2021-10-16T14:04:32 | 2021-10-16T14:04:32 | 417,841,590 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | # https://leetcode.com/problems/inorder-successor-in-bst
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorder_successor(self, root, p, found):
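        # in-order walk: once p has been visited, the very next node we
        # touch is its successor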
node = None
if root:
found, node = self.inorder_successor(root.left, p, found)
if node:
return True, node
if found:
return True, root
elif root == p:
found = True
found, node = self.inorder_successor(root.right, p, found)
return found, node
def inorderSuccessor(self, root, p):
"""
:type root: TreeNode
:type p: TreeNode
:rtype: TreeNode
"""
found, node = self.inorder_successor(root, p, False)
return node
| [
"[email protected]"
]
| |
40b04c99c8cc4e9c2cac9d79520874d3404d15ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_blarneying.py | d66e3c6b974c393c850ae6c85b2b91e1dacb8a8a | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _BLARNEYING():
    def __init__(self,):
        self.name = "BLARNEYING"
        self.definitions = 'blarney'
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['blarney']
| [
"[email protected]"
]
| |
92578a4d2ea658a02029df6288200900ba1d402a | dacf092e82b5cc841554178e5117c38fd0b28827 | /day6/session_6.1.py | dacdb0f52bdb94e716e19f74900a9dfb4b077b57 | []
| no_license | RainMoun/python_programming_camp | f9bbee707e7468a7b5d6633c2364f5dd75abc8a4 | f8e06cdd2e6174bd6986d1097cb580a6a3b7201f | refs/heads/master | 2020-04-15T11:27:09.680587 | 2019-04-06T02:21:14 | 2019-04-06T02:21:14 | 164,630,838 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | menu_university = {
'浙江': {
'杭州': {
'下沙区': {
'杭州电子科技大学': {},
'浙江工商大学': {},
'浙江理工大学': {}
},
'西湖区': {
'浙江大学': {},
},
},
'宁波': {
'江北区': {
'宁波大学': {}
},
'鄞州区': {
"宁波诺丁汉大学": {}
}
}
}
}
sign_exit = False
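# walk the nested menu one level at a time; 'b' steps back a level,
# 'exit' quits the whole program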
while not sign_exit:
menu = menu_university
for key in menu.keys():
print(key)
choose_first = input("第一层:").strip()
if choose_first == 'b':
break
elif choose_first == 'exit':
sign_exit = True
break
elif choose_first in menu:
pass
else:
continue
while not sign_exit:
menu_2 = menu[choose_first]
for key in menu_2.keys():
print(key)
choose_second = input("第二层:").strip()
if choose_second == 'b':
break
elif choose_second == 'exit':
sign_exit = True
break
elif choose_second in menu_2:
pass
else:
continue
while not sign_exit:
menu_3 = menu_2[choose_second]
for key in menu_3.keys():
print(key)
choose_third = input("第三层:").strip()
if choose_third == 'b':
break
elif choose_third == 'exit':
sign_exit = True
break
elif choose_third in menu_3:
pass
else:
continue
while not sign_exit:
menu_4 = menu_3[choose_third]
for key in menu_4.keys():
print(key)
choose_forth = input("第四层:").strip()
if choose_forth == 'b':
break
elif choose_forth == 'exit':
sign_exit = True
break
else:
pass
| [
"[email protected]"
]
| |
a96ee8bc1593d9e1aca979ef320b877278c56d1d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02898/s573617643.py | 49e45394d939cd09bb260a01e61c9b5692d1e3d6 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | a,b=map(int,input().split())
c=0
l=list(map(int,input().split()))
for i in range(len(l)):
if l[i]>=b:
c+=1
print(c) | [
"[email protected]"
]
| |
479041b670289ff52f5224e1e10b9ca4dbdbae7f | c22a4957e6874b23f771d8174794ee4e31c5c792 | /2_Tuples_and_sets/11. Longest Intersection.py | 9f484292a01107c26761fd1b903ef54d82b3896a | []
| no_license | GalyaBorislavova/SoftUni_Python_Advanced_May_2021 | d57ff7f6480c9011190bb52880bb1fe48298fcdb | 67ad5acd43acc90042b3ac0fd5a8f4fe81bc0f53 | refs/heads/main | 2023-06-14T22:00:21.904387 | 2021-07-07T06:15:08 | 2021-07-07T06:15:08 | 381,794,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | def input_to_list(count):
lines = []
for _ in range(count):
line = input()
if " " in line:
data = line.split()
for el in data:
lines.append(el)
else:
lines.append(line)
return lines
n = int(input())
input_sequences = input_to_list(n)
longest_intersection = set()
max_len_intersection = 0
for i in input_sequences:
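    # each token has the form "a,b-c,d": two inclusive integer ranges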
range_first_set, range_second_set = i.split("-")
start_first_set, end_first_set = range_first_set.split(",")
start_second_set, end_second_set = range_second_set.split(",")
current_first_set = set(num for num in range(int(start_first_set), int(end_first_set) + 1))
current_second_set = set(num for num in range(int(start_second_set), int(end_second_set) + 1))
intersection = current_first_set.intersection(current_second_set)
if len(intersection) > max_len_intersection:
longest_intersection = intersection
max_len_intersection = len(intersection)
print(f"Longest intersection is {[*longest_intersection]} with length {max_len_intersection}")
| [
"[email protected]"
]
| |
54ab222cfbc270bfb8f73f764064bcca188236eb | 1f6cdaead83b425ba2fda2b8b4f9eb1d03d97426 | /app/models/skill.py | 12d070e18395fda9465c6eb7791077266148341f | []
| no_license | simpuid/cvbuilder | 3291f18a52bc5cdceb81d000e418c8c74ba3680d | c27cb0cfec0b8fc19245deb957b577a1e789db64 | refs/heads/master | 2023-01-01T08:13:55.755812 | 2020-10-30T21:55:37 | 2020-10-30T21:55:37 | 306,163,829 | 0 | 2 | null | 2020-10-28T18:20:31 | 2020-10-21T22:36:52 | Python | UTF-8 | Python | false | false | 1,109 | py | from db import execute, fetch
class Skill:
def __init__(self, uid: int, skill_list: list):
self.id = uid
self.skill_list = skill_list
def update(self, index, skill_name):
if skill_name != self.skill_list[index]:
self.delete(index)
self.add(skill_name)
def delete(self, index):
execute('DELETE FROM skill_table WHERE student_id = %s AND skill_name = %s', (self.id, self.skill_list[index]))
def add(self, skill_name):
execute("""INSERT INTO skill_table
VALUES (%s, %s)
ON DUPLICATE KEY UPDATE
student_id = VALUES(student_id),
skill_name = VALUES(skill_name)""",
(self.id, skill_name))
@staticmethod
def load(uid: int):
execute('SELECT * FROM skill_table WHERE student_id = %s', (uid,))
data = fetch()
if len(data) == 0:
return Skill(uid, [])
skill_list = [entry['skill_name'] for entry in data]
return Skill(uid, skill_list)
| [
"[email protected]"
]
| |
6490f5050d4387318453349a7e996ab8d03a6356 | f47d17b53977cf745d453b654529e8cd6be7890f | /3level_N20_ain1.py | 304e553f3aa064c0c6025fb98dbaa10a04cdac15 | []
| no_license | rareearthquantum/model_upconversion_peter | b4cce7556a167ba0e9813625dc924d3542d33cd1 | dcf08000ec21770659318409a686bb2b88a7a1be | refs/heads/master | 2020-04-28T19:54:34.795590 | 2019-06-14T09:43:28 | 2019-06-14T09:43:28 | 175,526,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | from Frequency_response_3level import *
# math and numpy are used below; imported explicitly in case the star
# import above does not re-export them
import math
import numpy as np

p = {}
p['deltamu'] = 0.
p['deltao'] = 0.
p['d13'] = 2e-32*math.sqrt(1/3)
p['d23'] = 2e-32*math.sqrt(2/3)
p['gamma13'] = p['d13']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma23'] = p['d23']**2/(p['d13']**2+p['d23']**2)*1/11e-3
p['gamma2d'] = 1e6
p['gamma3d'] = 1e6
p['nbath'] = 20
p['gammamu'] = 1/(p['nbath']+1) * 1e3
p['go'] = 51.9 #optical coupling
p['No'] = 1.28e15 # number of atoms in the optical mode
p['deltac']=0 #detuning for
p['kappaoi']=2*pi*7.95e6 # intrinsic loss for optical resonator
p['kappaoc']=2*pi*1.7e6 # coupling loss for optical resonator
#p['df']=0.1e6 # how small descretisation step to take when integrating over the
# inhomogeneous lines
p['mean_delam']=0
p['sd_delam']=2*pi*25e6/2.355 #microwave inhomogeneous broadening
#2.355is to turn FWHM into standard deviation
p['mean_delao']=0
p['sd_delao']=2*pi*170e6/2.355 #optical inhomogeneous broadening
p['kappami'] = 650e3*2*pi # intrinsic loss for microwave cavity
p['kappamc'] = 70e3*2*pi # coupling loss for optical cavity
# this is for one of the two output ports
p['Nm'] = 2.22e16 #toal number of atoms
p['gm'] = 1.04 #coupling between atoms and microwave field
p['gammaoc']=2*pi*1.7e6
p['gammaoi']=2*pi*7.95e6
p['gammamc']=2*pi*70e3
p['gammami']=2*pi*650e3
muBohr=927.4009994e-26; # Bohr magneton in J/T in J* T^-1
p['mu12'] = 4.3803*muBohr # transition dipole moment for microwave cavity (J T^-1)
p['Lsample']=12e-3 # the length of the sample, in m
p['dsample']=5e-3 # the diameter of the sample, in m
p['fillfactor']=0.8 #microwave filling factor
p['freqmu'] = 5.186e9
p['freq_pump'] = 195113.36e9 #pump frequency
p['freqo']=p['freqmu']+p['freq_pump']
p['Lcavity_vac'] = 49.5e-3 # length of the vacuum part of the optical
# Fabry Perot (m)
p['Wcavity'] = 0.6e-3# width of optical resonator beam in sample (m)
p['nYSO'] = 1.76
p['Omega']=-492090.88755145477
delovals=np.linspace(-20e5,20e5,31)
delmvals=np.linspace(-1e6,1e6,31)
binvals=[0]
ainvals=[600000]
aoutvals,boutvals,effic_a,effic_b=find_outputs(ainvals,binvals,delovals,delmvals,p)
np.savez('output_N20_ain1',aoutvals=aoutvals,boutvals=boutvals,effic_a=effic_a,effic_b=effic_b,ainvals=ainvals,binvals=binvals,delovals=delovals,delmvals=delmvals,p=p)
| [
"[email protected]"
]
| |
655329fb24105ab3d49e79b703e9372d94130f5d | 73a5eca1ddee1d74a3c2be9ca4e5e67ebe3d16f7 | /src/util/main.py | 268fafaef1f85aa7a5b43a86fbd49cf82644a789 | [
"MIT"
]
| permissive | ychnlgy/Chebyshev-Lagrange | 34346692a2925cde620377e8fbcb8d588623fac7 | 74292e72b83f992d6c42a2f2db04dfdce5a52aea | refs/heads/master | 2020-05-23T06:20:10.831035 | 2020-02-12T16:31:38 | 2020-02-12T16:31:38 | 186,661,893 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | MAIN = "__main__"
def main(name=None):
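    # decorator: @main(__name__) calls prog(**kwargs) parsed from
    # "key=value" command-line arguments when the module runs as a script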
def main_wrapper(prog):
if name == MAIN:
import sys
args = dict([d.split("=") for d in sys.argv[1:]])
prog(**args)
return prog
if callable(name):
prog = name
name = MAIN
return main_wrapper(prog)
else:
return main_wrapper
| [
"[email protected]"
]
| |
fbd858342f10655883bdb5247be8b585be37030b | 9405aa570ede31a9b11ce07c0da69a2c73ab0570 | /aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/GetAuditLogsRequest.py | 6103a63b5d1ff06daa042f0bb7494e9c42ca2bce | [
"Apache-2.0"
]
| permissive | liumihust/aliyun-openapi-python-sdk | 7fa3f5b7ea5177a9dbffc99e73cf9f00e640b72b | c7b5dd4befae4b9c59181654289f9272531207ef | refs/heads/master | 2020-09-25T12:10:14.245354 | 2019-12-04T14:43:27 | 2019-12-04T14:43:27 | 226,002,339 | 1 | 0 | NOASSERTION | 2019-12-05T02:50:35 | 2019-12-05T02:50:34 | null | UTF-8 | Python | false | false | 2,948 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class GetAuditLogsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'GetAuditLogs','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_PageCount(self):
return self.get_query_params().get('PageCount')
def set_PageCount(self,PageCount):
self.add_query_param('PageCount',PageCount)
def get_OrderMode(self):
return self.get_query_params().get('OrderMode')
def set_OrderMode(self,OrderMode):
self.add_query_param('OrderMode',OrderMode)
def get_EntityId(self):
return self.get_query_params().get('EntityId')
def set_EntityId(self,EntityId):
self.add_query_param('EntityId',EntityId)
def get_PageNumber(self):
return self.get_query_params().get('PageNumber')
def set_PageNumber(self,PageNumber):
self.add_query_param('PageNumber',PageNumber)
def get_Limit(self):
return self.get_query_params().get('Limit')
def set_Limit(self,Limit):
self.add_query_param('Limit',Limit)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentSize(self):
return self.get_query_params().get('CurrentSize')
def set_CurrentSize(self,CurrentSize):
self.add_query_param('CurrentSize',CurrentSize)
def get_OrderField(self):
return self.get_query_params().get('OrderField')
def set_OrderField(self,OrderField):
self.add_query_param('OrderField',OrderField)
def get_Operation(self):
return self.get_query_params().get('Operation')
def set_Operation(self,Operation):
self.add_query_param('Operation',Operation) | [
"[email protected]"
]
| |
212d8d6048601c474ec7d30cb6db83bb7e054684 | c6a4069e265325e836e4ee79fae0f5490f1a1c47 | /tests/test_fight.py | e8c2d1ee8f3ae4f2bede2c65675418e6c0ac5f38 | []
| no_license | astoeff/clean-code-course-project | b2ca1d10b226ea95b602d2535810c9af5aadb244 | 2b64956ea1b33cba405ccd500bf1a5472a65e9c4 | refs/heads/master | 2022-11-19T05:04:20.992189 | 2020-07-17T17:12:59 | 2020-07-17T17:12:59 | 274,676,681 | 0 | 0 | null | 2020-07-17T17:13:00 | 2020-06-24T13:32:49 | Python | UTF-8 | Python | false | false | 6,529 | py | import unittest
from main.models.hero import Hero
from main.models.enemy import Enemy
from main.fight import Fight
from main.treasures.weapon import Weapon
from main.constants import (FIGHT_INITIAL_INFORMATION_PART, FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART,
FIGHT_HERO_ATTACK_INFORMATION_PART, DEFAULT_WEAPON_NAME, DEFAULT_WEAPON_DAMAGE,
FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART, FIGHT_ENEMY_ATTACK_INFORMATION_PART)
class TestFight(unittest.TestCase):
#__init__()
def test_with_given_correct_arguments_should_initialise_correctly(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
fight = Fight(hero, enemy)
expected_hero = hero
expected_enemy = enemy
expected_distance = 0
expected_direction = 0
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(enemy)]
self.assertEqual(expected_hero, fight.hero)
self.assertEqual(expected_enemy, fight.enemy)
self.assertEqual(expected_distance, fight.distance)
self.assertEqual(expected_direction, fight.direction)
self.assertEqual(expected_information_parts, fight.information_parts)
#hero_attack()
def test_when_hero_can_not_attack_should_set_correct_information_part_and_enemy_takes_zero_damage(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
enemy_health_before_attack = enemy.health
fight = Fight(hero, enemy)
fight.hero_attack()
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(enemy),
FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART]
self.assertEqual(enemy_health_before_attack, enemy.health)
self.assertEqual(expected_information_parts, result_information_parts)
def test_when_hero_can_attack_should_set_correct_information_part_and_enemy_takes_damage_from_attack(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
default_weapon = Weapon(name=DEFAULT_WEAPON_NAME, damage=DEFAULT_WEAPON_DAMAGE)
hero.equip(default_weapon)
enemy = Enemy()
initial_enemy_as_string = str(enemy)
enemy_health_before_attack = enemy.health
fight = Fight(hero, enemy)
fight.hero_attack()
result_enemy_health = fight.enemy.health
expected_enemy_health = enemy_health_before_attack - default_weapon.damage
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + initial_enemy_as_string,
FIGHT_HERO_ATTACK_INFORMATION_PART + str(default_weapon.damage)]
self.assertEqual(expected_enemy_health, result_enemy_health)
self.assertEqual(expected_information_parts, result_information_parts)
#enemy_attack()
def test_when_enemy_can_not_attack_should_set_correct_information_part_and_hero_takes_zero_damage(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
enemy.damage = 0
fight = Fight(hero, enemy)
hero_health_before_attack = hero.health
fight.enemy_attack()
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(enemy),
FIGHT_ENEMY_CANNOT_ATTACK_INFORMATION_PART]
self.assertEqual(hero_health_before_attack, hero.health)
self.assertEqual(expected_information_parts, result_information_parts)
def test_when_enemy_can_attack_should_set_correct_information_part_and_hero_takes_damage_from_attack(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
fight = Fight(hero, enemy)
hero_health_before_attack = hero.health
fight.enemy_attack()
result_hero_health = fight.hero.health
expected_hero_health = hero_health_before_attack - enemy.damage
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(enemy),
FIGHT_ENEMY_ATTACK_INFORMATION_PART + str(enemy.damage)]
self.assertEqual(expected_hero_health, result_hero_health)
self.assertEqual(expected_information_parts, result_information_parts)
#execute()
def test_with_hero_that_can_not_attack_should_stop_when_hero_is_dead(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
enemy.damage = hero.health
fight = Fight(hero, enemy)
fight.execute()
result_hero_alive_status = hero.is_alive()
expected_hero_alive_status = False
result_enemy_alive_status = enemy.is_alive()
expected_enemy_alive_status = True
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + str(enemy),
FIGHT_HERO_CANNOT_ATTACK_INFORMATION_PART,
FIGHT_ENEMY_ATTACK_INFORMATION_PART + str(enemy.damage)]
self.assertEqual(expected_hero_alive_status, result_hero_alive_status)
self.assertEqual(expected_enemy_alive_status, result_enemy_alive_status)
self.assertEqual(expected_information_parts, result_information_parts)
def test_with_hero_that_can_attack_and_enemy_that_can_not_should_stop_when_enemy_is_dead(self):
hero = Hero('hero', 'bravest', 100, 100, 2)
enemy = Enemy()
weapon = Weapon(name=DEFAULT_WEAPON_NAME, damage=enemy.health)
hero.equip(weapon)
fight = Fight(hero, enemy)
initial_enemy_as_string = str(enemy)
fight.execute()
result_hero_alive_status = hero.is_alive()
expected_hero_alive_status = True
result_enemy_alive_status = enemy.is_alive()
expected_enemy_alive_status = False
result_information_parts = fight.information_parts
expected_information_parts = [FIGHT_INITIAL_INFORMATION_PART + initial_enemy_as_string,
FIGHT_HERO_ATTACK_INFORMATION_PART + str(weapon.damage)]
self.assertEqual(expected_hero_alive_status, result_hero_alive_status)
self.assertEqual(expected_enemy_alive_status, result_enemy_alive_status)
self.assertEqual(expected_information_parts, result_information_parts)
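
# allow running this test module directly (a common convenience; assumes the
# truncated header above imported unittest, which TestFight already relies on)
if __name__ == '__main__':
    unittest.main()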
| [
"[email protected]"
]
| |
bec6065716cd4195abdc899d00334578c3a51678 | c81ea73e93df307d35191ab184a85d6c67c57112 | /dockers/mvcnn2/Model.py | 33a386975482c00e07a61ec744468d7c35b5098f | []
| no_license | BlenderCN-Org/diplomka | 8d0503fc5902dfede8317aed84f5a17f691f687f | 575fe3f2436b9c511496c1dc019d9cc3423ba5f0 | refs/heads/master | 2020-05-22T15:42:00.143738 | 2019-05-07T07:37:46 | 2019-05-07T07:37:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | import torch
import torch.nn as nn
import os
class Model(nn.Module):
def __init__(self, name):
super(Model, self).__init__()
self.name = name
def save(self, file):
torch.save(self.state_dict(), file)
def save_results(self, path, data):
raise NotImplementedError("Model subclass must implement this method.")
def load(self, file):
if not os.path.exists(file):
raise IOError("File {} does not exist!".format(file))
self.load_state_dict(torch.load(file))
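
# Hypothetical subclass sketch showing how Model is meant to be extended
# (class and attribute names below are illustrative, not taken from this repo):
#   class MVCNN(Model):
#       def __init__(self):
#           super(MVCNN, self).__init__('mvcnn')
#           self.classifier = nn.Linear(512, 40)
#       def save_results(self, path, data):
#           torch.save(data, os.path.join(path, self.name + '_results.pth'))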
| [
"[email protected]"
]
| |
9f030bfe6258938a897a89c3d9be1473ed319955 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/MSSM_HiggsToMuMu/fragment_mhmodp_MA450_tb25_ggA.py | bdbd2e8c4f539ec84f6da6ae2be00f6cbe09699a | []
| no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 18,815 | py | COM_ENERGY = 13000.0 # GeV
CROSS_SECTION = 1 # pb
PROCESS = 'HiggsBSM:gg2A3 = on'
SLHA_TABLE = """BLOCK SPINFO
1 FeynHiggs
2 2.12.0
2 built on ott 13, 2016
BLOCK MODSEL
1 0 # Model
2 1 # GridPts
3 0 # Content
4 0 # RPV
5 0 # CPV
6 0 # FV
BLOCK SMINPUTS
1 1.28952828E+02 # invAlfaMZ
2 1.16637000E-05 # GF
3 1.19000000E-01 # AlfasMZ
4 9.11876000E+01 # MZ
5 4.16000000E+00 # Mb
6 1.73200000E+02 # Mt
7 1.77703000E+00 # Mtau
11 5.10998902E-04 # Me
13 1.05658357E-01 # Mmu
21 6.00000000E-03 # Md
22 3.00000000E-03 # Mu
23 9.50000000E-02 # Ms
24 1.28600000E+00 # Mc
BLOCK MINPAR
3 2.50000000E+01 # TB
BLOCK EXTPAR
0 0.00000000E+00 # Q
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
11 1.50800000E+03 # At
12 1.50800000E+03 # Ab
13 1.50800000E+03 # Atau
23 2.00000000E+02 # MUE
25 2.50000000E+01 # TB
26 4.50000000E+02 # MA0
27 4.57125627E+02 # MHp
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK MASS
1000012 4.95838375E+02 # MSf(1,1,1)
1000011 5.02293635E+02 # MSf(1,2,1)
2000011 5.01842018E+02 # MSf(2,2,1)
1000002 1.49902834E+03 # MSf(1,3,1)
2000002 1.49958985E+03 # MSf(2,3,1)
1000001 1.50117603E+03 # MSf(1,4,1)
2000001 1.50020494E+03 # MSf(2,4,1)
1000014 4.95838375E+02 # MSf(1,1,2)
1000013 5.02640084E+02 # MSf(1,2,2)
2000013 5.01495040E+02 # MSf(2,2,2)
1000004 1.49902888E+03 # MSf(1,3,2)
2000004 1.49959042E+03 # MSf(2,3,2)
1000003 1.50119986E+03 # MSf(1,4,2)
2000003 1.50018109E+03 # MSf(2,4,2)
1000016 9.97925696E+02 # MSf(1,1,3)
1000015 9.97930750E+02 # MSf(1,2,3)
2000015 1.00413388E+03 # MSf(2,2,3)
1000006 8.76427249E+02 # MSf(1,3,3)
2000006 1.13478079E+03 # MSf(2,3,3)
1000005 9.94259418E+02 # MSf(1,4,3)
2000005 1.00778084E+03 # MSf(2,4,3)
25 1.24950037E+02 # Mh0
35 4.49736514E+02 # MHH
36 4.50000000E+02 # MA0
37 4.57472106E+02 # MHp
1000022 8.81217311E+01 # MNeu(1)
1000023 1.51770777E+02 # MNeu(2)
1000025 -2.10328954E+02 # MNeu(3)
1000035 2.65908097E+02 # MNeu(4)
1000024 1.48224451E+02 # MCha(1)
1000037 2.66377906E+02 # MCha(2)
1000021 1.50000000E+03 # MGl
BLOCK DMASS
0 1.73200000E+02 # Q
25 7.08660496E-01 # Delta Mh0
35 3.14328351E-03 # Delta MHH
36 0.00000000E+00 # Delta MA0
37 1.96990173E-02 # Delta MHp
BLOCK NMIX
1 1 9.31988478E-01 # ZNeu(1,1)
1 2 -1.14406215E-01 # ZNeu(1,2)
1 3 3.11018111E-01 # ZNeu(1,3)
1 4 -1.46889171E-01 # ZNeu(1,4)
2 1 -3.18015719E-01 # ZNeu(2,1)
2 2 -6.93652772E-01 # ZNeu(2,2)
2 3 5.10771151E-01 # ZNeu(2,3)
2 4 -3.96010941E-01 # ZNeu(2,4)
3 1 9.68557338E-02 # ZNeu(3,1)
3 2 -1.34875686E-01 # ZNeu(3,2)
3 3 -6.78023080E-01 # ZNeu(3,3)
3 4 -7.16039258E-01 # ZNeu(3,4)
4 1 -1.44507597E-01 # ZNeu(4,1)
4 2 6.98258977E-01 # ZNeu(4,2)
4 3 4.27393577E-01 # ZNeu(4,3)
4 4 -5.55775752E-01 # ZNeu(4,4)
BLOCK UMIX
1 1 -6.07017863E-01 # UCha(1,1)
1 2 7.94688187E-01 # UCha(1,2)
2 1 7.94688187E-01 # UCha(2,1)
2 2 6.07017863E-01 # UCha(2,2)
BLOCK VMIX
1 1 -7.94688187E-01 # VCha(1,1)
1 2 6.07017863E-01 # VCha(1,2)
2 1 6.07017863E-01 # VCha(2,1)
2 2 7.94688187E-01 # VCha(2,2)
BLOCK STAUMIX
1 1 6.94077482E-01 # USf(1,1)
1 2 7.19900305E-01 # USf(1,2)
2 1 7.19900305E-01 # USf(2,1)
2 2 -6.94077482E-01 # USf(2,2)
BLOCK STOPMIX
1 1 7.08251522E-01 # USf(1,1)
1 2 -7.05960185E-01 # USf(1,2)
2 1 7.05960185E-01 # USf(2,1)
2 2 7.08251522E-01 # USf(2,2)
BLOCK SBOTMIX
1 1 6.67964822E-01 # USf(1,1)
1 2 7.44192849E-01 # USf(1,2)
2 1 7.44192849E-01 # USf(2,1)
2 2 -6.67964822E-01 # USf(2,2)
BLOCK ALPHA
-4.58583096E-02 # Alpha
BLOCK DALPHA
1.13640194E-04 # Delta Alpha
BLOCK HMIX Q= -0.99900000E+03
1 2.00000000E+02 # MUE
2 2.50000000E+01 # TB
BLOCK MSOFT Q= 0.00000000E+00
1 9.54716519E+01 # M1
2 2.00000000E+02 # M2
3 1.50000000E+03 # M3
31 5.00000000E+02 # MSL(1)
32 5.00000000E+02 # MSL(2)
33 1.00000000E+03 # MSL(3)
34 5.00000000E+02 # MSE(1)
35 5.00000000E+02 # MSE(2)
36 1.00000000E+03 # MSE(3)
41 1.50000000E+03 # MSQ(1)
42 1.50000000E+03 # MSQ(2)
43 1.00000000E+03 # MSQ(3)
44 1.50000000E+03 # MSU(1)
45 1.50000000E+03 # MSU(2)
46 1.00000000E+03 # MSU(3)
47 1.50000000E+03 # MSD(1)
48 1.50000000E+03 # MSD(2)
49 1.00000000E+03 # MSD(3)
BLOCK AE Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.50800000E+03 # Af(3,3)
BLOCK AU Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.50800000E+03 # Af(3,3)
BLOCK AD Q= 0.00000000E+00
1 1 0.00000000E+00 # Af(1,1)
2 2 0.00000000E+00 # Af(2,2)
3 3 1.50800000E+03 # Af(3,3)
BLOCK YE Q= 0.00000000E+00
1 1 7.34341041E-05 # Yf(1,1)
2 2 1.51838424E-02 # Yf(2,2)
3 3 2.55371598E-01 # Yf(3,3)
BLOCK YU Q= 0.00000000E+00
1 1 1.72448364E-05 # Yf(1,1)
2 2 7.39228656E-03 # Yf(2,2)
3 3 9.95601891E-01 # Yf(3,3)
BLOCK YD Q= 0.00000000E+00
1 1 8.40743677E-04 # Yf(1,1)
2 2 1.33110713E-02 # Yf(2,2)
3 3 5.53780613E-01 # Yf(3,3)
BLOCK VCKMIN
1 2.25300000E-01 # lambda
2 8.08000000E-01 # A
3 1.32000000E-01 # rhobar
4 3.41000000E-01 # etabar
BLOCK MSL2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSL2(1,1)
2 2 2.50000000E+05 # MSL2(2,2)
3 3 1.00000000E+06 # MSL2(3,3)
BLOCK MSE2 Q= 0.00000000E+00
1 1 2.50000000E+05 # MSE2(1,1)
2 2 2.50000000E+05 # MSE2(2,2)
3 3 1.00000000E+06 # MSE2(3,3)
BLOCK MSQ2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSQ2(1,1)
2 2 2.25000000E+06 # MSQ2(2,2)
3 3 1.00000000E+06 # MSQ2(3,3)
BLOCK MSU2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSU2(1,1)
2 2 2.25000000E+06 # MSU2(2,2)
3 3 1.00000000E+06 # MSU2(3,3)
BLOCK MSD2 Q= 0.00000000E+00
1 1 2.25000000E+06 # MSD2(1,1)
2 2 2.25000000E+06 # MSD2(2,2)
3 3 1.00000000E+06 # MSD2(3,3)
BLOCK TE Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 3.85100369E+02 # Tf(3,3)
BLOCK TU Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 1.50136765E+03 # Tf(3,3)
BLOCK TD Q= 0.00000000E+00
1 1 0.00000000E+00 # Tf(1,1)
2 2 0.00000000E+00 # Tf(2,2)
3 3 8.35101165E+02 # Tf(3,3)
BLOCK SELMIX
1 1 9.99984128E-01 # UASf(1,1)
1 4 -5.63422645E-03 # UASf(1,4)
2 2 8.34980634E-01 # UASf(2,2)
2 5 -5.50279330E-01 # UASf(2,5)
3 3 6.94077482E-01 # UASf(3,3)
3 6 7.19900305E-01 # UASf(3,6)
4 1 5.63422645E-03 # UASf(4,1)
4 4 9.99984128E-01 # UASf(4,4)
5 2 5.50279330E-01 # UASf(5,2)
5 5 8.34980634E-01 # UASf(5,5)
6 3 7.19900305E-01 # UASf(6,3)
6 6 -6.94077482E-01 # UASf(6,6)
BLOCK USQMIX
1 1 1.00000000E+00 # UASf(1,1)
1 4 1.42539883E-05 # UASf(1,4)
2 2 9.99981335E-01 # UASf(2,2)
2 5 6.10986751E-03 # UASf(2,5)
3 3 7.08251522E-01 # UASf(3,3)
3 6 -7.05960185E-01 # UASf(3,6)
4 1 -1.42539883E-05 # UASf(4,1)
4 4 1.00000000E+00 # UASf(4,4)
5 2 -6.10986751E-03 # UASf(5,2)
5 5 9.99981335E-01 # UASf(5,5)
6 3 7.05960185E-01 # UASf(6,3)
6 6 7.08251522E-01 # UASf(6,6)
BLOCK DSQMIX
1 1 9.99949630E-01 # UASf(1,1)
1 4 -1.00368449E-02 # UASf(1,4)
2 2 9.88183453E-01 # UASf(2,2)
2 5 -1.53275777E-01 # UASf(2,5)
3 3 6.67964822E-01 # UASf(3,3)
3 6 7.44192849E-01 # UASf(3,6)
4 1 1.00368449E-02 # UASf(4,1)
4 4 9.99949630E-01 # UASf(4,4)
5 2 1.53275777E-01 # UASf(5,2)
5 5 9.88183453E-01 # UASf(5,5)
6 3 7.44192849E-01 # UASf(6,3)
6 6 -6.67964822E-01 # UASf(6,6)
BLOCK CVHMIX
1 1 9.99996944E-01 # UH(1,1)
1 2 2.47225788E-03 # UH(1,2)
1 3 0.00000000E+00 # UH(1,3)
2 1 -2.47225788E-03 # UH(2,1)
2 2 9.99996944E-01 # UH(2,2)
2 3 0.00000000E+00 # UH(2,3)
3 1 0.00000000E+00 # UH(3,1)
3 2 0.00000000E+00 # UH(3,2)
3 3 1.00000000E+00 # UH(3,3)
DECAY 25 4.52022416E-03 # Gamma(h0)
2.03958027E-03 2 22 22 # BR(h0 -> photon photon)
1.29970517E-03 2 22 23 # BR(h0 -> photon Z)
2.35882992E-02 2 23 23 # BR(h0 -> Z Z)
1.95050325E-01 2 -24 24 # BR(h0 -> W W)
6.18354425E-02 2 21 21 # BR(h0 -> gluon gluon)
5.78362743E-09 2 -11 11 # BR(h0 -> Electron electron)
2.57265975E-04 2 -13 13 # BR(h0 -> Muon muon)
7.37821118E-02 2 -15 15 # BR(h0 -> Tau tau)
1.78209266E-07 2 -2 2 # BR(h0 -> Up up)
2.46838052E-02 2 -4 4 # BR(h0 -> Charm charm)
9.39296839E-07 2 -1 1 # BR(h0 -> Down down)
2.35887364E-04 2 -3 3 # BR(h0 -> Strange strange)
6.17226455E-01 2 -5 5 # BR(h0 -> Bottom bottom)
DECAY 35 5.86451368E+00 # Gamma(HH)
1.71737478E-06 2 22 22 # BR(HH -> photon photon)
4.26250798E-06 2 22 23 # BR(HH -> photon Z)
7.03480481E-05 2 23 23 # BR(HH -> Z Z)
1.51567241E-04 2 -24 24 # BR(HH -> W W)
1.43647489E-04 2 21 21 # BR(HH -> gluon gluon)
7.72143836E-09 2 -11 11 # BR(HH -> Electron electron)
3.43595610E-04 2 -13 13 # BR(HH -> Muon muon)
9.65010052E-02 2 -15 15 # BR(HH -> Tau tau)
6.34630946E-13 2 -2 2 # BR(HH -> Up up)
8.78255589E-08 2 -4 4 # BR(HH -> Charm charm)
1.98878857E-03 2 -6 6 # BR(HH -> Top top)
9.38266022E-07 2 -1 1 # BR(HH -> Down down)
2.35621608E-04 2 -3 3 # BR(HH -> Strange strange)
5.68890460E-01 2 -5 5 # BR(HH -> Bottom bottom)
1.00733272E-01 2 -1000024 1000024 # BR(HH -> Chargino1 chargino1)
5.55560217E-02 2 -1000037 1000024 # BR(HH -> Chargino2 chargino1)
5.55560217E-02 2 -1000024 1000037 # BR(HH -> Chargino1 chargino2)
1.69985450E-02 2 1000022 1000022 # BR(HH -> neutralino1 neutralino1)
3.99758290E-02 2 1000022 1000023 # BR(HH -> neutralino1 neutralino2)
3.00425658E-02 2 1000022 1000025 # BR(HH -> neutralino1 neutralino3)
7.35664215E-06 2 1000022 1000035 # BR(HH -> neutralino1 neutralino4)
1.67497861E-02 2 1000023 1000023 # BR(HH -> neutralino2 neutralino2)
1.42237953E-02 2 1000023 1000025 # BR(HH -> neutralino2 neutralino3)
4.55665659E-04 2 1000023 1000035 # BR(HH -> neutralino2 neutralino4)
4.88987066E-04 2 1000025 1000025 # BR(HH -> neutralino3 neutralino3)
8.80106741E-04 2 25 25 # BR(HH -> h0 h0)
DECAY 36 5.99474019E+00 # Gamma(A0)
-4.12180697E-06 2 22 22 # BR(A0 -> photon photon)
-9.72933502E-06 2 22 23 # BR(A0 -> photon Z)
-1.56369622E-04 2 21 21 # BR(A0 -> gluon gluon)
-7.50308222E-09 2 -11 11 # BR(A0 -> Electron electron)
3.33879090E-04 2 -13 13 # BR(A0 -> Muon muon)
-9.38338080E-02 2 -15 15 # BR(A0 -> Tau tau)
-5.00587727E-13 2 -2 2 # BR(A0 -> Up up)
-6.94048133E-08 2 -4 4 # BR(A0 -> Charm charm)
-3.87257181E-03 2 -6 6 # BR(A0 -> Top top)
-9.12321360E-07 2 -1 1 # BR(A0 -> Down down)
-2.29106234E-04 2 -3 3 # BR(A0 -> Strange strange)
-5.53155093E-01 2 -5 5 # BR(A0 -> Bottom bottom)
-1.92964792E-01 2 -1000024 1000024 # BR(A0 -> Chargino1 chargino1)
-7.46646557E-03 2 -1000037 1000024 # BR(A0 -> Chargino2 chargino1)
-7.46646557E-03 2 -1000024 1000037 # BR(A0 -> Chargino1 chargino2)
-2.13349093E-02 2 1000022 1000022 # BR(A0 -> neutralino1 neutralino1)
-6.02128638E-02 2 1000022 1000023 # BR(A0 -> neutralino1 neutralino2)
-1.42387285E-02 2 1000022 1000025 # BR(A0 -> neutralino1 neutralino3)
-1.31558636E-04 2 1000022 1000035 # BR(A0 -> neutralino1 neutralino4)
-3.44493677E-02 2 1000023 1000023 # BR(A0 -> neutralino2 neutralino2)
-3.71924887E-03 2 1000023 1000025 # BR(A0 -> neutralino2 neutralino3)
-3.09803856E-03 2 1000023 1000035 # BR(A0 -> neutralino2 neutralino4)
-3.20651758E-03 2 1000025 1000025 # BR(A0 -> neutralino3 neutralino3)
-1.15375614E-04 2 23 25 # BR(A0 -> Z h0)
-2.97171217E-36 2 25 25 # BR(A0 -> h0 h0)
DECAY 37 4.80138250E+00 # Gamma(Hp)
9.85572406E-09 2 -11 12 # BR(Hp -> Electron nu_e)
4.21362868E-04 2 -13 14 # BR(Hp -> Muon nu_mu)
1.19185937E-01 2 -15 16 # BR(Hp -> Tau nu_tau)
1.09446218E-06 2 -1 2 # BR(Hp -> Down up)
1.24964753E-05 2 -3 2 # BR(Hp -> Strange up)
7.18973984E-06 2 -5 2 # BR(Hp -> Bottom up)
5.43540052E-08 2 -1 4 # BR(Hp -> Down charm)
2.73977469E-04 2 -3 4 # BR(Hp -> Strange charm)
1.00680537E-03 2 -5 4 # BR(Hp -> Bottom charm)
3.68519833E-07 2 -1 6 # BR(Hp -> Down top)
8.32588561E-06 2 -3 6 # BR(Hp -> Strange top)
5.19689769E-01 2 -5 6 # BR(Hp -> Bottom top)
9.38253439E-02 2 1000022 1000024 # BR(Hp -> neutralino1 chargino1)
1.83403958E-03 2 1000022 1000037 # BR(Hp -> neutralino1 chargino2)
1.43082363E-02 2 1000023 1000024 # BR(Hp -> neutralino2 chargino1)
1.07192302E-01 2 1000023 1000037 # BR(Hp -> neutralino2 chargino2)
7.17900461E-02 2 1000024 1000025 # BR(Hp -> chargino1 neutralino3)
7.02835096E-02 2 1000024 1000035 # BR(Hp -> chargino1 neutralino4)
1.59116821E-04 2 24 25 # BR(Hp -> W h0)
7.69634262E-09 2 24 35 # BR(Hp -> W HH)
6.47547011E-09 2 24 36 # BR(Hp -> W A0)
DECAY 6 1.37127534E+00 # Gamma(top)
1.00000000E+00 2 5 24 # BR(top -> bottom W)
"""
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'Higgs:useBSM = on',
PROCESS,
'SLHA:allowUserOverride = off',
'SLHA:minMassSM = 100.',
'PhaseSpace:mHatMin = 56.0'
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters'
)
)
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
]
| |
d0c3a1f5eb693cf578a1067dd019d36cfe2056e1 | b7b243902150a1aa5b774523ac01d7016de13477 | /cyc/math/628.py | aff725b4b44ac4e418c1fa95533e1c5c2ce2506c | []
| no_license | Veraph/LeetCode_Practice | 7e97a93464911a1f33b3133043d96c88cd54016a | eafadd711f6ec1b60d78442280f1c44b6296209d | refs/heads/master | 2023-03-23T11:49:19.046474 | 2021-03-18T02:22:50 | 2021-03-18T02:22:50 | 273,317,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # 628.py -- Maximum Product of Three Numbers
'''
Given an integer array, find three numbers whose product is maximum and output the maximum product.
'''
def maximumProduct(nums):
'''
read the description carefully.
be careful about the negative nums.
'''
nums.sort(reverse=True)
# two situations, find the bigger one
ans1 = nums[0] * nums[1] * nums[2]
ans2 = nums[0] * nums[-1] * nums[-2]
return max(ans1, ans2)
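
# Quick self-check (illustrative values, not part of the original file):
if __name__ == '__main__':
    print(maximumProduct([1, 2, 3]))            # 6
    print(maximumProduct([-10, -10, 1, 3, 2]))  # 300 = (-10) * (-10) * 3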
| [
"[email protected]"
]
| |
e63722dcf8d043c89d4fba02b0bbade77d3104dd | b6f377fce8b8d63bd00b5c10485439ddba7f0717 | /server/__init__.py | de758ecbe3217f05fdf7f2e0f9d10f567b7f74a0 | [
"MIT"
]
| permissive | HackCameroon/Coronalert | 9635d041e92f59fccaaac1e71f01c0f9cf347c2e | df7d66bec147ea1f47105102582bc25469e4bee2 | refs/heads/master | 2022-04-12T08:52:30.442765 | 2020-04-08T19:21:54 | 2020-04-08T19:21:54 | 262,270,364 | 1 | 0 | MIT | 2020-05-08T08:37:15 | 2020-05-08T08:37:14 | null | UTF-8 | Python | false | false | 2,879 | py | from flask import Flask, render_template
# from flask_jwt_extended import JWTManager
from flask_sqlalchemy import SQLAlchemy
from twilio.rest import Client
from flask_migrate import Migrate
import os
STATIC_FOLDER = "../client/build/static"
TEMPLATE_FOLDER = "../client/build"
CONFIG_FILE = "./config.py"
CONFIG_EXAMPLE = "server/config.example"
def load_from_env(app, *args):
for a in args:
app.config[a] = os.environ[a]
def load_models():
"""
Load all database models and create tables
"""
from server.models import User # noqa
from server.models import Location # noqa
db.create_all()
def load_blueprints():
"""
Load all blueprints for app
"""
from server.user.views import user_bp
from server.location.views import location_bp
from server.datapull.views import datapull_bp
from server.sms.views import sms_bp
app.register_blueprint(user_bp, url_prefix="/api/user")
app.register_blueprint(location_bp, url_prefix="/api/location")
app.register_blueprint(datapull_bp, url_prefix="/api/datapull")
app.register_blueprint(sms_bp, url_prefix="/api/sms")
def setup_default_routes():
"""
Set up default routes for app
"""
@app.errorhandler(404)
def default(error):
return render_template("index.html")
def setup_debug():
"""
Set up debug settings
"""
from flask_cors import CORS
app.config["JWT_COOKIE_CSRF_PROTECT"] = False
CORS(app, origins=[app.config["FRONTEND_URL"]], supports_credentials=True)
def setup_jwt():
"""
Set up JWT for app
"""
app.config["JWT_TOKEN_LOCATION"] = ["cookies"]
app.config["JWT_REFRESH_COOKIE_PATH"] = "/api/auth/token/refresh"
def create_app():
"""
Creates flask app, setting up database and blueprints
"""
global app
global db
global jwt
global twilio_client
global migrate
# Set up and configure app
app = Flask(__name__, static_folder=STATIC_FOLDER, template_folder=TEMPLATE_FOLDER)
try:
app.config.from_pyfile(CONFIG_FILE)
print("Loading secret configs from file")
except FileNotFoundError as e:
env_vars = [line.split("=")[0] for line in open(CONFIG_EXAMPLE, "r")]
load_from_env(app, *env_vars)
print("Loading secret configs from env")
if app.config["DEBUG"]:
setup_debug()
# Set up database
db = SQLAlchemy(app)
load_models()
# Set up Flask Migrations
migrate = Migrate(app, db)
# Set up Twilio
twilio_client = Client(app.config["TWILIO_SID"], app.config["TWILIO_AUTH_TOKEN"])
# Setup routes and bps
setup_default_routes()
load_blueprints()
# Set up JWT for app
# setup_jwt()
# jwt = JWTManager(app)
return app
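
# Minimal sketch of how this factory is typically consumed (the entry-point
# shown is an assumption, not taken from this repo):
#   from server import create_app
#   app = create_app()
#   app.run(debug=app.config["DEBUG"])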
"""
TODO:
- Self-reporting feature for sms
- Domain name
- Manifest stuff
""" | [
"[email protected]"
]
| |
48b71cb3fb8bc7bb76a91b40b2ce5b27e47ffabb | bc441bb06b8948288f110af63feda4e798f30225 | /cmdb_extend_sdk/api/instance/get_instances_pb2.pyi | 171bdfbd8163b6e22ba892a60c71cc8a169b7012 | [
"Apache-2.0"
]
| permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,195 | pyi | # @generated by generate_proto_mypy_stubs.py. Do not edit!
import sys
from google.protobuf.descriptor import (
Descriptor as google___protobuf___descriptor___Descriptor,
)
from google.protobuf.internal.containers import (
RepeatedCompositeFieldContainer as google___protobuf___internal___containers___RepeatedCompositeFieldContainer,
)
from google.protobuf.message import (
Message as google___protobuf___message___Message,
)
from google.protobuf.struct_pb2 import (
Struct as google___protobuf___struct_pb2___Struct,
)
from typing import (
Iterable as typing___Iterable,
Optional as typing___Optional,
Text as typing___Text,
Union as typing___Union,
)
from typing_extensions import (
Literal as typing_extensions___Literal,
)
builtin___bool = bool
builtin___bytes = bytes
builtin___float = float
builtin___int = int
if sys.version_info < (3,):
builtin___buffer = buffer
builtin___unicode = unicode
class GetInstancesRequest(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
objectId = ... # type: typing___Text
instanceIds = ... # type: typing___Text
page = ... # type: builtin___int
pageSize = ... # type: builtin___int
@property
def query(self) -> google___protobuf___struct_pb2___Struct: ...
def __init__(self,
*,
objectId : typing___Optional[typing___Text] = None,
instanceIds : typing___Optional[typing___Text] = None,
page : typing___Optional[builtin___int] = None,
pageSize : typing___Optional[builtin___int] = None,
query : typing___Optional[google___protobuf___struct_pb2___Struct] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetInstancesRequest: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetInstancesRequest: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"query",b"query"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"instanceIds",b"instanceIds",u"objectId",b"objectId",u"page",b"page",u"pageSize",b"pageSize",u"query",b"query"]) -> None: ...
class GetInstancesResponse(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
page = ... # type: builtin___int
pageSize = ... # type: builtin___int
total = ... # type: builtin___int
@property
def list(self) -> google___protobuf___internal___containers___RepeatedCompositeFieldContainer[google___protobuf___struct_pb2___Struct]: ...
def __init__(self,
*,
page : typing___Optional[builtin___int] = None,
pageSize : typing___Optional[builtin___int] = None,
total : typing___Optional[builtin___int] = None,
list : typing___Optional[typing___Iterable[google___protobuf___struct_pb2___Struct]] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetInstancesResponse: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetInstancesResponse: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def ClearField(self, field_name: typing_extensions___Literal[u"list",b"list",u"page",b"page",u"pageSize",b"pageSize",u"total",b"total"]) -> None: ...
class GetInstancesResponseWrapper(google___protobuf___message___Message):
DESCRIPTOR: google___protobuf___descriptor___Descriptor = ...
code = ... # type: builtin___int
codeExplain = ... # type: typing___Text
error = ... # type: typing___Text
@property
def data(self) -> GetInstancesResponse: ...
def __init__(self,
*,
code : typing___Optional[builtin___int] = None,
codeExplain : typing___Optional[typing___Text] = None,
error : typing___Optional[typing___Text] = None,
data : typing___Optional[GetInstancesResponse] = None,
) -> None: ...
if sys.version_info >= (3,):
@classmethod
def FromString(cls, s: builtin___bytes) -> GetInstancesResponseWrapper: ...
else:
@classmethod
def FromString(cls, s: typing___Union[builtin___bytes, builtin___buffer, builtin___unicode]) -> GetInstancesResponseWrapper: ...
def MergeFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def CopyFrom(self, other_msg: google___protobuf___message___Message) -> None: ...
def HasField(self, field_name: typing_extensions___Literal[u"data",b"data"]) -> builtin___bool: ...
def ClearField(self, field_name: typing_extensions___Literal[u"code",b"code",u"codeExplain",b"codeExplain",u"data",b"data",u"error",b"error"]) -> None: ...
| [
"[email protected]"
]
| |
bb7bb7ba73a4dbedb7c358b976265b17ddd63cf5 | 249ae31366937f62e048f0fb858028432a2ec769 | /apple-and-orange.py | 8840c4437d9f1690530d6de43c8ebc2fb7893f58 | []
| no_license | Mayurg6832/HackerRank-Problem-solving | 198dccade1c79a5573362f8d656d447910ff1053 | c8f2719f2728ed227e4f6ffea00163ddd8e17163 | refs/heads/master | 2022-10-12T22:36:29.500145 | 2020-06-10T09:18:49 | 2020-06-10T09:18:49 | 244,108,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | s,t=map(int,input().split())
a,b=map(int,input().split())
m,n=map(int,input().split())
apple=list(map(int,input().split()))
orange=list(map(int,input().split()))
countA=0
countO=0
# count fruits whose landing point (tree position + fall distance) hits the house [s, t]
for i in range(len(apple)):
    temp = a + apple[i]
    if s <= temp <= t:
        countA += 1
for i in range(len(orange)):
    temp = b + orange[i]
    if s <= temp <= t:
        countO += 1
print(countA)
print(countO)
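
# Sample session (hypothetical input in the HackerRank format):
#   7 11        <- house segment s..t
#   5 15        <- apple tree at a, orange tree at b
#   3 2         <- m apples, n oranges
#   -2 2 1      <- apple fall distances
#   5 -6        <- orange fall distances
# prints 1 (apples on the house) then 1 (oranges on the house)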
| [
"[email protected]"
]
| |
d32de0ad23b35433aeaaee25ab27bdb519c0dfdb | ebbe9b78464976119e619d683aa1f6b370508ba7 | /testdrf/tdrf/wsgi.py | 824146f6d0f901ec80ed48a580c2fb5e85a9162e | []
| no_license | taojy123/openapi_speech | bc98dda45ba913b3408b5649a5c574b5badd21f0 | 751dde6b952c08d09bb6c7cbc0e65040fa2498e7 | refs/heads/master | 2022-11-07T14:03:32.104921 | 2020-06-27T15:19:19 | 2020-06-27T15:19:19 | 271,430,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | """
WSGI config for tdrf project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tdrf.settings')
application = get_wsgi_application()
| [
"[email protected]"
]
| |
52cc571ad9fd0579f0fdcb429ce18b6da15f9104 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/85/usersdata/174/55054/submittedfiles/funcoes1.py | 4d3c33384d44b3a8fcf1a02d2bb63d5e462d030d | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | # -*- coding: utf-8 -*-
def crescente(lista):
    # returns True if the list is sorted in ascending order
    for i in range(0, len(lista)-1, 1):
        if lista[i] > lista[i+1]:
            return False
    return True

def decrescente(lista):
    # returns True if the list is sorted in descending order
    for i in range(0, len(lista)-1, 1):
        if lista[i] < lista[i+1]:
            return False
    return True

def consecutivos(lista):
    # returns True if the list contains two equal consecutive elements
    for i in range(0, len(lista)-1, 1):
        if lista[i] == lista[i+1]:
            return True
    return False

# ask for the number of elements of the lists
n = int(input('Number of elements: '))
# create the empty lists
a = []
b = []
c = []
# read the elements of each list (converted to int so the comparisons work)
for i in range(1, n+1, 1):
    a.append(int(input('List A - value %d: ' % i)))
for i in range(1, n+1, 1):
    b.append(int(input('List B - value %d: ' % i)))
for i in range(1, n+1, 1):
    c.append(int(input('List C - value %d: ' % i)))

# main program
def testar(lista):
    # runs the three tests on a list, printing S (yes) or N (no) for each
    if crescente(lista):
        print('S')
    else:
        print('N')
    if decrescente(lista):
        print('S')
    else:
        print('N')
    if consecutivos(lista):
        print('S')
    else:
        print('N')

# print the results
testar(a)
testar(b)
testar(c)
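
# Quick illustrative checks (not part of the original assignment):
#   crescente([1, 2, 3])    -> True
#   decrescente([3, 2, 1])  -> True
#   consecutivos([1, 1, 2]) -> True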
"[email protected]"
]
| |
4c12c2b031f013b218218fdfe3a52bacb9141085 | d2ea8079864f6f0b99937645c2d4cab8817e75f8 | /05_lekcja6OperacjeNaPlikach/Zad6Karty.py | ae96e5509ce0b2438c098833c86ccf03a29cb229 | []
| no_license | FlyingMedusa/PythonCourse | ea960ab20415180d6e164abf4af5619ad9d256fb | 4be6ffd1054b25285205d19987fb0fade0f72c14 | refs/heads/master | 2022-04-09T15:59:38.742786 | 2020-02-12T17:49:56 | 2020-02-12T17:49:56 | 213,706,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | def is_visa(is_card, number):
if is_card == False:
return False
if len(number) == 16 or len(number) == 13:
if card_number[0] == '4':
return True
def is_mastercard(is_card, number):
if is_card == False:
return False
if len(number) == 16:
if int(number[0:2]) in range(51, 56) or int(number[0:4]) in range(2221, 2721):
return True
def is_AmE(is_card, number):
if is_card == False:
return False
if len(number) == 15:
if int(number[0:2]) in (34, 37):
return True
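
# Illustrative classifications using well-known test numbers (not from the
# original input file):
#   4111111111111111 -> Visa (16 digits, leading 4)
#   5555555555554444 -> Mastercard (16 digits, 51-55 prefix)
#   378282246310005  -> American Express (15 digits, 34/37 prefix)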
filename = 'cardsnumbers.txt'
with open(filename) as fopen:
cards = fopen.read()
cards = cards.split()
i = 1
for card_number in cards:
can_be_card_number = False
print(i)
if 13 > len(card_number) or 16 < len(card_number):
print("\tWrong number")
else:
if card_number.isdecimal():
print("\tCan be a card number")
can_be_card_number = True
else:
print("\tNot a number")
    if is_visa(can_be_card_number, card_number):
        print("\tI'm a Visa")
        with open('visa.txt', 'a') as f:
            f.write(card_number + '\n')
    elif is_mastercard(can_be_card_number, card_number):
        print("\tI'm a Mastercard")
        with open('mastercard.txt', 'a') as f:
            f.write(card_number + '\n')
    elif is_AmE(can_be_card_number, card_number):
        print("\tI'm an American Express")
        with open('American.txt', 'a') as f:
            f.write(card_number + '\n')
    else:
        print("\tUnknown card type")
i += 1 | [
"[email protected]"
]
| |
5a1775df5cda2474ee302d546f93b09146350649 | ab8a34e5b821dde7b09abe37c838de046846484e | /twilio/sample-code-master/taskrouter/v1/task_queues_statistics/read-default/read-default.6.x.py | e63be41889aa29f493efff5404bcbd35336aaf55 | []
| no_license | sekharfly/twilio | 492b599fff62618437c87e05a6c201d6de94527a | a2847e4c79f9fbf5c53f25c8224deb11048fe94b | refs/heads/master | 2020-03-29T08:39:00.079997 | 2018-09-21T07:20:24 | 2018-09-21T07:20:24 | 149,721,431 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 547 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
client = Client(account_sid, auth_token)
statistics = client.taskrouter \
.workspaces('WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
.task_queues \
.statistics \
.list()
for record in statistics:
print(record.cumulative)
| [
"[email protected]"
]
| |
376231c1647b7584c2972dbc259b12d4122f151e | 2761d1c5563950f919953240e569dd7397ad2a28 | /clip/ViT-B-32-cpu/code/__torch__/torch/nn/modules/linear/___torch_mangle_9547.py | 2f0fb6831db918b275671b098939d68f5e4e1cbe | [
"MIT"
]
| permissive | shawwn/CLIP | c8e65523481d4f48852600eda1909a853f46bc05 | ba33b4eb956e6f507b4b39468b3b7336ac2260a1 | refs/heads/main | 2023-02-21T13:03:51.522948 | 2021-01-16T03:45:59 | 2021-01-16T03:45:59 | 327,780,194 | 6 | 2 | null | 2021-01-08T02:41:08 | 2021-01-08T02:41:08 | null | UTF-8 | Python | false | false | 646 | py | class Linear(Module):
__parameters__ = ["weight", "bias", ]
__buffers__ = []
weight : Tensor
bias : Tensor
training : bool
def forward(self: __torch__.torch.nn.modules.linear.___torch_mangle_9547.Linear,
argument_1: Tensor) -> Tensor:
_0 = self.bias
output = torch.matmul(argument_1.float(), torch.t(self.weight.float()))
return torch.add_(output, _0, alpha=1)
def forward1(self: __torch__.torch.nn.modules.linear.___torch_mangle_9547.Linear,
argument_1: Tensor) -> Tensor:
_1 = self.bias
output = torch.matmul(argument_1.float(), torch.t(self.weight.float()))
return torch.add_(output, _1, alpha=1)
| [
"[email protected]"
]
| |
d2f0dd4e289f55fd0a8ef460b27fab5cd1d64156 | e7005d4b461c7dfc9f660dff642892cbb7b66948 | /docker/server/project/app/controller/monitoring/views.py | fbef82c64d0eb24efffc86b35f83c41891006ae4 | []
| no_license | ardikabs/fipro-core | 963ca6e73dc9a961e94be7681fa919058acc414a | 123ecad130fbf103b69604343fff96486419998c | refs/heads/master | 2022-12-13T14:00:34.463893 | 2019-07-05T07:42:01 | 2019-07-05T07:42:01 | 130,592,675 | 5 | 0 | null | 2022-12-08T02:33:05 | 2018-04-22T17:50:06 | CSS | UTF-8 | Python | false | false | 6,857 | py |
import datetime
import pytz
import json
from bson import json_util
from flask import (
current_app,
jsonify,
render_template,
request,
redirect,
url_for,
flash,
make_response
)
from flask_login import (
current_user,
login_required
)
from . import monitoring
from app.utils import get_datetime, current_datetime
from app.commons.MongoInterface import MongoInterface as MoI
from app.models import Agents, OldAgents
@monitoring.route('/')
@login_required
def index():
return redirect(url_for('main.index'))
@monitoring.route('/top-attacks/')
@login_required
def top_attacks():
title = "Top Attacks"
moi = MoI()
if moi.check_conn() is False:
return render_template('monitoring/top_attacks.html', title=title, db_info=False)
return render_template('monitoring/top_attacks.html', title=title, db_info=True)
@monitoring.route('/event-statistics/')
@login_required
def event_statistics():
import time
start = time.time()
title = "Event Statistics"
moi = MoI()
if moi.check_conn() is False:
return render_template('monitoring/event_statistics.html', title=title, db_info=False)
events = moi.logs.events_histogram(identifier= current_user.identifier, limit=10)
print ("1. {}".format(time.time() - start))
countries = moi.logs.countries_histogram(identifier= current_user.identifier, limit=10)
print ("2. {}".format(time.time() - start))
ports = moi.logs.ports_histogram(identifier= current_user.identifier, limit=11)
print ("3. {}".format(time.time() - start))
sensor_events_histogram = json.dumps(events, default=json_util.default)
countries_event_histogram = json.dumps(countries, default=json_util.default)
ports_event_histogram = json.dumps(ports, default=json_util.default)
sensor_events = moi.logs.events_count(identifier= current_user.identifier)
print ("4. {}".format(time.time() - start))
ports_events = moi.logs.ports_events_count(identifier= current_user.identifier, limit=15)
print ("5. {}".format(time.time() - start))
countries_ports_events = moi.logs.top_countries_port(identifier= current_user.identifier, limit=6)
print ("6. {}".format(time.time() - start))
return render_template(
'monitoring/event_statistics.html',
title=title,
db_info=True,
sensor_event_histogram=sensor_events_histogram,
countries_event_histogram=countries_event_histogram,
ports_event_histogram=ports_event_histogram,
sensor_events=sensor_events,
ports_events=ports_events,
countries_ports_events=countries_ports_events
)
@monitoring.route('/event-hourly-statistics/')
@login_required
def event_hourly_statistics():
title = "Event Hourly Statistics"
moi = MoI()
if moi.check_conn() is False:
return render_template('monitoring/event_hourly.html', title=title, db_info=False)
current = current_datetime()
date = current.strftime("%Y-%m-%d")
ts = datetime.datetime.strptime(date, "%Y-%m-%d")
sensor_event = moi.logs.sensor_event_statistics(identifier= current_user.identifier, date=ts)
agent_event = moi.logs.agents_event_statistics(identifier= current_user.identifier, date=ts)
ports_event = moi.logs.ports_event_statistics(identifier= current_user.identifier, date=ts, limit=10)
countries_event = moi.logs.countries_event_statistics(identifier= current_user.identifier, date=ts, limit=10)
for agent in agent_event:
ag = Agents.query.filter_by(ipaddr=agent.get('label')).first()
agent['agent_ip'] = agent['label']
agent['label'] = ag.show_info()
return render_template(
'monitoring/event_hourly.html',
title=title,
db_info=True,
sensor_event = sensor_event,
agent_event = agent_event,
ports_event = ports_event,
countries_event = countries_event
)
# AJAX ENDPOINT
@monitoring.route('/top-attacks/ajax/', methods=['GET','POST'])
@login_required
def top_attacks_ajax():
moi = MoI()
if moi.check_conn() is False:
return make_response(jsonify([]), 500)
res = None
type = request.args.get('type', None)
limit = request.args.get('limit', 10)
identifier = current_user.identifier
options = {'limit': limit}
if identifier is None:
return make_response(jsonify({'message': 'Identifier required'}), 403)
if type == 'top_srcip_port':
res = moi.logs.top_sourceip_port(identifier = identifier)
elif type == 'top_asn':
res = moi.logs.top_asn(identifier = identifier, options=options)
elif type == 'top_countries':
res = moi.logs.top_countries(identifier = identifier, options=options)
elif type == 'top_src_ip':
res = moi.logs.top_sourceip(identifier = identifier, options=options)
elif type == 'top_unknown':
res = moi.logs.top_unknown_sourceip(identifier = identifier, options=options)
    if res:
        return make_response(jsonify(res), 200)
    if res is not None:
        # the type was recognized, but the query returned no data
        return make_response(jsonify({'message': 'No data available', 'status': False}), 503)
    return make_response(jsonify({"message": "Type is not recognized", "status": False}), 404)
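
# Example request (URL prefix is an assumption; it depends on where the
# 'monitoring' blueprint is registered by the app factory):
#   GET /monitoring/top-attacks/ajax/?type=top_countries&limit=10
#   -> 200 with data, 503 when the query is empty, 404 for unknown types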
@monitoring.route('/event-hourly-statistics/ajax/')
@login_required
def event_hourly_ajax():
moi = MoI()
if moi.check_conn() is False:
return make_response(jsonify([]), 500)
type = request.args.get('type')
date = request.args.get('date')
identifier = current_user.identifier
if identifier is None:
return make_response(jsonify({'message': 'Identifier required'}), 403)
if date is None:
return make_response(jsonify({'message': 'Date param is not initialized'}), 404)
ts = datetime.datetime.strptime(date, "%Y-%m-%d").replace(tzinfo=pytz.utc)
    data = None
    if type == 'sensor-event':
data = moi.logs.sensor_event_statistics(identifier= identifier, date=ts)
elif type == 'agents-event':
data = moi.logs.agents_event_statistics(identifier= identifier, date=ts)
for agent in data:
ag = Agents.query.filter_by(ipaddr=agent.get('label')).first()
if ag is None:
ag = OldAgents.query.filter_by(ipaddr=agent.get('label')).first()
agent['agent_ip'] = agent['label']
agent['label'] = ag.show_info()
elif type == 'ports-event':
data = moi.logs.ports_event_statistics(identifier= identifier, date=ts, limit=10)
elif type == 'countries-event':
data = moi.logs.countries_event_statistics(identifier= identifier, date=ts, limit=10)
if data:
return make_response(jsonify(data), 200)
else:
return make_response(jsonify([]), 404) | [
"[email protected]"
]
| |
9e4b180c96b1b6d9c029b8758dfdd177d8f91d11 | a54d5a5ae5ba352963f1166a29e1bb6c867157ab | /python/test/test_cumulative_list_sum.py | 2dd93e2dfe24d60ecabdb6d3b8f14108116ddf67 | []
| no_license | alephist/edabit-coding-challenges | 06f573e90ffbd13bc54ecbdaa8e6a225aa44f5d8 | 35f1fc84848fc44e184aae1ae231a36319c1c81e | refs/heads/main | 2023-07-30T22:39:37.468756 | 2021-09-18T07:47:02 | 2021-09-18T07:47:02 | 341,467,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | import unittest
from typing import List, Tuple
from cumulative_list_sum import cumulative_sum
test_values: Tuple[Tuple[List[int], List[int]], ...] = (
([], []),
([1], [1]),
([1, 2, 3], [1, 3, 6]),
([-1, -2, -3], [-1, -3, -6]),
([1, -2, 3], [1, -1, 2]),
([3, 3, -2, 408, 3, 3, 0, 66, 2, -2, 2, 3, 4, 2, -47, 3, 3, 2], [3, 6, 4,
412, 415, 418, 418, 484, 486, 484, 486, 489, 493, 495, 448, 451, 454, 456]),
)
class CumulativeListSumTestCase(unittest.TestCase):
def test_return_list_of_cumulative_sum(self):
for lst, expected_lst in test_values:
with self.subTest():
self.assertEqual(cumulative_sum(lst), expected_lst)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
8988b99c5aee861a7932799ddd6ef9b5e4ae0eb2 | 669f4fd966fd6068c9658a87fea3545e96df2285 | /mask_tools.py | 0d8c1711e31e27d14cb53d3e6511168301a80bae | [
"LicenseRef-scancode-public-domain"
]
| permissive | wtfuzz/kaggle-dsbowl-2018-dataset-fixes | 23927bad32b20beb6567e0f27c0af452bd3c4cf2 | 72b505fa661cde4e4b1a16198e71f16f1dd94fd1 | refs/heads/master | 2021-09-09T21:19:42.181400 | 2018-03-19T19:16:11 | 2018-03-19T19:16:11 | 125,908,151 | 0 | 0 | null | 2018-03-19T19:14:26 | 2018-03-19T19:14:26 | null | UTF-8 | Python | false | false | 2,181 | py | #!/usr/bin/env python3
import argparse
from pathlib import Path
import numpy as np
from PIL import Image
def create_masks():
""" Create masks from red color above original image.
"""
parser = argparse.ArgumentParser()
parser.add_argument('root')
args = parser.parse_args()
root = Path(args.root)
images_root = root / 'images'
masks_root = root / 'masks'
assert images_root.is_dir() and masks_root.is_dir()
for mask_path in list(images_root.glob('mask*.png')):
a = np.array(Image.open(mask_path))
# filter only red color
mask = (a[:, :, 0] > 127) & (np.max(a[:, :, 1:3], axis=2) < 80)
out_path = masks_root / mask_path.name
_save_mask(mask, out_path)
mask_path.unlink()
print(f'{mask_path} -> {out_path}')
def ensure_mask():
""" Ensure that a given (edited) mask shows what is it (clip to 0 or 255).
"""
parser = argparse.ArgumentParser()
parser.add_argument('mask_paths', nargs='+')
args = parser.parse_args()
mask_paths = []
for mask_path in map(Path, args.mask_paths):
if mask_path.is_dir():
mask_paths.extend((mask_path / 'masks').glob('*.png'))
else:
            mask_paths.append(mask_path)
for mask_path in mask_paths:
a = np.array(Image.open(mask_path))
needs_fixing = False
if len(a.shape) == 3:
needs_fixing = True
print(f'fixing shape (was {a.shape}) for {mask_path}')
a = a[:, :, :3].max(axis=2)
if a.dtype != np.uint8:
print(f'fixing dtype (was {a.dtype}) for {mask_path}')
needs_fixing = True
            assert a.dtype == np.bool_  # np.bool was removed in newer NumPy
a = a.astype(np.uint8) * 255
assert len(a.shape) == 2
to_fix = np.sum((a > 0) & (a < 255))
if to_fix:
needs_fixing = True
print(f'fixed {to_fix} pixels for {mask_path}')
if needs_fixing:
_save_mask(a > 80, mask_path)
def _save_mask(mask: np.ndarray, path: Path):
    assert mask.dtype == np.bool_  # np.bool was removed in newer NumPy
Image.fromarray(mask.astype(np.uint8) * 255).save(path)
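
# Example invocation (sample path is hypothetical; the script expects the
# DSB-2018 layout used above, i.e. <sample>/images and <sample>/masks):
#   python mask_tools.py stage1_train/<sample_id> [more samples or mask PNGs...]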
if __name__ == '__main__':
ensure_mask()
| [
"[email protected]"
]
| |
e188fbadc4084e203a34f001c1a6c864b6c16e49 | 33518b9521d8e633010b0b9d1ea0f7a937437200 | /Python/rotate_list/rotate_list.py | 3ecf8c4f8d6e4e343598e87bf7f9176d4bd68aa5 | []
| no_license | lqs4188980/CodingPractice | 977ddb69306c92a5e3df88f26572200622fad82a | c17653832269ab1bb3e411f7d74bef4c8e9985b3 | refs/heads/master | 2021-01-22T05:10:40.885490 | 2016-02-05T09:06:51 | 2016-02-05T09:06:51 | 25,272,652 | 0 | 1 | null | 2016-01-06T07:50:29 | 2014-10-15T20:40:34 | Java | UTF-8 | Python | false | false | 551 | py | class Solution(object):
    def rotateRight(self, head, k):
        if head is None:
            return head
        # measure the length of the list
        l = 0
        p = head
        while p:
            l += 1
            p = p.next
        # rotating by a multiple of the length is a no-op
        k %= l
        if k == 0:
            return head
        # p becomes the new head: the node at index l-k
        i, p = l-k, head
        while i:
            i -= 1
            p = p.next
        # q walks to the tail and links it back to the old head
        q = p
        while q.next:
            q = q.next
        q.next = head
        # cut the list just before the new head
        i = l-k-1
        while i > 0:
            i -= 1
            head = head.next
        head.next = None
        return p
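
# Minimal standalone demo; LeetCode supplies ListNode, so a small stand-in is
# defined here (an assumption, not part of the original submission):
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

if __name__ == '__main__':
    head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    node = Solution().rotateRight(head, 2)  # expect 4 5 1 2 3
    out = []
    while node:
        out.append(str(node.val))
        node = node.next
    print(' '.join(out))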
| [
"[email protected]"
]
|