Dataset schema (one row per source file; column name, type, and observed range or cardinality):
- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 3 to 616
- content_id: string, length 40
- detected_licenses: list, length 0 to 112
- license_type: string, 2 classes
- repo_name: string, length 5 to 115
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 777 classes
- visit_date: timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38
- revision_date: timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00
- committer_date: timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06
- github_id: int64, 4.92k to 681M, nullable
- star_events_count: int64, 0 to 209k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 22 classes
- gha_event_created_at: timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
- gha_created_at: timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
- gha_language: string, 149 classes
- src_encoding: string, 26 classes
- language: string, 1 class
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 3 to 10.2M
- extension: string, 188 classes
- content: string, length 3 to 10.2M
- authors: list, length 1
- author_id: string, length 1 to 132
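If this table comes from a Hugging Face dataset (the column statistics above follow that viewer's format), records like the ones below can be iterated with the `datasets` library. This is a minimal sketch; the dataset identifier is a placeholder, not the actual repository name:

    from datasets import load_dataset  # pip install datasets

    ds = load_dataset("<dataset-name>", split="train", streaming=True)  # placeholder id
    for record in ds:
        print(record["path"], record["repo_name"], record["length_bytes"])
        break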
path: /HoudiniHotBox17.0/lib/PastFbx.py | repo_name: LiuLiangFx/SmileHotBOX | branch_name: refs/heads/master
blob_id: 6249e0ffb60185954c5323d646f6ee5e4b97a4cc | directory_id: 2be8a9f06d4003d12c0a727fb83d284c31a53050 | content_id: a984bb3fb35778efa1d77ea747bb869b4f43016f
snapshot_id: 7551d9578b2defe612950cb8e3bffdb85024cede | revision_id: 8bd8eac69b3c2a9824b9aa4488ca77789bea8d85
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-01T10:22:26.959731 | revision_date: 2020-02-09T03:16:32 | committer_date: 2020-02-09T03:16:32
github_id: 239,236,801 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-02-09T02:47:18 | gha_created_at: 2020-02-09T02:47:18 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,133 | extension: py
content:
import hou


class PastFbx:
    def __init__(self):
        pass

    def checkNode(self, node, name, temp1=0):
        # Return the sibling node with the given name, or 0 if none exists.
        for childrenNode in node.parent().children():
            if childrenNode.name() == name:
                temp1 = childrenNode
        return temp1

    def checkInput(self, qian, hou1, temp=0):
        # Return hou1 if qian is already wired into one of its inputs, else 0.
        if hou1.inputs() == ():
            pass
        else:
            for node in hou1.inputs():
                if node == qian:
                    temp = hou1
                else:
                    temp = 0
        return temp

    def creatNode(self, node, temp):
        # Chain the node through a list of merge nodes, reusing existing ones.
        for mergeName in temp:
            serachNode = self.checkNode(node, mergeName)
            if serachNode:
                houNode = self.checkInput(node, serachNode)
                if houNode == 0:
                    serachNode.setInput(100, node)
                    node = serachNode
                else:
                    node = houNode
            else:
                merge = node.createOutputNode("merge", mergeName)
                node = merge

    def run(self):
        plane = hou.ui.paneTabOfType(hou.paneTabType.NetworkEditor)
        pos = plane.selectPosition()
        pos1 = pos
        node = plane.currentNode()
        fl1 = open('list.txt', 'r')
        a = len(fl1.readlines())
        check = 0
        fl1.close()
        for index in range(a):
            pos[0] += 1
            try:
                null = node.createNode("object_merge")
            except:
                b = node.parent()
                null = b.createNode("object_merge")
            null.setPosition(pos)
            fl1 = open('list.txt', 'r')
            path = fl1.readlines()[index][0:-1]
            allPath = path.split("++")
            null.parm("objpath1").set(allPath[0])
            null.parm("xformtype").set("local")
            attNode = null.createOutputNode("attribcreate")
            attNode.parm("name1").set("shop_materialpath")
            attNode.parm("type1").set("index")
            attNode.parm("string1").set("/shop/" + allPath[-1])
            attNode.parm("class1").set("primitive")
            catchNode = attNode.createOutputNode("catche_tool_1.0.1")
            catchNode.bypass(1)
            currentNode = catchNode
            self.creatNode(currentNode, allPath[1:-1])
            comping = int((index * 1.0 / (a - 1)) * 100)
            fl1.close()
            print("CreatNode for " + null.name() + "," + " Completing: " + str(comping) + "%")
        print("\nCopy node success!!!!")
authors: ["[email protected]"] | author_id: (empty)

path: /express/express/api_exception.py | repo_name: yiyuhao/exp | branch_name: refs/heads/master
blob_id: 0f6b34fbcc11d1d36e1186122b4196348d01de41 | directory_id: 15d3a10db27128c06f84c30fa8d64b2e1c629fd9 | content_id: 50d8121033b83ac36e6070744f39d492bda13465
snapshot_id: 7cba6650e3113ba05698f90a7baf75b680dd6435 | revision_id: 866a90b2e6f0d113559b0674f514cdd56020f7d6
detected_licenses: [] | license_type: no_license
visit_date: 2020-03-19T20:20:04.799355 | revision_date: 2018-07-15T14:55:24 | committer_date: 2018-07-15T14:55:24
github_id: 136,897,007 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 431 | extension: py
content:
# -*- coding: utf-8 -*-
from rest_framework.views import exception_handler


def custom_exception_handler(exc, context):
    # Call REST framework's default exception handler first,
    # to get the standard error response.
    response = exception_handler(exc, context)

    # Now add the HTTP status code to the response.
    if response is not None:
        response.data['status_code'] = response.status_code

    return response
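# Usage sketch (assumption, not part of this file): DRF picks up a custom handler via the
# EXCEPTION_HANDLER setting. The dotted path below is a guess based on this file living at
# express/express/api_exception.py; adjust it to the project's actual module path.
#
#   REST_FRAMEWORK = {
#       'EXCEPTION_HANDLER': 'express.api_exception.custom_exception_handler',
#   }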
authors: ["[email protected]"] | author_id: (empty)

path: /soapsales/employees/migrations/0003_auto_20200902_1721.py | repo_name: DUMBALINYOLO/erpmanu | branch_name: refs/heads/master
blob_id: 3b2ebe81d2835ea42691bb7d5bff97c782a8bc00 | directory_id: 59ac1d0f09ebfb527701031f3ab2cfbfb8055f51 | content_id: 0819efd6aafd9e48e4f311586f2596836d84ff10
snapshot_id: d4eb61b66cfa3704bd514b58580bdfec5639e3b0 | revision_id: db979bafcc7481f60af467d1f48d0a81bbbfc1aa
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-28T13:07:45.593051 | revision_date: 2021-05-12T09:30:23 | committer_date: 2021-05-12T09:30:23
github_id: 288,446,097 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 543 | extension: py
content:
# Generated by Django 3.0.7 on 2020-09-02 15:21

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('employees', '0002_auto_20200902_0038'),
    ]

    operations = [
        migrations.RenameField(
            model_name='employee',
            old_name='is_staff',
            new_name='is_admin',
        ),
        migrations.AlterField(
            model_name='employee',
            name='is_superuser',
            field=models.BooleanField(default=False),
        ),
    ]
authors: ["[email protected]"] | author_id: (empty)

path: /times.py | repo_name: nedbat/point_match | branch_name: refs/heads/master
blob_id: 27ca4ceae6de9d605e2bfc5c1fee240d3f1fe145 | directory_id: 10300363f12e5a6a0ea6a69d0a6d210174499d60 | content_id: 746272f2b9f1a029c66f51b8e55c0ba5edea3241
snapshot_id: 2da5cc12bf3f3866b35ec71ea227a5d21760ca97 | revision_id: a6c19ed1d206ec1ad02b13e15b8d761192b32593
detected_licenses: [] | license_type: no_license
visit_date: 2023-06-22T04:16:09.638622 | revision_date: 2019-04-01T21:25:09 | committer_date: 2019-04-01T21:25:09
github_id: 100,109,656 | star_events_count: 3 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,068 | extension: py
content:
# Micro-benchmarks (Python 2: uses xrange): dict membership vs. list membership vs. sorting.
import random
import timeit

if 0:
    TRIES = 10000
    for z in range(7):
        n = int(10**z)
        stmt = 'random.randint(1, 999999) in d'
        setup = 'import random; d = {{random.randint(1, 999999): 1 for _ in xrange({N:d})}}'.format(N=n)
        total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
        print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))

if 0:
    TRIES = 2000
    for z in range(7):
        n = int(10**z)
        stmt = 'random.randint(1, 999999) in x'
        setup = 'import random; x = [random.randint(1, 999999) for _ in xrange({N:d})]'.format(N=n)
        total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
        print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))

if 1:
    TRIES = 200
    for z in range(7):
        n = int(10**z)
        stmt = 'sorted(x)'
        setup = 'import random; x = [random.randint(1, 999999) for _ in xrange({N:d})]'.format(N=n)
        total = timeit.timeit(stmt=stmt, setup=setup, number=TRIES)
        print("{N:>9d}: {time:.7f}s".format(time=total/TRIES, N=n))
authors: ["[email protected]"] | author_id: (empty)

path: /src/anomalib/models/components/flow/all_in_one_block.py | repo_name: openvinotoolkit/anomalib | branch_name: refs/heads/main
blob_id: 4a3ade146a01bc93108ba525a191d0f4fc777c9b | directory_id: 811f4cdb25e26f3b27640aaa2e2bca93e660d2d7 | content_id: f2ab1e17c372351bdd22788c8bdee20d621f06a3
snapshot_id: 4467dfc392398845e816387267cdf979ff76fe15 | revision_id: 4abfa93dcfcb98771bc768b334c929ff9a02ce8b
detected_licenses: ["CC-BY-SA-4.0", "CC-BY-SA-3.0", "CC-BY-NC-SA-4.0", "Python-2.0", "Apache-2.0"] | license_type: permissive
visit_date: 2023-09-03T16:49:05.019269 | revision_date: 2023-08-28T14:22:19 | committer_date: 2023-08-28T14:22:19
github_id: 423,775,360 | star_events_count: 2,325 | fork_events_count: 454 | gha_license_id: Apache-2.0 | gha_event_created_at: 2023-09-14T11:21:33 | gha_created_at: 2021-11-02T09:11:38 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 12,649 | extension: py
content:
"""All In One Block Layer."""
# Copyright (c) https://github.com/vislearn/FrEIA
# SPDX-License-Identifier: MIT
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import warnings
from typing import Callable
import torch
import torch.nn.functional as F
from FrEIA.modules import InvertibleModule
from scipy.stats import special_ortho_group
from torch import Tensor, nn
def _global_scale_sigmoid_activation(input: Tensor) -> Tensor:
"""Global scale sigmoid activation.
Args:
input (Tensor): Input tensor
Returns:
Tensor: Sigmoid activation
"""
return 10 * torch.sigmoid(input - 2.0)
def _global_scale_softplus_activation(input: Tensor) -> Tensor:
"""Global scale softplus activation.
Args:
input (Tensor): Input tensor
Returns:
Tensor: Softplus activation
"""
softplus = nn.Softplus(beta=0.5)
return 0.1 * softplus(input)
def _global_scale_exp_activation(input: Tensor) -> Tensor:
"""Global scale exponential activation.
Args:
input (Tensor): Input tensor
Returns:
Tensor: Exponential activation
"""
return torch.exp(input)
class AllInOneBlock(InvertibleModule):
"""Module combining the most common operations in a normalizing flow or similar model.
It combines affine coupling, permutation, and global affine transformation
('ActNorm'). It can also be used as GIN coupling block, perform learned
householder permutations, and use an inverted pre-permutation. The affine
transformation includes a soft clamping mechanism, first used in Real-NVP.
The block as a whole performs the following computation:
.. math::
y = V\\,R \\; \\Psi(s_\\mathrm{global}) \\odot \\mathrm{Coupling}\\Big(R^{-1} V^{-1} x\\Big)+ t_\\mathrm{global}
- The inverse pre-permutation of x (i.e. :math:`R^{-1} V^{-1}`) is optional (see
``reverse_permutation`` below).
- The learned householder reflection matrix
:math:`V` is also optional all together (see ``learned_householder_permutation``
below).
- For the coupling, the input is split into :math:`x_1, x_2` along
the channel dimension. Then the output of the coupling operation is the
two halves :math:`u = \\mathrm{concat}(u_1, u_2)`.
.. math::
u_1 &= x_1 \\odot \\exp \\Big( \\alpha \\; \\mathrm{tanh}\\big( s(x_2) \\big)\\Big) + t(x_2) \\\\
u_2 &= x_2
Because :math:`\\mathrm{tanh}(s) \\in [-1, 1]`, this clamping mechanism prevents
exploding values in the exponential. The hyperparameter :math:`\\alpha` can be adjusted.
"""
def __init__(
self,
dims_in,
dims_c=[],
subnet_constructor: Callable | None = None,
affine_clamping: float = 2.0,
gin_block: bool = False,
global_affine_init: float = 1.0,
global_affine_type: str = "SOFTPLUS",
permute_soft: bool = False,
learned_householder_permutation: int = 0,
reverse_permutation: bool = False,
):
"""
Args:
subnet_constructor:
class or callable ``f``, called as ``f(channels_in, channels_out)`` and
should return a torch.nn.Module. Predicts coupling coefficients :math:`s, t`.
affine_clamping:
clamp the output of the multiplicative coefficients before
exponentiation to +/- ``affine_clamping`` (see :math:`\\alpha` above).
gin_block:
Turn the block into a GIN block from Sorrenson et al, 2019.
Makes it so that the coupling operation as a whole is volume preserving.
global_affine_init:
Initial value for the global affine scaling :math:`s_\\mathrm{global}`.
global_affine_type:
``'SIGMOID'``, ``'SOFTPLUS'``, or ``'EXP'``. Defines the activation to be used
on the beta for the global affine scaling (:math:`\\Psi` above).
permute_soft:
bool, whether to sample the permutation matrix :math:`R` from :math:`SO(N)`,
or to use hard permutations instead. Note, ``permute_soft=True`` is very slow
when working with >512 dimensions.
learned_householder_permutation:
Int, if >0, turn on the matrix :math:`V` above, that represents
multiple learned householder reflections. Slow if large number.
Dubious whether it actually helps network performance.
reverse_permutation:
Reverse the permutation before the block, as introduced by Putzky
et al, 2019. Turns on the :math:`R^{-1} V^{-1}` pre-multiplication above.
"""
super().__init__(dims_in, dims_c)
channels = dims_in[0][0]
# rank of the tensors means 1d, 2d, 3d tensor etc.
self.input_rank = len(dims_in[0]) - 1
# tuple containing all dims except for batch-dim (used at various points)
self.sum_dims = tuple(range(1, 2 + self.input_rank))
if len(dims_c) == 0:
self.conditional = False
self.condition_channels = 0
else:
assert tuple(dims_c[0][1:]) == tuple(
dims_in[0][1:]
), f"Dimensions of input and condition don't agree: {dims_c} vs {dims_in}."
self.conditional = True
self.condition_channels = sum(dc[0] for dc in dims_c)
split_len1 = channels - channels // 2
split_len2 = channels // 2
self.splits = [split_len1, split_len2]
try:
self.permute_function = {0: F.linear, 1: F.conv1d, 2: F.conv2d, 3: F.conv3d}[self.input_rank]
except KeyError:
raise ValueError(f"Data is {1 + self.input_rank}D. Must be 1D-4D.")
self.in_channels = channels
self.clamp = affine_clamping
self.GIN = gin_block
self.reverse_pre_permute = reverse_permutation
self.householder = learned_householder_permutation
if permute_soft and channels > 512:
warnings.warn(
(
"Soft permutation will take a very long time to initialize "
f"with {channels} feature channels. Consider using hard permutation instead."
)
)
# global_scale is used as the initial value for the global affine scale
# (pre-activation). It is computed such that
# global_scale_activation(global_scale) = global_affine_init
# the 'magic numbers' (specifically for sigmoid) scale the activation to
# a sensible range.
if global_affine_type == "SIGMOID":
global_scale = 2.0 - torch.log(torch.tensor([10.0 / global_affine_init - 1.0]))
self.global_scale_activation = _global_scale_sigmoid_activation
elif global_affine_type == "SOFTPLUS":
global_scale = 2.0 * torch.log(torch.exp(torch.tensor(0.5 * 10.0 * global_affine_init)) - 1)
self.global_scale_activation = _global_scale_softplus_activation
elif global_affine_type == "EXP":
global_scale = torch.log(torch.tensor(global_affine_init))
self.global_scale_activation = _global_scale_exp_activation
else:
raise ValueError('Global affine activation must be "SIGMOID", "SOFTPLUS" or "EXP"')
self.global_scale = nn.Parameter(torch.ones(1, self.in_channels, *([1] * self.input_rank)) * global_scale)
self.global_offset = nn.Parameter(torch.zeros(1, self.in_channels, *([1] * self.input_rank)))
if permute_soft:
w = special_ortho_group.rvs(channels)
else:
indices = torch.randperm(channels)
w = torch.zeros((channels, channels))
w[torch.arange(channels), indices] = 1.0
if self.householder:
# instead of just the permutation matrix w, the learned housholder
# permutation keeps track of reflection vectors vk, in addition to a
# random initial permutation w_0.
self.vk_householder = nn.Parameter(0.2 * torch.randn(self.householder, channels), requires_grad=True)
self.w_perm = None
self.w_perm_inv = None
self.w_0 = nn.Parameter(torch.FloatTensor(w), requires_grad=False)
else:
self.w_perm = nn.Parameter(
torch.FloatTensor(w).view(channels, channels, *([1] * self.input_rank)), requires_grad=False
)
self.w_perm_inv = nn.Parameter(
torch.FloatTensor(w.T).view(channels, channels, *([1] * self.input_rank)), requires_grad=False
)
if subnet_constructor is None:
raise ValueError("Please supply a callable subnet_constructor" "function or object (see docstring)")
self.subnet = subnet_constructor(self.splits[0] + self.condition_channels, 2 * self.splits[1])
self.last_jac = None
def _construct_householder_permutation(self):
"""Computes a permutation matrix from the reflection vectors that are
learned internally as nn.Parameters."""
w = self.w_0
for vk in self.vk_householder:
w = torch.mm(w, torch.eye(self.in_channels).to(w.device) - 2 * torch.ger(vk, vk) / torch.dot(vk, vk))
for i in range(self.input_rank):
w = w.unsqueeze(-1)
return w
def _permute(self, x, rev=False):
"""Performs the permutation and scaling after the coupling operation.
Returns transformed outputs and the LogJacDet of the scaling operation."""
if self.GIN:
scale = 1.0
perm_log_jac = 0.0
else:
scale = self.global_scale_activation(self.global_scale)
perm_log_jac = torch.sum(torch.log(scale))
if rev:
return ((self.permute_function(x, self.w_perm_inv) - self.global_offset) / scale, perm_log_jac)
else:
return (self.permute_function(x * scale + self.global_offset, self.w_perm), perm_log_jac)
def _pre_permute(self, x, rev=False):
"""Permutes before the coupling block, only used if
reverse_permutation is set"""
if rev:
return self.permute_function(x, self.w_perm)
else:
return self.permute_function(x, self.w_perm_inv)
def _affine(self, x, a, rev=False):
"""Given the passive half, and the pre-activation outputs of the
coupling subnetwork, perform the affine coupling operation.
Returns both the transformed inputs and the LogJacDet."""
# the entire coupling coefficient tensor is scaled down by a
# factor of ten for stability and easier initialization.
a *= 0.1
ch = x.shape[1]
sub_jac = self.clamp * torch.tanh(a[:, :ch])
if self.GIN:
sub_jac -= torch.mean(sub_jac, dim=self.sum_dims, keepdim=True)
if not rev:
return (x * torch.exp(sub_jac) + a[:, ch:], torch.sum(sub_jac, dim=self.sum_dims))
else:
return ((x - a[:, ch:]) * torch.exp(-sub_jac), -torch.sum(sub_jac, dim=self.sum_dims))
def forward(self, x, c=[], rev=False, jac=True):
"""See base class docstring"""
if self.householder:
self.w_perm = self._construct_householder_permutation()
if rev or self.reverse_pre_permute:
self.w_perm_inv = self.w_perm.transpose(0, 1).contiguous()
if rev:
x, global_scaling_jac = self._permute(x[0], rev=True)
x = (x,)
elif self.reverse_pre_permute:
x = (self._pre_permute(x[0], rev=False),)
x1, x2 = torch.split(x[0], self.splits, dim=1)
if self.conditional:
x1c = torch.cat([x1, *c], 1)
else:
x1c = x1
if not rev:
a1 = self.subnet(x1c)
x2, j2 = self._affine(x2, a1)
else:
a1 = self.subnet(x1c)
x2, j2 = self._affine(x2, a1, rev=True)
log_jac_det = j2
x_out = torch.cat((x1, x2), 1)
if not rev:
x_out, global_scaling_jac = self._permute(x_out, rev=False)
elif self.reverse_pre_permute:
x_out = self._pre_permute(x_out, rev=True)
# add the global scaling Jacobian to the total.
# trick to get the total number of non-channel dimensions:
# number of elements of the first channel of the first batch member
n_pixels = x_out[0, :1].numel()
log_jac_det += (-1) ** rev * n_pixels * global_scaling_jac
return (x_out,), log_jac_det
def output_dims(self, input_dims):
return input_dims
authors: ["[email protected]"] | author_id: (empty)

path: /codes/scripts/make_gif_video.py | repo_name: BlueAmulet/BasicSR | branch_name: refs/heads/lite
blob_id: adcb107a99607a4473a99cbe4a62c8ecc5918f4d | directory_id: f71118a9f24e09bba18d021f9c4a43a97dc4dead | content_id: fc81e5647ff7ce75b5bb35f226bce946a93a1d56
snapshot_id: d7420fd9d7b73bf0cd90a3201d84393f262e63be | revision_id: 7040913d8659a05af4c2428feb71c260efbf1e9c
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2021-07-10T14:48:26.037589 | revision_date: 2020-07-23T01:59:27 | committer_date: 2020-07-23T01:59:27
github_id: 196,041,187 | star_events_count: 19 | fork_events_count: 9 | gha_license_id: Apache-2.0 | gha_event_created_at: 2020-09-01T17:39:00 | gha_created_at: 2019-07-09T16:00:14 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,311 | extension: py
content:
"""
Add text to images, then make gif/video sequence from images.
Since the created gif has low quality with color issues, use this script to generate image with
text and then use `gifski`.
Call `ffmpeg` to make video.
"""
import os.path
import numpy as np
import cv2
crt_path = os.path.dirname(os.path.realpath(__file__))
# configurations
img_name_list = ['x1', 'x2', 'x3', 'x4', 'x5']
ext = '.png'
text_list = ['1', '2', '3', '4', '5']
h_start, h_len = 0, 576
w_start, w_len = 10, 352
enlarge_ratio = 1
txt_pos = (10, 50) # w, h
font_size = 1.5
font_thickness = 4
color = 'red'
duration = 0.8 # second
use_imageio = False # use imageio to make gif
make_video = False # make video using ffmpeg
is_crop = True
if h_start == 0 or w_start == 0:
is_crop = False # do not crop
img_name_list = [x + ext for x in img_name_list]
input_folder = os.path.join(crt_path, './ori')
save_folder = os.path.join(crt_path, './ori')
color_tb = {}
color_tb['yellow'] = (0, 255, 255)
color_tb['green'] = (0, 255, 0)
color_tb['red'] = (0, 0, 255)
color_tb['magenta'] = (255, 0, 255)
color_tb['matlab_blue'] = (189, 114, 0)
color_tb['matlab_orange'] = (25, 83, 217)
color_tb['matlab_yellow'] = (32, 177, 237)
color_tb['matlab_purple'] = (142, 47, 126)
color_tb['matlab_green'] = (48, 172, 119)
color_tb['matlab_liblue'] = (238, 190, 77)
color_tb['matlab_brown'] = (47, 20, 162)
color = color_tb[color]
img_list = []
# make temp dir
if not os.path.exists(save_folder):
os.makedirs(save_folder)
print('mkdir [{}] ...'.format(save_folder))
if make_video:
# tmp folder to save images for video
tmp_video_folder = os.path.join(crt_path, '_tmp_video')
if not os.path.exists(tmp_video_folder):
os.makedirs(tmp_video_folder)
idx = 0
for img_name, write_txt in zip(img_name_list, text_list):
img = cv2.imread(os.path.join(input_folder, img_name), cv2.IMREAD_UNCHANGED)
base_name = os.path.splitext(img_name)[0]
print(base_name)
# crop image
if is_crop:
print('Crop image ...')
if img.ndim == 2:
img = img[h_start:h_start + h_len, w_start:w_start + w_len]
elif img.ndim == 3:
img = img[h_start:h_start + h_len, w_start:w_start + w_len, :]
else:
raise ValueError('Wrong image dim [{:d}]'.format(img.ndim))
# enlarge img if necessary
if enlarge_ratio > 1:
H, W, _ = img.shape
img = cv2.resize(img, (W * enlarge_ratio, H * enlarge_ratio), \
interpolation=cv2.INTER_CUBIC)
# add text
font = cv2.FONT_HERSHEY_COMPLEX
cv2.putText(img, write_txt, txt_pos, font, font_size, color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(save_folder, base_name + '_text.png'), img)
if make_video:
idx += 1
cv2.imwrite(os.path.join(tmp_video_folder, '{:05d}.png'.format(idx)), img)
img = np.ascontiguousarray(img[:, :, [2, 1, 0]])
img_list.append(img)
if use_imageio:
import imageio
imageio.mimsave(os.path.join(save_folder, 'out.gif'), img_list, format='GIF', duration=duration)
if make_video:
os.system('ffmpeg -r {:f} -i {:s}/%05d.png -vcodec mpeg4 -y {:s}/movie.mp4'.format(
1 / duration, tmp_video_folder, save_folder))
if os.path.exists(tmp_video_folder):
os.system('rm -rf {}'.format(tmp_video_folder))
authors: ["[email protected]"] | author_id: (empty)

path: /spark/src/main/python/preprocBinning.py | repo_name: xu-hao/FHIR-PIT | branch_name: refs/heads/master
blob_id: 6747e33efcd4f93c3dbf79fe12368de440154955 | directory_id: b45e649b4580692dd1b8bf63ad29befb3daad95a | content_id: 6c21866ee6f9e294698dfe7cff5be5841bf1c7fa
snapshot_id: 21ea0e5b8796d86f3a931b99e3e7a3f1e58b04a2 | revision_id: db2fb04e2cc0d9fce2f8043f594f60fdb8f5a8e8
detected_licenses: [] | license_type: no_license
visit_date: 2021-05-25T09:49:48.084629 | revision_date: 2021-05-19T20:17:11 | committer_date: 2021-05-19T20:17:11
github_id: 127,015,534 | star_events_count: 2 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 660 | extension: py
content:
import os
import sys
import json
from preprocPatient import *
from preprocVisit import *

year_start, year_end, config_file, input_dir, output_dir = sys.argv[1:]

for year in range(int(year_start), int(year_end) + 1):
    print(year)
    input_file_p = f"{input_dir}/{year}/all_patient"
    output_file_p = f"{output_dir}/{year}patient"
    preproc_patient(config_file, input_file_p, output_file_p)
    input_file_v = f"{input_dir}/{year}/all_visit"
    output_file_v = f"{output_dir}/{year}visit"
    preproc_visit(config_file, input_file_v, output_file_v)
authors: ["[email protected]"] | author_id: (empty)

path: /08-Markov/decay.py | repo_name: jsbarbosa/JuanBarbosa_MCA | branch_name: refs/heads/master
blob_id: f93a39f3c7ce5dc35b811f46c70586ec4a00c270 | directory_id: 4d93acd63ce2835fcd7ea610fcd412b727a4f03e | content_id: aa454eea1ad7fb4d3765d62e0e5f8e83dfc8525a
snapshot_id: 41ebcc27bb7dd8a886c9b4c1b416bd7e3cad2e57 | revision_id: 4f49d17282679ae1fa81d7cc892b6560edf93828
detected_licenses: [] | license_type: no_license
visit_date: 2021-01-11T17:53:44.115810 | revision_date: 2017-04-24T17:58:09 | committer_date: 2017-04-24T17:58:09
github_id: 79,863,469 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,590 | extension: py
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 13 18:21:00 2017

@author: juan
"""
import numpy as np
import matplotlib.pyplot as plt

obs = np.array([1.5, 1.7, 2])

def rand():
    # Uniform random number in (-1, 1).
    return 2*np.random.random() - 1

def integral(a, b, lm):
    # Analytic integral of exp(-x/lm) from a to b.
    return -lm*(func(b, lm) - func(a, lm))

def func(x, lm):
    return np.exp(-x/lm)

def probability(x, lm):
    p = 1
    z = integral(1, 20, lm)
    for x_ in x:
        p *= func(x_, lm)/z
    return p

def bayesian(x, lm):
    return probability(x, lm)

def hastings(N, dx=1):
    # Metropolis-Hastings sampler for the decay constant lambda.
    lambdas = np.ones(N+1)
    lambdas[0] = np.random.random()*10.0
    for i in range(N):
        second = lambdas[i] + dx*rand()
        q = bayesian(obs, second)/bayesian(obs, lambdas[i])
        alpha = min(q, 1.0)
        u = np.random.random()
        if u <= alpha and second > 0:
            lambdas[i+1] = second
        else:
            lambdas[i+1] = lambdas[i]
    return lambdas

def rubin(N, M, dl):
    # Gelman-Rubin convergence diagnostic over M chains.
    avs = np.zeros(M)
    vas = np.zeros(M)
    R = np.zeros(N-2)
    chains = np.array([hastings(N, dl) for i in range(M)])
    for j in range(2, N):
        for i in range(M):
            avs[i] = np.mean(chains[i, :j])
            vas[i] = np.std(chains[i, :j])**2
        total = np.mean(avs)
        B = j/(M-1)*np.sum((avs-total)**2)
        W = vas.mean()
        R[j-2] = (j-1)/j + (B/W)*(M+1)/(j*M)
    return R

N = 10000
lm = np.logspace(-3, 3, 5)
for l in lm:
    R = rubin(N, 5, l)
    plt.plot(R, label="%f"%l)

plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.show()
authors: ["[email protected]"] | author_id: (empty)

path: /src/encoded/tests/test_create_mapping.py | repo_name: emi80/encoded | branch_name: refs/heads/master
blob_id: d39c8a61833fc2f4123d6803bf8dce614ed0a12a | directory_id: cfc9a8831e5946d738329fad2763d643dec8566f | content_id: 44d89dea3b1ec2d190ef281061e331a2302547be
snapshot_id: 8e244a66b0d36610dcf8d9a47d385640dfa7987d | revision_id: 2fe2c2afbd3be21b65b10a189a3bd623ecdaee37
detected_licenses: ["MIT"] | license_type: permissive
visit_date: 2021-01-18T12:34:56.060690 | revision_date: 2015-03-05T21:56:05 | committer_date: 2015-03-05T21:56:05
github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 299 | extension: py
content:
import pytest
from ..loadxl import ORDER


@pytest.mark.parametrize('item_type', ORDER)
def test_create_mapping(registry, item_type):
    from ..commands.create_mapping import type_mapping
    from ..contentbase import TYPES
    mapping = type_mapping(registry[TYPES], item_type)
    assert mapping
authors: ["[email protected]"] | author_id: (empty)

path: /2.7.py | repo_name: Beks667/2.7Hw | branch_name: refs/heads/main
blob_id: 8fc10d35f9fa5cced3f4939ab0d2ca50d42ab5cb | directory_id: b5dbf732d26a2a924c85c5a107035be48bfe69cd | content_id: a41cca6bfe45aaf10f7b7a81df3ea5680c11f318
snapshot_id: 2435bfa58e252357c46819f6987639ca025549be | revision_id: 4e03706bdfc70f2f94145a50f493f36995d08cdb
detected_licenses: [] | license_type: no_license
visit_date: 2023-04-19T13:10:24.348768 | revision_date: 2021-05-07T12:44:27 | committer_date: 2021-05-07T12:44:27
github_id: 365,230,069 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,341 | extension: py
content:
# class Phone :
#     def __init__ (self,brand,model,color):
#         self.brand = brand
#         self.model = model
#         self.color = color
#     def show (self):
#         print(f"{self.brand},{self.model},{self.color}")
# phone = Phone("Apple", "XS", "black")
# phone.show()
# class Monkey:
#     max_age = 12
#     loves_bananas = True
#     def climb(self):
#         print('I am climbing the tree')
# abc = Monkey()
# abc.climb()
# print(abc.max_age)
# abc.climb()
# print(abc.loves_bananas)
# This one uses input()----------------------------------------------------------------
# class Person:
#     def __init__(self,name,age,gender):
#         self.name = name
#         self.age = age
#         self.gender = gender
#     def calculate_age(self):
#         self.number = int(input('enter year:'))
#         print(self.age + self.number)
# p = Person('John', 23, 'male')
# p.calculate_age()
# #This one uses self-----------------------------------------------------------------------
# class Person:
#     def __init__(self,name,age,gender):
#         self.name = name
#         self.age = age
#         self.gender = gender
#     def calculate_age(self,year):
#         self.year = year
#         print(self.age + self.year)
# p = Person('John', 23, 'male')
# p.calculate_age(10)
#
authors: ["[email protected]"] | author_id: (empty)

path: /experiments/cifar10/conv.py | repo_name: BINDS-LAB-UMASS/bindsnet_experiments | branch_name: refs/heads/master
blob_id: ecd72f46add5e5f213fc1533ff3e25f25160af31 | directory_id: 9de18e1e39c941aeba1781630711cef1d3d4d44c | content_id: 41757c9d21758f8c35cf7d9e176d18cd6ff88602
snapshot_id: cee786ae7e087845f58e0af4a49fa319d4fb81d5 | revision_id: 8a20be9d1ede021b70ff95cc7e85024ff5a222db
detected_licenses: [] | license_type: no_license
visit_date: 2022-11-12T11:33:20.451028 | revision_date: 2019-10-01T15:40:28 | committer_date: 2019-10-01T15:40:28
github_id: 135,615,246 | star_events_count: 41 | fork_events_count: 10 | gha_license_id: null | gha_event_created_at: 2022-10-28T00:35:03 | gha_created_at: 2018-05-31T17:28:04 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 14,333 | extension: py
content:
import os
import sys
import torch
import argparse
import numpy as np
import matplotlib.pyplot as plt
from time import time as t
sys.path.append('..')
from utils import print_results, update_curves
from bindsnet.datasets import CIFAR10
from bindsnet.network import Network
from bindsnet.learning import Hebbian
from bindsnet.encoding import bernoulli
from bindsnet.network.monitors import Monitor
from bindsnet.network.nodes import Input, DiehlAndCookNodes
from bindsnet.evaluation import update_ngram_scores, assign_labels
from bindsnet.network.topology import Conv2dConnection, SparseConnection
from bindsnet.analysis.plotting import plot_input, plot_spikes, plot_conv2d_weights
print()
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n_train', type=int, default=60000)
parser.add_argument('--n_test', type=int, default=10000)
parser.add_argument('--kernel_size', type=int, nargs='+', default=[16])
parser.add_argument('--stride', type=int, nargs='+', default=[4])
parser.add_argument('--n_filters', type=int, default=25)
parser.add_argument('--padding', type=int, default=0)
parser.add_argument('--inhib', type=float, default=100.0)
parser.add_argument('--time', type=int, default=100)
parser.add_argument('--dt', type=float, default=1.0)
parser.add_argument('--intensity', type=float, default=0.5)
parser.add_argument('--progress_interval', type=int, default=10)
parser.add_argument('--update_interval', type=int, default=250)
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='train', action='store_false')
parser.add_argument('--plot', dest='plot', action='store_true')
parser.add_argument('--gpu', dest='gpu', action='store_true')
parser.set_defaults(plot=False, gpu=False, train=True)
args = parser.parse_args()
seed = args.seed
n_train = args.n_train
n_test = args.n_test
kernel_size = args.kernel_size
stride = args.stride
n_filters = args.n_filters
padding = args.padding
inhib = args.inhib
time = args.time
dt = args.dt
intensity = args.intensity
progress_interval = args.progress_interval
update_interval = args.update_interval
train = args.train
plot = args.plot
gpu = args.gpu
if len(kernel_size) == 1:
kernel_size = [kernel_size[0], kernel_size[0]]
if len(stride) == 1:
stride = [stride[0], stride[0]]
args = vars(args)
print('\nCommand-line argument values:')
for key, value in args.items():
print('-', key, ':', value)
print()
model = 'conv'
data = 'cifar10'
assert n_train % update_interval == 0 and n_test % update_interval == 0, \
'No. examples must be divisible by update_interval'
params = [seed, n_train, kernel_size, stride, n_filters,
padding, inhib, time, dt, intensity, update_interval]
model_name = '_'.join([str(x) for x in params])
if not train:
test_params = [seed, n_train, n_test, kernel_size, stride, n_filters,
padding, inhib, time, dt, intensity, update_interval]
np.random.seed(seed)
if gpu:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
torch.cuda.manual_seed_all(seed)
else:
torch.manual_seed(seed)
n_examples = n_train if train else n_test
input_shape = [32, 32, 3]
if kernel_size == input_shape:
conv_size = [1, 1]
else:
conv_size = (int((input_shape[0] - kernel_size[0]) / stride[0]) + 1,
int((input_shape[1] - kernel_size[1]) / stride[1]) + 1)
n_classes = 10
n_neurons = n_filters * np.prod(conv_size)
per_class = int(n_neurons / n_classes)
total_kernel_size = int(np.prod(kernel_size))
total_conv_size = int(np.prod(conv_size))
# Build network.
network = Network()
input_layer = Input(n=32*32*3, shape=(1, 3, 32, 32), traces=True)
conv_layer = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size),
thresh=-64.0, traces=True, theta_plus=0.05, refrac=0)
conv_layer2 = DiehlAndCookNodes(n=n_filters * total_conv_size, shape=(1, n_filters, *conv_size), refrac=0)
conv_conn = Conv2dConnection(input_layer, conv_layer, kernel_size=kernel_size, stride=stride, update_rule=Hebbian,
norm=0.5 * int(np.sqrt(total_kernel_size)), nu=(1e-3, 1e-3), wmax=2.0)
conv_conn2 = Conv2dConnection(input_layer, conv_layer2, w=conv_conn.w, kernel_size=kernel_size, stride=stride,
update_rule=None, nu=(0, 1e-3), wmax=2.0)
w = torch.ones(1, n_filters, conv_size[0], conv_size[1], 1, n_filters, conv_size[0], conv_size[1])
for f in range(n_filters):
for i in range(conv_size[0]):
for j in range(conv_size[1]):
w[0, f, i, j, 0, f, i, j] = 0
w = w.view(conv_layer.n, conv_layer.n)
i = w.nonzero()
v = -inhib * torch.ones(i.shape[0])
w = torch.sparse.FloatTensor(i.t(), v, w.size())
# for fltr1 in range(n_filters):
# for fltr2 in range(n_filters):
# for i1 in range(conv_size):
# for j1 in range(conv_size):
# for i2 in range(conv_size):
# for j2 in range(conv_size):
# if not (i1 == i2 and j1 == j2):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
# if fltr1 != fltr2:
# for i in range(conv_size):
# for j in range(conv_size):
# w[0, fltr1, i, j, 0, fltr2, i, j] = -inhib
# for i1 in range(conv_size[0]):
# for j1 in range(conv_size[1]):
# for i2 in range(conv_size[0]):
# for j2 in range(conv_size[1]):
# if not (fltr1 == fltr2 and i1 == i2 and j1 == j2):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
# if fltr1 != fltr2:
# for i1 in range(conv_size):
# for j1 in range(conv_size):
# for i2 in range(conv_size):
# for j2 in range(conv_size):
# w[0, fltr1, i1, j1, 0, fltr2, i2, j2] = -inhib
recurrent_conn = SparseConnection(conv_layer, conv_layer, w=w)
network.add_layer(input_layer, name='X')
network.add_layer(conv_layer, name='Y')
network.add_layer(conv_layer2, name='Y_')
network.add_connection(conv_conn, source='X', target='Y')
network.add_connection(conv_conn2, source='X', target='Y_')
network.add_connection(recurrent_conn, source='Y', target='Y')
# Voltage recording for excitatory and inhibitory layers.
voltage_monitor = Monitor(network.layers['Y'], ['v'], time=time)
network.add_monitor(voltage_monitor, name='output_voltage')
# Load CIFAR-10 data.
dataset = CIFAR10(path=os.path.join('..', '..', 'data', 'CIFAR10'), download=True)
if train:
images, labels = dataset.get_train()
else:
images, labels = dataset.get_test()
images *= intensity
# Record spikes during the simulation.
spike_record = torch.zeros(update_interval, time, n_neurons)
# Neuron assignments and spike proportions.
if train:
assignments = -torch.ones_like(torch.Tensor(n_neurons))
proportions = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
rates = torch.zeros_like(torch.Tensor(n_neurons, n_classes))
ngram_scores = {}
else:
path = os.path.join('..', '..', 'params', data, model)
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
assignments, proportions, rates, ngram_scores = torch.load(open(path, 'rb'))
# Sequence of accuracy estimates.
curves = {'all': [], 'proportion': [], 'ngram': []}
if train:
best_accuracy = 0
spikes = {}
for layer in set(network.layers):
spikes[layer] = Monitor(network.layers[layer], state_vars=['s'], time=time)
network.add_monitor(spikes[layer], name='%s_spikes' % layer)
# Train the network.
if train:
print('\nBegin training.\n')
else:
print('\nBegin test.\n')
inpt_ims = None
inpt_axes = None
spike_ims = None
spike_axes = None
weights_im = None
start = t()
for i in range(n_examples):
if i % progress_interval == 0:
print('Progress: %d / %d (%.4f seconds)' % (i, n_train, t() - start))
start = t()
if i % update_interval == 0 and i > 0:
if i % len(labels) == 0:
current_labels = labels[-update_interval:]
else:
current_labels = labels[i % len(images) - update_interval:i % len(images)]
# Update and print accuracy evaluations.
curves, predictions = update_curves(
curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)
if train:
if any([x[-1] > best_accuracy for x in curves.values()]):
print('New best accuracy! Saving network parameters to disk.')
# Save network to disk.
path = os.path.join('..', '..', 'params', data, model)
if not os.path.isdir(path):
os.makedirs(path)
network.save(os.path.join(path, model_name + '.pt'))
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
best_accuracy = max([x[-1] for x in curves.values()])
# Assign labels to excitatory layer neurons.
assignments, proportions, rates = assign_labels(spike_record, current_labels, n_classes, rates)
# Compute ngram scores.
ngram_scores = update_ngram_scores(spike_record, current_labels, n_classes, 2, ngram_scores)
print()
# Get next input sample.
image = images[i].permute(2, 0, 1)
sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
inpts = {'X': sample}
# Run the network on the input.
network.run(inpts=inpts, time=time)
retries = 0
while spikes['Y_'].get('s').sum() < 5 and retries < 3:
retries += 1
sample = bernoulli(datum=image, time=time, dt=dt, max_prob=1.0).unsqueeze(1)
inpts = {'X': sample}
network.run(inpts=inpts, time=time)
# Add to spikes recording.
spike_record[i % update_interval] = spikes['Y_'].get('s').view(time, -1)
# Optionally plot various simulation information.
if plot:
_input = image.permute(1, 2, 0).float()
_input /= _input.max()
reconstruction = inpts['X'].sum(0).view(3, 32, 32).permute(1, 2, 0).float()
reconstruction /= reconstruction.max()
w = conv_conn.w
_spikes = {'X': spikes['X'].get('s').view(32*32*3, time),
'Y': spikes['Y'].get('s').view(n_filters * total_conv_size, time),
'Y_': spikes['Y_'].get('s').view(n_filters * total_conv_size, time)}
inpt_axes, inpt_ims = plot_input(
images[i].view(32, 32, 3), reconstruction, label=labels[i], ims=inpt_ims, axes=inpt_axes
)
spike_ims, spike_axes = plot_spikes(spikes=_spikes, ims=spike_ims, axes=spike_axes)
weights_im = plot_conv2d_weights(w, im=weights_im, wmax=0.1)
plt.pause(1e-8)
network.reset_() # Reset state variables.
print(f'Progress: {n_examples} / {n_examples} ({t() - start:.4f} seconds)')
i += 1
if i % len(labels) == 0:
current_labels = labels[-update_interval:]
else:
current_labels = labels[i % len(images) - update_interval:i % len(images)]
# Update and print accuracy evaluations.
curves, predictions = update_curves(
curves, current_labels, n_classes, spike_record=spike_record, assignments=assignments,
proportions=proportions, ngram_scores=ngram_scores, n=2
)
print_results(curves)
if train:
if any([x[-1] > best_accuracy for x in curves.values()]):
print('New best accuracy! Saving network parameters to disk.')
# Save network to disk.
path = os.path.join('..', '..', 'params', data, model)
if not os.path.isdir(path):
os.makedirs(path)
network.save(os.path.join(path, model_name + '.pt'))
path = os.path.join(path, '_'.join(['auxiliary', model_name]) + '.pt')
torch.save((assignments, proportions, rates, ngram_scores), open(path, 'wb'))
best_accuracy = max([x[-1] for x in curves.values()])
if train:
print('\nTraining complete.\n')
else:
print('\nTest complete.\n')
print('Average accuracies:\n')
for scheme in curves.keys():
print('\t%s: %.2f' % (scheme, np.mean(curves[scheme])))
# Save accuracy curves to disk.
path = os.path.join('..', '..', 'curves', data, model)
if not os.path.isdir(path):
os.makedirs(path)
if train:
to_write = ['train'] + params
else:
to_write = ['test'] + params
to_write = [str(x) for x in to_write]
f = '_'.join(to_write) + '.pt'
torch.save((curves, update_interval, n_examples), open(os.path.join(path, f), 'wb'))
# Save results to disk.
path = os.path.join('..', '..', 'results', data, model)
if not os.path.isdir(path):
os.makedirs(path)
results = [
np.mean(curves['all']), np.mean(curves['proportion']), np.mean(curves['ngram']),
np.max(curves['all']), np.max(curves['proportion']), np.max(curves['ngram'])
]
if train:
to_write = params + results
else:
to_write = test_params + results
to_write = [str(x) for x in to_write]
name = 'train.csv' if train else 'test.csv'
if not os.path.isfile(os.path.join(path, name)):
with open(os.path.join(path, name), 'w') as f:
if train:
columns = [
'seed', 'n_train', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time', 'dt',
'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
]
header = ','.join(columns) + '\n'
f.write(header)
else:
columns = [
'seed', 'n_train', 'n_test', 'kernel_size', 'stride', 'n_filters', 'padding', 'inhib', 'time',
'dt', 'intensity', 'update_interval', 'mean_all_activity', 'mean_proportion_weighting',
'mean_ngram', 'max_all_activity', 'max_proportion_weighting', 'max_ngram'
]
header = ','.join(columns) + '\n'
f.write(header)
with open(os.path.join(path, name), 'a') as f:
f.write(','.join(to_write) + '\n')
print()
authors: ["[email protected]"] | author_id: (empty)

path: /Problem-solving/HackerRank/p14- sWAP cASE.py | repo_name: shuvo14051/python-data-algo | branch_name: refs/heads/master
blob_id: e1bccde57c18d31ab7ae91528e51e89563c8c9b2 | directory_id: 3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6 | content_id: 5f4f5a0512103085cb85a010c0c4672a7a9a5c87
snapshot_id: 9b6622d9260e95ca9ffabd39b02996f13bdf20d1 | revision_id: 8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec
detected_licenses: [] | license_type: no_license
visit_date: 2023-02-03T03:04:01.183093 | revision_date: 2020-12-13T10:13:15 | committer_date: 2020-12-13T10:13:15
github_id: 274,106,480 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: 2020-07-05T06:33:28 | gha_created_at: 2020-06-22T10:24:05 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 194 | extension: py
content:
# n = input()
#
# swaped_n = n.swapcase()
#
# print(swaped_n)

def swap_case(s):
    return s.swapcase()

if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)
authors: ["[email protected]"] | author_id: (empty)

path: /solutions/rijeci.py | repo_name: matthew-cheney/kattis-solutions | branch_name: refs/heads/main
blob_id: e8b0c3235cae1f212444dfb8a51751d4dc4ad88f | directory_id: c0717724c7dc3937252bb4a7bd7c796088db4c5d | content_id: f7bb3e0093b8d9f71b50155f01803ba70acde38f
snapshot_id: 58cd03394ad95e9ca7ffa3de66b69d90647b31ff | revision_id: d9397ca4715a3ad576046a62bdd6c0fb9542d838
detected_licenses: [] | license_type: no_license
visit_date: 2023-01-24T12:49:18.871137 | revision_date: 2020-12-10T04:10:48 | committer_date: 2020-12-10T04:10:48
github_id: 318,857,227 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 89 | extension: py
content:
K = int(input())
A, B = 1, 0
for k in range(K):
    A, B = B, A + B
print(A, B)
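# Worked example of the recurrence above: starting from (A, B) = (1, 0), each step maps
# (A, B) -> (B, A + B), so after K steps A = F(K-1) and B = F(K) (Fibonacci numbers).
# For K = 5 the states are (1,0) -> (0,1) -> (1,1) -> (1,2) -> (2,3) -> (3,5), printing "3 5".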
authors: ["[email protected]"] | author_id: (empty)

path: /tensorflow/python/tpu/tensor_tracer.py | repo_name: nauman07/tensorflow | branch_name: refs/heads/master
blob_id: 0769073e54f97a7b28ca46674614b73ce89d67c6 | directory_id: 37906b41991719dff0590f9161f9b69af8d7e491 | content_id: b9aec3f2e26e5272030fbfb380877f6d6a789d29
snapshot_id: 7ae4277564bb596c0f8ba5d107a35d9505c3c2fb | revision_id: f88cf68393e60525506a567e0081b8e2e6db409b
detected_licenses: ["Apache-2.0"] | license_type: permissive
visit_date: 2020-08-28T15:55:35.510154 | revision_date: 2019-10-26T15:34:58 | committer_date: 2019-10-26T15:39:08
github_id: 217,742,698 | star_events_count: 3 | fork_events_count: 0 | gha_license_id: Apache-2.0 | gha_event_created_at: 2019-10-26T17:11:10 | gha_created_at: 2019-10-26T17:11:09 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 66,668 | extension: py
content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""A utility to trace tensor values on TPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import sys
import numpy as np
import six
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import analytics
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.tpu import tensor_tracer_flags
from tensorflow.python.tpu import tensor_tracer_report
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import training_util
_DEVICE_TYPE_TPU = 'tpu'
_DEVICE_TYPE_CPU = 'cpu'
_TRACE_MODE_PART_TENSOR_SIZE = 3
_REASON_OUTSIDE_OP_RANGE = 'not-traced-outside-op-range'
_REASON_UNSAFE_OP = 'not-traced-unsafe-op'
_REASON_WHILELOOP_OP = 'not-traced-special-whileloop-op'
_REASON_UNSAFE_SCALAR = 'not-traced-unsafe-scalar'
_REASON_SKIP_SCALAR = 'not-traced-scalar'
_REASON_LESS_INTERESTING_OP = 'not-traced-less-interesting-op'
_REASON_DEVICE_MISMATCH = 'not-traced-device-mismatch'
_REASON_DYNAMIC_SHAPE = 'not-traced-dynamic-shape'
_REASON_SCALAR_GET_TRACED = 'traced-scalar'
_REASON_TENSOR_GET_TRACED = 'traced-tensor'
_REASON_USER_INCLUDED = 'traced-user-included'
_REASON_USER_EXCLUDED = 'not-traced-user-excluded'
_REASON_NOT_EXECUTED = 'not-traced-not-in-exec-path'
_REASON_NON_NUMERIC_TENSOR = 'not-traced-non-numeric-tensor'
_REASON_FEEDS_WHILELOOP_OP = 'not-traced-feeds-special-whileloop-op'
_OUTPUT_STREAM_ESCAPE = 'file://'
_TENSOR_TRACER_COLLECTION = 'tensor_tracer_variables'
_TRACE_FILE_NAME = 'trace.all'
_COMPACT_TRACE_FILE_PREFIX = 'compact_trace.'
_COMPACT_TRACE_ENTRY_INIT_VALUE = -1.0
_TENSOR_TRACER_STORAGE = 'tensor_tracer_storage'
_TT_SNAPSHOT = 'tensor_tracer_snapshot'
_REPLICA_ID_TAG = '#replica-id: '
_TT_SUMMARY_NORM = tensor_tracer_flags.TT_SUMMARY_NORM
_TT_SUMMARY_MAX = tensor_tracer_flags.TT_SUMMARY_MAX
_TT_SUMMARY_MIN = tensor_tracer_flags.TT_SUMMARY_MIN
_TT_SUMMARY_MEAN = tensor_tracer_flags.TT_SUMMARY_MEAN
_TT_SUMMARY_VAR = tensor_tracer_flags.TT_SUMMARY_VAR
_TT_SUMMARY_SIZE = tensor_tracer_flags.TT_SUMMARY_SIZE
_TT_SUMMARY_TAG = 'tensor_tracer_summary'
_TT_TENSORBOARD_PLUGIN_NAME = 'tensor_tracer'
_TT_HOSTCALL_KEY = 'tensor_tracer_host_call'
_TT_EVENT_FILE_SUFFIX = '.tensor_tracer'
_TT_SUMMARY_MAX_QUEUE = 100
def op_priority(op_type):
"""Returns the priority of the op.
If the priority of the op is k, it will be traced if trace_level>=k.
Args:
op_type: String name of the operation type.
Returns:
Integer value corresponding the priority of the op.
"""
if op_type in ('Const', 'Shape', 'BroadcastGradientArgs', 'Range',
'VariableShape', 'Fill', 'OneHot'):
# Lowest priority ops, e.g., constant ops across different steps,
# They will be traced only if trace_level>=7
return 7
if op_type in ('Identity', 'Cast', 'Reshape', 'ExpandDims', 'StopGradient',
'PreventGradient', 'Squeeze'):
# Operations without numerical effects.
# They will be traced only if trace_level>=6
return 6
if op_type in ('ConcatV2', 'Concat', 'StridedSlice', 'Slice', 'Pack', 'Tile'):
# Operations that merge or slice an input, will be traced if trace_level>=5
return 5
if op_type in ('Pad', 'RandomUniformInt', 'GreaterEqual'):
# Operations less likely to provide useful information,
# will be traced if trace_level>=4
return 4
if op_type in ('Sum', 'AddV2', 'Add', 'AddN', 'BiasAdd', 'CrossReplicaSum'):
# Add operations that are less likely create any issues, will be traced
# if trace_level>=3 (default=3)
return 3
if op_type in ('Neg', 'Sub'):
# Sub operations that are less likely create any issues, will be traced
# trace_level>=2
return 2
if op_type in ('Mul', 'Square', 'MatMul', 'RandomUniform', 'Select',
'Maximum', 'Mean', 'Variance'):
# Multiplication and some other operations, will be traced if trace_level>=1
return 1
return 0
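# Example (derived from the table above): with the default trace_level of 3, an 'AddV2' op
# (priority 3) is traced, while a 'Reshape' op (priority 6) is not.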
def read_tensor_tracer_event_file(event_file):
"""Reads the event file written by tensor tracer.
Args:
event_file: Path to the event file that contains only tensor tracer events.
Returns:
An event dictionary in the form of
{step_number: {tensor_name: tensor_content}}
Raises:
ValueError: If an unexpected trace is found.
"""
event_dict = {}
for trace_event in summary_iterator.summary_iterator(event_file):
# First event is an event with file_version: "brain.Event:2"
if not trace_event.HasField('summary'):
continue
step = trace_event.step
if step not in event_dict:
event_dict[step] = {}
if len(trace_event.summary.value) != 1:
raise ValueError('Single step contains %d summary values,'
' expected 1.' % len(trace_event.summary.value))
tensor_value = trace_event.summary.value[0]
tensor_name = tensor_value.tag
real_shape = [d.size for d in tensor_value.tensor.tensor_shape.dim]
tensor_content = np.frombuffer(
tensor_value.tensor.tensor_content,
dtypes.DType(tensor_value.tensor.dtype).as_numpy_dtype()
).reshape(real_shape)
event_dict[step][tensor_name] = tensor_content
return event_dict
def tensor_tracepoint(tensor, checkpoint_name):
"""Adds a checkpoint with the given checkpoint name for the given tensor.
The tensor will be added to the list of tensors that will be traced by the
tensor tracer.
Args:
tensor: the tensor object for which the tracing is requested.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
checkpoint identifier are compared in model comparison.
Returns:
The provided tensor.
"""
tensor.graph.get_collection(_TENSOR_TRACER_COLLECTION)
tensor.graph.add_to_collection(_TENSOR_TRACER_COLLECTION,
(tensor, checkpoint_name))
return tensor
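# Usage sketch (hypothetical tensor and checkpoint names; assumes Tensor Tracer is enabled
# for the run via the TENSOR_TRACER_FLAGS environment variable described below):
#   logits = tensor_tracepoint(logits, 'logits_checkpoint')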
def keras_layer_tracepoint(layer, checkpoint_name):
"""An interface for adding the tensor outputs of a keras layer.
Encapsulates tensor_tracepoint.
Args:
layer: A keras layer.
checkpoint_name: a string name for the checkpoint. This name has to be a
unique name if used within model comparison. The tensors that have the same
checkpoint identifier are compared in model comparison.
Returns:
The provided layer.
"""
try:
outputs = layer.output
if tensor_util.is_tensor(outputs):
tensor_tracepoint(outputs, '%s' % (checkpoint_name))
else:
idx = 0
for output_tensor in outputs:
if tensor_util.is_tensor(outputs):
tensor_tracepoint(output_tensor, '%s_%d' % (checkpoint_name, idx))
idx += 1
except AttributeError:
pass
except RuntimeError:
pass
return layer
def _trace_files_need_precreated(output_dir):
"""Return True if trace files must be pre-created by users."""
if not output_dir.startswith('/'):
return False
if len(output_dir) < 5:
return False
if output_dir[2] != 'n':
return False
if output_dir[3] != 's':
return False
if output_dir[1] != 'c':
return False
if output_dir[4] != '/':
return False
return True
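# Note: taken together, the checks above require output_dir to start with '/cns/'
# (a path of at least five characters beginning '/', 'c', 'n', 's', '/').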
class TensorTracer(object):
"""A software construct for tracing tensor values in a TF graph on TPU.
This utility is disabled by default. It can be enabled by setting
the TENSOR_TRACER_FLAGS env variable as:
export TENSOR_TRACER_FLAGS="--enable=1"
If it is enabled, it will trace the output tensor values of
selected Ops in the graph. It has two outputs: (1) the traces and (2)
a report. The traces are dumped to a specified local file on the TPU
host. The report is printed to the log.info of the TPU job.
By passing options via the env variable, users can change:
(1) the trace mode (e.g., detecting NaN/Inf, printing partial or
full tensor values)
(2) which Ops to be traced (via op.name or op.type)
(3) output trace file path.
"""
# The set of graphs that are rewritten by tensor tracer.
_traced_graphs = set()
@staticmethod
def is_enabled():
"""Returns True if TensorTracer is enabled."""
return tensor_tracer_flags.TTParameters().is_enabled()
@staticmethod
def check_device_type(device_type):
"""Checks if the given device type is valid."""
if device_type not in (_DEVICE_TYPE_TPU, _DEVICE_TYPE_CPU):
raise ValueError('Invalid device_type "%s"'%device_type)
@staticmethod
def check_trace_mode(device_type, trace_mode):
"""Checks if the given trace mode work on the given device type.
Args:
device_type: Device type, TPU, GPU, CPU.
trace_mode: Tensor tracer trace mode.
Raises:
ValueError: If the given trace mode is not supported for the device.
"""
if trace_mode in (tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
if device_type != _DEVICE_TYPE_TPU:
raise ValueError('Device_type "%s" is not yet supported for '
'trace mode "%s"' % (device_type, trace_mode))
@staticmethod
def loop_cond_op(op):
return op.type in ('LoopCond', 'RefLoopCond')
@staticmethod
def while_loop_op(op):
"""Returns true if op is one of the special ops of in a while loop.
Args:
op: A tf.Operation.
Returns:
True if the given op is one of [Switch, Merge, Enter, Exit,
NextIteration, LoopCond], which are all building blocks for TF while
loops.
"""
return (control_flow_util.IsLoopSwitch(op) or
control_flow_util.IsLoopMerge(op) or
control_flow_util.IsLoopEnter(op) or
control_flow_util.IsLoopExit(op) or
TensorTracer.loop_cond_op(op) or
op.type in ('RefNextIteration', 'NextIteration'))
@staticmethod
def unsafe_op(op):
"""Returns True if this op is not safe to be traced."""
if control_flow_util.IsInCond(op):
return True
# Reasons for not including following op types:
# Assign: cause incorrect result with CPU tracing.
if op.type == 'Assign':
return True
return False
@staticmethod
def device_mismatch(device_type, op):
if device_type == _DEVICE_TYPE_TPU:
# pylint: disable=protected-access
return tpu._TPU_REPLICATE_ATTR not in op.node_def.attr
# pylint: enable=protected-access
return False
@staticmethod
def unsafe_scalar_trace(op):
"""Return true if scalar output tensor from Op is not safe to be traced."""
# Tracing the following causes cycle in the graph on TPU.
if op.type in ('LoopCond', 'Enter', 'Merge', 'Const',
'Switch', 'Less', 'ReadVariableOp'):
return True
# Tracing the following will cause casting-issue
# with the norm tracing mode or other compilation issues on CPU.
if op.type in ('VarHandleOp', 'IteratorToStringHandle',
'IteratorGetNext', 'OneShotIterator',
'IteratorV2', 'MakeIterator',
'BatchDatasetV2', 'MapDataset',
'FixedLengthRecordDataset', 'TakeDataset', 'ZipDataset',
'Placeholder', 'PlaceholderWithDefault', 'StridedSlice'):
return True
return False
def _is_interesting_op(self, op):
"""Returns True if the given op is not an interesting one to be traced."""
# If flag is set to include less interesting ops, then include everything.
if self._parameters.include_less_interesting_ops:
return True
return op_priority(op.type) <= self._parameters.trace_level
@staticmethod
def reason(op_idx, details):
"""Returns reason why the Op at op_idx is traced or not."""
return '%d %s'%(op_idx, details)
def __init__(self):
"""Initializes a TensorTracer.
Sets the various member fields from the flags (if given) or the defaults.
"""
self._replica_id = None
self._tt_config = tensor_tracer_report.TensorTracerConfig()
self._parameters = tensor_tracer_flags.TTParameters()
self._included_op_full_names = set()
self._host_call_fn = {}
self._cache_variables = {}
def _get_all_cache_variables(self):
return self._cache_variables
def _create_or_get_tensor_values_cache(self, cache_name, graph=None,
shape=None, dtype=dtypes.float32):
"""Creates a variable as the cache to store intermediate tensor values.
Args:
cache_name: Name to be given to the cache (an instance of tf.variable).
graph: Tensorflow graph.
shape: A list of dimensions.
dtype: Data type of created cache.
Returns:
A ref to newly created or existing cache with the given dimensions.
Raises:
ValueError: If missing a parameter to create the cache.
"""
def _escape_namescopes(variable_name):
# TODO(deveci): This might cause name collisions as in "foo/bar/mytensor"
# and "foo_bar/mytensor".
return variable_name.replace('/', '_').replace(':', '_')
if cache_name not in self._cache_variables:
if graph is None:
raise ValueError('Graph must be provided at cache creation.')
if shape is None:
raise ValueError('shape must be provided at cache creation.')
graph = graph or ops.get_default_graph()
if dtype.is_integer:
init_val = int(_COMPACT_TRACE_ENTRY_INIT_VALUE)
else:
init_val = _COMPACT_TRACE_ENTRY_INIT_VALUE
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
self._cache_variables[cache_name] = variable_scope.get_variable(
_TT_SNAPSHOT + '_' + _escape_namescopes(cache_name),
shape=shape, dtype=dtype,
initializer=init_ops.constant_initializer(init_val),
trainable=False,
use_resource=True,
collections=[_TENSOR_TRACER_STORAGE, ops.GraphKeys.LOCAL_VARIABLES])
return self._cache_variables[cache_name]
def _add_replica_id_to_graph(self):
"""Adds nodes for computing the replica ID to the graph."""
if self._tt_config.num_replicas:
with ops.control_dependencies(None):
# Uses None as dependency to run outside of TPU graph rewrites.
self._replica_id = tpu_ops.tpu_replicated_input(
list(range(self._tt_config.num_replicas)),
name='tt_replica_id')
else:
self._replica_id = 'unknown'
def _inside_op_range(self, idx):
"""Return True if the given index is inside the selected range."""
if idx < self._parameters.op_range[0]:
return False
return (self._parameters.op_range[1] < 0 or
idx <= self._parameters.op_range[1])
def _is_user_included_op(self, op):
"""Checks whether the op is included in the tensor tracer flags.
Args:
op: tf Operation
Returns:
True, if the op is included.
An op is included if:
- Its op name is given in included_opnames
- Its op type is given in included_optypes
- The op is at most _trace_ops_before_included hops before an included op
- The op is at most _trace_ops_after_included hops after an included op
"""
def _is_op_or_any_neighbor_included(op, check_before=0, check_after=0):
"""Helper function to check if op is included or not."""
if op.name in self._included_op_full_names:
return True
for opname_re in self._parameters.included_opname_re_list:
if opname_re.match(op.name):
self._included_op_full_names.add(op.name)
return True
for optype_re in self._parameters.included_optype_re_list:
if optype_re.match(op.type):
self._included_op_full_names.add(op.name)
return True
if check_after > 0:
for out_tensor in op.outputs:
for consumer in out_tensor.consumers():
if _is_op_or_any_neighbor_included(consumer, check_after - 1, 0):
self._included_op_full_names.add(op.name)
return True
if check_before > 0:
for input_tensor in op.inputs:
if _is_op_or_any_neighbor_included(input_tensor.op,
0,
check_before - 1):
self._included_op_full_names.add(op.name)
return True
return False
# check_after and check_before are swapped below, as below operation
# checks the distance from an arbitrary op to included ops.
return _is_op_or_any_neighbor_included(
op, self._parameters.trace_ops_after_included,
self._parameters.trace_ops_before_included)
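  # Illustration (hypothetical flag values): if included_opname_re_list
  # matches an op named 'dense/MatMul' and trace_ops_after_included is 1, then
  # that MatMul and the ops that consume its outputs directly (one hop away)
  # are all treated as included.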
def _is_user_excluded_op(self, op):
for opname_re in self._parameters.excluded_opname_re_list:
if opname_re.match(op.name):
return True
for optype_re in self._parameters.excluded_optype_re_list:
if optype_re.match(op.type):
return True
return False
def _signature_types(self):
"""Returns a dictionary holding the order of signatures in the cache for the selected trace mode."""
if self._parameters.trace_mode in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS]):
return {self._parameters.trace_mode: 0}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
return self._parameters.summary_signatures
return {}
def _num_signature_dimensions(self):
return len(self._signature_types())
def _use_tensor_values_cache(self):
"""Returns True if immediate tensors should be first saved to a cache."""
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
      # For summary trace mode, only the compact format is supported.
return True
if self._parameters.trace_mode not in set([
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
]):
return False
if (self._parameters.trace_dir and
_trace_files_need_precreated(self._parameters.trace_dir)):
return True
return self._parameters.use_compact_trace
def _use_tensor_buffer(self):
"""Returns true if the whole tensor needs to be cached/buffered in memory."""
return (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
def _save_tensor_value_to_cache_op(self, cache_idx, updates):
"""Returns an op that will save the given updates to an entry in the cache.
Args:
cache_idx: The cache index of the tensor within the cache.
updates: A dictionary of the signature updates.
Returns:
Cache update operation.
"""
    # state_ops.scatter_update allows updates only along the first dimension.
    # Make a compact array by concatenating the different signatures, and
    # update them all together.
sorted_update = []
if self._num_signature_dimensions() > 1:
signature_indices = self._signature_types()
for _, val in sorted(updates.items(),
key=lambda item: signature_indices[item[0]]):
sorted_update.append(val)
updates = array_ops.stack(sorted_update, axis=0)
updates = array_ops.reshape(updates, [1,
self._num_signature_dimensions()])
else:
(_, val), = updates.items()
updates = array_ops.reshape(val, [1, self._num_signature_dimensions()])
indices = constant_op.constant([cache_idx])
cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
return state_ops.scatter_update(cache, indices, updates).op
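  # Example of the resulting row layout: with summary signatures ordered as,
  # say, {'max': 0, 'min': 1, 'norm': 2} (illustrative; the real order comes
  # from self._parameters.summary_signatures), the updates dict is stacked in
  # that order into a [1, 3] row and scattered into row cache_idx of the
  # _TT_SUMMARY_TAG cache.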
def _snapshot_tensor(self, tensor):
"""Creates a new tf.Variable and a new tf.Operation that assigns the value of the tensor to this variable.
Args:
tensor: tensor whose values will be stored in a new tf.Variable.
Returns:
An assignment operation.
"""
snapshot_variable = self._create_or_get_tensor_values_cache(
tensor.name, tensor.op.graph,
tensor.shape.as_list(), tensor.dtype)
return state_ops.assign(snapshot_variable, tensor).op
def _preprocess_traced_tensor(self, tensor):
"""Computes NAN/Norm/Max on TPUs before sending to CPU.
Args:
tensor: The tensor to be traced.
Returns:
A tensor that should be input to the trace_function.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _detect_nan_inf(tensor):
"""Trace function for detecting any NaN/Inf in the tensor."""
if tensor.dtype.is_floating:
mask = math_ops.reduce_any(
gen_math_ops.logical_or(
gen_math_ops.is_nan(tensor), gen_math_ops.is_inf(tensor)))
output_tensor = control_flow_ops.cond(
mask,
lambda: constant_op.constant([1.0]),
lambda: constant_op.constant([0.0]))
else:
output_tensor = constant_op.constant([0.0])
return output_tensor
def _compute_signature(tensor, tf_op, cast_to_f32=True):
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = tf_op(tensor)
# Return type should be scalar. Set it if it does not have the
# information.
if not output_tensor.get_shape().is_fully_defined():
output_tensor = array_ops.reshape(output_tensor, [])
return output_tensor
def _show_size(tensor):
# In order to check the size of a tensor.
# Not all sizes are known at the compile time, also, different replicas
# sometimes get different sizes of tensors.
# Collect it here to be used in merging replica data.
tsize = _compute_signature(tensor, array_ops.size, cast_to_f32=False)
# Cast to float32, so that it can be placed into same cache with other
# signatures.
return math_ops.cast(tsize, dtypes.float32)
def _show_max(tensor, cast_to_f32=True):
# returns -inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_max, cast_to_f32)
def _show_min(tensor, cast_to_f32=True):
# returns inf for empty tensor
return _compute_signature(tensor, math_ops.reduce_min, cast_to_f32)
def _show_norm(tensor, cast_to_f32=True):
# returns 0 for empty tensor
return _compute_signature(tensor, linalg_ops.norm, cast_to_f32)
def _show_mean_and_variance(tensor, cast_to_f32=True):
"""Returns the mean and variance of the given tensor."""
if cast_to_f32:
tensor = math_ops.cast(tensor, dtypes.float32)
# returns nan for empty tensor
mean, var = nn_impl.moments(array_ops.reshape(tensor, [-1]), axes=[0])
# The shape has to be 1. Set it if it does not have the information.
if not mean.get_shape().is_fully_defined():
mean = array_ops.reshape(mean, [])
if not var.get_shape().is_fully_defined():
var = array_ops.reshape(var, [])
return mean, var
def _show_max_abs(tensor):
tensor = math_ops.cast(tensor, dtypes.float32)
output_tensor = math_ops.reduce_max(math_ops.abs(tensor))
zero = constant_op.constant(0, dtypes.float32)
output_tensor = gen_math_ops.maximum(zero, output_tensor)
# The shape has to be 1. Set it if it does not have the information.
output_tensor = array_ops.reshape(output_tensor, [1])
return output_tensor
def _detect_inf_nan_producer(tensor):
"""Checks if the tensor is the first NaN/Inf tensor in the computation path."""
if tensor.op.inputs:
inp_check = [
_detect_nan_inf(inp_tensor) for inp_tensor in tensor.op.inputs
]
is_any_input_inf_nan = math_ops.add_n(inp_check)
else:
        # Use a float32 zero so the subtraction below is consistent with the
        # float output of _detect_nan_inf.
        is_any_input_inf_nan = constant_op.constant(0, dtypes.float32)
is_current_tensor_inf_nan = _detect_nan_inf(tensor)
      # An op is a NaN/INF producer only when all of its inputs are nan/inf
      # free (is_any_input_inf_nan = 0) and its own output has nan/inf
      # (is_current_tensor_inf_nan = 1). The difference below is positive only
      # for such producer ops.
is_nan_producer = is_current_tensor_inf_nan - is_any_input_inf_nan
is_nan_producer = math_ops.reduce_any(is_nan_producer > 0)
return is_nan_producer
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return {self._parameters.trace_mode: _detect_inf_nan_producer(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NAN_INF:
return {self._parameters.trace_mode: _detect_nan_inf(tensor)}
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return {self._parameters.trace_mode: tensor}
if (self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)):
return {self._parameters.trace_mode: tensor}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_NORM:
return {self._parameters.trace_mode: array_ops.reshape(
_show_norm(tensor), [1])}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_MAX_ABS:
return {self._parameters.trace_mode: _show_max_abs(tensor)}
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
tensor = math_ops.cast(tensor, dtypes.float32)
result_dict = {}
# Call mean and variance computation here to avoid adding the same nodes
# twice.
if (_TT_SUMMARY_MEAN in self._signature_types() or
_TT_SUMMARY_VAR in self._signature_types()):
mean, variance = _show_mean_and_variance(tensor, cast_to_f32=False)
for signature_name, _ in sorted(self._signature_types().items(),
key=lambda x: x[1]):
if signature_name == _TT_SUMMARY_NORM:
signature_result_tensor = _show_norm(tensor, cast_to_f32=False)
elif signature_name == _TT_SUMMARY_MAX:
signature_result_tensor = _show_max(tensor, cast_to_f32=False)
elif signature_name == _TT_SUMMARY_MIN:
signature_result_tensor = _show_min(tensor, cast_to_f32=False)
elif signature_name == _TT_SUMMARY_SIZE:
signature_result_tensor = _show_size(tensor)
elif signature_name == _TT_SUMMARY_MEAN:
signature_result_tensor = mean
elif signature_name == _TT_SUMMARY_VAR:
signature_result_tensor = variance
else:
raise ValueError('Unknown signature type :%s.' % signature_name)
result_dict[signature_name] = signature_result_tensor
return result_dict
raise RuntimeError(
'Tensor trace fun for %s is not yet implemented'
% self._parameters.trace_mode)
def _make_tensor_trace_fun(self, tensor_name, tensor_trace_order):
"""Makes the tensor tracing function called by outside compilation.
Args:
tensor_name: name of the tensor being traced.
tensor_trace_order: TensorTraceOrder object holding tensorname to id map.
Returns:
A function to be passed as the first argument to outside compilation.
Raises:
RuntimeError: If the trace mode is invalid.
"""
def _print_tensor(tensor_name, num_elements, tensor, output_tensor):
"""Prints a tensor value to a file.
Args:
tensor_name: name of the tensor being traced.
num_elements: number of elements to print (-1 means print all).
        tensor: the tensor that needs to be returned.
        output_tensor: the tensor that needs to be printed.
Returns:
The same tensor passed via the "tensor" argument.
Raises:
ValueError: If tensor_name is not already in
self._tensorname_idx_map.
"""
if self._parameters.is_brief_mode():
if tensor_name not in tensor_trace_order.tensorname_idx_map:
raise ValueError(
'Tensor name %s is not in the tensorname_idx_map'%tensor_name)
        msg = '%d' % tensor_trace_order.tensorname_idx_map[tensor_name]
else:
msg = '"%s"'%tensor_name
if self._parameters.trace_dir:
output_path = os.path.join(self._parameters.trace_dir, _TRACE_FILE_NAME)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
return logging_ops.print_v2(msg, array_ops.shape(output_tensor),
'@', self._replica_id,
'\n', output_tensor, '\n',
summarize=num_elements,
output_stream=output_stream)
def _show_part_tensor(tensor):
"""Trace function for printing part of the tensor."""
return _print_tensor(tensor_name, _TRACE_MODE_PART_TENSOR_SIZE,
tensor, tensor)
def _show_full_tensor(tensor):
"""Trace function for printing the entire tensor."""
return _print_tensor(tensor_name, -1, tensor, tensor)
def _show_full_tensors(tensor):
"""Prints the full tensor values for the tensors that are _trace_stack_size hops away from a given tensor."""
def _get_distance_k_tensors(k_before=0):
"""Returns the tensors that are at most k_before hops away from the tensor."""
if k_before < 0:
return []
visited_tensors = {tensor: 0}
visitor_queue = [tensor]
head = 0
while head < len(visitor_queue):
current_tensor = visitor_queue[head]
head += 1
distance = visited_tensors[current_tensor]
if distance == k_before:
break
for input_tensor in current_tensor.op.inputs:
if input_tensor in visited_tensors:
continue
visitor_queue.append(input_tensor)
visited_tensors[input_tensor] = distance + 1
return visitor_queue
tensors_to_print = _get_distance_k_tensors(
self._parameters.trace_stack_size)
print_ops = [_print_tensor(t.name, -1, t, t) for t in tensors_to_print]
with ops.control_dependencies(print_ops):
return constant_op.constant(True)
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_FULL_IF_NAN):
return _show_full_tensors
if (self._parameters.trace_mode ==
tensor_tracer_flags.TRACE_MODE_PART_TENSOR):
return _show_part_tensor
# The input tensor has a shape of "[1]" for TRACE_MODE_NAN_INF,
# TRACE_MODE_NORM, and TRACE_MODE_MAX_ABS, as related computations are
# performed within TPUs and only their results are transferred to CPU.
# Simply, print the full tensor for these trace modes.
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
):
return _show_full_tensor
raise RuntimeError('Tensor trace fun for %s is not yet implemented'
%self._parameters.trace_mode)
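  # For orientation, a single trace line produced by _print_tensor above looks
  # roughly like:
  #   "<tensor name>" [<shape>] @ <replica id>
  #   <tensor values>
  # (approximate; the exact spacing is whatever logging_ops.print_v2 emits).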
def _skip_op(self, op_id, op, ops_in_exec_path, report_handler):
"""Returns True if we should not trace Op.
Args:
op_id: Topological index of the op.
op: tf.Operation
ops_in_exec_path: Set of operations that are in the execution path.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the op should not be traced, false otherwise.
"""
if TensorTracer.while_loop_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_WHILELOOP_OP))
return True
if TensorTracer.unsafe_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_UNSAFE_OP))
return True
if TensorTracer.device_mismatch(self._tt_config.device_type, op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_DEVICE_MISMATCH))
return True
if op not in ops_in_exec_path:
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_NOT_EXECUTED))
return True
if self._is_user_included_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if not self._inside_op_range(op_id):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_OUTSIDE_OP_RANGE))
return True
if not self._is_interesting_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_LESS_INTERESTING_OP))
return True
if self._is_user_excluded_op(op):
report_handler.instrument_op(
op, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
return False
def _skip_tensor(self, op_id, out_tensor, report_handler):
"""Returns True if we should not trace out_tensor.
Args:
op_id: Topological index of the op producing tensor.
out_tensor: tf.Tensor
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
True if the tensor should not be traced, false otherwise.
"""
# Skips a tensor if the tensor has a non-numeric type.
# Note: we cannot use check_ops.is_numeric_tensor(out_tensor)
    # because it also excludes tensors with dtypes bool and float32_ref,
    # which we actually want to trace.
non_numeric_tensor_types = set([dtypes.variant, dtypes.resource,
dtypes.string])
if out_tensor.dtype in non_numeric_tensor_types:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_NON_NUMERIC_TENSOR))
return True
# Skip a tensor if it feeds a special while loop op.
if [consumer for consumer in out_tensor.consumers() if
TensorTracer.while_loop_op(consumer)]:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_FEEDS_WHILELOOP_OP))
return True
if self._is_user_included_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_INCLUDED))
return False
if self._is_user_excluded_op(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_USER_EXCLUDED))
return True
if not out_tensor.get_shape().is_fully_defined():
# If trace mode is nan-inf, norm or max, then the tensor will be reduced
# to a scalar before the outside compilation call.
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_NAN_INF,
tensor_tracer_flags.TRACE_MODE_NORM,
tensor_tracer_flags.TRACE_MODE_MAX_ABS,
tensor_tracer_flags.TRACE_MODE_SUMMARY
):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_DYNAMIC_SHAPE))
return True
rank = len(out_tensor.shape)
if rank < 1:
# scalar
if self._parameters.trace_scalar_ops:
if TensorTracer.unsafe_scalar_trace(out_tensor.op):
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_UNSAFE_SCALAR))
return True
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SCALAR_GET_TRACED))
return False
else:
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_SKIP_SCALAR))
return True
else:
# tensor
report_handler.instrument_tensor(
out_tensor, TensorTracer.reason(op_id, _REASON_TENSOR_GET_TRACED))
return False
def _filter_execution_path_operations(self, operations, fetches):
"""Returns the set of ops in the execution path to compute given fetches."""
# If no fetch provided, then return all operations.
if fetches is None:
return set(operations)
# Convert to list, if a single element is provided.
if not isinstance(fetches, (list, tuple)):
fetches = [fetches]
# If a tensor is given as fetch, convert it to op.
op_fetches = []
for fetch in fetches:
if isinstance(fetch, ops.Operation):
op_fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
op_fetches.append(fetch.op)
else:
raise RuntimeError('Given fetch:%s is neither a tensor nor an op.'
%fetch)
execution_path_operations = set(op_fetches)
traverse_stack = list(op_fetches)
while True:
if not traverse_stack:
break
head_op = traverse_stack.pop()
input_ops = [tensor_input.op for tensor_input in head_op.inputs]
input_ops.extend(head_op.control_inputs)
for input_op in input_ops:
if input_op not in execution_path_operations:
# Filter out loop condition operations, tracing them causes a cycle.
# Trace only the loop-body.
if TensorTracer.loop_cond_op(input_op):
continue
execution_path_operations.add(input_op)
traverse_stack.append(input_op)
return execution_path_operations
def _determine_and_instrument_traced_tensors(self, graph_order,
ops_in_exec_path,
tensor_trace_points,
report_handler):
"""Determines the tensors to trace and instruments the trace details.
Args:
graph_order: graph_order tuple containing graph (tf.graph), operations
(list of operations), op_to_idx (op id mapping), (tensors) list of
tensors, tensor_to_idx (tensor id mapping), contains_cycle (whether
there is a cycle in the graph), topological_order_or_cycle (list of ops
in topological order or list of ops creating a cycle).
ops_in_exec_path: Set of ops in the execution path.
      tensor_trace_points: Collection of programmatic tensor trace points.
report_handler: An instance of tensor_tracer_report.TTReportHandle.
Returns:
List of tensors to be traced.
"""
traced_tensors = []
checkpoint_operations = set([tensor.op
for (tensor, _) in tensor_trace_points])
for op_id, op in enumerate(graph_order.operations):
if checkpoint_operations and op not in checkpoint_operations:
continue
if self._skip_op(op_id, op, ops_in_exec_path, report_handler):
continue
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
if not self._skip_tensor(op_id, out_tensor, report_handler):
traced_tensors.append(out_tensor)
return traced_tensors
def _check_trace_files(self):
"""Checks if any requirements for trace files are satisfied."""
if not self._parameters.trace_dir:
# traces will be written to stderr. No need to check trace files.
return
if self._parameters.trace_mode == tensor_tracer_flags.TRACE_MODE_SUMMARY:
# Output files are handled by tf.summary operations, no need to precreate
# them.
return
if _trace_files_need_precreated(self._parameters.trace_dir):
for replica_id in range(0, self._tt_config.num_replicas):
trace_file_path = os.path.join(
self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX) + '%d'%replica_id
if not gfile.Exists(trace_file_path):
raise RuntimeError(
'%s must be pre-created with the '
'appropriate properties.'%trace_file_path)
else:
if not gfile.Exists(self._parameters.trace_dir):
gfile.MkDir(self._parameters.trace_dir)
if not gfile.Exists(self._parameters.trace_dir):
raise RuntimeError('Failed to create %s'%self._parameters.trace_dir)
def _determine_trace_and_create_report(self, graph, ops_in_exec_path):
"""Work needs to be done prior to TPU or CPU tracing.
Args:
graph: tf.graph
ops_in_exec_path: Set of operations in the execution path.
Returns:
An instance of tensor_tracer_report.TensorTraceOrder, containing list of
tensors to be traced with their topological order information.
"""
self._check_trace_files()
graph_order = tensor_tracer_report.sort_tensors_and_ops(graph)
tensor_trace_points = graph.get_collection(_TENSOR_TRACER_COLLECTION)
report_handler = tensor_tracer_report.TTReportHandle()
traced_tensors = self._determine_and_instrument_traced_tensors(
graph_order, ops_in_exec_path, tensor_trace_points, report_handler)
tensor_trace_order = tensor_tracer_report.TensorTraceOrder(graph_order,
traced_tensors)
num_signatures = self._num_signature_dimensions()
if num_signatures:
self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG,
graph,
[len(traced_tensors),
num_signatures])
if self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY):
report_proto = report_handler.create_report_proto(self._tt_config,
self._parameters,
tensor_trace_order,
tensor_trace_points,
self._signature_types())
report_handler.write_report_proto(report_proto, self._parameters)
else:
report_handler.create_report(self._tt_config, self._parameters,
tensor_trace_order, tensor_trace_points)
return tensor_trace_order
def _create_host_call(self):
return self._parameters.trace_mode in (
tensor_tracer_flags.TRACE_MODE_SUMMARY,
tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY)
def _generate_flush_cache_op(self, num_replicas, on_tpu):
"""Generates an Op that will flush the cache to file.
Args:
num_replicas: total number of replicas.
on_tpu: if the graph is executed on TPU.
Returns:
The Op to flush the cache to file.
"""
def _flush_fun(cache, replica_id):
"""Flushes the cache to a file corresponding to replica_id."""
def _f(file_index):
"""Generates a func that flushes the cache to a file."""
def _print_cache():
"""Flushes the cache to a file."""
replica_str = ('%d' % file_index)
if self._parameters.trace_dir:
output_path = (os.path.join(self._parameters.trace_dir,
_COMPACT_TRACE_FILE_PREFIX)
+ replica_str)
output_stream = _OUTPUT_STREAM_ESCAPE + output_path
else:
output_stream = sys.stderr
new_step_line = _REPLICA_ID_TAG + replica_str
print_ops = []
for i in range(self._num_signature_dimensions()):
print_ops.append(logging_ops.print_v2(
new_step_line, '\n',
cache[:, i], '\n',
summarize=-1,
output_stream=output_stream))
with ops.control_dependencies(print_ops):
return constant_op.constant(0).op
return _print_cache
def _eq(file_index):
return math_ops.equal(replica_id, file_index)
flush_op_cases = {}
for i in range(num_replicas):
flush_op_cases[_eq(i)] = _f(i)
# Each replica needs to determine where to write their output.
# To do this, we check if replica_id is 0, then 1, ..., and then
# num_replicas - 1 statically; and return the corresponding static file
# name. We cannot simply set the file name in python, as replica_id is
# only known during tf runtime, and we cannot create dynamic filenames.
return control_flow_ops.case(flush_op_cases, exclusive=True)
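    # Conceptually the case op above behaves like:
    #   if replica_id == 0:   print the cache to <trace_dir>/<prefix>0
    #   elif replica_id == 1: print the cache to <trace_dir>/<prefix>1
    #   ...
    # where <prefix> is _COMPACT_TRACE_FILE_PREFIX, so every replica executes
    # exactly one statically named branch.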
cache = self._create_or_get_tensor_values_cache(_TT_SUMMARY_TAG)
if on_tpu:
flush_op = tpu.outside_compilation(_flush_fun,
cache.value(), self._replica_id)
else:
flush_op = _flush_fun(cache.value(), self._replica_id)
with ops.control_dependencies([flush_op]):
reset_value = constant_op.constant(_COMPACT_TRACE_ENTRY_INIT_VALUE,
dtype=cache.dtype,
shape=cache.shape)
assign_op = state_ops.assign(cache, reset_value).op
with ops.control_dependencies([assign_op]):
return constant_op.constant(0).op
def _flush_tensor_values_cache(self, tensor_fetches, op_fetches, on_tpu):
"""Flushes the intermediate tensor values in the graph to the cache.
Args:
tensor_fetches: list of tensor results returned by the model_fn.
op_fetches: list of ops that are returned by the model_fn, e.g., train_op.
on_tpu: if the graph is executed on TPU.
Returns:
An identical copy of tensor_fetches.
"""
# Add a dependency to op and tensor fetches to make sure that all tracing
# ops are executed before flushing trace results.
with ops.control_dependencies(op_fetches +
[tensor.op for tensor in tensor_fetches]):
flush_cache_op = self._generate_flush_cache_op(
self._tt_config.num_replicas, on_tpu)
return control_flow_ops.tuple(tensor_fetches,
control_inputs=[flush_cache_op])
def _process_tensor_fetches(self, tensor_fetches):
"""Check that tensor_fetches is not empty and have valid tensors."""
# If none or empty list.
if tensor_fetches is None:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'None.')
if not isinstance(tensor_fetches, (list, tuple)):
tensor_fetches = [tensor_fetches]
elif not tensor_fetches:
raise RuntimeError('tensor_fetches provided to tensor_tracer cannot be '
'empty list.')
fetches = []
for fetch in tensor_fetches:
if isinstance(fetch, ops.Tensor):
fetches.append(fetch)
else:
raise RuntimeError('Given tensor_fetch:%s is not a tensor.' % fetch)
return fetches
def _process_op_fetches(self, op_fetches):
"""Check that op_fetches have valid ops."""
if op_fetches is None:
return []
if not isinstance(op_fetches, (list, tuple)):
op_fetches = [op_fetches]
fetches = []
for fetch in op_fetches:
if isinstance(fetch, ops.Operation):
fetches.append(fetch)
elif isinstance(fetch, ops.Tensor):
fetches.append(fetch.op)
else:
logging.warning('Ignoring the given op_fetch:%s, which is not an op.' %
fetch)
return fetches
def _convert_fetches_to_input_format(self, input_fetches, current_fetches):
"""Changes current_fetches' format, so that it matches input_fetches."""
if isinstance(input_fetches, ops.Tensor):
if len(current_fetches) != 1:
raise RuntimeError('Tensor tracer input/output fetches do not match.')
return current_fetches[0]
else:
      if len(input_fetches) != len(current_fetches):
raise RuntimeError('Tensor tracer input/output fetches do not match.')
elif isinstance(input_fetches, tuple):
return tuple(current_fetches)
else:
return current_fetches
def _get_op_control_flow_context(self, op):
"""Returns the control flow of the given op.
Args:
op: tf.Operation for which the control flow context is requested.
Returns:
      op_control_flow_context: the control flow context of the given op. If
        the operation type is LoopExit, returns the outer control flow
        context.
"""
# pylint: disable=protected-access
op_control_flow_context = op._control_flow_context
# pylint: enable=protected-access
if control_flow_util.IsLoopExit(op):
op_control_flow_context = op_control_flow_context.outer_context
return op_control_flow_context
def _prepare_host_call_fn(self, processed_t_fetches, op_fetches):
"""Creates a host call function that will write the cache as tb summary.
Args:
processed_t_fetches: List of tensor provided to session.run.
op_fetches: List of operations provided to session.run.
Raises:
ValueError if trace_dir is not set.
"""
if self._parameters.trace_dir is None:
raise ValueError('Provide a trace_dir for tensor tracer in summary mode. '
'--trace_dir=/model/dir')
def _write_cache(step, **kwargs):
"""Writes the given caches as tensor summary.
Args:
step: Step tensor with dimension [num_cores].
**kwargs: The dictionary of tensors that needs to be written as
summaries. Key and value pairs within kwargs correspond to the tag
name, and tensor content that will be written using summary.write.
The trace_modes that use this function are:
- summary: In summary mode, kwargs includes a single (tag, content)
pair which are, _TT_SUMMARY_TAG and a tf.float32 signature_cache
variable. The dimension of the signature_cache is:
num_cores x num_traced_tensors x num_signatures.
- full_tensor_summary: kwargs will include all traced tensors. Tag
and content correspond to the name of the tensor, and its actual
content.
Returns:
A tf.Operation that needs to be executed for the host call dependencies.
"""
# TODO(deveci): Parametrize max_queue, so that flushing op can be called
# less frequently.
      # Setting max_queue to 100 appears to be safe even when the number of
      # iterations is much lower, as the destructor of the writer flushes it.
summary_write_ops = []
with summary.create_file_writer_v2(
self._parameters.trace_dir,
filename_suffix=_TT_EVENT_FILE_SUFFIX,
max_queue=_TT_SUMMARY_MAX_QUEUE).as_default():
summary_metadata = summary_pb2.SummaryMetadata(
plugin_data=summary_pb2.SummaryMetadata.PluginData(
plugin_name=_TT_TENSORBOARD_PLUGIN_NAME))
for key, value in kwargs.items():
summary_write_ops.append(summary.write(
_TT_SUMMARY_TAG + '/' + key, value, metadata=summary_metadata,
step=step[0]))
return control_flow_ops.group(summary_write_ops)
step = array_ops.reshape(training_util.get_or_create_global_step(), [1])
self._host_call_fn = {}
host_call_deps = op_fetches + [tensor.op for tensor in processed_t_fetches]
caches_to_write = {}
with ops.control_dependencies(host_call_deps):
all_caches = self._get_all_cache_variables()
for cache_name, cache_variable in all_caches.items():
# Increase the cache rank by 1, so that when host call concatenates
# tensors from different replicas, we can identify them with [core_id].
new_cache_shape = [1]
new_cache_shape.extend(cache_variable.shape.as_list())
cache = array_ops.reshape(cache_variable.value(), new_cache_shape)
caches_to_write[cache_name] = cache
# Add step to parameter dictionary.
caches_to_write['step'] = step
    # Other options that avoid adding step to the parameter dictionary are:
    # * host_call_fn = (_write_cache(step, caches_to_write)): fails, as it
    #   treats caches_to_write as a single positional parameter rather than
    #   as keyword parameters.
    # * host_call_fn = (_write_cache(step, **caches_to_write)): fails with a
    #   syntax error.
self._host_call_fn[_TT_HOSTCALL_KEY] = (_write_cache, caches_to_write)
def host_call_deps_and_fn(self):
return self._host_call_fn
def _trace_execution(self, graph,
tensor_fetches,
op_fetches=None,
on_tpu=True):
"""Commong tracing function for both CPU and TPUs.
The caller function should set device_type, num_replicas,
num_replicas_per_host, num_hosts and replica_id before calling
_trace_execution.
Args:
graph: the graph of Ops executed on the TPU.
tensor_fetches: a (list,tuple,or a single object) of tensor fetches
returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
on_tpu: True if executing on TPU.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
def _cast_unsupported_dtypes(tensor):
"""Casts tensor to a supported type."""
if tensor.dtype.__eq__(dtypes.int64):
# outside-compilation doesn't support int64 input yet.
return math_ops.cast(tensor, dtypes.int32)
if tensor.dtype.__eq__(dtypes.bfloat16) or tensor.dtype.__eq__(
dtypes.float16):
# Since host can't handle bf16, convert tensor to f32.
return math_ops.cast(tensor, dtypes.float32)
return tensor
trace_mode = self._parameters.trace_mode
device_type = self._tt_config.device_type
analytics.track_usage('tensor_tracer', [trace_mode, device_type])
TensorTracer.check_device_type(device_type)
TensorTracer.check_trace_mode(device_type, trace_mode)
    # Check tensor_fetches and op_fetches, and convert them to lists.
processed_t_fetches = self._process_tensor_fetches(tensor_fetches)
op_fetches = self._process_op_fetches(op_fetches)
all_fetches = op_fetches + [tensor.op for tensor in processed_t_fetches]
# Filter out the operations that won't be executed.
# if fetches=None, then ops_in_exec_path = set(operations)
exec_op_set = self._filter_execution_path_operations(graph.get_operations(),
all_fetches)
# Write report file, and determine the traced tensors.
tensor_trace_order = self._determine_trace_and_create_report(
graph, exec_op_set)
tensor_fetch_set = set(processed_t_fetches)
tracing_ops = []
# pylint: disable=protected-access
current_control_flow_context = graph._get_control_flow_context()
# pylint: enable=protected-access
sorted_exec_op_list = list(exec_op_set)
sorted_exec_op_list.sort(key=lambda op: op.name)
# Trace ops only if they are in the execution path.
for op in sorted_exec_op_list:
for i in range(len(op.outputs)):
out_tensor = op.outputs[i]
tensor_name = out_tensor.name
if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:
continue
# Create the list of consumers before calling _preprocess_traced_tensor.
# Otherwise, adding control input below, will introduce a cycle in the
# graph.
consumers = out_tensor.consumers()
# Not all consumers may be in the exec path. Filter out the consumers
# to keep the graph simpler.
consumers = [cop for cop in consumers if cop in exec_op_set]
# If there is no consumer of the tensor, there is no need to trace it;
# unless the tensor itself is one of the fetches.
is_a_fetched_tensor = out_tensor in tensor_fetch_set
if (not consumers) and (not is_a_fetched_tensor):
continue
op_control_flow_context = self._get_op_control_flow_context(op)
# pylint: disable=protected-access
graph._set_control_flow_context(op_control_flow_context)
# pylint: enable=protected-access
processed_tensors = self._preprocess_traced_tensor(out_tensor)
if on_tpu:
for signature in processed_tensors.keys():
processed_tensors[signature] = _cast_unsupported_dtypes(
processed_tensors[signature])
if self._use_tensor_values_cache():
# Use a small cache to store the characteristics of the tensor.
cache_idx = tensor_trace_order.tensorname_to_cache_idx[tensor_name]
trace_op = self._save_tensor_value_to_cache_op(cache_idx,
processed_tensors)
elif self._use_tensor_buffer():
if len(processed_tensors) != 1:
raise RuntimeError('Multiple stats are only allowed in compact '
'mode.')
          # dict.values() is not indexable in Python 3; materialize it first.
          processed_out_tensor = list(processed_tensors.values())[0]
# Store the whole tensor in a buffer.
trace_op = self._snapshot_tensor(processed_out_tensor)
else:
def tpu_wrap_trace_fn(tensor, out_tensor_name):
"""Wraps the trace_fn with outside compilation if on TPUs."""
tensor_trace_fn = self._make_tensor_trace_fun(out_tensor_name,
tensor_trace_order)
if on_tpu:
return tpu.outside_compilation(tensor_trace_fn, tensor)
else:
return tensor_trace_fn(tensor)
def conditional_trace_fn(predicate_tensor, out_tensor, trace_fn,
out_tensor_name):
"""Creates a cond op that traces the out_tensor if predicate is satisfied."""
return control_flow_ops.cond(
predicate_tensor, lambda: trace_fn(out_tensor, out_tensor_name),
lambda: constant_op.constant(False)).op
if len(processed_tensors) != 1:
raise RuntimeError('Multiple stats are only allowed in compact '
'mode.')
          # Collecting multiple statistics is only supported in the summary
          # mode that uses the compact format (self._use_tensor_values_cache
          # is true). Non-compact mode currently allows a single stat per
          # tensor.
processed_out_tensor = six.next(six.itervalues(processed_tensors))
if self._parameters.is_conditional_trace:
trace_op = conditional_trace_fn(processed_out_tensor, out_tensor,
tpu_wrap_trace_fn, tensor_name)
elif self._parameters.included_cores:
should_print = constant_op.constant(False)
for core in self._parameters.included_cores:
should_print = gen_math_ops.logical_or(
should_print, gen_math_ops.equal(self._replica_id, core))
trace_op = conditional_trace_fn(should_print, processed_out_tensor,
tpu_wrap_trace_fn, tensor_name)
else:
trace_op = tpu_wrap_trace_fn(processed_out_tensor, tensor_name)
if is_a_fetched_tensor:
tracing_ops.append(trace_op)
continue
# Add it to all consumers, as some consumers may not be executed if they
# are in a control flow.
for consumer_op in consumers:
# pylint: disable=protected-access
consumer_op._add_control_input(trace_op)
# pylint: enable=protected-access
# pylint: disable=protected-access
graph._set_control_flow_context(current_control_flow_context)
# pylint: enable=protected-access
if tracing_ops:
# If we are tracing a fetched tensor, their dependency is stored in
# tracing_ops.
processed_t_fetches = control_flow_ops.tuple(processed_t_fetches,
control_inputs=tracing_ops)
if self._use_tensor_values_cache() or self._use_tensor_buffer():
if self._create_host_call() and on_tpu:
self._prepare_host_call_fn(processed_t_fetches, op_fetches)
else:
processed_t_fetches = self._flush_tensor_values_cache(
processed_t_fetches, op_fetches, on_tpu=on_tpu)
# processed_t_fetches is a list at this point. Convert it to the same
# format as given in tensor_fetches.
return self._convert_fetches_to_input_format(tensor_fetches,
processed_t_fetches)
def trace_tpu(self, graph,
tensor_fetches,
op_fetches=None,
num_replicas=None,
num_replicas_per_host=None,
num_hosts=None):
"""Traces the tensors generated by TPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the TPU.
tensor_fetches: a (list,tuple,or a single object) of tensor fetches
returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
num_replicas: number of replicas used on the TPU.
num_replicas_per_host: number of replicas per TPU host.
num_hosts: total number of TPU hosts.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If num_replicas_per_host > 8.
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_TPU
self._tt_config.num_replicas = num_replicas
self._tt_config.num_replicas_per_host = num_replicas_per_host
self._tt_config.num_hosts = num_hosts
if self._tt_config.num_replicas is not None:
if self._tt_config.num_replicas_per_host is None:
self._tt_config.num_replicas_per_host = 8
if self._tt_config.num_hosts is None:
self._tt_config.num_hosts = (
num_replicas // self._tt_config.num_replicas_per_host +
(num_replicas % self._tt_config.num_replicas_per_host > 0))
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
self._add_replica_id_to_graph()
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=True)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
def trace_cpu(self, graph, tensor_fetches, op_fetches=None):
"""Traces the tensors generated by CPU Ops in a TF graph.
Args:
graph: the graph of Ops executed on the CPU.
tensor_fetches: a (list,tuple,or a single object) of tensor fetches
returned by model_fn given to session.run. Function must be provided
        with at least one tensor to fetch.
op_fetches: A list of op fetches returned by model_fn given to
session.run. op_fetches and tensor_fetches are used to determine the
nodes that will be executed. Can be None.
Returns:
tensor_fetches: an exact copy of tensor_fetches that has additional
dependencies.
Raises:
RuntimeError: If tensor_fetches is None or empty.
"""
if graph in TensorTracer._traced_graphs:
logging.warning('Graph is already rewritten with tensor tracer, ignoring '
'multiple calls.')
return tensor_fetches
else:
TensorTracer._traced_graphs.add(graph)
self._tt_config.device_type = _DEVICE_TYPE_CPU
self._tt_config.num_replicas = 1
self._tt_config.num_replicas_per_host = 1
self._tt_config.num_hosts = 1
self._replica_id = 0
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_before_tt.pbtxt')
with graph.as_default():
tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,
on_tpu=False)
if self._parameters.graph_dump_path:
graph_io.write_graph(graph, self._parameters.graph_dump_path,
'graph_after_tt.pbtxt')
return tensor_fetches
|
[
"[email protected]"
] | |
25dd87758892b414426ec0e9c48e05fb4ac4a527
|
a4a44ad46cd1306e2da72ff89483b0102fc9787d
|
/SamplePython/Developer Tool/fab_fabric/pengenalan_dr_web/11_settings.py
|
8a018fc9d73ccd0692dcf6cf3d3a40dad2777d35
|
[] |
no_license
|
okipriyadi/NewSamplePython
|
640eb3754de98e6276f0aa1dcf849ecea22d26b1
|
e12aeb37e88ffbd16881a20a3c37cd835b7387d0
|
refs/heads/master
| 2020-05-22T01:15:17.427350 | 2017-02-21T04:47:08 | 2017-02-21T04:47:08 | 30,009,299 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 333 |
py
|
"""
settings (fabric.context_managers.settings)
When you need to temporarily (i.e. for a certain command chain),
you can use the settings statement (i.e. override env values).
Usage examples:
"""
from fabric.api import settings, sudo
# Perform actions using a different *user*
with settings(user="avionics"):
sudo("cmd")
|
[
"[email protected]"
] | |
4e8a125a7458dd004507e648e9417922ad85affe
|
14d7f5f83b6f84871ff6ebfa0af4c17b7115a33f
|
/eco_models/mpb/integration_stop.py
|
f391a20c2a14bae90e14d4ebe8bd5777a3fa7d32
|
[] |
no_license
|
tonychangmsu/Python_Scripts
|
8ca7bc841c94dcab36743bce190357ac2b1698a5
|
036f498b1fc68953d90aac15f0a5ea2f2f72423b
|
refs/heads/master
| 2016-09-11T14:32:17.133399 | 2016-03-28T16:34:40 | 2016-03-28T16:34:40 | 10,370,475 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 736 |
py
|
# Title: integration_stop.py
# Author: Tony Chang
# Date: 10.26.2015
# Abstract: Attempt to find a solution for determining where the cumulative sum (numerical integration) of an array of
# 2D matrices sums up to one (find the index)
import numpy as np
#first suppose we have a 3D matrix of values under 1
G = np.random.uniform(0,.05, (365,500,400))
#now develop a cumulative sum for each step
integral_G = np.cumsum(G, axis =0)
# now find the index along the first axis where the cumulative value first exceeds one.
index = np.argmax(integral_G>1, axis = 0)
# if any of these equals 0 then we may have a development that didn't complete, and we have a problem:
# more time is needed to finish (i.e. more years to inspect).
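# A possible completeness check (assuming G is non-negative, so the cumulative
# sum is monotone): cells whose total never exceeds 1 satisfy
# integral_G[-1] <= 1, which also distinguishes them from cells that cross 1
# on the very first step (both cases give argmax == 0).
# incomplete = integral_G[-1] <= 1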
#done!
|
[
"[email protected]"
] | |
9ae067e5cd4eccc2e3a324cc2e07669caccf8637
|
6630694f401f6f475dd81bb01ff9368db844ccff
|
/configs/_base_/models/hrnet/hrnet-w48.py
|
f0604958481ba2af277e3a0f9515dc1423def6c6
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmpretrain
|
98a4d6b3bb747efc3d50decebf84fc3ffa41076a
|
d2ccc44a2c8e5d49bb26187aff42f2abc90aee28
|
refs/heads/main
| 2023-08-30T19:11:24.771498 | 2023-08-23T02:45:18 | 2023-08-23T02:45:18 | 278,415,292 | 652 | 186 |
Apache-2.0
| 2023-09-08T08:01:40 | 2020-07-09T16:25:04 |
Python
|
UTF-8
|
Python
| false | false | 418 |
py
|
# model settings
model = dict(
type='ImageClassifier',
backbone=dict(type='HRNet', arch='w48'),
neck=[
dict(type='HRFuseScales', in_channels=(48, 96, 192, 384)),
dict(type='GlobalAveragePooling'),
],
head=dict(
type='LinearClsHead',
in_channels=2048,
num_classes=1000,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, 5),
))
|
[
"[email protected]"
] | |
170f4291b543e014fadf954a0e8b37173c22f52f
|
965e1e205bf053d93b32be0dab4d45455b42b3a2
|
/NLP/PartsSpeech.py
|
29aa35ab37a1d1ca416e2d528400a686da8f4ba4
|
[] |
no_license
|
panditdandgule/DataScience
|
9e58867dd960ec554e0bbb8e4ce93baa226ab927
|
3eb59c129d81a6ba6b45e24113e25e63d19c60cb
|
refs/heads/master
| 2021-07-22T21:44:12.700518 | 2020-05-14T12:01:05 | 2020-05-14T12:01:05 | 166,497,260 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 669 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 15 19:59:50 2018
@author: pandit
"""
import nltk
from nltk.corpus import state_union
from nltk.tokenize import PunktSentenceTokenizer
train_text=state_union.raw("2005-GWBush.txt")
sample_text=state_union.raw("2005-GWBush.txt")
custom_sent_tokenizer=PunktSentenceTokenizer(train_text)
tokenized =custom_sent_tokenizer.tokenize(sample_text)
def process_content():
try:
for i in tokenized:
words=nltk.word_tokenize(i)
tagged=nltk.pos_tag(words)
print(tagged)
except Exception as e:
print(str(e))
process_content()
|
[
"[email protected]"
] | |
f70d4e2e4894ba7b8637af7ba93f753c0b5faa18
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/115_testing/examples/Github/_Level_1/python_unittests-master/sample_functions.py
|
8d63bc99d2a30ac0321b97976440c0d8474e1244
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 |
Python
|
UTF-8
|
Python
| false | false | 204 |
py
|
def sum(a, b):
return a + b
def contains_numbers(alpha_num_str):
    for char in alpha_num_str:
        if char.isdigit():
            return True
    return False
def div(a, b):
return a / b
|
[
"[email protected]"
] | |
d4a278c814384d490f690a077bab77a109b60b57
|
0ad79e7104500b5988f07e9f19f17a540f07605a
|
/Python算法指南/动态规划/67_最小和子数组_灵活运用动态规划.py
|
3273d8c3606054f4d873463057975d507015c93a
|
[] |
no_license
|
tonyyo/PycharmProjects
|
f0ce458ed662e33e75ddffbfcf28b0d1ed638743
|
a28620923336c352103858e0ccfc4117d1c4ea01
|
refs/heads/master
| 2022-09-19T02:02:15.919201 | 2020-06-03T12:57:38 | 2020-06-03T12:57:38 | 263,204,131 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 551 |
py
|
class Solution:
def minSubArray(self, nums):
MIN = 65536
SUM = nums[0]
for i in range(1, len(nums)):
            SUM = SUM + nums[i] if SUM < 0 else nums[i]  # a positive running SUM can only hurt the minimum
MIN = min(MIN, SUM)
return MIN
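    # The recurrence is Kadane's algorithm adapted to a minimum: the best
    # subarray ending at index i is nums[i] alone when the running sum so far
    # is positive (a positive prefix can only increase a minimum), otherwise
    # the running sum extended by nums[i].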
if __name__ == '__main__':
temp = Solution()
List1 = [1, -1, -2, 1]
List2 = [3, -2, 2, 1]
print("输入:" + str(List1))
print(("输出:" + str(temp.minSubArray(List1))))
print("输入:" + str(List2))
print(("输出:" + str(temp.minSubArray(List2))))
|
[
"[email protected]"
] | |
962ad189b3695ad55e5db43027b6e869b2817147
|
fb408595c1edee0be293302c6d7bfc0c77d37c46
|
/python/DP/DP_2096.py
|
a5753e0e8dda2057310f4dee0f056e7940fbb74d
|
[] |
no_license
|
as950118/Algorithm
|
39ad25519fd0e42b90ddf3797a61239862ad79b5
|
739a7d4b569057cdb6b6faa74254512b83d02bb1
|
refs/heads/master
| 2023-07-21T12:38:00.653579 | 2023-07-19T06:57:17 | 2023-07-19T06:57:17 | 125,176,176 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 610 |
py
|
n = int(input())
arr = [0]*(3)
dp_max = [0]*(3)
dp_min = [0]*(3)
arr = list(map(int, input().split()))
temp = arr[:]
dp_max = temp[:]
dp_min = temp[:]
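# Sliding-window DP over 3-wide rows: column j of the new row is reachable
# only from columns j-1, j, j+1 of the previous row, so keeping just the
# previous row's best (dp_max) and worst (dp_min) totals is enough.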
for i in range(1, n):
arr = list(map(int, input().split()))
temp[0] = max(dp_max[0], dp_max[1]) + arr[0]
temp[1] = max(dp_max[0], dp_max[1], dp_max[2]) + arr[1]
temp[2] = max(dp_max[1], dp_max[2]) + arr[2]
dp_max = temp[:]
temp[0] = min(dp_min[0], dp_min[1]) + arr[0]
temp[1] = min(dp_min[0], dp_min[1], dp_min[2]) + arr[1]
temp[2] = min(dp_min[1], dp_min[2]) + arr[2]
dp_min = temp[:]
print(max(dp_max), min(dp_min))
|
[
"[email protected]"
] | |
f443e27275903b151314c40311f6464aafca1b44
|
72784799e5436e8a96462bdbcb29baeb644dcc7f
|
/utilities/animate.py
|
2c562e41c8ec2e736db293f0f772a55ff0091345
|
[] |
no_license
|
simonsben/undergrad_thesis
|
31dd205cb734f7c876b5053040e5ab0bf8fbd5cb
|
8458d00ae6525602b944279c2c280149a5957cb1
|
refs/heads/master
| 2020-04-02T10:46:55.255322 | 2019-04-08T06:01:48 | 2019-04-08T06:01:48 | 154,354,775 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 979 |
py
|
from matplotlib.pylab import figure, show, savefig, title, axis, draw
from networkx import spring_layout, draw_networkx_edges, draw_networkx_nodes
from matplotlib.animation import FuncAnimation
def add_node(frame, graph, i, plot_layout):
    # FuncAnimation calls this with the frame index first, followed by fargs.
# draw_networkx_edges(graph, plot_layout, alpha=.3)
# draw_networkx_nodes(node, plot_layout, node_size=100, edgecolors='k', node_color='w')
i += 1
draw()
def animate_creation(network, blocking=True, save_plot=True):
_title = 'Free-Scale Network'
fig = figure(_title)
axis('off')
graph = network.network_plot
plot_layout = spring_layout(graph)
init_nodes = graph.nodes[:3]
init_edges = graph.edges[:2]
draw_networkx_nodes(graph, plot_layout, nodelist=init_nodes, node_size=100, edgecolors='k', node_color='w')
draw_networkx_edges(graph, plot_layout, edgelist=init_edges, alpha=.3)
draw()
show()
i = 3
animation = FuncAnimation(fig, add_node, fargs=(graph, i, plot_layout))
|
[
"[email protected]"
] | |
52a608c85aa5b18e530c6cb0cae1d8d2f58b7ec4
|
14d8418ca5990217be67aee89fdaa310db03fbba
|
/test_stats_d_graph_display.py
|
fffe014750a15f323e8f39408530e03c6133cae4
|
[
"Apache-2.0"
] |
permissive
|
sachanta/lm-sdk-python
|
3a16457bd2d5b880a0d238a88a9d1d5b8d9675f0
|
e476d415c7279457f79b5d032a73d950af2fe96b
|
refs/heads/master
| 2023-08-03T08:39:42.842790 | 2021-09-13T07:20:56 | 2021-09-13T07:20:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,445 |
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. Note: For Python SDKs, the REQUEST parameters can contain camelCase or an underscore. However, the RESPONSE parameters will always contain an underscore. For example, the REQUEST parameter can be testLocation or test_location. The RESPONSE parameter will be test_location. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import logicmonitor_sdk
from logicmonitor_sdk.models.stats_d_graph_display import StatsDGraphDisplay # noqa: E501
from logicmonitor_sdk.rest import ApiException
class TestStatsDGraphDisplay(unittest.TestCase):
"""StatsDGraphDisplay unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testStatsDGraphDisplay(self):
"""Test StatsDGraphDisplay"""
# FIXME: construct object with mandatory attributes with example values
# model = logicmonitor_sdk.models.stats_d_graph_display.StatsDGraphDisplay() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
ed3cea97ae571dfe1f0a45dba14fc43b93212a84
|
fb21a8f1fc02f5cee6f0a759e336561726d3b184
|
/experiments/lstm-notcut/run.py
|
74e6c8e8a8f1be2abab441657d41651360c17bd5
|
[] |
no_license
|
yamaguchi-milkcocholate/GoogleBrain-VentilatorPressurePrediction
|
76632353ff25a0c9ad8db51ef1f4b728954537b5
|
1996bb81f5b6880a20b8e39c681fecef0bc8a201
|
refs/heads/main
| 2023-09-05T17:25:46.980274 | 2021-11-24T04:40:50 | 2021-11-24T04:40:50 | 410,795,933 | 0 | 0 | null | 2021-11-04T01:28:27 | 2021-09-27T08:06:55 |
Python
|
UTF-8
|
Python
| false | false | 6,082 |
py
|
from random import seed
import numpy as np
import pandas as pd
import json
import os
import sys
import gc
import shutil
from pprint import pprint
from pathlib import Path
from typing import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.metrics import mean_absolute_error as mae
from sklearn.preprocessing import RobustScaler, PowerTransformer, MinMaxScaler
from sklearn.model_selection import KFold
import sys
print(str(Path(__file__).resolve().parent.parent.parent))
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
from src.utils import (
seed_every_thing,
fetch_data,
Config,
plot_metric,
reduce_tf_gpu_memory,
reduce_mem_usage,
fetch_custom_data,
CustomL1Loss
)
def build_model(config: Config, n_features) -> keras.models.Sequential:
model = keras.models.Sequential([keras.layers.Input(shape=(config.cut, n_features))])
for n_unit in config.n_units:
model.add(
keras.layers.Bidirectional(
keras.layers.LSTM(
n_unit,
return_sequences=True,
)
)
)
for n_unit in config.n_dense_units:
model.add(keras.layers.Dense(n_unit, activation="selu"))
model.add(keras.layers.Dense(1))
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=config.lr),
loss='mae')
return model
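# Note on shapes (as wired above): the bidirectional LSTM stack keeps the time
# dimension (return_sequences=True) and the final Dense(1) yields an output of
# shape (batch, config.cut, 1), i.e. one pressure prediction per timestep,
# matching the targets reshaped to (-1, config.cut) in main().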
def main(config: Dict[str, Any]):
config = Config().update(config)
seed_every_thing(seed=config.seed)
reduce_tf_gpu_memory(gpu_id=config.gpu_id)
basedir = Path(__file__).resolve().parent
datadir = basedir / ".." / ".." / "data"
logdir = basedir / ".." / ".." / "logs" / config.dirname
cachedir = basedir / ".." / ".." / "cache"
os.makedirs(logdir, exist_ok=True)
config.to_json(logdir / "config.json")
_, test_df, submission_df = fetch_custom_data(datadir=datadir, n_splits=config.n_splits)
test_df["count"] = (np.arange(test_df.shape[0]) % 80).astype(int)
test_preds_idx = test_df["count"] < config.cut
test_df = test_df[test_preds_idx].reset_index(drop=True)
test_df["pressure"] = 0
train_df = reduce_mem_usage(pd.read_csv(cachedir / f"train-10fold-debug{config.debug}.csv"))
test_df = reduce_mem_usage(pd.read_csv(cachedir / f"test-10fold-debug{config.debug}.csv"))
kfolds = train_df.iloc[0::config.cut]['kfold'].values
features = list(train_df.drop(["kfold", "pressure"], axis=1).columns)
pprint(features)
print(len(features))
cont_features = [f for f in features if ("RC_" not in f) and ("R_" not in f) and ("C_" not in f) and ("u_out" not in f)]
pprint(cont_features)
RS = RobustScaler()
train_df[cont_features] = RS.fit_transform(train_df[cont_features])
test_df[cont_features] = RS.transform(test_df[cont_features])
train_data, test_data = train_df[features].values, test_df[features].values
train_data = train_data.reshape(-1, config.cut, train_data.shape[-1])
targets = train_df[["pressure"]].to_numpy().reshape(-1, config.cut)
test_data = test_data.reshape(-1, config.cut, test_data.shape[-1])
with tf.device(f"/GPU:{config.gpu_id}"):
valid_preds = np.empty_like(targets)
test_preds = []
for fold in range(config.n_splits):
train_idx, test_idx = (kfolds != fold), (kfolds == fold)
print("-" * 15, ">", f"Fold {fold+1}", "<", "-" * 15)
savedir = logdir / f"fold{fold}"
os.makedirs(savedir, exist_ok=True)
X_train, X_valid = train_data[train_idx], train_data[test_idx]
y_train, y_valid = targets[train_idx], targets[test_idx]
model = build_model(config=config, n_features=len(features))
# es = EarlyStopping(
# monitor="val_loss",
# patience=config.es_patience,
# verbose=1,
# mode="min",
# restore_best_weights=True,
# )
customL1 = CustomL1Loss(
X_valid=X_valid,
y_valid=y_valid,
u_outs=X_valid[:, :, features.index("u_out")],
filepath=savedir / "weights_custom_best.h5"
)
check_point = ModelCheckpoint(
filepath=savedir / "weights_best.h5",
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="min",
save_weights_only=True,
)
schedular = ReduceLROnPlateau(
mode="min", **config.schedular
)
history = model.fit(
X_train,
y_train,
validation_data=(X_valid, y_valid),
epochs=config.epochs,
batch_size=config.batch_size,
callbacks=[check_point, schedular, customL1]
)
model.save_weights(savedir / "weights_final.h5")
model.load_weights(savedir / "weights_custom_best.h5")
pd.DataFrame(history.history).to_csv(savedir / "log.csv")
plot_metric(filepath=savedir / "log.csv", metric="loss")
valid_preds[test_idx, :] = model.predict(X_valid).squeeze()
test_preds.append(model.predict(test_data).squeeze().reshape(-1, 1).squeeze())
del model, X_train, X_valid, y_train, y_valid
keras.backend.clear_session()
gc.collect()
pd.DataFrame(valid_preds).to_csv(logdir / "valid_preds.csv")
if not config.debug:
submission_df.loc[test_preds_idx, "pressure"] = np.median(test_preds, axis=0)
submission_df.to_csv(logdir / "submission.csv", index=False)
shutil.copyfile(Path(__file__), logdir / "script.py")
if __name__ == "__main__":
cnf_file = sys.argv[1]
cfg_file_path = Path(__file__).resolve().parent / cnf_file
with open(cfg_file_path, "rb") as f:
config = json.load(f)
main(config=config)
|
[
"[email protected]"
] | |
75b4c345054f9757d6e642ce84b0d8c16a1c82c6
|
eb00755d9d0f2630ffdb21e3ab6685b2fbcb0d9e
|
/tests/bench/bench_scripts/bench_sampleData.py
|
729fcf79af5383d0af68875e3179d971fe99aff2
|
[
"BSD-3-Clause"
] |
permissive
|
mlangill/biom-format
|
aca45518c71b807cf30b0f548ad726880802a2b5
|
4cebfbdba8b6b64ff0d503df33634e3d52de1de0
|
refs/heads/master
| 2021-01-16T21:59:51.218830 | 2013-12-04T16:41:50 | 2013-12-04T16:41:50 | 9,486,201 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 274 |
py
|
#!/usr/bin/env python
from sys import argv
from gzip import open as gzip_open
from biom.parse import parse_biom_table
from random import choice
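# Benchmark script: parse a gzipped BIOM table given on the command line and
# fetch the data vector for one randomly chosen sample id.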
if __name__ == '__main__':
table = parse_biom_table(gzip_open(argv[1]))
foo = table.sampleData(choice(table.SampleIds))
|
[
"[email protected]"
] | |
0dac53703ab198d385005c1bd7a5a57d670af88e
|
caee06b143be5117a667e0f14ed6cf54958e85c1
|
/page/showreview.py
|
0b3dcc9721d9eed2b09256eae20318e2959a16f8
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ctesting/critic
|
720b68331aac81443e53ccd0c4c6cb4c3b75b5ec
|
8ba956d124279d0fca9d4522fb0ee6970e863588
|
refs/heads/master
| 2021-01-17T13:43:41.205293 | 2013-03-15T20:34:47 | 2013-03-15T20:34:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 49,272 |
py
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import time
import re
import datetime
import calendar
import traceback
import dbutils
import gitutils
import htmlutils
import page.utils
import log.html
import review.utils as review_utils
import review.html as review_html
import review.comment as review_comment
import configuration
import diff
import profiling
import linkify
import extensions
try:
from customization.paths import getModuleFromFile
except:
def getModuleFromFile(repository, filename):
try:
base, rest = filename.split("/", 1)
return base + "/"
except:
return None
class SummaryColumn(log.html.SummaryColumn):
def __init__(self, review, linkToCommit):
log.html.SummaryColumn.__init__(self, linkToCommit)
self.__review = review
self.__cache = {}
def fillCache(self, db, review):
cursor = db.cursor()
cursor.execute("""SELECT DISTINCT assignee, child
FROM fullreviewuserfiles
JOIN changesets ON (changesets.id=changeset)
WHERE review=%s
AND state='pending'""",
(review.id,))
for user_id, commit_id in cursor:
self.__cache.setdefault(commit_id, set()).add(user_id)
def render(self, db, commit, target):
user_ids = self.__cache.get(commit.getId(db))
if user_ids:
users = ["%s:%s" % (user.fullname, user.status) for user in dbutils.User.fromIds(db, [user_id for user_id in user_ids])]
target.setAttribute("critic-reviewers", ",".join(sorted(users)))
log.html.SummaryColumn.render(self, db, commit, target)
class ApprovalColumn:
APPROVED = 1
TOTAL = 2
def __init__(self, user, review, type, cache):
self.__user = user
self.__review = review
self.__type = type
self.__cache = cache
@staticmethod
def fillCache(db, user, review, cache, profiler):
cursor = db.cursor()
profiler.check("fillCache")
cursor.execute("""SELECT child, state, COUNT(*), SUM(deleted), SUM(inserted)
FROM changesets
JOIN reviewfiles ON (changeset=changesets.id)
WHERE review=%s
GROUP BY child, state""",
(review.id,))
for commit_id, state, nfiles, deleted, inserted in cursor:
data = cache.get(commit_id)
if not data: data = cache[commit_id] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
if state == 'reviewed':
data[3] += nfiles
data[4] += deleted
data[5] += inserted
data[0] += nfiles
data[1] += deleted
data[2] += inserted
profiler.check("fillCache: total")
cursor.execute("""SELECT child, COALESCE(reviewfilechanges.to, reviewfiles.state) AS effective_state, COUNT(*), SUM(deleted), SUM(inserted)
FROM changesets
JOIN reviewfiles ON (changeset=changesets.id)
JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
LEFT OUTER JOIN reviewfilechanges ON (reviewfilechanges.file=reviewfiles.id
AND reviewfilechanges.uid=reviewuserfiles.uid
AND reviewfilechanges.state='draft')
WHERE review=%s
AND reviewuserfiles.uid=%s
GROUP BY child, effective_state""",
(review.id, user.id))
for commit_id, state, nfiles, deleted, inserted in cursor:
data = cache.get(commit_id)
if state == 'reviewed':
data[9] += nfiles
data[10] += deleted
data[11] += inserted
data[6] += nfiles
data[7] += deleted
data[8] += inserted
profiler.check("fillCache: user")
def __calculate(self, db, commit):
return self.__cache.get(commit.id, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def className(self, db, commit):
if commit:
(total_nfiles, total_deleted, total_inserted,
approved_nfiles, approved_deleted, approved_inserted,
user_total_nfiles, user_total_deleted, user_total_inserted,
user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)
if user_approved_nfiles == user_total_nfiles:
category = ""
else:
category = " user"
else:
category = ""
if self.__type == ApprovalColumn.APPROVED:
return "approval" + category
else:
return "total" + category
def heading(self, target):
if self.__type == ApprovalColumn.APPROVED:
target.text("Pending")
else:
target.text("Total")
def render(self, db, commit, target):
(total_nfiles, total_deleted, total_inserted,
approved_nfiles, approved_deleted, approved_inserted,
user_total_nfiles, user_total_deleted, user_total_inserted,
user_approved_nfiles, user_approved_deleted, user_approved_inserted) = self.__calculate(db, commit)
if self.__type == ApprovalColumn.APPROVED:
if user_approved_nfiles == user_total_nfiles:
if approved_nfiles == total_nfiles:
target.text()
elif approved_deleted == total_deleted and approved_inserted == total_inserted:
target.span().text("?? %")
else:
target.span().text("%d %%" % int(100.0 * ((total_deleted + total_inserted) - (approved_deleted + approved_inserted)) / (total_deleted + total_inserted)))
elif user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
target.span().text("?? %")
else:
target.span().text("%d %%" % int(100.0 * ((user_total_deleted + user_total_inserted) - (user_approved_deleted + user_approved_inserted)) / (user_total_deleted + user_total_inserted)))
else:
if user_approved_deleted == user_total_deleted and user_approved_inserted == user_total_inserted:
target.span().text("-%d/+%d" % (total_deleted, total_inserted))
else:
target.span().text("-%d/+%d" % (user_total_deleted, user_total_inserted))
def notModified(req, db, user, review):
value = req.getRequestHeader("If-None-Match")
return review.getETag(db, user) == value
def usesExperimentalFeature(req, db, review):
return False
def renderShowReview(req, db, user):
profiler = profiling.Profiler()
cursor = db.cursor()
if user.getPreference(db, "commit.diff.compactMode"): default_compact = "yes"
else: default_compact = "no"
compact = req.getParameter("compact", default_compact) == "yes"
highlight = req.getParameter("highlight", None)
review_id = req.getParameter("id", filter=int)
review = dbutils.Review.fromId(db, review_id, load_commits=False, profiler=profiler)
profiler.check("create review")
if not review:
raise page.utils.DisplayMessage, ("Invalid Review ID", "%d is not a valid review ID." % review_id)
if review.getETag(db, user) == req.getRequestHeader("If-None-Match"):
raise page.utils.NotModified
profiler.check("ETag")
# if usesExperimentalFeature(req, db, review):
# def renderMessage(target):
# url = "%s/r/%d" % (configuration.URL_PER_TYPE['development'], review.id)
# p = target.p(style="padding-top: 1em")
# p.text("Sorry, this review uses experimental features currently only available in the development version of Critic. Because of that, it can only be displayed there.")
# p = target.p(style="padding-top: 1em")
# p.b().a(href=url).text(url)
# yield page.utils.displayMessage(db, req, user, "Experimental Feature Alert!", message=renderMessage)
# return
repository = review.repository
prefetch_commits = {}
cursor.execute("""SELECT sha1, child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
prefetch_commits.update(dict(cursor))
profiler.check("commits (query)")
cursor.execute("""SELECT old_head, commits1.sha1, new_head, commits2.sha1, new_upstream, commits3.sha1
FROM reviewrebases
LEFT OUTER JOIN commits AS commits1 ON (commits1.id=old_head)
LEFT OUTER JOIN commits AS commits2 ON (commits2.id=new_head)
LEFT OUTER JOIN commits AS commits3 ON (commits3.id=new_upstream)
WHERE review=%s""",
(review.id,))
rebases = cursor.fetchall()
if rebases:
has_finished_rebases = False
for old_head_id, old_head_sha1, new_head_id, new_head_sha1, new_upstream_id, new_upstream_sha1 in rebases:
if old_head_id:
prefetch_commits[old_head_sha1] = old_head_id
if new_head_id:
prefetch_commits[new_head_sha1] = new_head_id
has_finished_rebases = True
if new_upstream_id:
prefetch_commits[new_upstream_sha1] = new_upstream_id
profiler.check("auxiliary commits (query)")
if has_finished_rebases:
cursor.execute("""SELECT commits.sha1, commits.id
FROM commits
JOIN reachable ON (reachable.commit=commits.id)
WHERE branch=%s""",
(review.branch.id,))
prefetch_commits.update(dict(cursor))
profiler.check("actual commits (query)")
prefetch_commits = gitutils.FetchCommits(repository, prefetch_commits)
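    # Hand the collected sha1 -> id map to a batched fetch; the commit objects are
    # materialized later via prefetch_commits.getCommits(db).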
document = htmlutils.Document(req)
html = document.html()
head = html.head()
body = html.body(onunload="void(0);")
def flush(target=None):
return document.render(stop=target, pretty=not compact)
def renderHeaderItems(target):
has_draft_items = review_utils.renderDraftItems(db, user, review, target)
target = target.div("buttons")
if not has_draft_items:
if review.state == "open":
if review.accepted(db):
target.button(id="closeReview", onclick="closeReview();").text("Close Review")
else:
if user in review.owners or user.getPreference(db, "review.pingAnyReview"):
target.button(id="pingReview", onclick="pingReview();").text("Ping Review")
if user in review.owners or user.getPreference(db, "review.dropAnyReview"):
target.button(id="dropReview", onclick="dropReview();").text("Drop Review")
if user in review.owners and not review.description:
target.button(id="writeDescription", onclick="editDescription();").text("Write Description")
else:
target.button(id="reopenReview", onclick="reopenReview();").text("Reopen Review")
target.span("buttonscope buttonscope-global")
profiler.check("prologue")
page.utils.generateHeader(body, db, user, renderHeaderItems)
cursor.execute("SELECT 1 FROM fullreviewuserfiles WHERE review=%s AND state='pending' AND assignee=%s", (review.id, user.id))
hasPendingChanges = bool(cursor.fetchone())
if hasPendingChanges:
head.setLink("next", "showcommit?review=%d&filter=pending" % review.id)
profiler.check("header")
document.addExternalStylesheet("resource/showreview.css")
document.addExternalStylesheet("resource/review.css")
document.addExternalStylesheet("resource/comment.css")
document.addExternalScript("resource/showreview.js")
document.addExternalScript("resource/review.js")
document.addExternalScript("resource/comment.js")
document.addExternalScript("resource/autocomplete.js")
document.addInternalScript(user.getJS())
document.addInternalScript("var owners = [ %s ];" % ", ".join(owner.getJSConstructor() for owner in review.owners))
document.addInternalScript("var updateCheckInterval = %d;" % user.getPreference(db, "review.updateCheckInterval"));
log.html.addResources(document)
document.addInternalScript(review.getJS())
target = body.div("main")
basic = target.table('paleyellow basic', align='center')
basic.col(width='10%')
basic.col(width='60%')
basic.col(width='30%')
h1 = basic.tr().td('h1', colspan=3).h1()
h1.text("r/%d: " % review.id)
h1.span(id="summary").text("%s" % review.summary, linkify=linkify.Context(db=db, review=review))
h1.a("edit", href="javascript:editSummary();").text("[edit]")
def linkToCommit(commit):
cursor.execute("SELECT 1 FROM commits JOIN changesets ON (child=commits.id) JOIN reviewchangesets ON (changeset=changesets.id) WHERE sha1=%s AND review=%s", (commit.sha1, review.id))
if cursor.fetchone():
return "%s/%s?review=%d" % (review.repository.name, commit.sha1, review.id)
return "%s/%s" % (review.repository.name, commit.sha1)
def row(heading, value, help, right=None, linkify=False, cellId=None):
main_row = basic.tr('line')
main_row.td('heading').text("%s:" % heading)
if right is False: colspan = 2
else: colspan = None
if callable(value): value(main_row.td('value', id=cellId, colspan=colspan).preformatted())
else: main_row.td('value', id=cellId, colspan=colspan).preformatted().text(value, linkify=linkify, repository=review.repository)
if right is False: pass
elif callable(right): right(main_row.td('right', valign='bottom'))
else: main_row.td('right').text()
if help: basic.tr('help').td('help', colspan=3).text(help)
def renderBranchName(target):
target.code("branch").text(review.branch.name, linkify=linkify.Context())
if repository.name != user.getPreference(db, "defaultRepository"):
target.text(" in ")
target.code("repository").text("%s:%s" % (configuration.base.HOSTNAME, repository.path))
cursor.execute("""SELECT id, remote, remote_name, disabled, previous
FROM trackedbranches
WHERE repository=%s
AND local_name=%s""",
(repository.id, review.branch.name))
row = cursor.fetchone()
if row:
trackedbranch_id, remote, remote_name, disabled, previous = row
target.p("tracking disabled" if disabled else "tracking").text("tracking")
target.code("branch").text(remote_name, linkify=linkify.Context(remote=remote))
target.text(" in ")
target.code("repository").text(remote, linkify=linkify.Context())
if previous:
target.span("lastupdate").script(type="text/javascript").text("document.write('(last fetched: ' + shortDate(new Date(%d)) + ')');" % (calendar.timegm(previous.utctimetuple()) * 1000))
if user in review.owners:
buttons = target.div("buttons")
if disabled:
buttons.button("enabletracking", onclick="enableTracking(%d);" % trackedbranch_id).text("Enable Tracking")
else:
buttons.button("disabletracking", onclick="triggerUpdate(%d);" % trackedbranch_id).text("Update Now")
buttons.button("disabletracking", onclick="disableTracking(%d);" % trackedbranch_id).text("Disable Tracking")
def renderReviewers(target):
if review.reviewers:
for index, reviewer in enumerate(review.reviewers):
if index != 0: target.text(", ")
span = target.span("user %s" % reviewer.status)
span.span("name").text(reviewer.fullname)
if reviewer.status == 'absent':
span.span("status").text(" (%s)" % reviewer.getAbsence(db))
elif reviewer.status == 'retired':
span.span("status").text(" (retired)")
else:
target.i().text("No reviewers.")
cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
FROM reviewfilters
JOIN users ON (reviewfilters.uid=users.id)
WHERE reviewfilters.review=%s
AND reviewfilters.type='reviewer'
AND users.status!='retired'""",
(review.id,))
rows = cursor.fetchall()
reviewer_filters_hidden = []
if rows:
table = target.table("reviewfilters reviewers")
row = table.thead().tr("h1")
row.th("h1", colspan=4).text("Custom filters:")
filter_data = {}
reviewfilters = {}
for filter_id, user_id, directory_id, file_id in rows:
filter_user = dbutils.User.fromId(db, user_id)
if file_id: path = dbutils.describe_file(db, file_id)
else: path = dbutils.describe_directory(db, directory_id) + "/"
reviewfilters.setdefault(filter_user.fullname, []).append(path)
filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
count = 0
tbody = table.tbody()
for fullname in sorted(reviewfilters.keys()):
original_paths = sorted(reviewfilters[fullname])
trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
first = True
for original_path, trimmed_path in zip(original_paths, trimmed_paths):
row = tbody.tr("filter")
if first:
row.td("username", rowspan=len(original_paths)).text(fullname)
row.td("reviews", rowspan=len(original_paths)).text("reviews")
first = False
row.td("path").span().innerHTML(trimmed_path)
filter_id, filter_user = filter_data[(fullname, original_path)]
href = "javascript:removeReviewFilter(%d, %s, 'reviewer', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
row.td("remove").a(href=href).text("[remove]")
count += 1
tfoot = table.tfoot()
tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
if count > 10:
tbody.setAttribute("class", "hidden")
reviewer_filters_hidden.append(True)
else:
tfoot.setAttribute("class", "hidden")
reviewer_filters_hidden.append(False)
buttons = target.div("buttons")
if reviewer_filters_hidden:
buttons.button("showfilters", onclick="toggleReviewFilters('reviewers', $(this));").text("%s Custom Filters" % ("Show" if reviewer_filters_hidden[0] else "Hide"))
if review.applyfilters and review.repository.parent and not review.applyparentfilters:
buttons.button("applyparentfilters", onclick="applyParentFilters();").text("Apply Upstream Filters")
buttons.button("addreviewer", onclick="addReviewer();").text("Add Reviewer")
buttons.button("manage", onclick="location.href='managereviewers?review=%d';" % review.id).text("Manage Assignments")
def renderWatchers(target):
if review.watchers:
for index, watcher in enumerate(review.watchers):
if index != 0: target.text(", ")
span = target.span("user %s" % watcher.status)
span.span("name").text(watcher.fullname)
if watcher.status == 'absent':
span.span("status").text(" (%s)" % watcher.getAbsence(db))
elif watcher.status == 'retired':
span.span("status").text(" (retired)")
else:
target.i().text("No watchers.")
cursor.execute("""SELECT reviewfilters.id, reviewfilters.uid, reviewfilters.directory, reviewfilters.file
FROM reviewfilters
JOIN users ON (reviewfilters.uid=users.id)
WHERE reviewfilters.review=%s
AND reviewfilters.type='watcher'
AND users.status!='retired'""",
(review.id,))
rows = cursor.fetchall()
watcher_filters_hidden = []
if rows:
table = target.table("reviewfilters watchers")
row = table.thead().tr("h1")
row.th("h1", colspan=4).text("Custom filters:")
filter_data = {}
reviewfilters = {}
for filter_id, user_id, directory_id, file_id in rows:
filter_user = dbutils.User.fromId(db, user_id)
if file_id: path = dbutils.describe_file(db, file_id)
else: path = dbutils.describe_directory(db, directory_id) + "/"
reviewfilters.setdefault(filter_user.fullname, []).append(path)
filter_data[(filter_user.fullname, path)] = (filter_id, filter_user)
count = 0
tbody = table.tbody()
for fullname in sorted(reviewfilters.keys()):
original_paths = sorted(reviewfilters[fullname])
trimmed_paths = diff.File.eliminateCommonPrefixes(original_paths[:])
first = True
for original_path, trimmed_path in zip(original_paths, trimmed_paths):
row = tbody.tr("filter")
if first:
row.td("username", rowspan=len(original_paths)).text(fullname)
row.td("reviews", rowspan=len(original_paths)).text("watches")
first = False
row.td("path").span().innerHTML(trimmed_path)
filter_id, filter_user = filter_data[(fullname, original_path)]
href = "javascript:removeReviewFilter(%d, %s, 'watcher', %s, %s);" % (filter_id, filter_user.getJSConstructor(), htmlutils.jsify(original_path), "true" if filter_user != user else "false")
row.td("remove").a(href=href).text("[remove]")
count += 1
tfoot = table.tfoot()
tfoot.tr().td(colspan=4).text("%d line%s hidden" % (count, "s" if count > 1 else ""))
if count > 10:
tbody.setAttribute("class", "hidden")
watcher_filters_hidden.append(True)
else:
tfoot.setAttribute("class", "hidden")
watcher_filters_hidden.append(False)
buttons = target.div("buttons")
if watcher_filters_hidden:
buttons.button("showfilters", onclick="toggleReviewFilters('watchers', $(this));").text("%s Custom Filters" % ("Show" if watcher_filters_hidden[0] else "Hide"))
buttons.button("addwatcher", onclick="addWatcher();").text("Add Watcher")
if user not in review.reviewers and user not in review.owners:
if user not in review.watchers:
buttons.button("watch", onclick="watchReview();").text("Watch Review")
elif review.watchers[user] == "manual":
buttons.button("watch", onclick="unwatchReview();").text("Stop Watching Review")
def renderEditOwners(target):
target.button("description", onclick="editOwners();").text("Edit Owners")
def renderEditDescription(target):
target.button("description", onclick="editDescription();").text("Edit Description")
def renderRecipientList(target):
cursor.execute("SELECT uid, fullname, include FROM reviewrecipientfilters JOIN users ON (uid=id) WHERE review=%s", (review.id,))
default_include = True
included = dict((owner.fullname, owner.id) for owner in review.owners)
excluded = {}
for user_id, fullname, include in cursor:
if user_id == 0: default_include = include
elif include: included[fullname] = user_id
elif user_id not in review.owners: excluded[fullname] = user_id
mode = None
users = None
buttons = []
opt_in_button = False
opt_out_button = False
if default_include:
if excluded:
mode = "Everyone except "
users = excluded
opt_out_button = user.fullname not in excluded
opt_in_button = not opt_out_button
else:
mode = "Everyone."
opt_out_button = True
else:
if included:
mode = "No-one except "
users = included
opt_in_button = user.fullname not in included
opt_out_button = not opt_in_button
else:
mode = "No-one at all."
opt_in_button = True
if user in review.owners or user in review.reviewers or user in review.watchers:
if opt_in_button:
buttons.append(("Include me, please!", "includeRecipient(%d);" % user.id))
if opt_out_button:
buttons.append(("Exclude me, please!", "excludeRecipient(%d);" % user.id))
target.span("mode").text(mode)
if users:
container = target.span("users")
first = True
for fullname in sorted(users.keys()):
if first: first = False
else: container.text(", ")
container.span("user", critic_user_id=users[fullname]).text(fullname)
container.text(".")
if buttons:
container = target.div("buttons")
for label, onclick in buttons:
container.button(onclick=onclick).text(label)
row("Branch", renderBranchName, "The branch containing the commits to review.", right=False)
row("Owner%s" % ("s" if len(review.owners) > 1 else ""), ", ".join(owner.fullname for owner in review.owners), "The users who created and/or owns the review.", right=renderEditOwners)
if review.description:
row("Description", review.description, "A longer description of the changes to be reviewed.", linkify=linkToCommit, cellId="description", right=renderEditDescription)
row("Reviewers", renderReviewers, "Users responsible for reviewing the changes in this review.", right=False)
row("Watchers", renderWatchers, "Additional users who receive e-mails about updates to this review.", right=False)
row("Recipient List", renderRecipientList, "Users (among the reviewers and watchers) who will receive any e-mails about the review.", right=False)
profiler.check("basic")
review_state = review.getReviewState(db)
profiler.check("review state")
progress = target.table('paleyellow progress', align='center')
progress_header = progress.tr().td('h1', colspan=3).h1()
progress_header.text("Review Progress")
progress_header_right = progress_header.span("right")
progress_header_right.text("Display log: ")
progress_header_right.a(href="showreviewlog?review=%d&granularity=module" % review.id).text("[per module]")
progress_header_right.text()
progress_header_right.a(href="showreviewlog?review=%d&granularity=file" % review.id).text("[per file]")
progress_h1 = progress.tr().td('percent', colspan=3).h1()
title_data = { 'id': 'r/%d' % review.id,
'summary': review.summary,
'progress': str(review_state) }
if review.state == "closed":
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Finished!")
elif review.state == "dropped":
progress_h1.text("Dropped...")
elif review.state == "open" and review_state.accepted:
progress_h1.img(src=htmlutils.getStaticResourceURI("seal-of-approval-left.png"),
style="position: absolute; margin-left: -80px; margin-top: -100px")
progress_h1.text("Accepted!")
progress_h1.div().span("remark").text("Hurry up and close it before anyone has a change of heart.")
else:
progress_h1.text(review_state.getProgress())
if review_state.issues:
progress_h1.span("comments").text(" and ")
progress_h1.text("%d" % review_state.issues)
progress_h1.span("comments").text(" issue%s" % (review_state.issues > 1 and "s" or ""))
if review_state.getPercentReviewed() != 100.0:
cursor = db.cursor()
cursor.execute("""SELECT 1
FROM reviewfiles
LEFT OUTER JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
AND reviewuserfiles.uid IS NULL""",
(review.id,))
if cursor.fetchone():
progress.tr().td('stuck', colspan=3).a(href="showreviewlog?review=%d&granularity=file&unassigned=yes" % review.id).text("Not all changes have a reviewer assigned!")
cursor.execute("""SELECT uid, MIN(reviewuserfiles.time)
FROM reviewfiles
JOIN reviewuserfiles ON (reviewuserfiles.file=reviewfiles.id)
WHERE reviewfiles.review=%s
AND reviewfiles.state='pending'
GROUP BY reviewuserfiles.uid""",
(review.id,))
def total_seconds(delta):
return delta.days * 60 * 60 * 24 + delta.seconds
now = datetime.datetime.now()
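        # Only list reviewers whose oldest pending change has been waiting for
        # more than eight hours.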
pending_reviewers = [(dbutils.User.fromId(db, user_id), total_seconds(now - timestamp)) for (user_id, timestamp) in cursor.fetchall() if total_seconds(now - timestamp) > 60 * 60 * 8]
if pending_reviewers:
progress.tr().td('stragglers', colspan=3).text("Needs review from")
for reviewer, seconds in pending_reviewers:
if reviewer.status == 'retired': continue
elif reviewer.status == 'absent': warning = " absent"
elif not reviewer.getPreference(db, "email.activated"): warning = " no-email"
else: warning = ""
if seconds < 60 * 60 * 24:
hours = seconds / (60 * 60)
duration = " (%d hour%s)" % (hours, "s" if hours > 1 else "")
elif seconds < 60 * 60 * 24 * 7:
days = seconds / (60 * 60 * 24)
duration = " (%d day%s)" % (days, "s" if days > 1 else "")
elif seconds < 60 * 60 * 24 * 30:
weeks = seconds / (60 * 60 * 24 * 7)
duration = " (%d week%s)" % (weeks, "s" if weeks > 1 else "")
else:
duration = " (wake up!)"
progress.tr().td('straggler' + warning, colspan=3).text("%s%s" % (reviewer.fullname, duration))
if user in review.owners:
progress.tr().td('pinging', colspan=3).span().text("Send a message to these users by pinging the review.")
title_format = user.getPreference(db, 'ui.title.showReview')
try:
document.setTitle(title_format % title_data)
except Exception, exc:
document.setTitle(traceback.format_exception_only(type(exc), exc)[0].strip())
profiler.check("progress")
check = profiler.start("ApprovalColumn.fillCache")
def linkToCommit(commit):
return "%s?review=%d" % (commit.sha1[:8], review.id)
approval_cache = {}
ApprovalColumn.fillCache(db, user, review, approval_cache, profiler)
check.stop()
summary_column = SummaryColumn(review, linkToCommit)
summary_column.fillCache(db, review)
profiler.check("SummaryColumn.fillCache")
columns = [(10, log.html.WhenColumn()),
(60, summary_column),
(16, log.html.AuthorColumn()),
(7, ApprovalColumn(user, review, ApprovalColumn.APPROVED, approval_cache)),
(7, ApprovalColumn(user, review, ApprovalColumn.TOTAL, approval_cache))]
def renderReviewPending(db, target):
if not user.isAnonymous():
target.text("Filter: ")
if hasPendingChanges:
target.a(href="showcommit?review=%d&filter=pending" % review.id, title="All changes you need to review.").text("[pending]")
target.text()
if user in review.reviewers:
target.a(href="showcommit?review=%d&filter=reviewable" % review.id, title="All changes you can review, including what you've already reviewed.").text("[reviewable]")
target.text()
target.a(href="showcommit?review=%d&filter=relevant" % review.id, title="All changes that match your filters.").text("[relevant]")
target.text()
target.text("Manual: ")
target.a(href="filterchanges?review=%d" % review.id, title="Manually select what files to display of the changes from all commits.").text("[full]")
target.text()
target.a(href="javascript:void(filterPartialChanges());", title="Manually select what what files to display of the changes in a selection of commits.").text("[partial]")
req.addResponseHeader("ETag", review.getETag(db, user))
if user.getPreference(db, "review.useMustRevalidate"):
req.addResponseHeader("Cache-Control", "must-revalidate")
yield flush(target)
try:
try: prefetch_commits.getCommits(db)
except AttributeError: raise Exception, prefetch_commits.error
profiler.check("FetchCommits.getCommits()")
cursor.execute("""SELECT child
FROM changesets
JOIN reviewchangesets ON (reviewchangesets.changeset=changesets.id)
JOIN commits ON (commits.id=changesets.child)
WHERE review=%s""",
(review.id,))
commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
cursor.execute("""SELECT id, old_head, new_head, new_upstream, uid, branch
FROM reviewrebases
WHERE review=%s""",
(review.id,))
all_rebases = [(rebase_id,
gitutils.Commit.fromId(db, repository, old_head),
gitutils.Commit.fromId(db, repository, new_head) if new_head else None,
dbutils.User.fromId(db, user_id),
gitutils.Commit.fromId(db, repository, new_upstream) if new_upstream is not None else None,
branch_name)
for rebase_id, old_head, new_head, new_upstream, user_id, branch_name in cursor]
bottom_right = None
finished_rebases = filter(lambda item: item[2] is not None, all_rebases)
current_rebases = filter(lambda item: item[2] is None, all_rebases)
if current_rebases:
assert len(current_rebases) == 1
def renderCancelRebase(db, target):
target.button("cancelrebase").text("Cancel Rebase")
if user == current_rebases[0][3]:
bottom_right = renderCancelRebase
else:
def renderPrepareRebase(db, target):
target.button("preparerebase").text("Prepare Rebase")
bottom_right = renderPrepareRebase
if finished_rebases:
cursor.execute("""SELECT commit
FROM reachable
WHERE branch=%s""",
(review.branch.id,))
actual_commits = [gitutils.Commit.fromId(db, repository, commit_id) for (commit_id,) in cursor]
else:
actual_commits = []
log.html.render(db, target, "Commits (%d)", commits=commits, columns=columns, title_right=renderReviewPending, rebases=finished_rebases, branch_name=review.branch.name, bottom_right=bottom_right, review=review, highlight=highlight, profiler=profiler, user=user, extra_commits=actual_commits)
yield flush(target)
profiler.check("log")
except gitutils.GitError, error:
div = target.div("error")
div.h1().text("Error!")
if error.sha1:
div.text("The commit %s is missing from the repository." % error.sha1)
else:
div.text("Failed to read commits from the repository: %s" % error.message)
all_chains = review.getCommentChains(db, user, skip=set(['commits', 'lines']))
profiler.check("chains (load)")
if all_chains:
issue_chains = filter(lambda chain: chain.type == "issue", all_chains)
draft_issues = filter(lambda chain: chain.state == "draft", issue_chains)
open_issues = filter(lambda chain: chain.state == "open", issue_chains)
addressed_issues = filter(lambda chain: chain.state == "addressed", issue_chains)
closed_issues = filter(lambda chain: chain.state == "closed", issue_chains)
note_chains = filter(lambda chain: chain.type == "note", all_chains)
draft_notes = filter(lambda chain: chain.state == "draft", note_chains)
open_notes = filter(lambda chain: chain.state != "draft" and chain.state != "empty", note_chains)
else:
open_issues = []
open_notes = []
chains = target.table("paleyellow comments", align="center", cellspacing=0)
h1 = chains.tr("h1").td("h1", colspan=3).h1().text("Comments")
links = h1.span("links")
if all_chains:
links.a(href="showcomments?review=%d&filter=all" % review.id).text("[display all]")
if not user.isAnonymous():
links.a(href="showcomments?review=%d&filter=all&blame=%s" % (review.id, user.name)).text("[in my commits]")
cursor.execute("""SELECT count(commentstoread.comment) > 0
FROM commentchains
JOIN comments ON (comments.chain=commentchains.id)
JOIN commentstoread ON (commentstoread.comment=comments.id)
WHERE commentchains.review=%s
AND commentstoread.uid=%s""",
[review.id, user.id])
if cursor.fetchone()[0]:
links.a(href="showcomments?review=%d&filter=toread" % review.id).text("[display unread]")
def renderChains(target, chains):
for chain in chains:
row = target.tr("comment %s %s" % (chain.type, chain.state))
row.td("author").text(chain.user.fullname)
row.td("title").a(href="showcomment?chain=%d" % chain.id).innerHTML(chain.leader())
ncomments = chain.countComments()
nunread = chain.countUnread()
cell = row.td("when")
if ncomments == 1:
if nunread: cell.b().text("Unread")
else: cell.text("No replies")
else:
if nunread: cell.b().text("%d of %d unread" % (nunread, ncomments))
else: cell.text("%d repl%s" % (ncomments - 1, "ies" if ncomments > 2 else "y"))
if draft_issues:
h2 = chains.tr("h2", id="draft-issues").td("h2", colspan=3).h2().text("Draft Issues")
h2.a(href="showcomments?review=%d&filter=draft-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_issues)
if open_issues:
h2 = chains.tr("h2", id="open-issues").td("h2", colspan=3).h2().text("Open Issues")
h2.a(href="showcomments?review=%d&filter=open-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_issues)
if addressed_issues:
h2 = chains.tr("h2", id="addressed-issues").td("h2", colspan=3).h2().text("Addressed Issues")
h2.a(href="showcomments?review=%d&filter=addressed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=addressed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, addressed_issues)
if closed_issues:
h2 = chains.tr("h2", id="closed-issues").td("h2", colspan=3).h2().text("Resolved Issues")
h2.a(href="showcomments?review=%d&filter=closed-issues" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=closed-issues&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, closed_issues)
if draft_notes:
h2 = chains.tr("h2", id="draft-notes").td("h2", colspan=3).h2().text("Draft Notes")
h2.a(href="showcomments?review=%d&filter=draft-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=draft-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, draft_notes)
if open_notes:
h2 = chains.tr("h2", id="notes").td("h2", colspan=3).h2().text("Notes")
h2.a(href="showcomments?review=%d&filter=open-notes" % review.id).text("[display all]")
h2.a(href="showcomments?review=%d&filter=open-notes&blame=%s" % (review.id, user.name)).text("[in my commits]")
renderChains(chains, open_notes)
buttons = chains.tr("buttons").td("buttons", colspan=3)
buttons.button(onclick="CommentChain.create('issue');").text("Raise Issue")
buttons.button(onclick="CommentChain.create('note');").text("Write Note")
profiler.check("chains (render)")
yield flush(target)
cursor.execute("""SELECT DISTINCT reviewfiles.file, theirs.uid
FROM reviewfiles
JOIN reviewuserfiles AS yours ON (yours.file=reviewfiles.id)
JOIN reviewuserfiles AS theirs ON (theirs.file=yours.file AND theirs.uid!=yours.uid)
WHERE reviewfiles.review=%s
AND yours.uid=%s""",
(review.id, user.id))
rows = cursor.fetchall()
profiler.check("shared assignments (query)")
if rows:
reviewers = {}
for file_id, user_id in rows:
reviewers.setdefault(file_id, {})[user_id] = set()
shared = target.table('paleyellow shared', align='center', cellspacing=0)
row = shared.tr('h1')
shared_header = row.td('h1', colspan=2).h1()
shared_header.text("Shared Assignments")
shared_buttons = row.td('buttons', colspan=2).span(style="display: none")
shared_buttons.button("confirm").text("Confirm")
shared_buttons.button("cancel").text("Cancel")
granularity = "module"
def moduleFromFile(file_id):
filename = dbutils.describe_file(db, file_id)
return getModuleFromFile(repository, filename) or filename
def formatFiles(files):
paths = sorted([dbutils.describe_file(db, file_id) for file_id in files])
if granularity == "file":
return diff.File.eliminateCommonPrefixes(paths)
else:
modules = set()
files = []
for path in paths:
module = getModuleFromFile(path)
if module: modules.add(module)
else: files.append(path)
return sorted(modules) + diff.File.eliminateCommonPrefixes(files)
files_per_team = review_utils.collectReviewTeams(reviewers)
teams_per_modules = {}
profiler.check("shared assignments (collect teams)")
for team, files in files_per_team.items():
modules = set()
for file_id in files:
modules.add(moduleFromFile(file_id))
teams_per_modules.setdefault(frozenset(modules), set()).update(team)
for modules, team in teams_per_modules.items():
row = shared.tr("reviewers")
cell = row.td("reviewers")
members = sorted([dbutils.User.fromId(db, user_id).fullname for user_id in team])
for member in members: cell.text(member).br()
row.td("willreview").innerHTML("<span class='also'>also</span> review changes in")
cell = row.td("files")
for path in diff.File.eliminateCommonPrefixes(sorted(modules)):
cell.span("file").innerHTML(path).br()
directory_ids = "[ %s ]" % ", ".join([str(dbutils.find_directory(db, path=path[:-1])) for path in modules if path.endswith("/")])
file_ids = "[ %s ]" % ", ".join([str(dbutils.find_file(db, path=path)) for path in modules if not path.endswith("/")])
user_ids = "[ %s ]" % ", ".join(map(str, team))
cell = row.td("buttons")
cell.button("accept", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("I will review this!")
cell.button("deny", critic_directory_ids=directory_ids, critic_file_ids=file_ids, critic_user_ids=user_ids).text("They will review this!")
yield flush(target)
profiler.check("shared assignments")
cursor.execute("SELECT batches.id, users.fullname, batches.comment, batches.time FROM batches JOIN users ON (users.id=batches.uid) WHERE batches.review=%s ORDER BY batches.id DESC", [review.id])
rows = cursor.fetchall()
if rows:
notes = dict([(chain.id, chain) for chain in open_notes])
batches = target.table("paleyellow batches", align="center", cellspacing=0)
batches.tr().td("h1", colspan=3).h1().text("Work Log")
for batch_id, user_fullname, chain_id, when in rows:
row = batches.tr("batch")
row.td("author").text(user_fullname)
title = "<i>No comment</i>"
if chain_id:
if chain_id in notes:
title = notes[chain_id].leader()
else:
for chain in all_chains:
if chain.id == chain_id:
title = chain.leader()
break
row.td("title").a(href="showbatch?batch=%d" % batch_id).innerHTML(title)
row.td("when").text(time.strftime("%Y-%m-%d %H:%M", when.timetuple()))
profiler.check("batches")
profiler.output(db, user, target)
yield flush()
if review.branch.head:
try: head_according_to_git = repository.revparse(review.branch.name)
except: head_according_to_git = None
head_according_to_us = review.branch.head.sha1
if head_according_to_git != head_according_to_us:
# The git repository disagrees with us. Potentially harmful updates
# to the branch will be rejected by the git hook while this is the
# case, but this means that "our" head might not be referenced at
# all and thus that it might be GC:ed by the git repository at some
# point. To avoid that, add a keepalive reference.
repository.keepalive(head_according_to_us)
yield "\n<!-- branch head mismatch: git=%s, us=%s (corrected) -->" % (head_according_to_git[:8] if head_according_to_git else "N/A", head_according_to_us[:8])
|
[
"[email protected]"
] | |
37a2620996f5b4f1543105bffdc6fb58220c624c
|
6a4ebebbe0d7f81efc4f1749054a2ed7242c0e58
|
/granary/test/test_googleplus.py
|
e12902c656d570b1ffc904713e8a4b875bb87829
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
skylarker/granary
|
6e192ecd2475febb3585728d5ba7afe34742107d
|
2fd8ef017588b955e78606242ce582849cfd57ac
|
refs/heads/master
| 2020-12-26T21:35:04.155528 | 2016-04-18T18:15:30 | 2016-04-18T18:15:30 | 56,891,160 | 1 | 0 | null | 2016-04-22T23:43:09 | 2016-04-22T23:43:09 | null |
UTF-8
|
Python
| false | false | 21,560 |
py
|
# coding=utf-8
"""Unit tests for googleplus.py.
See googleapiclient/http.py for details on using RequestMockBuilder to mock out
Google API calls. (This is the current doc on apiclient mocks, but it doesn't
mention RequestMockBuilder:
https://developers.google.com/api-client-library/python/guide/mocks )
TODO: figure out how to check the query parameters. Right now they're ignored. :/
"""
__author__ = ['Ryan Barrett <[email protected]>']
import copy
from email.message import Message
from email.mime.multipart import MIMEMultipart
import json
import os
from apiclient import discovery
from apiclient import http
import httplib2
from oauth_dropins import googleplus as oauth_googleplus
from oauth_dropins.webutil import util
from oauth_dropins.webutil import testutil
from granary import appengine_config
appengine_config.GOOGLE_CLIENT_ID = 'my client id'
appengine_config.GOOGLE_CLIENT_SECRET = 'my client secret'
from granary import googleplus
DISCOVERY_DOC = appengine_config.read(
os.path.join(os.path.dirname(__file__), '../../googleplus_api_discovery.json'))
def tag_uri(name):
return util.tag_uri('plus.google.com', name)
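# Helper used by the fixtures below to wrap bare ids in tag: URIs scoped to
# plus.google.com.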
ACTIVITY_GP = { # Google+
'kind': 'plus#activity',
'verb': 'post',
'id': '001',
'actor': {'id': '444', 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
},
}
ACTIVITY_AS = { # ActivityStreams
'kind': 'plus#activity',
'verb': 'post',
'id': tag_uri('001'),
'actor': {'id': tag_uri('444'), 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
'author': {'id': tag_uri('444'), 'displayName': 'Charles'},
'to': [{'objectType':'group', 'alias':'@public'}],
},
}
COMMENT_GP = { # Google+
'kind': 'plus#comment',
'verb': 'post',
'id': 'zyx.888',
'actor': {'id': '777', 'displayName': 'Eve'},
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
}
COMMENT_AS = { # ActivityStreams
'kind': 'plus#comment',
'verb': 'post',
'id': tag_uri('zyx.888'),
'url': 'http://post/url#zyx%23888',
'author': {'id': tag_uri('777'), 'displayName': 'Eve'},
'content': 'my content',
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
'to': [{'objectType':'group', 'alias':'@public'}],
}
PLUSONER = { # Google+
'kind': 'plus#person',
'id': '222',
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
}
LIKE = { # ActivityStreams
'id': tag_uri('001_liked_by_222'),
'url': 'http://plus.google.com/001#liked-by-222',
'objectType': 'activity',
'verb': 'like',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('222'),
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
},
}
RESHARER = { # Google+
'kind': 'plus#person',
'id': '444',
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
}
SHARE = { # ActivityStreams
'id': tag_uri('001_shared_by_444'),
'url': 'http://plus.google.com/001#shared-by-444',
'objectType': 'activity',
'verb': 'share',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('444'),
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
},
}
ACTIVITY_GP_EXTRAS = copy.deepcopy(ACTIVITY_GP) # Google+
ACTIVITY_GP_EXTRAS['object'].update({
'replies': {'totalItems': 1},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
})
ACTIVITY_AS_EXTRAS = copy.deepcopy(ACTIVITY_AS) # ActivityStreams
ACTIVITY_AS_EXTRAS['object'].update({
'replies': {'totalItems': 1, 'items': [COMMENT_AS]},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
'tags': [LIKE, SHARE],
})
# HTML from http://plus.google.com/
HTML_ACTIVITY_GP = [
["..."],
[1002, None, None, None, None, [1001, "z13gjrz4ymeldtd5f04chnrixnvpjjqy42o"],
{"33558957" : [
"",
"",
"",
"David Barrett",
"",
1440425513401,
None,
[], # first comment (if any) would be here
"z13gjrz4ymeldtd5f04chnrixnvpjjqy42o",
"",
"a:ext:client.sharebox.108380595987.apps.googleusercontent.com",
[None],
[None],
"",
None,
[None],
"105815303293125791402",
[None],
"https://lh4.googleusercontent.com/-OvNQMFbbks0/AAAAAAAAAAI/AAAAAAAAOuo/YXnsx5bfWxo/photo.jpg",
None,
u"Hi! It’s been a while since I’ve written because we’ve been hard at work, but I’m very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I know I’ve been hyping this up for a long time, and you’re…",
"+DavidBarrettQuinthar/posts/VefFHLMoCqV",
0,
0,
"./105815303293125791402",
[None], None,
[ # location
41.230564,
9.172682,
"(41.2305630, 9.1726818)",
"",
None,
"/maps/api/staticmap?center=41.230564,9.172682&zoom=14&size=300x220&sensor=false&markers=41.230564,9.172682&client=google-buzz&signature=GDLZ49Fe0-uc4BoVt-e7p-OmZ50%3D",
["1152921504606846977", "-7273273746059208260"],
"",
"https://maps.google.com?ll=41.230564,9.172682&q=41.230564,9.172682",
None,
"https://maps-api-ssl.google.com/maps/api/staticmap?center=41.230564,9.172682&zoom=15&size=100x100&sensor=false&client=google-buzz&signature=Doqggt3WB5BQzKieZRSA2VwHRXM%3D",
0, None, 412305629, 91726818, None, None, [None]
],
"", 0, 0, 0, 1, None, 0, 1, None, 0,
1440425513401,
] + [None] * 58 + [ # collapsed for brevity
[
[335, 0],
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
None, None, None, None,
[
1440425513266,
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
[None], [None], [None]
],
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
{
"39748951" : [
"http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/",
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
"Realtime Expense Reports are Here! (And so much more...)",
"Hi! It's been a while since I've written because we've been hard at work, but I'm very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I kn...",
None,
["//lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w120-h120",
120, 120, None, None, None, None, 120,
[2,
"https://lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w800-h800"]],
"//s2.googleusercontent.com/s2/favicons?domain=blog.expensify.com",
[[[350, 335, 0], "http://quinthar.com/",
{"41007156" : ["http://quinthar.com/", None, None, None, None, None,
None, [None], None, None, [None]]}]],
None, None, [None], "blog.expensify.com",] + [None] * 172 + [# collapsed for brevity
[[339, 338, 336, 335, 0],
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
{"40265033" : [
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
"http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508",
None, None, None,
["//lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w120-h120",
120, 120, None, None, None, None, 120,
[2,
"https://lh6.googleusercontent.com/proxy/IvWQIbjjvIWCUhTACtHDQRysGY2NYqf-A6XWPOGMLdr4W5BHFjIeQw4ZOTDrkDA2oc1kKfCgkV7gT-iQIFvOaeUhtfEf_3BPBTNsmesTGSawvh5kednyc-Oi8MPmpdRZ_SE2=w800-h800"]],
# ...
]}]]}], # ...
]}],
# second element is non-post, under 7 items long
[1002, None, None],
# third element is non-post, item 6 is empty
[1002, None, None, None, None, None, {}],
] # ...
HTML_ACTIVITIES_GP_HEADER = """
<!DOCTYPE html><html lang="en" dir="ltr" ><head><meta name="referrer" content="origin"><base href="https://plus.google.com/"><style>
...
</style></head><body class="Td lj"><input type="text" name="hist_state" id="hist_state" style="display:none;"><iframe id="hist_frame" name="hist_frame1623222153" class="ss" tabindex="-1"></iframe><script>window['OZ_wizstart'] && window['OZ_wizstart']()</script>
<script>AF_initDataCallback({key: '199', isError: false , hash: '13', data:[2,0]
});</script><script>AF_initDataCallback({key: '161', isError: false , hash: '14', data:["os.con",[[]
,"these few lines test the code that collapses commas",
[,1,1,,,,20,,"social.google.com",[,]
,,,2,,,0,,15,,[[1002,2],"..."]],,[,],,,"""
HTML_ACTIVITIES_GP_FOOTER = """
]
]
});</script></body></html>"""
HTML_ACTIVITY_AS = { # Google+
'id': tag_uri('z13gjrz4ymeldtd5f04chnrixnvpjjqy42o'),
'url': 'https://plus.google.com/+DavidBarrettQuinthar/posts/VefFHLMoCqV',
'actor': {
'id': tag_uri('105815303293125791402'),
'url': 'https://plus.google.com/105815303293125791402',
'objectType': 'person',
'displayName': 'David Barrett',
'image': {
'url': 'https://lh4.googleusercontent.com/-OvNQMFbbks0/AAAAAAAAAAI/AAAAAAAAOuo/YXnsx5bfWxo/photo.jpg',
},
},
'verb': 'post',
'object': {
'id': tag_uri('z13gjrz4ymeldtd5f04chnrixnvpjjqy42o'),
'url': 'https://plus.google.com/+DavidBarrettQuinthar/posts/VefFHLMoCqV',
'objectType': 'note',
'published': '2015-08-24T14:11:53Z',
'updated': '2015-08-24T14:11:53Z',
'content': u'Hi! It’s been a while since I’ve written because we’ve been hard at work, but I’m very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I know I’ve been hyping this up for a long time, and you’re…',
'attachments': [
{
'objectType': 'article',
'displayName': 'Realtime Expense Reports are Here! (And so much more...)',
'content': "Hi! It's been a while since I've written because we've been hard at work, but I'm very happy to take the wraps off our latest feature (or really, series of features): Realtime Expense Reports. I kn...",
'url': 'http://blog.expensify.com/2015/08/24/realtime-expense-reports-are-here-and-so-much-more/',
'image': {
'url': 'http://0.gravatar.com/blavatar/ee4c59993abdb971416349dee59ca9d1?s=200&ts=1440425508',
}
}
]
},
'location': {
'displayName': '(41.2305630, 9.1726818)',
'url': 'https://maps.google.com?ll=41.230564,9.172682&q=41.230564,9.172682',
'latitude': 41.230564,
'longitude': 9.172682,
},
# 'access': {
# 'kind': 'plus#acl',
# 'description': 'Public',
# 'items': [
# {
# 'type': 'public'
# }
# ]
# }
}
CREDS_JSON = json.dumps({
'access_token': 'my token',
'client_id': appengine_config.GOOGLE_CLIENT_ID,
'client_secret': appengine_config.GOOGLE_CLIENT_SECRET,
'refresh_token': 'my refresh token',
'token_expiry': '',
'token_uri': '',
'user_agent': '',
'invalid': '',
})
class GooglePlusTest(testutil.HandlerTest):
def setUp(self):
super(GooglePlusTest, self).setUp()
self.auth_entity = oauth_googleplus.GooglePlusAuth(
id='my_string_id',
user_json=json.dumps({
'displayName': 'Bob',
}),
creds_json=CREDS_JSON)
self.googleplus = googleplus.GooglePlus(auth_entity=self.auth_entity)
def tearDown(self):
oauth_googleplus.json_service = None
def init(self, **kwargs):
"""Sets up the API service from test_googleplus_discovery.
Pass a requestBuilder or http kwarg to inject expected HTTP requests and
responses.
"""
oauth_googleplus.json_service = discovery.build_from_document(
DISCOVERY_DOC, **kwargs)
def test_get_comment(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.comments.get': (None, json.dumps(COMMENT_GP)) # None means 200 OK
}))
self.assert_equals(COMMENT_AS, self.googleplus.get_comment('234'))
def test_get_activity(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.get': (None, json.dumps(ACTIVITY_GP))
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(activity_id='234'))
def test_get_activities_no_extras_to_fetch(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (None, json.dumps({
'items': [ACTIVITY_GP, ACTIVITY_GP],
})),
},
# ACTIVITY_GP doesn't say there are any comments, +1s, or shares (via
# totalItems), so we shouldn't ask for them.
check_unexpected=True))
got = self.googleplus.get_activities(fetch_replies=True, fetch_likes=True,
fetch_shares=True)
self.assert_equals([ACTIVITY_AS, ACTIVITY_AS], got)
def test_get_activities_fetch_extras(self):
self.init()
# Generate minimal fake responses for each request in the batch.
#
# Test with multiple activities to cover the bug described in
# https://github.com/snarfed/bridgy/issues/22#issuecomment-56329848 :
# util.CacheDict.get_multi() didn't originally handle generator args.
batch = MIMEMultipart()
for i, item in enumerate((COMMENT_GP, PLUSONER, RESHARER) * 2):
msg = Message()
msg.set_payload('HTTP/1.1 200 OK\n\r\n\r\n' + json.dumps({'items': [item]}))
msg['Content-ID'] = '<response-abc+%d>' % (i + 1)
batch.attach(msg)
# as_string() must be called before get_boundary() to generate the
# boundaries between parts, but can't be called again, so we capture the
# result.
batch_str = batch.as_string()
gpe_1 = ACTIVITY_GP_EXTRAS
gpe_2 = copy.deepcopy(gpe_1)
gpe_2['id'] = '002'
http_seq = http.HttpMockSequence(
[({'status': '200'}, json.dumps({'items': [gpe_1, gpe_2]})),
({'status': '200',
'content-type': 'multipart/mixed; boundary="%s"' % batch.get_boundary()},
batch_str),
({'status': '200'}, json.dumps({'items': [gpe_1, gpe_2]})),
])
self.auth_entity.http = lambda: http_seq
ase_1 = ACTIVITY_AS_EXTRAS
ase_2 = copy.deepcopy(ase_1)
ase_2['id'] = tag_uri('002')
ase_2['object']['tags'][0]['id'] = tag_uri('002_liked_by_222')
ase_2['object']['tags'][1]['id'] = tag_uri('002_shared_by_444')
cache = util.CacheDict()
self.assert_equals([ase_1, ase_2], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
for id in '001', '002':
for prefix in 'AGL ', 'AGS ':
self.assertEquals(1, cache[prefix + id])
# no new extras, so another request won't fill them in
as_1 = copy.deepcopy(ACTIVITY_AS)
for field in 'replies', 'plusoners', 'resharers':
as_1['object'][field] = {'totalItems': 1}
as_2 = copy.deepcopy(as_1)
as_2['id'] = tag_uri('002')
self.assert_equals([as_1, as_2], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
def test_get_activities_search(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.search': (None, json.dumps({'items': [ACTIVITY_GP]})),
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(search_query='qwert'))
# TODO: resurrect?
# def test_get_activities_request_etag(self):
# self.init()
# http_seq = http.HttpMockSequence(
# [({'status': '200'}, json.dumps({'items': [item]}))])
# self.auth_entity.http = lambda: http_seq
# resp = self.googleplus.get_activities_response(
# fetch_replies=True, fetch_likes=True, fetch_shares=True)
# self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_response_etag(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 200}),
json.dumps({'etag': '"my etag"'})),
}))
resp = self.googleplus.get_activities_response(
fetch_replies=True, fetch_likes=True, fetch_shares=True)
self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_304_not_modified(self):
"""Requests with matching ETags return 304 Not Modified."""
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 304}), '{}'),
}))
self.assert_equals([], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True))
def test_postprocess_actor_url_field(self):
pa = self.googleplus.postprocess_actor
self.assertEqual({'foo': 'bar'}, pa({'foo': 'bar'}))
self.assertEqual({'url': 'x',
'urls': [{'value': 'x'}]},
pa({'urls': [{'value': 'x'}]}))
self.assertEqual({'url': 'x',
'urls': [{'value': 'x'}, {'value': 'y'}]},
pa({'urls': [{'value': 'x'}, {'value': 'y'}]}))
# check alias
self.assertEquals(self.googleplus.postprocess_actor,
self.googleplus.user_to_actor)
def test_get_actor_minimal(self):
self.assert_equals({'displayName': 'Bob'}, self.googleplus.get_actor())
def test_get_actor(self):
user = {
'id': '222',
'displayName': 'Alice',
'urls': [{'value': 'https://profiles.google.com/alice'}],
}
self.auth_entity.user_json = json.dumps(user)
user.update({
'id': tag_uri('222'),
'url': 'https://profiles.google.com/alice',
})
self.assert_equals(user, self.googleplus.get_actor())
def test_get_actor_other_user(self):
with self.assertRaises(NotImplementedError):
self.googleplus.get_actor('other')
def test_get_activities_extra_fetches_fail(self):
"""Sometimes the extras fetches return errors. Ignore that."""
self.init()
batch = MIMEMultipart()
for i in range(3):
msg = Message()
msg.set_payload('HTTP/1.1 500 Foo Bar\n\r\n\r\n')
msg['Content-ID'] = '<response-abc+%d>' % (i + 1)
batch.attach(msg)
# as_string() must be called before get_boundary() to generate the
# boundaries between parts, but can't be called again, so we capture the
# result.
batch_str = batch.as_string()
self.auth_entity.http = lambda: http.HttpMockSequence(
[({'status': '200'}, json.dumps({'items': [ACTIVITY_GP_EXTRAS]})),
({'status': '200',
'content-type': 'multipart/mixed; boundary="%s"' % batch.get_boundary()},
batch_str),
])
cache = util.CacheDict()
self.assert_equals([ACTIVITY_AS], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
for prefix in 'AGC ', 'AGL ', 'AGS ':
self.assertNotIn(prefix + '001', cache)
def test_html_to_activities(self):
html = (HTML_ACTIVITIES_GP_HEADER + json.dumps(HTML_ACTIVITY_GP) +
HTML_ACTIVITIES_GP_FOOTER)
self.assert_equals([HTML_ACTIVITY_AS], self.googleplus.html_to_activities(html))
def test_html_to_activities_plusoned(self):
html_gp = copy.deepcopy(HTML_ACTIVITY_GP)
html_gp[1][6].values()[0][69] = [
202,
[['Billy Bob',
'1056789',
1,
1,
'https://lh3.googleusercontent.com/billybob.jpg',
'https://plus.google.com/+BillyBob',
'male',
]],
# ...
]
expected = copy.deepcopy(HTML_ACTIVITY_AS)
expected.update({
'verb': 'like',
'actor': {
'id': tag_uri('1056789'),
'url': 'https://plus.google.com/+BillyBob',
'objectType': 'person',
'displayName': 'Billy Bob',
'image': {'url': 'https://lh3.googleusercontent.com/billybob.jpg'},
},
})
html = (HTML_ACTIVITIES_GP_HEADER + json.dumps(html_gp) +
HTML_ACTIVITIES_GP_FOOTER)
self.assert_equals([expected], self.googleplus.html_to_activities(html))
def test_html_to_activities_similar_to_plusoned(self):
html_gp = copy.deepcopy(HTML_ACTIVITY_GP)
for data_at_69 in None, [], [None], [None, None], [None, [None]]:
html_gp[1][6].values()[0][69] = data_at_69
html = (HTML_ACTIVITIES_GP_HEADER + json.dumps(html_gp) +
HTML_ACTIVITIES_GP_FOOTER)
self.assert_equals([HTML_ACTIVITY_AS],
self.googleplus.html_to_activities(html))
def test_html_to_activities_missing_data(self):
self.assert_equals([], self.googleplus.html_to_activities(''))
|
[
"[email protected]"
] | |
73b01d6e83f15e3b8998e48fde1d8e9a8e9c8657
|
5b7a0d2c364e40581eeff6c592067c954b96aa5b
|
/test_circle_ellipse.py
|
d03fd6ea80484a28a8acc42dbf20a692f6fa80ae
|
[] |
no_license
|
skconan/dice_detection
|
a0f5afbfd1d5e38cf6f5d72872103280690e5ffc
|
da5b065398c0976b90833a10e6dfcde162ce1540
|
refs/heads/master
| 2020-03-18T16:42:32.272709 | 2018-07-05T04:26:47 | 2018-07-05T04:28:03 | 134,981,877 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,445 |
py
|
import cv2 as cv
from lib import *
import numpy as np
from dice_detection import *
if __name__=='__main__':
cap = cv.VideoCapture(CONST.VDO_PATH + 'dice_01.mp4')
while True:
ret, image = cap.read()
if image is None:
continue
# image = cv.resize(image,(0,0),fx=0.5,fy=0.5)
image = pre_processing(image)
mask_th = find_mask_threshold(image)
img = mask_th.copy()
img.fill(0)
_,cnts,hierachy = cv.findContours(mask_th,cv.RETR_CCOMP,cv.CHAIN_APPROX_NONE)
ct = 0
x_min = 100000
x_max = -1
y_min = 100000
y_max = -1
for (cnt,hh) in zip(cnts,hierachy[0]):
if len(cnt) < 5:
continue
(x,y),(w,h),angle = ellipse = cv.fitEllipse(cnt)
x,y,_,_ = cv.boundingRect(cnt)
area = cv.contourArea(cnt)
area_ellipse = math.pi * (w/2.0) * (h/2.0)
hull = cv.convexHull(cnt)
hull_area = cv.contourArea(hull)
solidity = float(area)/hull_area
print(ct,w,h,w/h, solidity, hh)
ct += 1
# print()
if not (list(hh[2:]) == [-1,-1]):
continue
if not (w >= 8 and h>=8):
continue
if not 0.35 <= float(w)/h < 1.2:
continue
if not solidity >= 0.925 or not area/area_ellipse >= 0.8:
continue
if area > 10000:
continue
box = cv.boxPoints(ellipse)
box = np.int0(box)
cv.ellipse(img,ellipse,(255),-1)
x,y,w,h = cv.boundingRect(cnt)
dice_size = max(h/2.0,w/2.0) * 9
# cv.rectangle(img,(int(x-(w*0.5)),int(y-(h*0.5))),(int(x+(w*4.5)),int(y+(h*4.5))),(155),1)
cv.rectangle(img,(int(x-(w*2)),int(y-(h*2))),(int(x+(w*2.75)),int(y+(h*2.75))),(155),1)
# cv.rectangle(img,(int(x+(w*0.5)),int(y+(h*0.5))),(int(x-(w*4.5)),int(y-(h*4.5))),(155),1)
cv.rectangle(img,(int(x),int(y)),(int(x+w),int(y+h)),(155),1)
# img = cv.drawContours(img,[box],0,(0,0,255),1)
# img = cv.drawContours(img,cnt,-1,(0,0,255),1)
cv.imshow('img',img)
cv.imshow('image',image)
k = cv.waitKey(-1) & 0xff
if k == ord('q'):
break
cap.release()
cv.destroyAllWindows()
|
[
"[email protected]"
] | |
d7df6a4d66ed2fa92ca477942ec9176c1f23591a
|
f5f771cd8600c2aeb7fc9b192d9084ec5fdf3616
|
/lux/extensions/odm/mapper.py
|
ef04cc0a9b43586b1fb4efb156df2f1e77bd748a
|
[
"BSD-3-Clause"
] |
permissive
|
SirZazu/lux
|
75fe9fde4ddaee1c9c17e55c6e6d07a289ea2f5b
|
d647c34d11d1172d40e16b6afaba4ee67950fb5a
|
refs/heads/master
| 2021-01-21T19:40:46.536485 | 2015-06-02T16:30:18 | 2015-06-02T16:30:18 | 36,931,033 | 0 | 3 | null | 2015-10-09T14:08:26 | 2015-06-05T12:15:21 |
Python
|
UTF-8
|
Python
| false | false | 11,508 |
py
|
import re
import os
import logging
from copy import copy
from contextlib import contextmanager
from inspect import ismodule
from importlib import import_module
from itertools import chain
from sqlalchemy import MetaData, Table, inspect, event, exc
from sqlalchemy.engine import create_engine
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.orm.session import Session
from pulsar import ImproperlyConfigured
from pulsar.apps.data import Store, create_store
_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')
logger = logging.getLogger('lux.odm')
class BaseModel(object):
@declared_attr
def __tablename__(self):
return self.__name__.lower()
Model = declarative_base(cls=BaseModel)
class Mapper:
'''SQLAlchemy wrapper for lux applications
'''
def __init__(self, app, binds):
self.app = app
self._autodiscover(binds)
def __getitem__(self, model):
return self._declarative_register[model]
def __getattr__(self, name):
if name in self._declarative_register:
return self._declarative_register[name]
raise AttributeError('No model named "%s"' % name)
def database_create(self, database, **params):
'''Create databases for each engine and return a new :class:`.Mapper`.
'''
binds = {}
dbname = database
for key, engine in self.keys_engines():
if hasattr(database, '__call__'):
dbname = database(engine)
assert dbname, "Cannot create a database, no db name given"
key = key if key else 'default'
binds[key] = self._database_create(engine, dbname)
return self.__class__(self.app, binds)
def database_all(self):
'''Return a dictionary mapping engines with databases
'''
all = {}
for engine in self.engines():
all[engine] = self._database_all(engine)
return all
def database_drop(self, database=None, **params):
dbname = database
for engine in self.engines():
if hasattr(database, '__call__'):
dbname = database(engine)
assert dbname, "Cannot drop database, no db name given"
self._database_drop(engine, dbname)
def tables(self):
tables = []
for engine in self.engines():
tbs = engine.table_names()
if tbs:
tables.append((str(engine.url), tbs))
return tables
def table_create(self, remove_existing=False):
"""Creates all tables.
"""
for engine in self.engines():
tables = self._get_tables(engine)
if not remove_existing:
self.metadata.create_all(engine, tables=tables)
else:
pass
def table_drop(self):
"""Drops all tables.
"""
for engine in self.engines():
self.metadata.drop_all(engine, tables=self._get_tables(engine))
def reflect(self, bind='__all__'):
"""Reflects tables from the database.
"""
self._execute_for_all_tables(bind, 'reflect', skip_tables=True)
@contextmanager
def begin(self, close=True, expire_on_commit=False, **options):
"""Provide a transactional scope around a series of operations.
By default, ``expire_on_commit`` is set to False so that instances
can be used outside the session.
"""
session = self.session(expire_on_commit=expire_on_commit, **options)
try:
yield session
session.commit()
except Exception:
session.rollback()
raise
finally:
if close:
session.close()
def session(self, **options):
options['binds'] = self.binds
return LuxSession(self, **options)
def get_engine(self, key=None):
'''Get an engine by key
'''
if key in self._engines:
return self._engines[key]
elif key in self._nosql_engines:
return self._nosql_engines[key]
def engines(self):
return chain(self._engines.values(), self._nosql_engines.values())
def keys_engines(self):
return chain(self._engines.items(), self._nosql_engines.items())
def close(self):
for engine in self.engines():
engine.dispose()
# INTERNALS
def _get_tables(self, engine):
tables = []
for table, eng in self.binds.items():
if eng == engine:
tables.append(table)
return tables
def _database_all(self, engine):
if isinstance(engine, Store):
return engine.database_all()
elif engine.name == 'sqlite':
database = engine.url.database
if os.path.isfile(database):
return [database]
else:
return []
else:
insp = inspect(engine)
return insp.get_schema_names()
def _database_create(self, engine, dbname):
if isinstance(engine, Store):
from pulsar.apps.greenio import wait
return wait(engine.database_create(dbname))
elif engine.name != 'sqlite':
conn = engine.connect()
# the connection will still be inside a transaction,
# so we have to end the open transaction with a commit
conn.execute("commit")
conn.execute('create database %s' % dbname)
conn.close()
url = copy(engine.url)
url.database = dbname
return str(url)
def _database_drop(self, engine, database):
logger.info('dropping database "%s" from %s', database, engine)
if engine.name == 'sqlite':
try:
os.remove(database)
except FileNotFoundError:
pass
elif isinstance(engine, Store):
engine.database_drop(database)
else:
conn = engine.connect()
conn.execute("commit")
conn.execute('drop database %s' % database)
conn.close()
def _autodiscover(self, binds):
        # Set up models and engines
if not binds:
binds = {}
elif isinstance(binds, str):
binds = {'default': binds}
if binds and 'default' not in binds:
raise ImproperlyConfigured('default datastore not specified')
self.metadata = MetaData()
self._engines = {}
self._nosql_engines = {}
self._declarative_register = {}
self.binds = {}
# Create all sql engines in the binds dictionary
# Quietly fails if the engine is not recognised,
        # it may be a NoSQL store
for name, bind in tuple(binds.items()):
key = None if name == 'default' else name
try:
self._engines[key] = create_engine(bind)
except exc.NoSuchModuleError:
self._nosql_engines[key] = create_store(bind)
#
if self._nosql_engines and not self.app.green_pool:
raise ImproperlyConfigured('NoSql stores requires GREEN_POOL')
for label, mod in module_iterator(self.app.config['EXTENSIONS']):
# Loop through attributes in mod_models
for name in dir(mod):
value = getattr(mod, name)
if isinstance(value, (Table, DeclarativeMeta)):
for table in value.metadata.sorted_tables:
if table.key not in self.metadata.tables:
engine = None
label = table.info.get('bind_label')
keys = ('%s.%s' % (label, table.key),
label, None) if label else (None,)
for key in keys:
engine = self.get_engine(key)
if engine:
break
assert engine
table.tometadata(self.metadata)
self.binds[table] = engine
if (isinstance(value, DeclarativeMeta) and
hasattr(value, '__table__')):
table = value.__table__
self._declarative_register[table.key] = value
class LuxSession(Session):
"""The sql alchemy session that lux uses.
It extends the default session system with bind selection and
modification tracking.
"""
def __init__(self, mapper, **options):
#: The application that this session belongs to.
self.mapper = mapper
if self.app.config['DATABASE_SESSION_SIGNALS']:
self.register()
super().__init__(**options)
@property
def app(self):
return self.mapper.app
def register(self):
if not hasattr(self, '_model_changes'):
self._model_changes = {}
event.listen(self, 'before_flush', self.record_ops)
event.listen(self, 'before_commit', self.record_ops)
event.listen(self, 'before_commit', self.before_commit)
event.listen(self, 'after_commit', self.after_commit)
event.listen(self, 'after_rollback', self.after_rollback)
@staticmethod
def record_ops(session, flush_context=None, instances=None):
try:
d = session._model_changes
except AttributeError:
return
for targets, operation in ((session.new, 'insert'),
(session.dirty, 'update'),
(session.deleted, 'delete')):
for target in targets:
state = inspect(target)
key = state.identity_key if state.has_identity else id(target)
d[key] = (target, operation)
@staticmethod
def before_commit(session):
try:
d = session._model_changes
except AttributeError:
return
# if d:
# before_models_committed.send(session.app,
# changes=list(d.values()))
@staticmethod
def after_commit(session):
try:
d = session._model_changes
except AttributeError:
return
# if d:
# models_committed.send(session.app, changes=list(d.values()))
# d.clear()
@staticmethod
def after_rollback(session):
try:
d = session._model_changes
except AttributeError:
return
# d.clear()
def module_iterator(application):
'''Iterate over applications modules
'''
if ismodule(application) or isinstance(application, str):
if ismodule(application):
mod, application = application, application.__name__
else:
try:
mod = import_module(application)
except ImportError:
# the module is not there
mod = None
if mod:
label = application.split('.')[-1]
try:
mod_models = import_module('.models', application)
except ImportError:
mod_models = mod
label = getattr(mod_models, 'APP_LABEL', label)
yield label, mod_models
else:
for app in application:
yield from module_iterator(app)
|
[
"[email protected]"
] | |
75a1c7bfd7129ce55f5eba80d259be9cc3f58c32
|
d4cd2476f8fa8a7d94e183a68bd0678971310c5b
|
/checkio/05_Alice_in_Wonderland/01_Alice_05_DigitDoublets.py
|
93be0ef309f0753e3758c5c296e1049c4e7b3414
|
[] |
no_license
|
gwqw/LessonsSolution
|
b495579f6d5b483c30d290bfa8ef0a2e29515985
|
0b841b1ae8867890fe06a5f0dcee63db9a3319a3
|
refs/heads/master
| 2020-07-05T19:15:53.758725 | 2019-10-01T11:34:44 | 2019-10-01T11:34:44 | 202,744,145 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,207 |
py
|
# check if nums differs only by one digit
def isOneDiff(n1, n2):
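    # e.g. isOneDiff(123, 121) -> True (one digit differs); isOneDiff(123, 321) -> False (two digits differ)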
n1 = str(n1)
n2 = str(n2)
diffcount = 0
for i in range(len(n1)):
if n1[i] != n2[i]: diffcount += 1
if diffcount > 1: return False
return (diffcount == 1)
# find next nums in list
def findnext(numbers):
first_num = numbers[0]
next_nums = []
for n in numbers[1:]:
if isOneDiff(n, first_num):
next_nums.append(n)
return next_nums
# move next number to second position
def regroupList(numbers, snum):
i = numbers.index(snum)
reslst = numbers[:]
n = reslst[i]
reslst[i] = reslst[1]
reslst[1] = n
return reslst
# construct all trees
def constrTree(numbers):
#print("inp_nums= ", numbers)
res_tree = []
isFinal = len(numbers) == 2
finalNum = numbers[-1]
# find next and form tree
next_nums = findnext(numbers)
#print("next_nums= ", next_nums)
for n in next_nums:
if n == finalNum:
#print("find final")
res_tree.append([numbers[0], n])
break
elif not isFinal:
lst = regroupList(numbers, n)
tmptree = constrTree(lst[1:])
for t in tmptree:
t.insert(0, numbers[0])
res_tree.append(t)
return res_tree
# find the shortest tree
def findShortest(trees):
short_len = 100000
short_tree = []
for t in trees:
if len(t) < short_len:
short_len = len(t)
short_tree = t
return short_tree
def checkio(numbers):
print("input_tree= ", numbers)
res_trees = constrTree(numbers)
print("res_trees= ", res_trees)
short_tree = findShortest(res_trees)
print("short_tree= ", short_tree)
return short_tree
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
assert checkio([123, 991, 323, 321, 329, 121, 921, 125, 999]) == [123, 121, 921, 991, 999], "First"
assert checkio([111, 222, 333, 444, 555, 666, 121, 727, 127, 777]) == [111, 121, 127, 727, 777], "Second"
assert checkio([456, 455, 454, 356, 656, 654]) == [456, 454, 654], "Third, [456, 656, 654] is correct too"
|
[
"="
] |
=
|
bf7d221c249a3241ed1caec79c3c80e33dfe5221
|
35fb414cc9f5c408dc5d2c8316a5b6e4de3ccf22
|
/test/templates/analyze_2l_2tau_cfg.py
|
569b94fbe3d5ab083963e3c54bb48fe7dbaef4c9
|
[] |
no_license
|
kartikmaurya/tth-htt
|
abf1abafc9335da9687938f8588550a86631f751
|
8486aa6f33085a7b2d665e9215b828970f6ee8a7
|
refs/heads/master
| 2020-05-05T02:09:31.876729 | 2019-04-05T06:54:50 | 2019-04-05T06:54:50 | 177,517,377 | 0 | 0 | null | 2019-03-25T05:01:21 | 2019-03-25T05:01:21 | null |
UTF-8
|
Python
| false | false | 4,412 |
py
|
import FWCore.ParameterSet.Config as cms
import os
from tthAnalysis.HiggsToTauTau.configs.recommendedMEtFilters_cfi import *
from tthAnalysis.HiggsToTauTau.configs.EvtYieldHistManager_cfi import *
process = cms.PSet()
process.fwliteInput = cms.PSet(
fileNames = cms.vstring(),
maxEvents = cms.int32(-1),
outputEvery = cms.uint32(100000)
)
process.fwliteOutput = cms.PSet(
fileName = cms.string('')
)
process.analyze_2l_2tau = cms.PSet(
treeName = cms.string('Events'),
process = cms.string(''),
histogramDir = cms.string(''),
era = cms.string(''),
triggers_1e = cms.vstring(),
use_triggers_1e = cms.bool(True),
triggers_2e = cms.vstring(),
use_triggers_2e = cms.bool(True),
triggers_1mu = cms.vstring(),
use_triggers_1mu = cms.bool(True),
triggers_2mu = cms.vstring(),
use_triggers_2mu = cms.bool(True),
triggers_1e1mu = cms.vstring(),
use_triggers_1e1mu = cms.bool(True),
apply_offline_e_trigger_cuts_1e = cms.bool(True),
apply_offline_e_trigger_cuts_2e = cms.bool(True),
apply_offline_e_trigger_cuts_1mu = cms.bool(True),
apply_offline_e_trigger_cuts_2mu = cms.bool(True),
apply_offline_e_trigger_cuts_1e1mu = cms.bool(True),
electronSelection = cms.string(''),
muonSelection = cms.string(''),
lep_mva_cut = cms.double(1.),
apply_leptonGenMatching = cms.bool(True),
leptonChargeSelection = cms.string(''),
hadTauChargeSelection = cms.string(''),
hadTauGenMatch = cms.string('all'),
hadTauSelection = cms.string(''),
apply_hadTauGenMatching = cms.bool(False),
chargeSumSelection = cms.string(''),
applyFakeRateWeights = cms.string(""),
leptonFakeRateWeight = cms.PSet(
inputFileName = cms.string(""),
histogramName_e = cms.string(""),
histogramName_mu = cms.string("")
),
hadTauFakeRateWeight = cms.PSet(
inputFileName = cms.string(""),
lead = cms.PSet(
absEtaBins = cms.vdouble(-1., 1.479, 9.9),
graphName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/jetToTauFakeRate_mc_hadTaus_pt"),
applyGraph = cms.bool(True),
fitFunctionName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/fitFunction_data_div_mc_hadTaus_pt"),
applyFitFunction = cms.bool(True)
),
sublead = cms.PSet(
absEtaBins = cms.vdouble(-1., 1.479, 9.9),
graphName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/jetToTauFakeRate_mc_hadTaus_pt"),
applyGraph = cms.bool(True),
fitFunctionName = cms.string("jetToTauFakeRate/$hadTauSelection/$etaBin/fitFunction_data_div_mc_hadTaus_pt"),
applyFitFunction = cms.bool(True)
)
),
minNumJets = cms.int32(2),
isMC = cms.bool(True),
central_or_shift = cms.string(''),
lumiScale = cms.double(1.),
apply_genWeight = cms.bool(True),
apply_DYMCReweighting = cms.bool(False),
apply_hlt_filter = cms.bool(False),
apply_met_filters = cms.bool(True),
cfgMEtFilter = cms.PSet(),
apply_hadTauFakeRateSF = cms.bool(False),
fillGenEvtHistograms = cms.bool(False),
cfgEvtYieldHistManager = cms.PSet(),
branchName_electrons = cms.string('Electron'),
branchName_muons = cms.string('Muon'),
branchName_hadTaus = cms.string('Tau'),
branchName_jets = cms.string('Jet'),
branchName_met = cms.string('MET'),
branchName_memOutput = cms.string(''),
branchName_genLeptons = cms.string('GenLep'),
branchName_genHadTaus = cms.string('GenVisTau'),
branchName_genPhotons = cms.string('GenPhoton'),
branchName_genJets = cms.string('GenJet'),
redoGenMatching = cms.bool(True),
selEventsFileName_input = cms.string(''),
selEventsFileName_output = cms.string(''),
selectBDT = cms.bool(False),
syncNtuple = cms.PSet(
tree = cms.string(''),
output = cms.string(''),
requireGenMatching = cms.bool(False),
),
useNonNominal = cms.bool(False),
isDEBUG = cms.bool(False),
hasLHE = cms.bool(True),
evtWeight = cms.PSet(
apply = cms.bool(False),
histogramFile = cms.string(''),
histogramName = cms.string(''),
branchNameXaxis = cms.string(''),
branchNameYaxis = cms.string(''),
branchTypeXaxis = cms.string(''),
branchTypeYaxis = cms.string(''),
),
)
|
[
"[email protected]"
] | |
8768faa5431569743e0a31b1002db656d70a142c
|
6fdb4eaf5b0e6dbd7db4bf947547541e9aebf110
|
/shared-data/python/tests/errors/__init__.py
|
8b858a24b392381b87b32f4c5db9f32be4fbee49
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Opentrons/opentrons
|
874321e01149184960eeaeaa31b1d21719a1ceda
|
026b523c8c9e5d45910c490efb89194d72595be9
|
refs/heads/edge
| 2023-09-02T02:51:49.579906 | 2023-08-31T16:02:45 | 2023-08-31T16:02:45 | 38,644,841 | 326 | 174 |
Apache-2.0
| 2023-09-14T21:47:20 | 2015-07-06T20:41:01 |
Python
|
UTF-8
|
Python
| false | false | 43 |
py
|
"""Tests for shared-data global errors."""
|
[
"[email protected]"
] | |
93dc5c3a9db14864da78ac12366778f18d0c1263
|
b289a5076e06a24064526569086644f6383587c4
|
/projetofinanceiro/appfinanceiro/apps.py
|
1fec721d51e98309f6b4f627541b2729ccc1f5a5
|
[] |
no_license
|
Rubensrvsc/Programacao-WEB
|
d2eb36d7364736fdb93981b549e139d79e048310
|
e38f3a809a0aa244f32f053ed9aa45c7e8586b5e
|
refs/heads/master
| 2020-03-29T12:59:25.098325 | 2019-01-02T19:49:42 | 2019-01-02T19:49:42 | 149,933,053 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 101 |
py
|
from django.apps import AppConfig
class AppfinanceiroConfig(AppConfig):
name = 'appfinanceiro'
|
[
"[email protected]"
] | |
c47123eb1d1b70624bb34e5b9652c9cf7a8dd2ec
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/vse-naloge-brez-testov/DN10-M-123.py
|
0c1eae41abe8c8c3d571897a3c84d3a0b0442dcb
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,389 |
py
|
otroci = {
"Adam": ["Matjaž", "Cilka", "Daniel"],
"Aleksander": [],
"Alenka": [],
"Barbara": [],
"Cilka": [],
"Daniel": ["Elizabeta", "Hans"],
"Erik": [],
"Elizabeta": ["Ludvik", "Jurij", "Barbara"],
"Franc": [],
"Herman": ["Margareta"],
"Hans": ["Herman", "Erik"],
"Jožef": ["Alenka", "Aleksander", "Petra"],
"Jurij": ["Franc", "Jožef"],
"Ludvik": [],
"Margareta": [],
"Matjaž": ["Viljem"],
"Petra": [],
"Tadeja": [],
"Viljem": ["Tadeja"],
}
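# premozenje(oseba, denar): oseba's own money plus the total wealth of all descendants (recursive sum over otroci)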
def premozenje(oseba,denar):
xs = [denar[oseba]]
for otrok in otroci[oseba]:
xs.append(premozenje(otrok,denar))
return sum(xs)
def najbogatejsi(oseba,denar):
najvec_denarja = 0
#print("oseba: ",oseba)
#if denar[oseba] > najbolj_bogat:
obdelani = []
najbolj_bogat = (oseba,denar[oseba])
for otrok in otroci[oseba]:
        kandidat = najbogatejsi(otrok,denar)
        if kandidat[1] > najbolj_bogat[1]:
            najbolj_bogat = kandidat
#if int(denar[otrok]) > najvec_denarja:
# najvec_denarja = denar[otrok]
#print(najbolj_bogat,"-----1")
#print(najbolj_bogat,"-----2")
#print("------------------------------------------------------")
#print(najvec_denarja)
#print(otrok,'---',denar[otrok])
return najbolj_bogat
|
[
"[email protected]"
] | |
a430b405c518f5492c4bfcf40ae484ae3432d216
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02618/s417415114.py
|
ddebb487f588173570c9610c70cadb46a063199e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,303 |
py
|
from sys import exit
import copy
#import numpy as np
#from collections import deque
d, = map(int, input().split())
c= list(map(int, input().split()))
s=[list(map(int, input().split())) for _ in range(d)]
# t=[int(input()) for _ in range(d)]
sche=[0 for _ in range(d)]
s_tmp=float("inf")*(-1)
for off in range(0,13):
last=[0 for _ in range(26)]
sche=[0 for _ in range(d)]
for day in range(1,d+1):
idx=day-1
d_tmp=float("inf")*(-1)
i_tmp=0
for t in range(26):
delta=0
l_tmp=copy.copy(last)
delta+=s[idx][t]
l_tmp[t]=day
for l in range(26):
delta-=0.5*(off+1)*c[l]*((day-l_tmp[l])+(day+off-l_tmp[l]))
if delta>=d_tmp:
d_tmp=delta
i_tmp=t
sche[idx]=i_tmp+1
# score+=d_tmp
last[i_tmp]=day
# print(score)
# print(i_tmp+1)
score=0
last=[0 for _ in range(26)]
for i in range(1,d+1):
idx=i-1
score+=s[idx][sche[idx]-1]
for l in range(26):
score-=c[l]*(i-last[l])
last[sche[idx]-1]=i
# print(score)
if score>=s_tmp:
s_tmp=score
sche_tmp=copy.copy(sche)
for i in sche_tmp:
print(i)
# print(s_tmp)
|
[
"[email protected]"
] | |
e712ac004c472f06084a23769197fbe9c9c1722a
|
f09dc121f213f2881df3572288b7ee5b39246d73
|
/aliyun-python-sdk-dataworks-public/aliyunsdkdataworks_public/request/v20200518/DeleteConnectionRequest.py
|
9a4d962fc099ab8a8094f136d5551e069631099c
|
[
"Apache-2.0"
] |
permissive
|
hetw/aliyun-openapi-python-sdk
|
2f31378ad6be0896fb8090423f607e9c7d3ae774
|
7443eacee9fbbaa93c7975c6dbec92d3c364c577
|
refs/heads/master
| 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 |
NOASSERTION
| 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null |
UTF-8
|
Python
| false | false | 1,474 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdataworks_public.endpoint import endpoint_data
class DeleteConnectionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'dataworks-public', '2020-05-18', 'DeleteConnection')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ConnectionId(self):
return self.get_query_params().get('ConnectionId')
def set_ConnectionId(self,ConnectionId):
self.add_query_param('ConnectionId',ConnectionId)
|
[
"[email protected]"
] | |
91cd296fa5741cfcebc94e7927b78d1ff38eebc5
|
030aadc06eba914dbc9f7e774d54cafd5acc0ae6
|
/docker/wsgi_docker.py
|
4f4d5ae871cf9698ce31dd779b010d807bd24fde
|
[] |
no_license
|
torchbox/wagtail-template
|
985047e917031cf033f61c0c2480870da430aa15
|
4c0cb34d28ccbc03a96ca9f1ff0499a3554ba5e6
|
refs/heads/develop
| 2016-09-06T14:55:28.078233 | 2015-08-11T12:03:08 | 2015-08-11T12:03:08 | 21,358,329 | 9 | 5 | null | 2015-05-06T09:29:53 | 2014-06-30T16:42:33 |
Python
|
UTF-8
|
Python
| false | false | 124 |
py
|
from whitenoise.django import DjangoWhiteNoise
from .wsgi import application
application = DjangoWhiteNoise(application)
|
[
"[email protected]"
] | |
f1a84740d0a5c3bf1ba1441ba380dc64176cbe97
|
d7ad696cd1b550bb41d20f87b83c984ec7f19aa7
|
/practice/design_pattern/03_abstract_factory/abstract_factory.py
|
5fa712b16a1b0fb0cd9de79237fa18d370861894
|
[] |
no_license
|
mida-hub/hobby
|
2947d10da7964d945e63d57b549c1dcb90ef7305
|
6e6f381e59fc2b0429fab36474d867aa3855af77
|
refs/heads/master
| 2022-12-21T23:33:14.857931 | 2022-12-19T16:30:34 | 2022-12-19T16:30:34 | 147,890,434 | 0 | 0 | null | 2021-03-20T04:31:58 | 2018-09-08T01:31:59 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,129 |
py
|
# Abstract Factory
# abstract_factory.py
from abc import ABC, abstractmethod
class AbcItem(ABC):
def __init__(self, caption):
self.caption = caption
@abstractmethod
def make_html(self):
pass
class PageItem(AbcItem):
def __init__(self, title, author):
self.title = title
self.author = author
self.content = []
def add(self, item):
self.content.append(item)
def write_html(self, file_name):
with open(file_name, 'w', encoding='utf-8') as fh:
fh.write(self.make_html())
class LinkItem(AbcItem):
def __init__(self, caption, url):
super().__init__(caption)
self.url = url
class ListItem(AbcItem):
def __init__(self, caption):
super().__init__(caption)
self.items = []
def add(self, item):
self.items.append(item)
class Factory(ABC):
@abstractmethod
def create_page_item(self, title, author):
pass
@abstractmethod
def create_link_item(self, caption ,url):
pass
@abstractmethod
def create_list_item(self, caption):
pass
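# Illustrative sketch (hypothetical names, not defined in this module): a concrete
# factory would subclass Factory and return concrete items whose classes implement
# make_html, for example:
#
#   class HtmlFactory(Factory):
#       def create_page_item(self, title, author):
#           return HtmlPageItem(title, author)
#       def create_link_item(self, caption, url):
#           return HtmlLinkItem(caption, url)
#       def create_list_item(self, caption):
#           return HtmlListItem(caption)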
|
[
"[email protected]"
] | |
bb48285834ee29beb7a898493b7d407dafdf7dd6
|
8c7a187ebfe858ff3f840602585d166b29fce576
|
/appstore/regulate_underscores.py
|
db0232fa39df3b96f78c3dc29fa2e15e90914bc1
|
[] |
no_license
|
ohannes/pythonScripts
|
b756faa2e6d5314cb04c7afc0ca07f69027f59b2
|
5249b2735d8b2a9a2c6ad8a1ae625cb47f50d0b5
|
refs/heads/master
| 2020-04-06T04:20:29.565042 | 2015-07-19T17:40:39 | 2015-07-19T17:40:39 | 34,119,366 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 821 |
py
|
import os
import sys
sys.path.append(os.environ["ohannes"])
from ohannes import *
input_file = getStrArg(1, 1)
output_file = input_file + ".regulated"
lines = getFileLines(input_file)
ftw = open(output_file, write_mode)
for line in lines:
sharp_found = False
equal_found = False
line_regulated = False
if not "=>" in line or not "#" in line or not "_" in line:
ftw.write(line)
continue
index = 0
while True:
if index == len(line) - 1:
ftw.write(line[index])
break
if line[index] == "#":
sharp_found = True
if line[index] == "=" and line[index+1] == ">":
equal_found = True
if line[index] == "_" and (not sharp_found) and equal_found and (not line_regulated):
ftw.write(line[index+1].upper())
index += 1
line_regulated = True
else:
ftw.write(line[index])
index += 1
ftw.close()
|
[
"[email protected]"
] | |
2e77842e863422f2ffdaefdc8d6d8126892ba1d3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03347/s144374882.py
|
8ce3352dfe431d952e676130950485ebdc55dc2e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 751 |
py
|
import sys,queue,math,copy,itertools,bisect,collections,heapq
def main():
sys.setrecursionlimit(10**7)
INF = 10**18
MOD = 10**9 + 7
LI = lambda : [int(x) for x in sys.stdin.readline().split()]
NI = lambda : int(sys.stdin.readline())
SI = lambda : sys.stdin.readline().rstrip()
N = NI()
A = [NI() for _ in range(N)]
ans = 0
cnt = 0
for i in range(N-1,-1,-1):
if cnt == 0:
ans += A[i]
cnt = A[i]
elif A[i] < cnt -1:
print(-1)
return
elif A[i] >= cnt:
ans += A[i]
cnt = A[i]
else:
cnt -= 1
if cnt > 0:
print(-1)
else:
print(ans)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
7098f4dd04eee66744539b132a94f353fab0fbdd
|
3373b2bbe6303dcee3ae7f7f3e715ce674878a7b
|
/packages/hyperk/wcsim_dev.py
|
7187f72a10ce70577639daba6aa414e563e94670
|
[
"MIT"
] |
permissive
|
pgjones/nusoft
|
f3515a6e2fc90622638cde0b8712ba6fcea2aa8e
|
442c7bca2f921892ecf9eb3ff6821e2a9da7b156
|
refs/heads/master
| 2020-09-12T21:44:54.453633 | 2014-10-03T20:22:09 | 2014-10-03T20:22:09 | 17,223,474 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,401 |
py
|
#!/usr/bin/env python
#
# WCSimDev
#
# The HyperK WCSim development version
#
# Author P G Jones - 2014-06-20 <[email protected]> : New file.
####################################################################################################
import nusoft.package.local as local_package
import os
import nusoft.envfile
class WCSimDev(local_package.LocalPackage):
""" The WCSimDev installation package.
:param _root: version of ROOT this is dependent on
:param _geant4: version of Geant4 this is dependent on
"""
def __init__(self, system, repository):
""" Initialise this wcsim installation package.
:param system: class that manages system commands
:type system: :class:`nusoft.system.System` instance
:param repository: local name of the repository the package is from
"""
super(WCSimDev, self).__init__("wcsim-dev", system, repository)
self._root = "root_v5.34.10"
self._geant4 = "geant4.9.4.p04"
self._clhep = "clhep-2.1.0.1"
def get_dependencies(self):
""" Return a list of dependency names
:returns: list of dependency package names
:rtype: list
"""
return ["make", "g++", "gcc", "ld", "python", "python-dev", self._root, self._geant4,
self._clhep]
def _download(self):
""" Git clone the wcsim repository file."""
self._system.git_clone("ssh://[email protected]/hk-WCSim", self.get_install_path())
def _install(self):
""" Write an environment file and install wcsim."""
# Now write the environment file
self.write_env_file()
commands = ["source " + os.path.join(self._system.get_install_path(), "env_wcsim-dev.sh"),
"cd " + self.get_install_path(),
"make rootcint",
"make "]
self._system.execute_commands(commands)
def write_env_file(self):
""" Write an environment file for this package."""
env_file = nusoft.envfile.EnvFile("#wcsim environment\n")
env_file.add_source(os.path.join(self._dependencies[self._root].get_install_path(), "bin"), "thisroot")
env_file.add_source(os.path.join(self._dependencies[self._geant4].get_install_path(),
"share/geant4-9.4.4/config"),
"geant4-9.4.4")
env_file.add_environment("CLHEP_BASE_DIR", self._dependencies[self._clhep].get_install_path())
env_file.add_environment("G4WORKDIR", os.path.join(self.get_install_path(), "exe"))
env_file.write(self._system.get_install_path(), "env_wcsim-dev")
def _update(self):
""" Update the git repository."""
if not self._system.git_update(self.get_install_path()):
raise Exception("Cannot update, repository has changes")
self._install() # Now reinstall (compile)
def _remove(self):
""" Remove the install directory."""
self._system.remove(self.get_install_path())
def _is_installed(self):
""" Check if root is installed by looking for the root executable in the bin directory.
:return: True if installed
"""
sys = os.uname()[0]
return False
# The versions of WCSimDev that can be installed (only one, WCSimDev)
# [Although potentially more if the user wants].
versions = [WCSimDev]
|
[
"[email protected]"
] | |
3ae2079875387f561dad5fbc4ea251ed85ed9d12
|
fcef3602a044a82b75eb1bdee87a5eb347a56769
|
/recolo/tests/test_coordinate_solver.py
|
d18af8c84528da0a59395aaf2880b71ea511ddb3
|
[
"MIT"
] |
permissive
|
PolymerGuy/recolo
|
5cb9c6b01d7eeb4108710606341518aa13efc1d1
|
05b14f0834fa675579eabdf43fac046259df19bb
|
refs/heads/master
| 2023-04-12T00:17:50.150126 | 2022-03-11T12:42:44 | 2022-03-11T12:42:44 | 343,329,602 | 4 | 1 |
MIT
| 2022-03-05T08:04:49 | 2021-03-01T07:39:40 |
Python
|
UTF-8
|
Python
| false | false | 3,877 |
py
|
from unittest import TestCase
from recolo.artificial_grid_deformation import find_coords_in_undef_conf, interpolated_disp_field
import numpy as np
def rms_diff(array1, array2):
return np.sqrt(np.nanmean((array1 - array2) ** 2.))
def biharmonic_disp_field(x, y, amp_scale=0.5):
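    # Smooth synthetic displacement field used as the known ground truth in the tests below.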
return (amp_scale * 0.4 * np.cos(np.pi * x / 30) + amp_scale * 0.5 * np.sin(np.pi * y / 40)), (
amp_scale * 0.6 * np.cos(np.pi * x / 50) + amp_scale * 0.7 * np.sin(np.pi * y / 60))
class TestFindCoordinatesInUndefConf(TestCase):
# As X is needed for other calculations, check that we can determine X from x = X + u(X)
def test_analytical_disp_field(self):
tol = 1e-5
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
Xs, Ys = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
u_X, u_Y = biharmonic_disp_field(Xs, Ys)
errors_x = xs - Xs - u_X
errors_y = ys - Ys - u_Y
peak_error_x = np.max(np.abs(errors_x))
peak_error_y = np.max(np.abs(errors_y))
if peak_error_x > tol or peak_error_y > tol:
self.fail("Maximum error is %f and %f" % (peak_error_x, peak_error_y))
def test_interpolated_disp_field(self):
tol = 1e-5
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
# Make an approximated displacement field
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=2, dy=4, order=3)
X, Y = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
u_X, u_Y = disp_func_interp(X, Y)
errors_x = xs - X - u_X
errors_y = ys - Y - u_Y
peak_error_x = np.max(np.abs(errors_x))
peak_error_y = np.max(np.abs(errors_y))
if peak_error_x > tol or peak_error_y > tol:
self.fail("Maximum error is %f and %f" % (peak_error_x, peak_error_y))
def test_compare_interpolated_and_analytical(self):
# As there will always be minor error at the edges, we look at the mean error for the whole field
tol = 1.e-3
dx = 3.5
dy = 2.7
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
        # Make an approximated displacement field
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=dx, dy=dy, order=3, mode="nearest")
X_interp, Y_interp = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
X, Y = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
rms_diff_X = rms_diff(X_interp, X)
rms_diff_Y = rms_diff(Y_interp, Y)
if rms_diff_X > tol or rms_diff_Y > tol:
self.fail("RMS error is %f and %f" % (rms_diff_X, rms_diff_Y))
def test_check_grid_sampling_independency(self):
# Ensure that the sampling of u_x and u_y does not have a large impact on the final results
tol = 1.e-3
dxs = [0.1,0.5,1.0,3.2]
for i,dx in enumerate(dxs):
dy = dx + 0.12
xs, ys = np.meshgrid(np.arange(0, 80, dx), np.arange(0, 100, dy))
            # Make an approximated displacement field
u_x, u_y = biharmonic_disp_field(xs, ys)
disp_func_interp = interpolated_disp_field(u_x, u_y, dx=dx, dy=dy, order=3, mode="nearest")
X_interp, Y_interp = find_coords_in_undef_conf(xs, ys, disp_func_interp, tol=1e-9)
X, Y = find_coords_in_undef_conf(xs, ys, biharmonic_disp_field, tol=1e-9)
rms_diff_X = rms_diff(X_interp, X)
rms_diff_Y = rms_diff(Y_interp, Y)
if rms_diff_X > tol or rms_diff_Y > tol:
self.fail("RMS error is %f and %f for dx=%f and dy=%f" % (rms_diff_X, rms_diff_Y,dx,dy))
|
[
"[email protected]"
] | |
68d8c35e5fbad07bc4f7755a167a5ce85247e30e
|
3ce592352627591346ea33ea0c2665ad879414e2
|
/References/search/3-3.soduku.py
|
8c6629c01cce7fc88632fdba5af2f6689c63a47c
|
[
"MIT"
] |
permissive
|
royqh1979/python_libs_usage
|
113df732ef106f4a5faae1343493756fd703c8c0
|
57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511
|
refs/heads/master
| 2021-04-16T18:14:43.835482 | 2021-01-11T03:55:25 | 2021-01-11T03:55:25 | 249,374,754 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,139 |
py
|
"""
Sudoku solver.
Uses a simplified heuristic backtracking search, implemented recursively.
At each step, the cell with the fewest candidate digits is tried first.
"""
import numpy as np
from easygraphics import *
from dataclasses import dataclass
import copy
from typing import Set
FONT_WIDTH = 30
BOARD_TOP = 10
BOARD_LEFT = 10
SQUARE_WIDTH = 50
SPEED = 100
# The board; declared as [10][10] for convenience, only [1][1]-[9][9] is actually used
board = np.zeros((10, 10),dtype="int32")
# Sets of digits already used in each row, column and 3x3 block
cols = [set() for i in range(10)] # digits used in each column
rows = [set() for i in range(10)] # digits used in each row
blks = [set() for i in range(10)] # digits used in each 3x3 block
# Drawing helpers
def draw_number_at(i, j, number, color):
"""
Draw a number at cell(i,j) with the specified color
:param i: the row
:param j: the column
:param number: the number
:param color: the color
"""
left = BOARD_LEFT + (j - 1) * SQUARE_WIDTH
top = BOARD_TOP + (i - 1) * SQUARE_WIDTH
set_color(color)
if number != 0:
draw_rect_text(left + 5, top + 5, FONT_WIDTH, FONT_WIDTH, number)
else:
set_color(Color.WHITE)
fill_rect(left+1, top+1, left + SQUARE_WIDTH-2, top + SQUARE_WIDTH-2)
def draw_board():
clear_device()
for i in range(1, 10):
for j in range(1, 10):
left = BOARD_LEFT + (j - 1) * SQUARE_WIDTH
top = BOARD_TOP + (i - 1) * SQUARE_WIDTH
set_color(Color.LIGHT_GRAY)
rect(left, top, left + SQUARE_WIDTH, top + SQUARE_WIDTH)
draw_number_at(i, j, board[i][j], Color.RED)
    # Draw the borders of the 3x3 blocks
set_color(Color.BLACK)
for i in range(1, 4):
for j in range(1, 4):
left = BOARD_LEFT + (j - 1) * 3 * SQUARE_WIDTH
top = BOARD_TOP + (i - 1) * 3 * SQUARE_WIDTH
rect(left, top, left + 3 * SQUARE_WIDTH, top + 3 * SQUARE_WIDTH)
def init():
init_graph(800, 600)
set_color(Color.BLACK)
set_background_color(Color.WHITE)
set_line_width(2)
set_fill_color(Color.WHITE)
set_render_mode(RenderMode.RENDER_MANUAL)
set_font_size(FONT_WIDTH)
DATA_FILE = "10soduku.board"
# Candidate cell: `possibles` holds the digits that can still be placed in it
@dataclass()
class CandiateSquare:
x: int = 0
y: int = 0
possibles = set()
def which_block(i, j):
"""
    Compute which 3x3 block the given cell belongs to.
    :param i: row of the cell
    :param j: column of the cell
    :return: block number (1-9) of the cell
"""
return ((i - 1) // 3) * 3 + ((j - 1) // 3)+1
def tag(i, j, number):
"""
    Mark digit `number` as used in this cell's row, column and 3x3 block.
    :param i: row of the cell
    :param j: column of the cell
    :param number: digit written in the cell
"""
rows[i].add(number)
cols[j].add(number)
block = which_block(i,j)
blks[block].add(number)
def untag(i, j, number):
"""
    Remove the used-mark of digit `number` from this cell's row, column and 3x3 block.
    :param i: row of the cell
    :param j: column of the cell
    :param number: digit written in the cell
"""
rows[i].remove(number)
cols[j].remove(number)
block = which_block(i,j)
blks[block].remove(number)
def fill(i, j, number):
"""
    Write digit `number` into cell (i, j).
    :param i: row of the cell
    :param j: column of the cell
    :param number: digit to write into the cell
"""
board[i][j] = number
tag(i, j, number)
def unfill(i, j):
"""
    Clear the digit in cell (i, j).
    :param i: row of the cell
    :param j: column of the cell
"""
number = board[i][j]
untag(i, j, number)
board[i][j] = 0
def load_board(boardFile):
"""
    Load the initial sudoku state from a data file.
    :param boardFile: name of the data file
"""
global board
try:
with open(boardFile, mode="r") as file:
board = [ [0]*10 for i in range(10)]
for line in file:
line = line.strip()
numbers = line.split(',')
if len(numbers) != 3:
continue
i, j, k = int(numbers[0]), int(numbers[1]), int(numbers[2])
board[i][j] = k
except IOError :
clear_device()
draw_rect_text(10, 500, 700, 50, f"无法打开文件{boardFile}")
def count_unsolved():
"""
    Count how many cells still need to be filled.
:return:
"""
count = 0
for i in range(1, 10):
for j in range(1, 10):
if board[i][j] == 0:
count += 1
return count
def can_fill(i, j, number):
"""
    Check whether digit `number` can be placed in cell (i, j).
    :param i: row of the cell
    :param j: column of the cell
    :param number: digit to place
"""
if number in rows[i]:
return False
if number in cols[j]:
return False
if number in blks[which_block(i, j)]:
return False
return True
def calculatePossible(i, j):
"""
    Find all digits that can still be placed in cell (i, j).
    :param i: row of the cell
    :param j: column of the cell
"""
possibles = set()
for number in range(1, 10):
if can_fill(i, j, number):
possibles.add(number)
return possibles
def findSureSquareByBlock():
"""
    Elimination rule 1: for each digit, check whether some 3x3 block has exactly one cell where it can go.
"""
for number in range(1,10):
in_rows = copy.deepcopy(rows)
in_cols = copy.deepcopy(cols)
in_blks = copy.deepcopy(blks)
while True:
# print(in_rows)
# print(in_cols)
# print(in_blks)
            found_one_row = False # digit `number` was found to fit only in one row of some block
            found_one_col = False # digit `number` was found to fit only in one column of some block
for block in range(1,10):
if number not in in_blks[block]:
start_row = ((block-1) // 3 ) * 3 + 1
start_col = (block-1) % 3 * 3 +1
if block != which_block(start_row,start_col):
print(number,block,start_row,start_col,which_block(start_row,start_col))
                    can_rows = [] # rows of this block where digit `number` can still go
                    can_cols = [] # columns of this block where digit `number` can still go
for i in range(3):
for j in range(3):
row=start_row+i
col=start_col+j
if (board[row][col]==0) and (number not in in_rows[row]) and (number not in in_cols[col]):
if row not in can_rows:
can_rows.append(row)
if col not in can_cols:
can_cols.append(col)
# print(number,block,can_rows,can_cols)
                    if len(can_rows)==1 and len(can_cols)==1: # it can only go in one specific cell
row=can_rows[0]
col=can_cols[0]
return number,row,col
if len(can_rows)==1:
found_one_row = True
row = can_rows[0]
in_blks[block].add(number)
in_rows[row].add(number)
if len(can_cols)==1:
found_one_col = True
col = can_cols[0]
in_blks[block].add(number)
in_cols[col].add(number)
if not found_one_row and not found_one_col:
break
return None,None,None
def findSureSquareByRow():
"""
    Elimination rule 2: for each digit, check whether some row has exactly one cell where it can go.
"""
for number in range(1, 10):
for row in range(1,10):
if number not in rows[row]:
can_cols = []
for j in range(1,10):
block = which_block(row,j)
if number not in cols[j] and number not in blks[block] and board[row][j]==0:
can_cols.append(j)
                if len(can_cols)==1: # it can only go in one cell of this row
col=can_cols[0]
return number,row,col
return None, None, None
def findSureSquareByCol():
"""
    Elimination rule 3: for each digit, check whether some column has exactly one cell where it can go.
"""
for number in range(1, 10):
for col in range(1, 10):
if number not in cols[col]:
can_rows = []
for i in range(1, 10):
block = which_block(i, col)
if number not in rows[i] and number not in blks[block] and board[i][col]==0:
can_rows.append(i)
                if len(can_rows) == 1: # it can only go in one cell of this column
row=can_rows[0]
return number,row,col
return None,None,None
def solve(unsolved):
if unsolved == 0:
return True
    # for display pacing only
delay_fps(SPEED)
number,row,col=findSureSquareByBlock()
if number is not None:
# set_fill_color("white")
# fill_rect(500,10,800,80)
# draw_text(500, 40, f"规则1 {row},{col}只能填{number} {board[row][col]}")
# pause()
fill(row, col, number)
draw_number_at(row, col, number, Color.BLACK)
if solve(unsolved - 1):
return True
unfill(row, col)
draw_number_at(row, col, 0, Color.BLACK)
return False
number,row,col=findSureSquareByRow()
if number is not None:
# set_fill_color("white")
# fill_rect(500,10,800,80)
# draw_text(500, 40, f"规则2: {row},{col}只能填{number} {board[row][col]}")
# pause()
fill(row, col, number)
draw_number_at(row, col, number, Color.BLACK)
if solve(unsolved - 1):
return True
unfill(row, col)
draw_number_at(row, col, 0, Color.BLACK)
return False
number,row,col=findSureSquareByCol()
if number is not None:
# set_fill_color("white")
# fill_rect(500,10,800,80)
# draw_text(500, 40, f"规则3: {row},{col}只能填{number} {board[row][col]}")
# pause()
fill(row, col, number)
draw_number_at(row, col, number, Color.BLACK)
if solve(unsolved - 1):
return True
unfill(row, col)
draw_number_at(row, col, 0, Color.BLACK)
return False
    # Find the cell with the fewest candidate digits
possibles,c = findMinPossibles1()
    # Try to fill that cell
if len(c.possibles)!=1:
# fill_rect(500,10,800,80)
# draw_text(500, 40, f"规则4 {c.x},{c.y}只能填{c.possibles}")
# pause()
# else:
possibles,c = findMinPossibles2(possibles,c)
# # 尝试填写该格子
# if len(c.possibles)==1:
# fill_rect(500,10,800,80)
# draw_text(500, 40, f"规则5 {c.x},{c.y}只能填{c.possibles}")
# pause()
# else:
# fill_rect(500, 10, 800, 80)
# draw_text(500, 40, f"{c.x},{c.y}只能填{c.possibles}")
# pause()
if len(c.possibles) > 1:
fill_rect(500, 10, 800, 80)
draw_text(500, 40, f"{c.x},{c.y}只能填{c.possibles}")
pause()
for v in c.possibles:
fill(c.x, c.y, v)
draw_number_at(c.x, c.y, v, Color.BLACK)
if solve(unsolved - 1):
return True
unfill(c.x, c.y)
draw_number_at(c.x, c.y, 0, Color.BLACK)
return False
def findMinPossibles1():
"""
    Find the cell that has the fewest candidate digits.
:return:
"""
c = CandiateSquare()
min_possible_count = 10
possibles = [[None for i in range(10)] for j in range(10)]
for i in range(1, 10):
for j in range(1, 10):
if board[i][j] == 0:
possibles[i][j] = calculatePossible(i, j)
if len(possibles[i][j]) < min_possible_count:
min_possible_count = len(possibles[i][j])
c.x = i
c.y = j
c.possibles = possibles[i][j]
if len(c.possibles)<2:
return None,c
return possibles,c
def findMinPossibles2(possibles,c):
"""
    When two cells in the same row or column can only take the same two digits, no other cell in that row/column can take either of those digits.
:param possibles:
:param c:
:return:
"""
if len(c.possibles)==2:
while True:
found = False
row = c.x
col = c.y
for i in range(10):
if i!=col and possibles[row][col] == possibles[row][i]:
for j in range(10):
if j !=i and j!=col and possibles[row][j] is not None:
possibles[row][j].difference_update(possibles[row][i])
found = True
if len(possibles[row][j])<2:
c.x=row
c.y=j
c.possibles = possibles[row][j]
return possibles,c
if not found:
break
return possibles,c
def main():
init()
load_board(DATA_FILE)
draw_board()
draw_rect_text(10, 550, 700, 50, "按任意键开始...")
pause()
fill_rect(10, 550, 710, 600)
draw_rect_text(10, 550, 700, 50, "正在穷举...")
    # Mark the digits already present in the puzzle
for i in range(1, 10):
for j in range(1, 10):
if board[i][j] != 0:
tag(i, j, board[i][j])
    # Initialize the candidates of all unfilled cells
for i in range(1,10):
for j in range(1,10):
if board[i][j] == 0:
tag(i, j, board[i][j])
solve(count_unsolved())
fill_rect(10, 550, 710, 600)
draw_rect_text(10, 550, 700, 50, "找到答案了!按任意键退出...")
pause()
close_graph()
easy_run(main)
|
[
"[email protected]"
] | |
138d7251e99fd5b8de87425401cfefea55cd6357
|
84065ee4fb4ebeb8cb2cf1d3f6f385d2c56d787e
|
/page/__init__.py
|
359e38e1661042b3715145fd8b364217bb2881c4
|
[] |
no_license
|
bian-py/app_kefu_code
|
59ed0bcf247e5dd7b06e0f91cdd9563faa49ce60
|
2f84a152bdc2c226f2bcb6aabc34f0a5313c094e
|
refs/heads/master
| 2023-01-28T11:17:40.984458 | 2020-12-08T11:07:50 | 2020-12-08T11:07:50 | 319,289,680 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,863 |
py
|
from selenium.webdriver.common.by import By
# Server page locator configuration
fwq_new = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]'
fwq_hand_input = By.XPATH, '//*[contains(@content-desc,"手工输入")]'
fwq_scan_code = By.XPATH, '//*[contains(@content-desc,"扫码二维码")]'
fwq_input_name = By.XPATH, """//android.view.View[@content-desc="{{ 'server.name' | trans }}"]/../../android.widget.EditText"""
fwq_input_URL = By.XPATH, """//android.view.View[@content-desc="{{ 'm.api.url' | trans }}"]/../../android.widget.EditText"""
fwq_save_btn = By.XPATH, '//*[contains(@content-desc,"保存")]'
fwq_confirm = By.XPATH, '//*[contains(@content-desc,"{}")]'
fwq_url_error = By.XPATH, "//*[@content-desc = '无法连接到API']"
fwq_swipe_area = By.XPATH, "//android.view.View[@scrollable = 'true']"
fwq_back_btn = By.XPATH, "//*[@content-desc = '编辑服务器']/../android.widget.Button"
fwq_modify_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \
'/../android.view.View[2]/android.view.View[1]/android.widget.Button'
fwq_delete_btn = By.XPATH, '//*[contains(@content-desc,"我的服务器 http://192.168.1.10/kefu/php/app.php?mobile-api")]' \
'/../android.view.View[2]/android.view.View[2]/android.widget.Button'
fwq_delete_confirm_btn = By.XPATH, '//*[@content-desc="删除 "]'
# Login page locator configuration
login_username = By.XPATH, '//android.view.View[@content-desc="登陆"]/../../android.widget.EditText'
login_password = By.XPATH, '//android.view.View[@content-desc="密码"]/../../android.widget.EditText'
login_confirm_btn = By.XPATH, '//android.widget.Button[@content-desc="登陆 "]'
login_cancel_btn = By.XPATH, '//android.widget.Button[@content-desc="取消 "]'
login_if_success = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button'
login_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]'
login_error_confirm = By.XPATH, '//android.widget.Button[@content-desc="OK "]'
login_error_info = By.XPATH, '//android.widget.Button[@content-desc="OK "]/../android.view.View[2]'
# User list page locator configuration
def get_user_self_element(username):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username)
return loc
user_details_page = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]'
user_details_page_back_btn = By.XPATH, '//android.view.View[@content-desc="用户详细信息"]/../android.widget.Button'
user_details_send_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"发送消息 ")]'
user_conversation_page = By.XPATH, '//android.view.View[@content-desc="会话"]'
user_conversation_page_back_btn = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button'
user_bottom_btn_talk_list = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View'
user_bottom_btn_user_list = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 在线用户")]/android.view.View/android.view.View'
user_talk_input = By.CLASS_NAME, 'android.widget.EditText'
user_talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]'
# Navigation bar locator configuration
dhl_menu = By.XPATH, '//android.view.View[@content-desc="我的服务器"]/../android.widget.Button'
dhl_logout = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]'
dhl_user = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[1]'
dhl_talk = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[2]'
dhl_history = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]'
dhl_view = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[4]'
dhl_if_user = By.XPATH, '//android.view.View[@content-desc=" 匿名用户"]'
dhl_if_history = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]'
dhl_if_view = 'org.chromium.webview_shell'
dhl_if_view_for_android_6 = 'com.android.browser'
dhl_if_logout = By.XPATH, '//*[contains(@content-desc,"添加新的服务器")]'
dhl_back_from_talk = By.XPATH, '//android.view.View[contains(@content-desc,"在线用户 在线用户")]/android.view.View/android.view.View'
# Conversation page locator configuration
def get_talk_list_element(username):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(username)
return loc
def search_history_msg(msg):
loc = By.XPATH, '//android.view.View[@content-desc="{}"]'.format(msg)
return loc
talk_bottom_btn = By.XPATH, '//android.view.View[contains(@content-desc,"会话 会话")]/android.view.View/android.view.View'
talk_back_to_list = By.XPATH, '//android.view.View[@content-desc="会话"]/../android.widget.Button'
talk_input = By.CLASS_NAME, 'android.widget.EditText'
talk_input_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[3]'
talk_emoji_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[2]'
talk_menu_btn = By.XPATH, '//android.widget.EditText/../../android.widget.Button[1]'
talk_attachment_btn = By.XPATH, '//android.widget.EditText/../../android.view.View[2]/android.view.View[1]'
talk_attachment_for_6_arth = By.ID,'com.android.packageinstaller:id/permission_allow_button'
talk_attachment_enter = By.XPATH, '//android.widget.TextView[contains(@text,"文")]'
talk_attachment_file_menu = By.XPATH, '//android.widget.ImageButton[@content-desc="显示根目录"]'
talk_attachment_download = By.XPATH, "//android.widget.TextView[@text = '下载']"
talk_attachment = By.XPATH, "//android.widget.TextView[@text = 'timg.png']"
talk_attachment_if = By.XPATH, '//android.view.View[@content-desc="timg.png"]'
talk_emoji_select = By.XPATH, '//android.view.View[@content-desc="emot-3"]'
talk_emoji_if = By.XPATH, '//android.widget.Image[@content-desc="emot-3"]'
talk_menu_invite_user = By.XPATH, '//android.view.View[contains(@content-desc,"邀请会话")]'
talk_invite_user = By.XPATH, '//android.view.View[@content-desc="test05"]'
talk_invite_user2 = By.XPATH, '//android.view.View[@content-desc="test04"]'
talk_invite_if = By.XPATH, '//android.view.View[@content-desc=") 已被邀请参加会谈"]'
talk_menu_exit = By.XPATH, '//android.view.View[contains(@content-desc,"离开会话")]'
talk_menu_cancel = By.XPATH, '//android.widget.Button[@content-desc="取消 "]'
# History page locator configuration
history_enter = By.XPATH, '//android.view.View[contains(@content-desc,"退出")]/../android.view.View[3]'
history_username_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.name' | trans }}"]/../../android.widget.EditText'''
history_email_input = By.XPATH, '''//android.view.View[@content-desc="{{ 'user.email' | trans }}"]/../../android.widget.EditText'''
history_search_btn = By.XPATH, '//android.widget.Button[contains(@content-desc,"搜索 ")]'
history_username_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test05"]'
history_email_if_success = By.XPATH, '//android.view.View[@content-desc="test04, test03"]'
history_date_start_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'from.date' | trans }} "]'''
history_date_end_btn = By.XPATH, '''//android.widget.Spinner[@content-desc="{{ 'to.date' | trans }} "]'''
history_data_start = By.XPATH, '//android.view.View[@content-desc="06 十二月 2020"]'
history_data_end = By.XPATH, '//android.view.View[@content-desc="07 十二月 2020"]'
history_date_set_btn = By.ID, 'android:id/button1'
history_check_if1 = By.XPATH, '//android.view.View[@content-desc="历史会话"]'
history_check_if2 = By.XPATH, '//android.view.View[@content-desc="这是test03发给test04的历史信息"]'
|
[
"[email protected]"
] | |
ee70005f6474b587eee09a190290dc11f5c5439e
|
4d7b2858eb43506f822e1c3c906bee287186b2a9
|
/pizza_project/lib/__init__.py
|
f0acbff325741a8764e1c8595c7766f74b4ceaf7
|
[] |
no_license
|
byt3-m3/da_pizza_house
|
c4d98b1c3246aa48256b368a69fad4046bf19691
|
01d163b511428b442e8d8f97bc4408e6060851db
|
refs/heads/master
| 2022-12-08T03:52:02.487557 | 2020-09-01T21:06:32 | 2020-09-01T21:06:32 | 292,047,731 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 79 |
py
|
from pizza_project.lib.inventory import *
from pizza_project.lib.store import *
|
[
"[email protected]"
] | |
e3ba9166ff9f094c8ede9e3c3756bb8117241c50
|
3cae667175b2d6aac6d7f3d8189e9a02c38ea1cf
|
/AOJ/ITP1/python/ITP1_1_D_Watch.py
|
c0a07556a3ffec6f81a94127a026e1a802c5a520
|
[] |
no_license
|
kokorinosoba/contests
|
3ee14acf729eda872ebec9ec7fe3431f50ae23c2
|
6e0dcd7c8ee086650d89fc65616981361b9b20b9
|
refs/heads/master
| 2022-08-04T13:45:29.722075 | 2022-07-24T08:50:11 | 2022-07-24T08:50:11 | 149,092,111 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
s=int(input())
print(s//3600,s//60%60,s%60,sep=':')
"""
S=int(input())
m,s=divmod(S,60)
h,m=divmod(m,60)
print(h,m,s,sep=":")
"""
|
[
"[email protected]"
] | |
7765cc67a607b9556d7c75470b892c02b3fe5707
|
f208676788a901f4b66fa0a5809ef5563c1d5471
|
/classy_vision/hooks/classy_hook.py
|
ad5c0a900f8643ca8ed1f247fd4a4e113ac37853
|
[
"MIT"
] |
permissive
|
cwb96/ClassyVision
|
10e47703ec3989260840efe22db94720122f9e66
|
597a929b820efdd914cd21672d3947fa9c26d55e
|
refs/heads/master
| 2021-02-18T03:35:51.520837 | 2020-03-05T05:41:24 | 2020-03-05T05:43:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,321 |
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import Any, Dict
from classy_vision import tasks
class ClassyHookState:
"""Class to store state within instances of ClassyHook.
Any serializable data can be stored in the instance's attributes.
"""
def get_classy_state(self) -> Dict[str, Any]:
return self.__dict__
def set_classy_state(self, state_dict: Dict[str, Any]):
self.__dict__ = state_dict
class ClassyHook(ABC):
"""Base class for hooks.
Hooks allow to inject behavior at different places of the training loop, which
are listed below in the chronological order.
on_start -> on_phase_start ->
on_step -> on_phase_end -> on_end
Deriving classes should call ``super().__init__()`` and store any state in
``self.state``. Any state added to this property should be serializable.
E.g. -
.. code-block:: python
class MyHook(ClassyHook):
def __init__(self, a, b):
super().__init__()
self.state.a = [1,2,3]
self.state.b = "my_hook"
# the following line is not allowed
# self.state.my_lambda = lambda x: x^2
"""
def __init__(self):
self.state = ClassyHookState()
def _noop(self, *args, **kwargs) -> None:
"""Derived classes can set their hook functions to this.
This is useful if they want those hook functions to not do anything.
"""
pass
@classmethod
def name(cls) -> str:
"""Returns the name of the class."""
return cls.__name__
@abstractmethod
def on_start(self, task: "tasks.ClassyTask") -> None:
"""Called at the start of training."""
pass
@abstractmethod
def on_phase_start(
self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
) -> None:
"""Called at the start of each phase."""
pass
@abstractmethod
def on_step(self, task: "tasks.ClassyTask") -> None:
"""Called each time after parameters have been updated by the optimizer."""
pass
@abstractmethod
def on_phase_end(
self, task: "tasks.ClassyTask", local_variables: Dict[str, Any]
) -> None:
"""Called at the end of each phase (epoch)."""
pass
@abstractmethod
def on_end(self, task: "tasks.ClassyTask") -> None:
"""Called at the end of training."""
pass
def get_classy_state(self) -> Dict[str, Any]:
"""Get the state of the ClassyHook.
The returned state is used for checkpointing.
Returns:
            A state dictionary containing the state of the hook.
"""
return self.state.get_classy_state()
def set_classy_state(self, state_dict: Dict[str, Any]) -> None:
"""Set the state of the ClassyHook.
Args:
state_dict: The state dictionary. Must be the output of a call to
:func:`get_classy_state`.
This is used to load the state of the hook from a checkpoint.
"""
self.state.set_classy_state(state_dict)
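# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal concrete hook: derived hooks typically alias the callbacks they do
# not need to ClassyHook._noop and implement only the ones they care about.
# "LoggingHook" is a hypothetical name used here purely for illustration.
class LoggingHook(ClassyHook):
    on_phase_start = ClassyHook._noop
    on_step = ClassyHook._noop
    on_phase_end = ClassyHook._noop
    on_end = ClassyHook._noop

    def on_start(self, task: "tasks.ClassyTask") -> None:
        # Runs once at the start of training.
        print(f"training is starting for task {task.__class__.__name__}")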
|
[
"[email protected]"
] | |
942d5f383fb074463bde66060a1faedb97568626
|
1033c93917117f462771571c29dd046954582bd8
|
/revscores/features/proportion_of_symbolic_added.py
|
2eeae56295eca238e2c206c786853e46201b8d7b
|
[
"MIT"
] |
permissive
|
jonasagx/Revision-Scoring
|
d4e3e892ac5de3a7f3032ef2b4fcc7b6efb20330
|
dfacba014e30d49577aa1a56aab13393ecede9d5
|
refs/heads/master
| 2021-01-17T11:57:39.393734 | 2015-01-10T19:13:02 | 2015-01-10T19:13:02 | 29,064,762 | 0 | 1 | null | 2015-01-10T19:13:03 | 2015-01-10T17:25:22 |
Python
|
UTF-8
|
Python
| false | false | 501 |
py
|
from .chars_added import chars_added
from .feature import Feature
from .symbolic_chars_added import symbolic_chars_added
def process(chars_added, symbolic_chars_added):
return symbolic_chars_added/(chars_added or 1)
proportion_of_symbolic_added = Feature("proportion_of_symbolic_added", process,
returns=float,
depends_on=[chars_added,
symbolic_chars_added])
|
[
"[email protected]"
] | |
55095ee0ea77fe40bd4ed68f53cd486d3d782b2d
|
fb235cccecab5368074bc43ed8677025f925dceb
|
/notebooks/westgrid/cffi_practice/__init__.py
|
6a5ba61abdb1177997fc7a77bffbd803fbab65cb
|
[] |
no_license
|
sbowman-mitre/parallel_python_course
|
88a5f767de2f0f630d48faf94983fad51ecbe50f
|
85b03809c9725c38df85b0ac1e9b34cc50c0dc54
|
refs/heads/master
| 2022-01-04T18:29:12.443568 | 2019-11-29T16:08:06 | 2019-11-29T16:08:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,313 |
py
|
# import version for use by setup.py
from ._version import version_info, __version__ # noqa: F401 imported but unused
from pathlib import Path
import os
def get_paths(*args, **kwargs):
    binpath = Path(os.environ['CONDA_PREFIX'])
    libdir = binpath / Path('lib')
    #
    # find either libcffi_funs.so or libcffi_funs.dll
    #
    library = list(libdir.glob('libcffi_funs.*'))
    if len(library) > 1:
        raise ImportError('found more than one libcffi_funs library')
    try:
        libfile = library[0]
    except IndexError:
        libfile = Path('libcffi_funs')
    includedir = Path.joinpath(binpath.parent, Path('include'))
    for the_path in [libfile, libdir, includedir]:
        if not the_path.exists():
            print(f"couldn't find {str(the_path)}. Did you install cffi_funs?")
            out_dict = None
            break
    else:
        out_dict = dict(libfile=str(libfile), libdir=str(libdir), includedir=str(includedir))
return out_dict
|
[
"[email protected]"
] | |
4668b524700dbf55e3711938e6cfd959affaa864
|
57ddfddd1e11db649536a8ed6e19bf5312d82d71
|
/AtCoder/ABC1/ABC123/ABC123-A.py
|
04402036b76e6ab088ca47d8dcc146c57c639e4d
|
[] |
no_license
|
pgDora56/ProgrammingContest
|
f9e7f4bb77714dc5088c2287e641c0aa760d0f04
|
fdf1ac5d1ad655c73208d98712110a3896b1683d
|
refs/heads/master
| 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 191 |
py
|
sm = float('inf')
bi = - float('inf')
for _ in range(5):
v = int(input())
if v < sm: sm = v
if v > bi: bi = v
if bi - sm > int(input()): print(':(')
else: print('Yay!')
|
[
"[email protected]"
] | |
8c36fc26a272f071d2585e8f26ae41f860d794bf
|
85381529f7a09d11b2e2491671c2d5e965467ac6
|
/OJ/Leetcode/Algorithm/54. Spiral Matrix.py
|
877d512e72cd9a17631f7f49ff7225fae0269c52
|
[] |
no_license
|
Mr-Phoebe/ACM-ICPC
|
862a06666d9db622a8eded7607be5eec1b1a4055
|
baf6b1b7ce3ad1592208377a13f8153a8b942e91
|
refs/heads/master
| 2023-04-07T03:46:03.631407 | 2023-03-19T03:41:05 | 2023-03-19T03:41:05 | 46,262,661 | 19 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 688 |
py
|
# -*- coding: utf-8 -*-
# @Author: HaonanWu
# @Date: 2017-03-03 10:57:26
# @Last Modified by: HaonanWu
# @Last Modified time: 2017-03-03 11:01:34
class Solution(object):
def spiralOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
ret = []
while matrix:
ret += matrix.pop(0)
if matrix and matrix[0]:
for row in matrix:
ret.append(row.pop())
if matrix:
ret += matrix.pop()[::-1]
if matrix and matrix[0]:
for row in matrix[::-1]:
ret.append(row.pop(0))
return ret
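# Usage sketch (added for illustration, not part of the original file):
# Solution().spiralOrder([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# -> [1, 2, 3, 6, 9, 8, 7, 4, 5]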
|
[
"[email protected]"
] | |
143a773bbbec049d6b12a6406b50a9fce3cdd585
|
26dec2f8f87a187119336b09d90182d532e9add8
|
/mcod/resources/documents.py
|
da3e92fb9c8f6d9a843336fb6541b7e1b3f9d460
|
[] |
no_license
|
olekstomek/mcod-backend-dane.gov.pl
|
7008bcd2dbd0dbada7fe535536b02cf27f3fe4fd
|
090dbf82c57633de9d53530f0c93dddf6b43a23b
|
refs/heads/source-with-hitory-from-gitlab
| 2022-09-14T08:09:45.213971 | 2019-05-31T06:22:11 | 2019-05-31T06:22:11 | 242,246,709 | 0 | 1 | null | 2020-02-24T22:39:26 | 2020-02-21T23:11:50 |
Python
|
UTF-8
|
Python
| false | false | 2,197 |
py
|
from django.apps import apps
from django_elasticsearch_dsl import DocType, Index, fields
from mcod import settings
from mcod.lib.search.fields import TranslatedTextField
Resource = apps.get_model('resources', 'Resource')
Dataset = apps.get_model('datasets', 'Dataset')
TaskResult = apps.get_model('django_celery_results', "TaskResult")
INDEX = Index(settings.ELASTICSEARCH_INDEX_NAMES['resources'])
INDEX.settings(**settings.ELASTICSEARCH_DSL_INDEX_SETTINGS)
data_schema = fields.NestedField(attr='schema', properties={
'fields': fields.NestedField(properties={
'name': fields.KeywordField(attr='name'),
'type': fields.KeywordField(attr='type'),
'format': fields.KeywordField(attr='format')
}),
'missingValue': fields.KeywordField(attr='missingValue')
})
@INDEX.doc_type
class ResourceDoc(DocType):
id = fields.IntegerField()
slug = fields.TextField()
uuid = fields.TextField()
title = TranslatedTextField('title', common_params={'suggest': fields.CompletionField()})
description = TranslatedTextField('description')
file_url = fields.TextField(
attr='file_url'
)
download_url = fields.TextField(
attr='download_url'
)
link = fields.TextField()
format = fields.KeywordField()
file_size = fields.LongField()
type = fields.KeywordField()
openness_score = fields.IntegerField()
dataset = fields.NestedField(properties={
'id': fields.IntegerField(),
'title': TranslatedTextField('title'),
'slug': TranslatedTextField('slug')
})
views_count = fields.IntegerField()
downloads_count = fields.IntegerField()
status = fields.TextField()
modified = fields.DateField()
created = fields.DateField()
verified = fields.DateField()
data_date = fields.DateField()
class Meta:
doc_type = 'resource'
model = Resource
related_models = [Dataset, ]
def get_instances_from_related(self, related_instance):
if isinstance(related_instance, Dataset):
return related_instance.resources.all()
def get_queryset(self):
return self._doc_type.model.objects.filter(status='published')
|
[
"[email protected]"
] | |
220274ef4a9b4c4918eadc9760519ac1b39963d8
|
3cd18a3e789d3a0739768f1ae848d9f74b9dbbe7
|
/mounth001/day21/exercise03.py
|
fe9a7a38bb1bfcf3fe7454d21909dc564595ee5d
|
[] |
no_license
|
Molly-l/66
|
4bfe2f93e726d3cc059222c93a2bb3460b21ad78
|
fae24a968f590060522d30f1b278fcfcdab8b36f
|
refs/heads/master
| 2020-09-28T12:50:18.590794 | 2019-11-27T04:42:28 | 2019-11-27T04:42:28 | 226,782,243 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 773 |
py
|
"""
lstack.py 栈的链式结构
重点代码
思路:
1. 源于节点存储数据,建立节点关联
2. 封装方法 入栈 出栈 栈空 栈顶元素
3. 链表的开头作为栈顶(不需要每次遍历)
"""
# 自定义异常
class StackError(Exception):
pass
# 创建节点类
class Node:
def __init__(self,val,next=None):
self.val = val # 有用数据
self.next = next # 节点关系
# 链式栈
class LStack:
def __init__(self):
# 标记顶位置
self._top = None
def is_empty(self):
return self._top is None
def push(self,val):
node=Node(val)
node.next=self._top
self._top=node
def pop(self):
temp=self._top.val
self.top=self.top.next
return temp
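# --- Illustrative usage sketch (not part of the original file) ---------------
# The head of the list is the top, so both push and pop run in O(1).
if __name__ == "__main__":
    st = LStack()
    for item in (1, 2, 3):
        st.push(item)
    print(st.pop())       # -> 3 (last pushed, first popped)
    print(st.is_empty())  # -> False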
|
[
"[email protected]"
] | |
d8cd32918e0332ff185300fa7e171a9a68f0cdd3
|
7ce076dd764fe4b5c7881734f157bc6f77a99ead
|
/tests/providers/exasol/operators/test_exasol.py
|
68e3d121b48bccf3971c3dd9c3a0247ac1f8a694
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"Python-2.0"
] |
permissive
|
kaxil/airflow
|
db31c98e23f2e0d869d857484e56a7c58acef231
|
42f1da179db00491610946a0b089dd82269adc74
|
refs/heads/master
| 2023-04-28T04:46:38.478352 | 2020-09-28T20:51:16 | 2020-09-28T20:51:16 | 112,322,392 | 1 | 1 |
Apache-2.0
| 2020-08-27T20:15:22 | 2017-11-28T10:42:19 |
Python
|
UTF-8
|
Python
| false | false | 1,922 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mock
from airflow.providers.exasol.operators.exasol import ExasolOperator
class TestExasol(unittest.TestCase):
@mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
def test_overwrite_autocommit(self, mock_run):
operator = ExasolOperator(task_id='TEST', sql='SELECT 1', autocommit=True)
operator.execute({})
mock_run.assert_called_once_with('SELECT 1', autocommit=True, parameters=None)
@mock.patch('airflow.providers.exasol.hooks.exasol.ExasolHook.run')
def test_pass_parameters(self, mock_run):
operator = ExasolOperator(task_id='TEST', sql='SELECT {value!s}', parameters={'value': 1})
operator.execute({})
mock_run.assert_called_once_with('SELECT {value!s}', autocommit=False, parameters={'value': 1})
@mock.patch('airflow.providers.exasol.operators.exasol.ExasolHook')
def test_overwrite_schema(self, mock_hook):
operator = ExasolOperator(task_id='TEST', sql='SELECT 1', schema='dummy')
operator.execute({})
mock_hook.assert_called_once_with(exasol_conn_id='exasol_default', schema='dummy')
|
[
"[email protected]"
] | |
7046f96277b3a24fa4c120d9e42ebb229ccaad4a
|
fe7763e194be94c402482619c0111fcaca1ef7f6
|
/tutorial/snippets/permissions.py
|
a42b29204436ae53823a6a8aff8bf895527515ec
|
[
"MIT"
] |
permissive
|
antoniocarlosortiz/django-rest-framework-sample
|
1fc8b11af2aa1cacfbbc2c3363e097262eec7aee
|
45ff0213b4a74566c8571c498c67adf66b420d3e
|
refs/heads/master
| 2021-01-01T05:18:51.457373 | 2016-04-23T18:28:12 | 2016-04-23T18:28:12 | 56,934,397 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
from rest_framework import permissions
class IsOwnerOrReadOnly(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to edit it.
"""
def has_object_permission(self, request, view, obj):
# Read permissions are allowed to any request.
# so we'll always allow GET, HEAD or OPTIONS requests.
if request.method in permissions.SAFE_METHODS:
return True
return obj.owner == request.user
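# --- Illustrative sketch (not part of the original file) ---------------------
# How this permission is typically wired into a view. "Snippet",
# "SnippetSerializer" and "SnippetDetail" are assumed names from the DRF
# tutorial this snippet belongs to, shown here as comments only:
#
#   from rest_framework import generics, permissions
#
#   class SnippetDetail(generics.RetrieveUpdateDestroyAPIView):
#       queryset = Snippet.objects.all()
#       serializer_class = SnippetSerializer
#       permission_classes = [permissions.IsAuthenticatedOrReadOnly,
#                             IsOwnerOrReadOnly]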
|
[
"[email protected]"
] | |
c412835e863548366c31fa22434e45e614059113
|
56278a6e508ce1a282270f90f1cd9984edd14965
|
/tests/test_validation/_test_utils.py
|
ae430d81167f643c218fc773e99d0fc4cf3c2974
|
[
"MIT"
] |
permissive
|
gc-ss/py-gql
|
3d5707938e503dc26addc6340be330c1aeb2aa76
|
5a2d180537218e1c30c65b2a933fb4fe197785ae
|
refs/heads/master
| 2023-04-10T05:21:24.086980 | 2020-04-01T14:18:20 | 2020-04-01T14:18:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,463 |
py
|
# -*- coding: utf-8 -*-
from py_gql._string_utils import dedent
from py_gql.lang import parse
from py_gql.validation import validate_ast
from py_gql.validation.validate import SPECIFIED_RULES, default_validator
def _ensure_list(value):
if isinstance(value, list):
return value
else:
return [value]
def assert_validation_result(
schema, source, expected_msgs=None, expected_locs=None, checkers=None
):
# Prints are here so we can more easily debug when running pytest with -v
expected_msgs = expected_msgs or []
expected_locs = expected_locs or []
print(source)
result = validate_ast(
schema,
parse(dedent(source), allow_type_system=True),
validators=[
lambda s, d, v: default_validator(
s, d, v, validators=(checkers or SPECIFIED_RULES)
)
],
)
errors = result.errors
msgs = [str(err) for err in errors]
locs = [[node.loc for node in err.nodes] for err in errors]
print(" [msgs] ", msgs)
print(" [locs] ", locs)
assert msgs == expected_msgs
if expected_locs:
assert locs == [_ensure_list(l) for l in expected_locs]
def assert_checker_validation_result(
checker, schema, source, expected_msgs=None, expected_locs=None
):
assert_validation_result(
schema,
source,
expected_msgs=expected_msgs,
expected_locs=expected_locs,
checkers=[checker],
)
|
[
"[email protected]"
] | |
c2191030e2543c62287b31ad7e253f8767252f1c
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v9/enums/types/feed_item_quality_approval_status.py
|
2b7fc3c81f16e8f0168b1a99e3484c10977c937b
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 |
Apache-2.0
| 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null |
UTF-8
|
Python
| false | false | 1,260 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"FeedItemQualityApprovalStatusEnum",},
)
class FeedItemQualityApprovalStatusEnum(proto.Message):
r"""Container for enum describing possible quality evaluation
approval statuses of a feed item.
"""
class FeedItemQualityApprovalStatus(proto.Enum):
r"""The possible quality evaluation approval statuses of a feed
item.
"""
UNSPECIFIED = 0
UNKNOWN = 1
APPROVED = 2
DISAPPROVED = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"[email protected]"
] | |
a30f1f5184e240fdb168d288874791f7260c7029
|
cdbb11473dc8d34767a5916f9f85cb68eb2ca3f2
|
/core/helpers.py
|
a9cf1b2ad8c669f8aac1b940187d7a46adde3660
|
[] |
no_license
|
skyride/evestats
|
fb2a1a248952771731dcfecadab7d02b1f08cd4b
|
4bd2153f65c084b478272513733dcc78f9a0ef98
|
refs/heads/master
| 2020-03-23T13:50:19.216870 | 2018-08-05T19:19:47 | 2018-08-05T19:19:47 | 141,640,834 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 578 |
py
|
from sde.models import Type
def generate_breadcrumb_trail(marketgroup):
def recurse(node):
"""Return an list containing the path to this trail"""
if isinstance(node, dict):
return []
elif isinstance(node, Type):
return [*recurse(node.market_group), node]
elif node.parent is None:
return [node]
else:
return [*recurse(node.parent), node]
return [
{
"name": "Market",
"root": True
},
*recurse(marketgroup)
]
|
[
"[email protected]"
] | |
48dee7176bb8171d5e34ce3b814a3824745949bb
|
974c5a4f101d0e6f4dfa5fc2f7c641c9d2bd8184
|
/sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/operations/_dedicated_hosts_operations.py
|
51cb4faf00fcd17afa1aa62853dffed3a1b72cf3
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
gaoyp830/azure-sdk-for-python
|
4816f04c554dcffb7510a6b7044b0c86a2dd32e1
|
1c66defa502b754abcc9e5afa444ca03c609342f
|
refs/heads/master
| 2022-10-20T21:33:44.281041 | 2022-09-29T17:03:13 | 2022-09-29T17:03:13 | 250,355,505 | 0 | 0 |
MIT
| 2020-03-26T19:42:13 | 2020-03-26T19:42:12 | null |
UTF-8
|
Python
| false | false | 44,268 |
py
|
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request(
resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"),
"hostName": _SERIALIZER.url("host_name", host_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_update_request(
resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"),
"hostName": _SERIALIZER.url("host_name", host_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
if content_type is not None:
_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="PATCH", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
resource_group_name: str, host_group_name: str, host_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"),
"hostName": _SERIALIZER.url("host_name", host_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
return HttpRequest(method="DELETE", url=_url, params=_params, **kwargs)
def build_get_request(
resource_group_name: str,
host_group_name: str,
host_name: str,
subscription_id: str,
*,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"),
"hostName": _SERIALIZER.url("host_name", host_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if expand is not None:
_params["$expand"] = _SERIALIZER.query("expand", expand, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_by_host_group_request(
resource_group_name: str, host_group_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts",
) # pylint: disable=line-too-long
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"hostGroupName": _SERIALIZER.url("host_group_name", host_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class DedicatedHostsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_03_01.ComputeManagementClient`'s
:attr:`dedicated_hosts` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
def _create_or_update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHost, IO],
**kwargs: Any
) -> _models.DedicatedHost:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DedicatedHost")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@overload
def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: _models.DedicatedHost,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Create Dedicated Host. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Create Dedicated Host. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHost, IO],
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Create or update a dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Create Dedicated Host. Is either a model type or
        an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
def _update_initial(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHostUpdate, IO],
**kwargs: Any
) -> _models.DedicatedHost:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "DedicatedHostUpdate")
request = build_update_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@overload
def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: _models.DedicatedHostUpdate,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace
def begin_update(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
parameters: Union[_models.DedicatedHostUpdate, IO],
**kwargs: Any
) -> LROPoller[_models.DedicatedHost]:
"""Update an dedicated host .
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host . Required.
:type host_name: str
:param parameters: Parameters supplied to the Update Dedicated Host operation. Is either a
        model type or an IO type. Required.
:type parameters: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHostUpdate or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either DedicatedHost or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@distributed_trace
def begin_delete(
self, resource_group_name: str, host_group_name: str, host_name: str, **kwargs: Any
) -> LROPoller[None]:
"""Delete a dedicated host.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host. Required.
:type host_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[None]
polling = kwargs.pop("polling", True) # type: Union[bool, PollingMethod]
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token = kwargs.pop("continuation_token", None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs)) # type: PollingMethod
elif polling is False:
polling_method = cast(PollingMethod, NoPolling())
else:
polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
host_group_name: str,
host_name: str,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> _models.DedicatedHost:
"""Retrieves information about a dedicated host.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:param host_name: The name of the dedicated host. Required.
:type host_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' will retrieve
the list of instance views of the dedicated host. 'UserData' is not supported for dedicated
host. Known values are: "instanceView" and "userData". Default value is None.
:type expand: str or ~azure.mgmt.compute.v2021_03_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DedicatedHost or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.DedicatedHost
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHost]
request = build_get_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
host_name=host_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("DedicatedHost", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts/{hostName}"} # type: ignore
@distributed_trace
def list_by_host_group(
self, resource_group_name: str, host_group_name: str, **kwargs: Any
) -> Iterable["_models.DedicatedHost"]:
"""Lists all of the dedicated hosts in the specified dedicated host group. Use the nextLink
property in the response to get the next page of dedicated hosts.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param host_group_name: The name of the dedicated host group. Required.
:type host_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DedicatedHost or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2021_03_01.models.DedicatedHost]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.DedicatedHostListResult]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_host_group_request(
resource_group_name=resource_group_name,
host_group_name=host_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_host_group.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urlparse(next_link)
_next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("DedicatedHostListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_host_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/hostGroups/{hostGroupName}/hosts"} # type: ignore
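# Editor's note (illustrative, not part of the generated client): the pager
# returned by list_by_host_group is consumed by plain iteration. The client
# construction sketched below assumes azure-identity and azure-mgmt-compute are
# installed and configured; names in angle brackets are placeholders.
#
#   credential = DefaultAzureCredential()
#   client = ComputeManagementClient(credential, "<subscription-id>")
#   for host in client.dedicated_hosts.list_by_host_group("<resource-group>", "<host-group>"):
#       print(host.name)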
|
[
"[email protected]"
] | |
fa3e65432481dc50669a709c3740fc9753628e14
|
8f0524fc0171e27a15f4cf5fb3fe48ef2053b40e
|
/leetcode/DP/edit_distance_formula.py
|
e9141de529dbc4bde7fdefe5cc4713fae1837147
|
[] |
no_license
|
MohammedAlewi/competitive-programming
|
51514fa04ba03d14f8e00031ee413d6d74df971f
|
960da78bfa956cb1cf79a0cd19553af97a2aa0f3
|
refs/heads/master
| 2023-02-08T20:25:58.279241 | 2023-02-02T00:11:23 | 2023-02-02T00:11:23 | 222,710,225 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 275 |
py
|
def edit_str(s1,s2,n,m):
if n<0 or m<0:
return max(m,n)+1
elif s1[n]==s2[m]:
return edit_str(s1,s2,n-1,m-1)
else:
return min(edit_str(s1,s2,n-1,m-1),edit_str(s1,s2,n,m-1),edit_str(s1,s2,n-1,m)) +1
print(edit_str("kitten","sitting",5,6))
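# Editor's note: the recursion above recomputes subproblems exponentially; a
# minimal memoized sketch with the same indexing convention is added below.
from functools import lru_cache

def edit_str_memo(s1, s2):
    @lru_cache(maxsize=None)
    def go(n, m):
        if n < 0 or m < 0:
            return max(m, n) + 1
        if s1[n] == s2[m]:
            return go(n - 1, m - 1)
        return min(go(n - 1, m - 1), go(n, m - 1), go(n - 1, m)) + 1
    return go(len(s1) - 1, len(s2) - 1)

print(edit_str_memo("kitten", "sitting"))  # 3, same result as the call above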
|
[
"[email protected]"
] | |
aae84273d14923a5fb83bf35b9b0e6a31ea3d1af
|
a6270537b5c6d924fa6353a8f0328e07c71a0366
|
/numbasltiprovider/urls.py
|
c12994c32a9c81f0df352e00b8c9d1aa5310f5c7
|
[
"Apache-2.0"
] |
permissive
|
oscarsiles/numbas-lti-provider
|
9b993175a6b6463a974373c7bdb2c9f38b057b89
|
ef7080a2593a800a1b9630c746e4f8667e2ec42d
|
refs/heads/master
| 2020-08-20T03:47:54.399198 | 2020-08-05T13:44:16 | 2020-08-05T13:44:16 | 215,979,486 | 0 | 0 |
NOASSERTION
| 2019-10-18T08:39:09 | 2019-10-18T08:39:09 | null |
UTF-8
|
Python
| false | false | 519 |
py
|
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('numbas_lti.urls')),
]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
try:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
except ImportError:
pass
|
[
"[email protected]"
] | |
2f85952fcbe3b65f4c744f4e3bb7f9549a012652
|
cb4cfcece4bc14f591b038adbc7fadccaf447a1d
|
/ELEVSTRS.py
|
d84b11ce6e30ca754fe1115b5248d18d884db818
|
[] |
no_license
|
psycho-pomp/CodeChef
|
ba88cc8e15b3e87d39ad0c4665c6892620c09d22
|
881edddded0bc8820d22f42b94b9959fd6912c88
|
refs/heads/master
| 2023-03-21T06:46:14.455055 | 2021-03-11T12:07:48 | 2021-03-11T12:07:48 | 275,214,989 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 224 |
py
|
# cook your dish here
from math import sqrt
t=int(input())
for _ in range(t):
n,v1,v2=map(int,input().split())
t1=n/v1
t2=(sqrt(2)*n)/v2
if t2>=t1:
print("Stairs")
else:
print("Elevator")
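# Editor's note, worked instance of the comparison above: for n=10, v1=2, v2=3
# we get t1 = 10/2 = 5.0 and t2 = sqrt(2)*10/3 (about 4.71); t2 < t1, so the
# branch above prints "Elevator".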
|
[
"[email protected]"
] | |
8c7ec1217dd7bc22b88439c1f406972e4f2a9006
|
3bae1ed6460064f997264091aca0f37ac31c1a77
|
/apps/cloud_api_generator/generatedServer/tasklets/rack/create/rack_create.py
|
3e407f24ace515e0974c5621850b08fc380425ff
|
[] |
no_license
|
racktivity/ext-pylabs-core
|
04d96b80ac1942754257d59e91460c3a141f0a32
|
53d349fa6bee0ccead29afd6676979b44c109a61
|
refs/heads/master
| 2021-01-22T10:33:18.523799 | 2017-06-08T09:09:28 | 2017-06-08T09:09:28 | 54,314,984 | 0 | 0 | null | 2017-06-08T09:09:29 | 2016-03-20T11:55:01 |
Python
|
UTF-8
|
Python
| false | false | 174 |
py
|
__author__ = 'aserver'
__tags__ = 'rack', 'create'
__priority__= 3
def main(q, i, params, tags):
params['result'] = ''
def match(q, i, params, tags):
return True
|
[
"devnull@localhost"
] |
devnull@localhost
|
7be5aa773f2e343fd4b8b491a4269fdf9fff5719
|
ca609a94fd8ab33cc6606b7b93f3b3ef201813fb
|
/2017-feb/1.python/5.data-frames.py
|
959658216b9ad8cb6baf46f1063d69277bcff50f
|
[] |
no_license
|
rajesh2win/datascience
|
fbc87def2a031f83ffceb4b8d7bbc31e8b2397b2
|
27aca9a6c6dcae3800fabdca4e3d76bd47d933e6
|
refs/heads/master
| 2021-01-20T21:06:12.488996 | 2017-08-01T04:39:07 | 2017-08-01T04:39:07 | 101,746,310 | 1 | 0 | null | 2017-08-29T09:53:49 | 2017-08-29T09:53:49 | null |
UTF-8
|
Python
| false | false | 726 |
py
|
import pandas as pd
col1 = [10,20,30,40]
col2 = ['abc','def','xyz','pqr']
col3 = [0,0,0,0]
#creating data frame
df1 = pd.DataFrame({'pid':col1,
'pname':col2,'survived':col3})
df1.shape
df1.info()
df1.describe()
df1.head(2)
df1.tail()
df1['col4'] = 0
#access frame content by column/columns
df1.pid
df1['pid']
df1[['pid','pname']]
df1.iloc[:, [0, 1]]  #by position (plain df1[[0,1]] relied on an old positional fallback)
#dropping a column
df2 = df1.drop('survived',1)
#slicing rows of frame
df1[0:2]
df1[0:4]
df1[0:]
df1[:2]
df1[-2:]
#filtering rows of dataframe by condition
type(df1.pid > 20)
df1[df1.pid>20]
#selecting subsets of rows and columns
df1.iloc[0:2,]
df1.iloc[[0,2],]
df1.iloc[0:2,0]
df1.iloc[0:2,[0,2]]
df1.loc[0:2,['pname']]
#grouping data in data frames
df1.groupby('pid').size()
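#editor's note: an illustrative aggregation on the grouped frame (columns as defined above)
df1.groupby('survived')['pid'].mean()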
|
[
"[email protected]"
] | |
2ce930a77f53d08bd7633bac3cdee86c6e5cdd88
|
f7327136419a3b895fb185bdc0af7a08256f8aed
|
/python/paddle/nn/layer/fused_transformer.py
|
0084f7ff339df3e185dbe727d4632f758e7e9255
|
[
"Apache-2.0"
] |
permissive
|
paddlelaw/Paddle
|
45a7598535d6a4b9dd0cfb9bbc61540ff9c1c21e
|
12865234fe1e28fe5df50a43901845ceaea42c2d
|
refs/heads/develop
| 2023-08-28T01:19:16.786973 | 2021-10-09T14:39:35 | 2021-10-09T14:39:35 | 331,300,511 | 0 | 0 |
Apache-2.0
| 2021-10-09T14:39:36 | 2021-01-20T12:29:27 |
Python
|
UTF-8
|
Python
| false | false | 19,928 |
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class FusedMultiHeadAttention(Layer):
"""
    Attention maps queries and a set of key-value pairs to outputs, and
    Multi-Head Attention performs multiple attention functions in parallel to jointly attend
to information from different representation subspaces.
Please refer to `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_
for more details.
Parameters:
embed_dim (int): The expected feature size in the input and output.
num_heads (int): The number of heads in multi-head attention.
dropout (float, optional): The dropout probability used on attention
weights to drop some attention targets. 0 for no dropout. Default 0
kdim (int, optional): The feature size in key. If None, assumed equal to
`embed_dim`. Default None.
vdim (int, optional): The feature size in value. If None, assumed equal to
`embed_dim`. Default None.
need_weights (bool, optional): Indicate whether to return the attention
weights. Default False.
weight_attr(ParamAttr, optional): To specify the weight parameter property.
Default: None, which means the default weight parameter property is used.
See usage for details in :code:`ParamAttr` .
bias_attr (ParamAttr|bool, optional): To specify the bias parameter property.
Default: None, which means the default bias parameter property is used.
If it is set to False, this layer will not have trainable bias parameter.
See usage for details in :code:`ParamAttr` .
Examples:
.. code-block:: python
import paddle
# encoder input: [batch_size, sequence_length, d_model]
query = paddle.rand((2, 4, 128))
# self attention mask: [batch_size, num_heads, query_len, query_len]
attn_mask = paddle.rand((2, 2, 4, 4))
multi_head_attn = paddle.nn.MultiHeadAttention(128, 2)
output = multi_head_attn(query, None, None, attn_mask=attn_mask) # [2, 4, 128]
"""
Cache = collections.namedtuple("Cache", ["k", "v"])
StaticCache = collections.namedtuple("StaticCache", ["k", "v"])
def __init__(self,
embed_dim,
num_heads,
dropout=0.,
kdim=None,
vdim=None,
need_weights=False,
weight_attr=None,
bias_attr=None):
super(FusedMultiHeadAttention, self).__init__()
raise NotImplementedError()
def forward(self, query, key=None, value=None, attn_mask=None, cache=None):
"""
Applies multi-head attention to map queries and a set of key-value pairs
to outputs.
Parameters:
query (Tensor): The queries for multi-head attention. It is a
tensor with shape `[batch_size, query_length, embed_dim]`. The
data type should be float32 or float64.
key (Tensor, optional): The keys for multi-head attention. It is
a tensor with shape `[batch_size, key_length, kdim]`. The
data type should be float32 or float64. If None, use `query` as
`key`. Default None.
value (Tensor, optional): The values for multi-head attention. It
is a tensor with shape `[batch_size, value_length, vdim]`.
The data type should be float32 or float64. If None, use `query` as
`value`. Default None.
attn_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
nothing wanted or needed to be prevented attention to. Default None.
cache (MultiHeadAttention.Cache|MultiHeadAttention.StaticCache, optional):
It is a namedtuple with `k` and `v` as fields, and stores tensors
shaped `[batch_size, num_heads, length, embed_dim]` which are results
of linear projection, reshape and transpose calculations in
MultiHeadAttention. If it is an instance of `Cache`, `k` and `v`
fields reserve intermediate results of previous positions, which
                are mostly used for decoder self attention. If it is an instance of
                `StaticCache`, `key` and `value` args would be ignored, `k` and
                `v` fields would be used as calculated results on `key` and
                `value`, which are mostly used for decoder-encoder cross attention.
It is only used for inference and should be None for training.
Default None.
Returns:
Tensor|tuple: It is a tensor that has the same shape and data type \
as `query`, representing attention output. Or a tuple if \
`need_weights` is True or `cache` is not None. If `need_weights` \
is True, except for attention output, the tuple also includes \
the attention weights tensor shaped `[batch_size, num_heads, query_length, key_length]`. \
If `cache` is not None, the tuple then includes the new cache \
having the same type as `cache`, and if it is `StaticCache`, it \
is same as the input `cache`, if it is `Cache`, the new cache \
                reserves tensors concatenating raw tensors with intermediate \
results of current query.
"""
raise NotImplementedError()
class FusedFeedForward(Layer):
def __init__(self,
d_model,
dim_feedforward,
dropout=0.1,
activation="relu",
act_dropout=None,
normalize_before=False,
weight_attr=None,
bias_attr=None):
super(FusedFeedForward, self).__init__()
raise NotImplementedError()
def forward(self, src, cache=None):
raise NotImplementedError()
class FusedTransformerEncoderLayer(Layer):
"""
    TransformerEncoderLayer is composed of two sub-layers: self (multi-head)
    attention and a feedforward network. Before and after each sub-layer, pre-process
    and post-process steps are applied to the input and output accordingly. If
    `normalize_before` is True, pre-process is layer normalization and post-process
    consists of dropout and residual connection. Otherwise, there is no pre-process, and
    post-process consists of dropout, residual connection, and layer normalization.
Parameters:
d_model (int): The expected feature size in the input and output.
nhead (int): The number of heads in multi-head attention(MHA).
dim_feedforward (int): The hidden layer size in the feedforward network(FFN).
dropout (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layers. Default 0.1
activation (str, optional): The activation function in the feedforward
network. Default relu.
attn_dropout (float, optional): The dropout probability used
in MHA to drop some attention target. If None, use the value of
`dropout`. Default None
act_dropout (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
normalize_before (bool, optional): Indicate whether to put layer normalization
into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-process consists of dropout and residual connection.
            Otherwise, there is no pre-process, and post-process consists of dropout, residual
            connection, and layer normalization. Default False
weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
If it is a list/tuple, `weight_attr[0]` would be used as `weight_attr` for
MHA, and `weight_attr[1]` would be used as `weight_attr` for linear in FFN.
Otherwise, MHA and FFN both use it as `weight_attr` to create parameters.
Default: None, which means the default weight parameter property is used.
See usage for details in :code:`ParamAttr` .
bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
If it is a list/tuple, `bias_attr[0]` would be used as `bias_attr` for
MHA, and `bias_attr[1]` would be used as `bias_attr` for linear in FFN.
Otherwise, MHA and FFN both use it as `bias_attr` to create parameters.
The `False` value means the corresponding layer would not have trainable
bias parameter. See usage for details in :code:`ParamAttr` . Default: None,
which means the default bias parameter property is used.
Examples:
.. code-block:: python
import paddle
from paddle.nn import TransformerEncoderLayer
# encoder input: [batch_size, src_len, d_model]
enc_input = paddle.rand((2, 4, 128))
# self attention mask: [batch_size, n_head, src_len, src_len]
attn_mask = paddle.rand((2, 2, 4, 4))
encoder_layer = TransformerEncoderLayer(128, 2, 512)
enc_output = encoder_layer(enc_input, attn_mask) # [2, 4, 128]
"""
def __init__(self,
d_model,
nhead,
dim_feedforward,
dropout=0.1,
activation="relu",
attn_dropout=None,
act_dropout=None,
normalize_before=False,
weight_attr=None,
bias_attr=None):
self._config = locals()
self._config.pop("self")
self._config.pop("__class__", None) # py3
super(FusedTransformerEncoderLayer, self).__init__()
raise NotImplementedError()
def forward(self, src, src_mask=None, cache=None):
"""
Applies a Transformer encoder layer on the input.
Parameters:
src (Tensor): The input of Transformer encoder layer. It is
a tensor with shape `[batch_size, sequence_length, d_model]`.
The data type should be float32 or float64.
src_mask (Tensor, optional): A tensor used in multi-head attention
                to prevent attention to some unwanted positions, usually the
paddings or the subsequent positions. It is a tensor with shape
broadcasted to `[batch_size, n_head, sequence_length, sequence_length]`.
When the data type is bool, the unwanted positions have `False`
values and the others have `True` values. When the data type is
int, the unwanted positions have 0 values and the others have 1
values. When the data type is float, the unwanted positions have
`-INF` values and the others have 0 values. It can be None when
nothing wanted or needed to be prevented attention to. Default None.
cache (Tensor, optional): It is an instance of `MultiHeadAttention.Cache`.
See `TransformerEncoderLayer.gen_cache` for more details. It is
only used for inference and should be None for training. Default
None.
Returns:
Tensor|tuple: It is a tensor that has the same shape and data type \
as `enc_input`, representing the output of Transformer encoder \
layer. Or a tuple if `cache` is not None, except for encoder \
layer output, the tuple includes the new cache which is same \
as input `cache` argument but `incremental_cache` has an \
incremental length. See `MultiHeadAttention.gen_cache` and \
`MultiHeadAttention.forward` for more details.
"""
raise NotImplementedError()
class FusedTransformer(Layer):
"""
A Transformer model composed of an instance of `TransformerEncoder` and an
    instance of `TransformerDecoder`. The embedding layer and output layer
are not included.
Please refer to `Attention is all you need <http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf>`_ ,
and see `TransformerEncoder` and `TransformerDecoder` for more details.
    Users can configure the model architecture with the corresponding parameters.
Note the usage of `normalize_before` representing where to apply layer
    normalization (in pre-process or post-process of multi-head attention or FFN),
and some transformer like models are different on this, such as
`BERT <https://arxiv.org/abs/1810.04805>`_ and `GPT2 <https://d4mucfpksywv.cloudfront.net/better-language-models/language-models.pdf>`_ .
The default architecture here places layer normalization in post-process and
applies another layer normalization on the output of last encoder/decoder layer.
Parameters:
d_model (int, optional): The expected feature size in the encoder/decoder input
and output. Default 512
nhead (int, optional): The number of heads in multi-head attention(MHA). Default 8
num_encoder_layers (int, optional): The number of layers in encoder. Default 6
num_decoder_layers (int, optional): The number of layers in decoder. Default 6
dim_feedforward (int, optional): The hidden layer size in the feedforward network(FFN). Default 2048
dropout (float, optional): The dropout probability used in pre-process
            and post-process of MHA and FFN sub-layers. Default 0.1
activation (str, optional): The activation function in the feedforward
network. Default relu.
attn_dropout (float, optional): The dropout probability used
in MHA to drop some attention target. If None, use the value of
`dropout`. Default None
act_dropout (float, optional): The dropout probability used after FFN
            activation. If None, use the value of `dropout`. Default None
normalize_before (bool, optional): Indicate whether to put layer normalization
into preprocessing of MHA and FFN sub-layers. If True, pre-process is layer
            normalization and post-process consists of dropout and residual connection.
            Otherwise, there is no pre-process, and post-process consists of dropout, residual
            connection, and layer normalization. Default False
weight_attr(ParamAttr|list|tuple, optional): To specify the weight parameter property.
If it is a list/tuple, the length of `weight_attr` could be 1, 2 or 3. If it is 3,
`weight_attr[0]` would be used as `weight_attr` for self attention, `weight_attr[1]`
would be used as `weight_attr` for cross attention of `TransformerDecoder`,
and `weight_attr[2]` would be used as `weight_attr` for linear in FFN.
If it is 2, `weight_attr[0]` would be used as `weight_attr` both for self attention
            and cross attention and `weight_attr[1]` would be used as `weight_attr` for
            linear in FFN. If it is 1, `weight_attr[0]` would be used as `weight_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all use it as `weight_attr` to create parameters.
Default: None, which means the default weight parameter property is used.
See usage for details
in :code:`ParamAttr` .
bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property.
If it is a list/tuple, the length of `bias_attr` could be 1, 2 or 3. If it is 3,
`bias_attr[0]` would be used as `bias_attr` for self attention, `bias_attr[1]`
would be used as `bias_attr` for cross attention of `TransformerDecoder`,
and `bias_attr[2]` would be used as `bias_attr` for linear in FFN.
If it is 2, `bias_attr[0]` would be used as `bias_attr` both for self attention
            and cross attention and `bias_attr[1]` would be used as `bias_attr` for
            linear in FFN. If it is 1, `bias_attr[0]` would be used as `bias_attr`
            for self attention, cross attention and linear in FFN. Otherwise,
            the three sub-layers all use it as `bias_attr` to create parameters.
The `False` value means the corresponding layer would not have trainable
bias parameter. See usage for details in :code:`ParamAttr` .
            Default: None, which means the default bias parameter property is used.
custom_encoder (Layer, optional): If custom encoder is provided, use it as the encoder.
Default None
custom_decoder (Layer, optional): If custom decoder is provided, use it as the decoder.
Default None
Examples:
.. code-block:: python
import paddle
from paddle.nn import Transformer
# src: [batch_size, tgt_len, d_model]
enc_input = paddle.rand((2, 4, 128))
# tgt: [batch_size, src_len, d_model]
dec_input = paddle.rand((2, 6, 128))
# src_mask: [batch_size, n_head, src_len, src_len]
enc_self_attn_mask = paddle.rand((2, 2, 4, 4))
# tgt_mask: [batch_size, n_head, tgt_len, tgt_len]
dec_self_attn_mask = paddle.rand((2, 2, 6, 6))
# memory_mask: [batch_size, n_head, tgt_len, src_len]
cross_attn_mask = paddle.rand((2, 2, 6, 4))
transformer = Transformer(128, 2, 4, 4, 512)
output = transformer(enc_input,
dec_input,
enc_self_attn_mask,
dec_self_attn_mask,
cross_attn_mask) # [2, 6, 128]
"""
def __init__(self,
d_model=512,
nhead=8,
num_encoder_layers=6,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
attn_dropout=None,
act_dropout=None,
normalize_before=False,
weight_attr=None,
bias_attr=None,
custom_encoder=None,
custom_decoder=None):
        super(FusedTransformer, self).__init__()
raise NotImplementedError()
def forward(self, src, tgt, src_mask=None, tgt_mask=None, memory_mask=None):
raise NotImplementedError()
|
[
"[email protected]"
] | |
0a53f26329b7e8f590b399d677a12e83e6704b2e
|
28a124b6a2f22a53af3b6bb754e77af88b4138e1
|
/DJANGO/companytodo/reports/migrations/0006_auto_20191209_0121.py
|
a29feb60b3e3cadd0f868274c2f14a8a99ef6f0e
|
[] |
no_license
|
mebaysan/LearningKitforBeginners-Python
|
f7c6668a9978b52cad6cc2b969990d7bbfedc376
|
9e1a47fb14b3d81c5b009b74432902090e213085
|
refs/heads/master
| 2022-12-21T03:12:19.892857 | 2021-06-22T11:58:27 | 2021-06-22T11:58:27 | 173,840,726 | 18 | 4 | null | 2022-12-10T03:00:22 | 2019-03-04T23:56:27 |
Python
|
UTF-8
|
Python
| false | false | 350 |
py
|
# Generated by Django 2.2.7 on 2019-12-08 22:21
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reports', '0005_auto_20191209_0120'),
]
operations = [
migrations.AlterModelOptions(
name='report',
options={'ordering': ('-created',)},
),
]
|
[
"[email protected]"
] | |
a81f1658dd871e8e403dcf6b4e512ae458767a2f
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/HBKAGJZ62JkCTgYX3_15.py
|
5c5665b4393c00c704f2eb04cb3ee08dfe0d3464
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 130 |
py
|
def last(l,n):
if n>len(l):
return 'invalid'
elif n==0:
return []
else:
return l[len(l)-n:]
|
[
"[email protected]"
] | |
31f85f215a9f769b8f6cf5f1c88dce4b0be8c037
|
4f0f411d8d9abe3d520a962d30da67959e6bff2f
|
/tests/sequence/test_phylo.py
|
d40dbd398c6fe4c8b0188a102157cb630725e1f8
|
[
"BSD-3-Clause"
] |
permissive
|
ajshedivy/biotite
|
685f196416cc7be74d299a13f23947f85eb5825e
|
15fe39de165aba6e8bd6376fa8f8ddf069718fb5
|
refs/heads/master
| 2023-08-24T14:45:25.239920 | 2021-10-06T14:32:40 | 2021-10-06T14:32:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,165 |
py
|
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
from os.path import join
import numpy as np
import pytest
import biotite
import biotite.sequence.phylo as phylo
from ..util import data_dir
@pytest.fixture
def distances():
# Distances are based on the example
# "Dendrogram of the BLOSUM62 matrix"
# with the small modification M[i,j] += i+j
# to reduce ambiguity in the tree construction.
return np.loadtxt(join(data_dir("sequence"), "distances.txt"), dtype=int)
@pytest.fixture
def upgma_newick():
# Newick notation of the tree created from 'distances.txt',
# created via DendroUPGMA
with open(join(data_dir("sequence"), "newick_upgma.txt"), "r") as file:
newick = file.read().strip()
return newick
@pytest.fixture
def tree(distances):
return phylo.upgma(distances)
def test_upgma(tree, upgma_newick):
"""
Compare the results of `upgma()` with DendroUPGMA.
"""
ref_tree = phylo.Tree.from_newick(upgma_newick)
# Cannot apply direct tree equality assertion because the distance
# might not be exactly equal due to floating point rounding errors
for i in range(len(tree)):
for j in range(len(tree)):
# Check for equal distances and equal topologies
assert tree.get_distance(i,j) \
== pytest.approx(ref_tree.get_distance(i,j), abs=1e-3)
assert tree.get_distance(i,j, topological=True) \
== ref_tree.get_distance(i,j, topological=True)
def test_neighbor_joining():
"""
Compare the results of `neighbor_join()` with a known tree.
"""
dist = np.array([
[ 0, 5, 4, 7, 6, 8],
[ 5, 0, 7, 10, 9, 11],
[ 4, 7, 0, 7, 6, 8],
[ 7, 10, 7, 0, 5, 9],
[ 6, 9, 6, 5, 0, 8],
[ 8, 11, 8, 9, 8, 0],
])
ref_tree = phylo.Tree(phylo.TreeNode(
[
phylo.TreeNode(
[
phylo.TreeNode(
[
phylo.TreeNode(index=0),
phylo.TreeNode(index=1),
],
[1,4]
),
phylo.TreeNode(index=2),
],
[1, 2]
),
phylo.TreeNode(
[
phylo.TreeNode(index=3),
phylo.TreeNode(index=4),
],
[3,2]
),
phylo.TreeNode(index=5),
],
[1,1,5]
))
test_tree = phylo.neighbor_joining(dist)
assert test_tree == ref_tree
def test_node_distance(tree):
"""
Test whether the `distance_to()` and `lowest_common_ancestor()` work
correctly.
"""
# Tree is created via UPGMA
# -> The distances to root should be equal for all leaf nodes
dist = tree.root.distance_to(tree.leaves[0])
for leaf in tree.leaves:
assert leaf.distance_to(tree.root) == dist
# Example topological distances
assert tree.get_distance(0, 19, True) == 9
assert tree.get_distance(4, 2, True) == 10
# All pairwise leaf node distances should be sufficient
# to reconstruct the same tree via UPGMA
ref_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
ref_dist_mat[i,j] = tree.get_distance(i,j)
assert np.allclose(ref_dist_mat, ref_dist_mat.T)
new_tree = phylo.upgma(ref_dist_mat)
test_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
test_dist_mat[i,j] = new_tree.get_distance(i,j)
assert np.allclose(test_dist_mat, ref_dist_mat)
def test_leaf_list(tree):
for i, leaf in enumerate(tree.leaves):
assert i == leaf.index
def test_distances(tree):
# Tree is created via UPGMA
# -> The distances to root should be equal for all leaf nodes
dist = tree.root.distance_to(tree.leaves[0])
for leaf in tree.leaves:
assert leaf.distance_to(tree.root) == dist
# Example topological distances
assert tree.get_distance(0, 19, True) == 9
assert tree.get_distance(4, 2, True) == 10
def test_get_leaves(tree):
# Manual example cases
node = tree.leaves[6]
assert set(tree.leaves[6].parent.get_indices()) == set(
[6,11,2,3,13,8,14,5,0,15,16]
)
assert set(tree.leaves[10].get_indices()) == set([10])
assert tree.root.get_leaf_count() == 20
def test_copy(tree):
assert tree is not tree.copy()
assert tree == tree.copy()
def test_immutability():
node = phylo.TreeNode(index=0)
# Attributes are not writable
with pytest.raises(AttributeError):
node.children = None
with pytest.raises(AttributeError):
node.parent = None
with pytest.raises(AttributeError):
node.index = None
# A root node cannot be child
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
node1.as_root()
with pytest.raises(phylo.TreeError):
phylo.TreeNode([node1, node2], [0, 0])
# A child node cannot be root
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
node1.as_root()
    # A node cannot be a child of two nodes
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=1)
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
phylo.TreeNode([node1, node2], [0, 0])
# Tree cannot be constructed from child nodes
node1 = phylo.TreeNode(index=0)
node2 = phylo.TreeNode(index=0)
# node1 and node2 have now a parent
phylo.TreeNode([node1, node2], [0, 0])
with pytest.raises(phylo.TreeError):
phylo.Tree(node1)
@pytest.mark.parametrize("newick, labels, error", [
# Reference index out of range
("((1,0),4),2);", None, biotite.InvalidFileError),
# Empty string
("", None, biotite.InvalidFileError),
# Empty node
("();", None, biotite.InvalidFileError),
# Missing brackets
("((0,1,(2,3));", None, biotite.InvalidFileError),
# A node with three leaves
("((0,1),(2,3),(4,5));", None, None),
# A node with one leaf
("((0,1),(2,3),(4));", None, None),
# Named intermediate nodes
("((0,1,3)A,2)B;", None, None),
# Named intermediate nodes and distances
("((0:1.0,1:3.0,3:5.0)A:2.0,2:5.0)B;", None, None),
# Nodes with labels
("((((A,B),(C,D)),E),F);", ["A","B","C","D","E","F"], None),
# Nodes with labels and distances
("((((A:1,B:2),(C:3,D:4)),E:5),F:6);", ["A","B","C","D","E","F"], None),
# Newick with spaces
(" ( 0 : 1.0 , 1 : 3.0 ) A ; ", None, None),
])
def test_newick_simple(newick, labels, error):
# Read, write and read again a Newick notation and expect
    # the same result from both reads
if error is None:
tree1 = phylo.Tree.from_newick(newick, labels)
newick = tree1.to_newick(labels, include_distance=True)
tree2 = phylo.Tree.from_newick(newick, labels)
assert tree1 == tree2
else:
with pytest.raises(error):
tree1 = phylo.Tree.from_newick(newick, labels)
@pytest.mark.parametrize("use_labels", [False, True])
def test_newick_complex(upgma_newick, use_labels):
# Same as above with more complex string
if use_labels:
labels = [str(i) for i in range(20)]
else:
labels = None
tree1 = phylo.Tree.from_newick(upgma_newick, labels)
newick = tree1.to_newick(labels, include_distance=True)
tree2 = phylo.Tree.from_newick(newick, labels)
assert tree1 == tree2
@pytest.mark.parametrize("newick_in, exp_newick_out", [
("(0:1.0, 1:2.0);", "(0:1.0,1:2.0):0.0;" ),
("(0:1.0, 1:2.0, 2:3.0);", "((0:1.0,1:2.0):0.0,2:3.0):0.0;" ),
("(((0:1.0, 1:2.0):10.0):5.0, 2:8.0);", "((0:1.0,1:2.0):15.0,2:8.0):0.0;"),
("((0:1.0, 1:2.0):10.0):5.0;", "(0:1.0,1:2.0):0.0;" ),
])
def test_as_binary_cases(newick_in, exp_newick_out):
"""
Test the `as_binary()` function based on known cases.
"""
tree = phylo.Tree.from_newick(newick_in)
bin_tree = phylo.as_binary(tree)
assert bin_tree.to_newick() == exp_newick_out
def test_as_binary_distances():
"""
Test the preservation of all pairwise leaf distances after calling
`as_binary()`.
"""
# Some random newick
newick = "((((0:5, 1:1, 2:13, 5:9):4, (4:2, 6:9):7):18), 3:12);"
tree = phylo.Tree.from_newick(newick)
ref_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
ref_dist_mat[i,j] = tree.get_distance(i,j)
bin_tree = phylo.as_binary(tree)
test_dist_mat = np.zeros((len(tree), len(tree)))
for i in range(len(tree)):
for j in range(len(tree)):
test_dist_mat[i,j] = bin_tree.get_distance(i,j)
assert np.allclose(test_dist_mat, ref_dist_mat)
def test_equality(tree):
"""
Assert that equal trees equal each other, and non-equal trees do not
equal each other.
"""
assert tree == tree.copy()
# Order of children is not important
assert tree == phylo.Tree(phylo.TreeNode(
[tree.root.children[1].copy(), tree.root.children[0].copy()],
[tree.root.children[1].distance, tree.root.children[0].distance]
))
# Different distance -> Unequal tree
assert tree != phylo.Tree(phylo.TreeNode(
[tree.root.children[0].copy(), tree.root.children[1].copy()],
[tree.root.children[0].distance, 42]
))
# Additional node -> Unequal tree
assert tree != phylo.Tree(phylo.TreeNode(
[
tree.root.children[0].copy(),
tree.root.children[1].copy(),
phylo.TreeNode(index=len(tree))
],
[
tree.root.children[0].distance,
tree.root.children[1].distance,
42
]
))
|
[
"[email protected]"
] | |
c0300915f88b4cbb234193be8a08ceb789f7fd55
|
c24b28c0dc4ad8f83845f4c61882f1e04d49b5cd
|
/Plotly_Graphs/Plotly_Introduction/plotly_charts.py
|
d17cd6a9de3a549f8ebb82ff2712db48bbb76398
|
[] |
no_license
|
Coding-with-Adam/Dash-by-Plotly
|
759e927759513d96060a770b1e0b0a66db13f54f
|
9f178f1d52536efd33827758b741acc4039d8d9b
|
refs/heads/master
| 2023-08-31T17:23:02.029281 | 2023-08-08T05:12:50 | 2023-08-08T05:12:50 | 234,687,337 | 1,293 | 1,822 | null | 2023-07-31T15:47:07 | 2020-01-18T05:36:28 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 370 |
py
|
import pandas as pd
import plotly.express as px
dfb = pd.read_csv("bird-window-collision-death.csv")
df = px.data.tips()
fig = px.pie(dfb, values='Deaths', names='Bldg #', color="Side", hole=0.3)
fig.update_traces(textinfo="label+percent", insidetextfont=dict(color="white"))
fig.update_layout(legend={"itemclick":False})
fig.show()
fig.write_image("images/fig1.png")
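# Editor's note: write_image needs a static-image export backend installed
# (the kaleido package, or the legacy orca tool).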
|
[
"[email protected]"
] | |
9ff2f22cb931ef1b4b6f3de6cb5ba468dace744c
|
ae613a880eecf783ba23e7ca871f9e165ec2ce6e
|
/calculate_root.py
|
f6e918aef989a07665376a59101b386c993edc8e
|
[
"MIT"
] |
permissive
|
h-mayorquin/capacity_code
|
52d7e81026cd804677d5a5a6312b434bdff6ed32
|
f885f0e409d3f9c54b8e20c902f7ef28584ca8a2
|
refs/heads/master
| 2020-08-28T00:30:14.760936 | 2020-01-31T17:26:29 | 2020-01-31T17:26:29 | 217,534,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,388 |
py
|
import warnings
import pickle
import pandas as pd
import numpy as np
import random
from math import ceil, floor
from copy import deepcopy
from functions import *
warnings.filterwarnings('ignore')
minicolumns = 10
hypercolumns = 5
sequence_length = 2
number_of_sequences = 20
pattern_seed = np.random.randint(0, 20)
desired_root = 0.9
verbose = True
n_patterns = 100
pairs = produce_pairs_with_constant_number_of_patterns(n_patterns)[3:-3]
# Format is hypercolumns, minicolumns, extra
pairs = [(3, 66, 0)]
# Do the calculations
for pair in pairs:
hypercolumns, minicolumns, extra = pair
print('hypercolumns', hypercolumns)
print('minicolumns', minicolumns)
print('extra', extra)
pattern_seed = np.random.randint(0, 20)
aux = find_root_empirical(desired_root, hypercolumns, minicolumns, sequence_length, pattern_seed, tolerance=0.01, verbose=verbose)
capacity, p_root, trials = aux
# Read
data_frame = pd.read_csv('../storage_capacity_data.csv', index_col=0)
# Write
data_frame = data_frame.append({'hypercolumns':hypercolumns, 'minicolumns':minicolumns, 'sequence_length':sequence_length,
'capacity':capacity, 'p_critical':p_root, 'trials':trials }, ignore_index=True)
# Store the data base
data_frame.to_csv('../storage_capacity_data.csv')
print('Stored')
print('================')
|
[
"[email protected]"
] | |
c1337933143e4be73f495569475dbf98d651bfac
|
f0b52a3ae5115b9a839d6bd3e765de83ecb21a28
|
/Payload_Type/Apollo/mythic/agent_functions/net_localgroup_member.py
|
6b2fad53fcf068ef12c142ebdcfed4c9d96d878c
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
bopin2020/Apollo
|
ad98f1cb872bd2134509df55ee67a79c51e6d316
|
7660439cbc8d4f18af2b564a5b7a0ac4f8f3765a
|
refs/heads/master
| 2023-01-12T23:50:01.266984 | 2020-11-12T07:03:13 | 2020-11-12T07:03:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,699 |
py
|
from CommandBase import *
import json
class NetLocalgroupMemberArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {
"computer": CommandParameter(name="computer", required=False, type=ParameterType.String, description="Computer to enumerate."),
"group": CommandParameter(name="group", type=ParameterType.String, description="Group to enumerate.")
}
def split_commandline(self):
if self.command_line[0] == "{":
raise Exception("split_commandline expected string, but got JSON object: " + self.command_line)
inQuotes = False
curCommand = ""
cmds = []
for x in range(len(self.command_line)):
c = self.command_line[x]
if c == '"' or c == "'":
inQuotes = not inQuotes
if (not inQuotes and c == ' '):
cmds.append(curCommand)
curCommand = ""
else:
curCommand += c
if curCommand != "":
cmds.append(curCommand)
for x in range(len(cmds)):
if cmds[x][0] == '"' and cmds[x][-1] == '"':
cmds[x] = cmds[x][1:-1]
elif cmds[x][0] == "'" and cmds[x][-1] == "'":
cmds[x] = cmds[x][1:-1]
return cmds
async def parse_arguments(self):
if self.command_line[0] == "{":
self.load_args_from_json_string(self.command_line)
else:
cmds = self.split_commandline()
if len(cmds) == 1:
self.add_arg("group", cmds[0])
elif len(cmds) == 2:
self.add_arg("computer", cmds[0])
self.add_arg("group", cmds[1])
else:
raise Exception("Expected one or two arguments, but got: {}".format(cmds))
class NetLocalgroupMemberCommand(CommandBase):
cmd = "net_localgroup_member"
needs_admin = False
help_cmd = "net_localgroup_member [computer] [group]"
description = "Retrieve local group membership of the group specified by [group]. If [computer] is omitted, defaults to localhost."
version = 1
is_exit = False
is_file_browse = False
is_process_list = False
is_download_file = False
is_upload_file = False
is_remove_file = False
author = "@djhohnstein"
argument_class = NetLocalgroupMemberArguments
attackmapping = []
browser_script = BrowserScript(script_name="net_localgroup_member", author="@djhohnstein")
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
pass
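# Editor's note (illustrative): the argument class above accepts either a JSON
# dictionary or a quoted command line; e.g. split_commandline on
# 'DC01 "Domain Admins"' yields ['DC01', 'Domain Admins'], which parse_arguments
# maps to computer='DC01' and group='Domain Admins'.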
|
[
"[email protected]"
] | |
109e032b250691b3bf5f5ea34a9982e509cbd868
|
3d9825900eb1546de8ad5d13cae893eb0d6a9b14
|
/AutoWorkup/SEMTools/setup.py
|
94c4428d8e6622b3e676c76f782d87775210107d
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
rtkarcher/BRAINSTools
|
20d69f96e6d5ca92adaeb06aa4fe6556b5e7b268
|
961135366450400409cece431423ed480855d34c
|
refs/heads/master
| 2021-01-15T08:53:48.961607 | 2013-06-26T19:09:34 | 2013-06-26T19:09:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 691 |
py
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('SEMTools', parent_package, top_path)
config.add_data_dir('diffusion')
config.add_data_dir('segmentation')
config.add_data_dir('filtering')
config.add_data_dir('brains')
config.add_data_dir('utilities')
config.add_data_dir('registration')
config.add_data_dir('utility')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
[
"[email protected]"
] | |
b1b669b81b3b3af232520b452ae2ec8d589ea9bc
|
2104153bfc138b7e677aba0da9da376c38b7dcd0
|
/apps/extensions/db.py
|
cfe285977011e170da1d102d9769b0ee4dfad351
|
[] |
no_license
|
lucassimon/flask-api-users
|
992c75491cf577f74649097e49a04fbc74ea50e1
|
510443d481486f09a09398a5225ace82320477f3
|
refs/heads/master
| 2023-06-09T16:17:39.873516 | 2023-06-05T04:24:56 | 2023-06-05T04:24:56 | 137,128,845 | 22 | 16 | null | 2018-11-20T18:00:53 | 2018-06-12T21:18:41 |
Python
|
UTF-8
|
Python
| false | false | 71 |
py
|
# Third
from flask_mongoengine import MongoEngine
db = MongoEngine()
|
[
"[email protected]"
] | |
4cecb1fdea73ca9f39f2bdf440f6840a5f57c2f2
|
7aebfaec6957ad67523f1d8851856af88fb997a6
|
/catkin_ws/build/robotiq/robotiq_3f_gripper_control/catkin_generated/pkg.develspace.context.pc.py
|
472d6f1b90073d4764f3841493bb27694a2f8bfa
|
[] |
no_license
|
k-makihara/ROS
|
918e79e521999085ab628b6bf27ec28a51a8ab87
|
45b60e0488a5ff1e3d8f1ca09bfd191dbf8c0508
|
refs/heads/master
| 2023-01-28T06:00:55.943392 | 2020-11-26T05:27:16 | 2020-11-26T05:27:16 | 316,127,707 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 684 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/mslab/catkin_ws/devel/include".split(';') if "/home/mslab/catkin_ws/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "controller_manager;diagnostic_updater;dynamic_reconfigure;hardware_interface;robotiq_ethercat;roscpp;rospy;socketcan_interface;std_srvs;robotiq_3f_gripper_articulated_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrobotiq_3f_gripper_control".split(';') if "-lrobotiq_3f_gripper_control" != "" else []
PROJECT_NAME = "robotiq_3f_gripper_control"
PROJECT_SPACE_DIR = "/home/mslab/catkin_ws/devel"
PROJECT_VERSION = "1.0.0"
|
[
"[email protected]"
] | |
b836ab3184c86a5580190a268a1f5b5241677048
|
ef54d37f8a3303013ca7469871a320d303957ed7
|
/robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/F110_Port_Monitoring/port_monitor_support_module.py
|
a6deffdae867986fcbebc06b5c2da63aca16ddb9
|
[] |
no_license
|
richa92/Jenkin_Regression_Testing
|
d18badfcf16bda682dfe7bcbbd66f54a9a27a58d
|
24a74926170cbdfafa47e972644e2fe5b627d8ff
|
refs/heads/master
| 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
'''
This module provides helper functions for Port Monitoring
'''
def set_networkuri_lig(data_variable, get_output):
'''
    Build the network URIs from the network names and form the
LIG body
'''
temp = data_variable
for i in range(len(temp['uplinkSets'])):
for j in range(len(temp['uplinkSets'][i]['networkUris'])):
for x in get_output['members']:
if temp['uplinkSets'][i]['networkUris'][j] == x['name']:
temp['uplinkSets'][i]['networkUris'][j] = x['uri']
return temp
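# Editor's note: minimal illustration of the mapping above; the names and URIs
# below are made up.
if __name__ == '__main__':
    lig_body = {'uplinkSets': [{'networkUris': ['net_A', 'net_B']}]}
    enet_output = {'members': [{'name': 'net_A', 'uri': '/rest/ethernet-networks/1'},
                               {'name': 'net_B', 'uri': '/rest/ethernet-networks/2'}]}
    print(set_networkuri_lig(lig_body, enet_output))
    # each networkUris entry is replaced by the URI of the member with the same name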
|
[
"[email protected]"
] | |
ebe07b6c084e6824573cbad59b09aeeccd77287e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03130/s063741533.py
|
8e8ac571e89e7cbb1f076333c2fcb83f461a3bff
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,190 |
py
|
def examA():
N, K = LI()
if (N+1)//2>=K:
print("YES")
else:
print("NO")
return
def examB():
d = [0]*4
for _ in range(3):
a, b = LI()
a -=1; b -=1
d[a] +=1
d[b] +=1
for i in d:
if i>=3 or i==0:
print("NO")
return
print("YES")
return
def examC():
ans = 0
print(ans)
return
def examD():
ans = 0
print(ans)
return
def examE():
ans = 0
print(ans)
return
def examF():
ans = 0
print(ans)
return
import sys,copy,bisect,itertools,heapq,math
from heapq import heappop,heappush,heapify
from collections import Counter,defaultdict,deque
def I(): return int(sys.stdin.readline())
def LI(): return list(map(int,sys.stdin.readline().split()))
def LFI(): return list(map(float,sys.stdin.readline().split()))
def LSI(): return list(map(str,sys.stdin.readline().split()))
def LS(): return sys.stdin.readline().split()
def SI(): return sys.stdin.readline().strip()
global mod,mod2,inf,alphabet
mod = 10**9 + 7
mod2 = 998244353
inf = 10**18
alphabet = [chr(ord('a') + i) for i in range(26)]
if __name__ == '__main__':
examB()
"""
"""
|
[
"[email protected]"
] | |
2b2602042f1ed0d95c722a129a06ec21856cab22
|
cc90d98a64693ca4542c999b5d2241b60eb33aac
|
/Problem62-3.py
|
e12955f5179fe407cd38b8141c64e187aee5cac8
|
[] |
no_license
|
Nan-Do/eulerproject
|
1f63b23a4d4e344c8525238b2333920e733b03c9
|
d33033d6af10d1aca8f7db9bcf187ef8f6005040
|
refs/heads/master
| 2021-01-10T15:51:39.594159 | 2016-04-14T05:41:16 | 2016-04-14T05:41:16 | 48,170,330 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 619 |
py
|
from itertools import count, permutations
from math import ceil
def gen_cubes():
for n in count(start=1):
yield n ** 3
def is_cube(n):
v = ceil(pow(n, (1/3.)))
return (v ** 3) == n
def check_cube(number, limit):
n_str = str(number)
count = 0
repeated = set()
for n in set(permutations(n_str)):
        if n[0] == '0' or n in repeated:
            continue
        repeated.add(n)
        if is_cube(int(''.join(n))):
count += 1
if count == limit:
return True
return False
for n in gen_cubes():
if check_cube(n, 4):
        print(n)
break
|
[
"[email protected]"
] | |
18122f8ba0ea425a2f59eac84a1b4f2b379a77d3
|
5204b7b60f1780e2af1bd785beed4145f8d38d83
|
/python/Gohan/core/__init__.py
|
123b3426cb27438160bffca3d7edf46567d62acd
|
[] |
no_license
|
sdss/mangadesign
|
e5001c30db25e6efe9a439359e0e67fd9b5266e4
|
6852432aeb682b19d46eff22f8cf57bbac272b7e
|
refs/heads/master
| 2021-06-08T16:42:33.819938 | 2020-08-24T14:58:13 | 2020-08-24T14:58:13 | 89,097,774 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 38 |
py
|
from .configuration import get_config
|
[
"[email protected]"
] | |
175cd537ba734aea16b54646d227d7f043eae53f
|
3027a838581e2b0778bd6ae40f9a6c72017b3b0d
|
/loss.py
|
84c014beac4352f30db99c0f34f0a9b4f0f3262b
|
[] |
no_license
|
arthur-qiu/robust
|
2617adf3be8ea24592990e66b35123d02b0db045
|
3f40b45a740a1d3f2ba81a18e2cb510fe613d616
|
refs/heads/master
| 2020-12-04T12:08:52.665675 | 2020-02-26T10:37:34 | 2020-02-26T10:37:34 | 231,758,355 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,164 |
py
|
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
def Entropy(input_):
bs = input_.size(0)
epsilon = 1e-5
entropy = -input_ * torch.log(input_ + epsilon)
entropy = torch.sum(entropy, dim=1)
return entropy
def grl_hook(coeff):
def fun1(grad):
return -coeff*grad.clone()
return fun1
def CDAN(input_list, ad_net, entropy=None, coeff=None, random_layer=None):
softmax_output = input_list[1].detach()
feature = input_list[0]
if random_layer is None:
op_out = torch.bmm(softmax_output.unsqueeze(2), feature.unsqueeze(1))
ad_out = ad_net(op_out.view(-1, softmax_output.size(1) * feature.size(1)))
else:
random_out = random_layer.forward([feature, softmax_output])
ad_out = ad_net(random_out.view(-1, random_out.size(1)))
batch_size = softmax_output.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
# if ad_out.shape[0] != 128 or dc_target.shape[0] != 128:
# print(ad_out.shape)
# print(dc_target.shape)
# print(softmax_output.shape)
# print(feature.shape)
if entropy is not None:
entropy.register_hook(grl_hook(coeff))
entropy = 1.0+torch.exp(-entropy)
source_mask = torch.ones_like(entropy)
source_mask[feature.size(0)//2:] = 0
source_weight = entropy*source_mask
target_mask = torch.ones_like(entropy)
target_mask[0:feature.size(0)//2] = 0
target_weight = entropy*target_mask
weight = source_weight / torch.sum(source_weight).detach().item() + \
target_weight / torch.sum(target_weight).detach().item()
return torch.sum(weight.view(-1, 1) * nn.BCELoss(reduction='none')(ad_out, dc_target)) / torch.sum(weight).detach().item()
else:
return nn.BCELoss()(ad_out, dc_target)
def DANN(features, ad_net):
ad_out = ad_net(features)
batch_size = ad_out.size(0) // 2
dc_target = torch.from_numpy(np.array([[1]] * batch_size + [[0]] * batch_size)).float().cuda()
return nn.BCELoss()(ad_out, dc_target)
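# Editor's note: illustrative call pattern with made-up shapes; CDAN/DANN build
# their domain labels on the GPU, so this sketch only runs when CUDA is available.
if __name__ == '__main__' and torch.cuda.is_available():
    feats = torch.randn(8, 256).cuda()          # first half source, second half target
    softmax_out = F.softmax(torch.randn(8, 10), dim=1).cuda()
    ad_net = nn.Sequential(nn.Linear(256 * 10, 1), nn.Sigmoid()).cuda()  # toy domain discriminator
    print(CDAN([feats, softmax_out], ad_net).item())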
|
[
"Arthur"
] |
Arthur
|
d7feedf3e0f9eec8c5f371d5bd23732533460493
|
5babecf71b6b3c3295219b59bd96e348e1cfaf80
|
/singleylinkedlist.py
|
c5bb94f61e1e34fc34faa6ab4bc6d013e5858183
|
[] |
no_license
|
jefinagilbert/dataStructures
|
46697a8c1ec5cdb943a1e95e887f6343a85f648b
|
04773fc0dff7d18078f3960b0993ce8ab7918a19
|
refs/heads/main
| 2023-06-12T02:06:24.345124 | 2021-07-10T18:05:44 | 2021-07-10T18:05:44 | 384,760,735 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,771 |
py
|
class node:
def __init__(self,data):
self.data = data
self.next = None
class linkedlist:
def __init__(self):
self.head = None
def printlinkedlist(self):
temp = self.head
print (temp)
while (temp):
print (temp.data,end=" -> ")
temp = temp.next
def append(self, new_data):
new_node = node(new_data)
if self.head is None:
self.head = new_node
return
last = self.head
while (last.next):
last = last.next
last.next = new_node
def push(self, new_data):
new_node = node(new_data)
new_node.next = self.head
self.head = new_node
def insertAfter(self, prev_node, new_data):
if prev_node is None:
print("The given previous node not inLinkedList.")
return
new_node = node(new_data)
new_node.next = prev_node.next
prev_node.next = new_node
def deletenode(self,key):
temp = self.head
if (temp is not None):
if key == temp.data:
self.head = temp.next
temp = None
while (temp is not None):
if temp.data == key:
break
prev = temp
temp = temp.next
if (temp is None):
return
prev.next = temp.next
temp = None
def deletelist(self):
temp = self.head # we can also use Only self.head = None
while (temp):
next = temp.next
del temp.data
temp = next
self.head = None
def deletenodeposition(self,position):
temp = self.head
if (self.head is None):
return
if position == 0:
self.head = temp.next
temp = None
return
for i in range(position - 1):
temp = temp.next
if (temp is None):
break
if (temp is None):
return
if (temp.next is None):
return
next = temp.next.next
temp.next = None
temp.next = next
if __name__ == "__main__":
llist = linkedlist()
while True:
print()
print("------ NOTES ------")
print()
print("1. Append Value")
print()
print("2. Push Value")
print()
print("3. Insert After")
print()
print("4. Display Node")
print()
print("5. Delete Node by data")
print()
print("6. Delete Node by Position")
print()
print("7. Delete Linked list")
print()
print("8. Exit")
i = int(input("Enter the Number: "))
if i == 1:
k = int(input("enter value to append : "))
llist.append(k)
print()
print(k," Appended Successfully")
elif i == 2:
k = int(input("enter value to push : "))
llist.push(k)
elif i == 3:
k = int(input("enter value to add after : "))
llist.insertAfter(llist.head.next,k)
elif i == 4:
llist.printlinkedlist()
elif i == 5:
k = int(input("enter value to deletenode : "))
llist.deletenode(k)
elif i == 6:
k = int(input("enter position to Delete : "))
llist.deletenodeposition(k)
elif i == 7:
llist.deletelist()
elif i == 8:
break
else:
print("Enter Valid Number")
|
[
"[email protected]"
] | |
271813ce9df854023fe3b6d50c40601bd44a2d32
|
b80059648afab4474e567ec1035d63d060d9b3a6
|
/src/analyze.py
|
e18911d2f1160107000f7ce93c5532bf18c7c900
|
[
"MIT"
] |
permissive
|
SteemData/classify.steemdata.com
|
8b34d7ae9e666b9dfe9930c82dc347650356fb94
|
507d2d537a502701dd6e28c9581c132942084b7a
|
refs/heads/master
| 2021-03-19T05:57:34.360839 | 2017-11-09T22:30:59 | 2017-11-09T22:30:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 526 |
py
|
import boto3
from easydict import EasyDict as ed
config = ed(
region_name = 'us-west-2',
s3_bucket_name = 'steem-hackaton-input'
)
rkg = boto3.client('rekognition', region_name=config.region_name)
def nsfw(img: bytes):
response = rkg.detect_moderation_labels(
Image={'Bytes': img},
)
return response['ModerationLabels']
def labels(img: bytes):
response = rkg.detect_labels(
Image={'Bytes': img},
MaxLabels=100,
MinConfidence=80,
)
return response['Labels']
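# Editor's note: illustrative invocation; assumes AWS credentials with
# Rekognition access and that 'photo.jpg' (a placeholder path) exists.
if __name__ == '__main__':
    with open('photo.jpg', 'rb') as f:
        img_bytes = f.read()
    print(labels(img_bytes))
    print(nsfw(img_bytes))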
|
[
"[email protected]"
] | |
bfa64414e10648e405e89258f858138cfe2bcc91
|
f4e21b9a042577400689e83a7ae11c0eee13cecf
|
/gneiss/regression/tests/test_transformer.py
|
3f7aa1cd6eebc62d528cecdf3407afee1faff1f6
|
[] |
no_license
|
ebolyen/gneiss
|
8facaaffe9904c8641f418fdd1461c1ae447e593
|
bb47be8805bf887afcc40b72365b062aa74ff823
|
refs/heads/master
| 2022-12-21T21:08:09.162341 | 2017-04-21T01:30:10 | 2017-04-21T01:30:10 | 88,930,099 | 0 | 0 | null | 2017-04-21T02:20:16 | 2017-04-21T02:20:16 | null |
UTF-8
|
Python
| false | false | 2,346 |
py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, gneiss development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
from gneiss.regression._format import (LinearRegressionFormat_g,
LinearMixedEffectsFormat_g)
from qiime2.plugin.testing import TestPluginBase
from gneiss.regression._ols import OLSModel
from gneiss.regression._mixedlm import LMEModel
import pandas.util.testing as pdt
class TestLinearRegressionTransformers(TestPluginBase):
package = "gneiss.regression.tests"
def test_ols_model_to_regression_format(self):
filepath = self.get_data_path('ols.pickle')
transformer = self.get_transformer(OLSModel, LinearRegressionFormat_g)
input = OLSModel.read_pickle(filepath)
obs = transformer(input)
obs = OLSModel.read_pickle(str(obs))
pdt.assert_frame_equal(input.pvalues, obs.pvalues)
def test_regression_format_to_ols_model(self):
filename = 'ols.pickle'
input, obs = self.transform_format(LinearRegressionFormat_g, OLSModel,
filename)
exp = OLSModel.read_pickle(str(input))
pdt.assert_frame_equal(exp.pvalues, obs.pvalues)
class TestLinearMixedEffectsTransformers(TestPluginBase):
package = "gneiss.regression.tests"
def test_lme_model_to_regression_format(self):
filepath = self.get_data_path('lme.pickle')
transformer = self.get_transformer(LMEModel,
LinearMixedEffectsFormat_g)
input = LMEModel.read_pickle(filepath)
obs = transformer(input)
obs = LMEModel.read_pickle(str(obs))
pdt.assert_frame_equal(input.pvalues, obs.pvalues)
def test_regression_format_to_lme_model(self):
filename = 'lme.pickle'
input, obs = self.transform_format(LinearMixedEffectsFormat_g,
LMEModel, filename)
exp = LMEModel.read_pickle(str(input))
pdt.assert_frame_equal(exp.pvalues, obs.pvalues)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
0d37df26911f7aa45fd992907792f711b760b1d3
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/built-in/cv/classification/Gluon_ResNet50_v1d_for_PyTorch/timm/models/layers/involution.py
|
97e83500b1f997b67fbd369776d069d277ac3bdb
|
[
"Apache-2.0",
"MIT",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 2,501 |
py
|
# Copyright [yyyy] [name of copyright owner]
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" PyTorch Involution Layer
Official impl: https://github.com/d-li14/involution/blob/main/cls/mmcls/models/utils/involution_naive.py
Paper: `Involution: Inverting the Inherence of Convolution for Visual Recognition` - https://arxiv.org/abs/2103.06255
"""
import torch.nn as nn
from .conv_bn_act import ConvBnAct
from .create_conv2d import create_conv2d
class Involution(nn.Module):
def __init__(
self,
channels,
kernel_size=3,
stride=1,
group_size=16,
rd_ratio=4,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(Involution, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.channels = channels
self.group_size = group_size
self.groups = self.channels // self.group_size
self.conv1 = ConvBnAct(
in_channels=channels,
out_channels=channels // rd_ratio,
kernel_size=1,
norm_layer=norm_layer,
act_layer=act_layer)
self.conv2 = self.conv = create_conv2d(
in_channels=channels // rd_ratio,
out_channels=kernel_size**2 * self.groups,
kernel_size=1,
stride=1)
self.avgpool = nn.AvgPool2d(stride, stride) if stride == 2 else nn.Identity()
self.unfold = nn.Unfold(kernel_size, 1, (kernel_size-1)//2, stride)
def forward(self, x):
weight = self.conv2(self.conv1(self.avgpool(x)))
B, C, H, W = weight.shape
KK = int(self.kernel_size ** 2)
weight = weight.view(B, self.groups, KK, H, W).unsqueeze(2)
out = self.unfold(x).view(B, self.groups, self.group_size, KK, H, W)
out = (weight * out).sum(dim=3).view(B, self.channels, H, W)
return out
|
[
"[email protected]"
] | |
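A minimal smoke test clarifies the tensor shapes the Involution layer above produces. This is only a sketch: it assumes PyTorch and the module's timm helpers (ConvBnAct, create_conv2d) are importable, and the sizes are arbitrary apart from channels being divisible by group_size.
import torch

# Sketch only: instantiate the layer defined above and run one forward pass.
layer = Involution(channels=64, kernel_size=3, stride=1, group_size=16)
x = torch.randn(2, 64, 32, 32)   # (B, C, H, W); C must be divisible by group_size
y = layer(x)                     # per-pixel kernels generated by conv1 then conv2
print(y.shape)                   # torch.Size([2, 64, 32, 32]); stride=1 preserves H and W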
a5ae575a5d08b866c988d7daff8b8357e695454b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03148/s977826358.py
|
4dcf39217ab9acca14bc5415bf0a46880be55e2c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,165 |
py
|
import sys
readline = sys.stdin.readline
MOD = 10 ** 9 + 7
INF = float('INF')
sys.setrecursionlimit(10 ** 5)
import heapq
class PriorityQueue:
class Reverse:
def __init__(self, val):
self.val = val
def __lt__(self, other):
return self.val > other.val
def __repr__(self):
return repr(self.val)
def __init__(self, x=None, desc=False):
if not x:
x = []
if desc:
for i in range(len(x)):
x[i] = self.Reverse(x[i])
self._desc = desc
self._container = x
heapq.heapify(self._container)
@property
def is_empty(self):
return not self._container
def pop(self):
if self._desc:
return heapq.heappop(self._container).val
else:
return heapq.heappop(self._container)
def push(self, item):
if self._desc:
heapq.heappush(self._container, self.Reverse(item))
else:
heapq.heappush(self._container, item)
def top(self):
if self._desc:
return self._container[0].val
else:
return self._container[0]
def sum(self):
return sum(self._container)
def __len__(self):
return len(self._container)
def main():
from operator import itemgetter
n, k = list(map(int, readline().split()))
sushis_original = [list(map(int, readline().split())) for _ in range(n)]
sushis_original.sort(key=itemgetter(1))
sushis_original.sort(key=itemgetter(0))
new_type = 0
prev = -1
for i in range(n):
cur = sushis_original[i][0]
if prev != cur:
new_type += 1
if cur > new_type:
sushis_original[i][0] = new_type
prev = cur
type_num = sushis_original[-1][0]
sushis = {i: [] for i in range(1, type_num + 1)}
for sushi_type, val in sushis_original:
sushis[sushi_type].append(val)
eat_sushis = PriorityQueue()
rem_sushis = PriorityQueue(desc=True)
rem = k
if rem >= type_num:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
rem -= type_num
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
for _ in range(rem):
eat_sushis.push(rem_sushis.pop())
else:
for i in range(1, type_num + 1):
eat_sushis.push(sushis[i].pop())
discard_num = type_num - k
for _ in range(discard_num):
eat_sushis.pop()
for vals in sushis.values():
for val in vals:
rem_sushis.push(val)
cur_type = min(k, type_num)
sub_next = 2 * cur_type - 1
while rem_sushis:
cur_val = eat_sushis.top()
new_val = rem_sushis.top()
diff = new_val - cur_val
if diff >= sub_next:
eat_sushis.pop()
eat_sushis.push(rem_sushis.pop())
cur_type -= 1
sub_next = 2 * cur_type - 1
else:
break
ans = cur_type ** 2 + eat_sushis.sum()
print(ans)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
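The PriorityQueue wrapper above is independent of the contest input, so a short sketch with made-up values shows its min-heap default and its max-heap behaviour when desc=True.
# Sketch with arbitrary values; only the PriorityQueue class above is assumed.
min_q = PriorityQueue([5, 1, 3])             # default: smallest element first
max_q = PriorityQueue([5, 1, 3], desc=True)  # Reverse wrapper: largest element first
min_q.push(2)
print(min_q.pop(), min_q.top())    # 1 2
print(max_q.pop(), max_q.top())    # 5 3
print(len(min_q), max_q.is_empty)  # 3 False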
6849d2ec9790e047a0e8c225fd2ba62a5fdcdd56
|
3dcfa2980db0770af9b4355b0d5a5e5ef2313c50
|
/corpus/exceptions.py
|
173d06acf3cf5bd7493cf25b0c6f41cbc47cf052
|
[
"CC-BY-NC-2.0",
"CC-BY-NC-4.0",
"Apache-2.0"
] |
permissive
|
looselycoupled/partisan-discourse
|
5f4638d984fb54a5add870d4cb59445811c412a1
|
8579924094c92e25e21ce59a26232269cf6b34bc
|
refs/heads/master
| 2020-03-27T06:35:49.627350 | 2018-08-25T18:05:44 | 2018-08-25T18:05:44 | 146,118,079 | 0 | 0 |
Apache-2.0
| 2018-08-25T18:02:38 | 2018-08-25T18:02:38 | null |
UTF-8
|
Python
| false | false | 977 |
py
|
# corpus.exceptions
# Custom exceptions for corpus handling.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Mon Jul 18 09:57:26 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: exceptions.py [63935bc] [email protected] $
"""
Custom exceptions for corpus handling.
"""
##########################################################################
## Corpus Exceptions
##########################################################################
class CorpusException(Exception):
"""
Something went wrong in the corpus app.
"""
pass
class BitlyAPIError(CorpusException):
"""
Something went wrong trying to shorten a url.
"""
pass
class FetchError(CorpusException):
"""
Something went wrong trying to fetch a url using requests.
"""
pass
class NLTKError(CorpusException):
"""
Something went wrong when using NLTK.
"""
pass
|
[
"[email protected]"
] | |
e6abd68ee8891fd33558e2c79ba7b61eeb2bd4b5
|
b35aea9f4411f5dc7942392d78dc31bb76c7ec73
|
/djangoProject/services/migrations/0004_alter_sub_head.py
|
142021ffe6400739d7b16a3cef8ad1787f49ebfd
|
[] |
no_license
|
ashkanusefi/rondshow
|
1079b81704fff55a1d54fa8dee2712ab61e92f4a
|
7e5a80fcc6e326b8b1737a54fb53becc4195e475
|
refs/heads/master
| 2023-09-01T18:45:33.170465 | 2021-09-18T11:24:52 | 2021-09-18T11:24:52 | 407,820,565 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 501 |
py
|
# Generated by Django 3.2.5 on 2021-07-13 10:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('services', '0003_alter_sub_head'),
]
operations = [
migrations.AlterField(
model_name='sub',
name='head',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='services.service', verbose_name='نام سردسته'),
),
]
|
[
"[email protected]"
] | |
ad2cf41e2835382146d98e659562119b73b04000
|
10425fd2f058afb9dd823929314bfede0a4eb513
|
/flaskaiohttp_websocket/app.py
|
67a5c4b45adbbf95106696a4225673ec018d7153
|
[] |
no_license
|
gaozhidf/flask_websocket
|
60883571a469a7c283e3da9a8fbf81d752f82f71
|
41653f71b7fd6d07d3592a22a11f29e795ba45d8
|
refs/heads/master
| 2022-11-29T04:31:08.953294 | 2017-08-12T08:53:24 | 2017-08-12T08:53:24 | 49,828,952 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,060 |
py
|
import json
import asyncio
import aiohttp
from flask import Flask, current_app
from flask_aiohttp import AioHTTP
from flask_aiohttp.helper import async, websocket
app = Flask(__name__)
aio = AioHTTP(app)
@app.route('/echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
@app.route('/api')
@async
def api():
response = yield from aiohttp.request(
'GET', 'https://graph.facebook.com/zuck')
data = yield from response.read()
return data
@app.route('/param/<arg>')
@websocket
def param(arg):
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(arg)
elif msg.tp == aiohttp.MsgType.close:
print('websocket connection closed')
break
elif msg.tp == aiohttp.MsgType.error:
print('ws connection closed with exception %s',
aio.ws.exception())
break
@app.route('/late')
@async
def late():
yield from asyncio.sleep(3)
data = {
'data': 'done'
}
data = json.dumps(data)
current_app.response_class(data, headers={
'Content-Type': 'application/json',
}, status=201)
return 'done'
@app.route('/plain')
def plain():
return 'Hello, World!'
@app.route('/stream')
def stream():
def f():
yield 'Hello, '
yield 'World!'
return app.response_class(f())
@app.route('/async-stream')
@async
def async_stream():
def f():
yield 'I\'m '
yield 'sorry!'
yield from asyncio.sleep(1)
return app.response_class(f())
def main():
aio.run(app, debug=True)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
5e56cc78a121e1d1b486e6bc4a3fc7a7cd46762b
|
2b54b1fb1540ab73d6c83cae3acd5fdd58bdead5
|
/Platinum_clusters_Project/final_images/Pt13_O2_DFTsorted/Pt7_3O2_TiO2_101surface_zorderimageplotbasedondepth1.py
|
4f1b59995eb2ca8b9bc48aa8fecadced15bc2251
|
[] |
no_license
|
sivachiriki/GOFEE_Pt_V_supported
|
5787d44294262870075f35f2d31c096021b7ce20
|
6bd700dac1f3e7c58394b758d75246ac6e07eade
|
refs/heads/master
| 2022-04-08T11:38:13.038455 | 2020-03-09T10:48:31 | 2020-03-09T10:48:31 | 226,359,812 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,837 |
py
|
from __future__ import division
import matplotlib
#matplotlib.use('Agg') # Can also use 'tkagg' or 'webagg'
#from plot_neb_tio2 import *
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from ase.io import read, write
from ase.visualize import view
import matplotlib.patches as mpatches
from ase.data.colors import jmol_colors
from pylab import *
from ase.data import covalent_radii as aradii
from matplotlib.patches import Circle
from math import atan2,pi
import matplotlib.gridspec as gridspec
matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'
matplotlib.rc('font',**{'family':'sans-serif',
'sans-serif':['Helvetica'],
'size':14})
matplotlib.rc('text',usetex=True)
matplotlib.rcParams['text.latex.unicode']=True
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{bm}']
#matplotlib.rcParams['text.latex.preamble']=['\usepackage{xfrac}']
matplotlib.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['ps.usedistiller'] = 'xpdf'
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
def plot_atoms(ax, atoms, xyz, acols, alp, z):
ecols = [[0, 0, 0] for col in atoms]
indices = range(len(atoms))
for ia in indices:
acol = acols[ia]
ecol = ecols[ia]
# if atoms[ia].symbol == 'Ti':
# arad = aradii[atoms[ia].number] #* 0.9 * 0.5
# else:
arad = aradii[atoms[ia].number] #* 0.9
apos = atoms[ia].position
eps = arad
circ = Circle([apos[xyz[0]], apos[xyz[1]]],
fc = acol,
ec = ecol,
radius = arad,
lw = 0.5,
alpha = alp[ia],
zorder = 1 - apos[1]/1000
)
ax.add_patch(circ)
def plot_conf(ax, atoms, rot=False):
colors = np.array([jmol_colors[atom.number] for atom in atoms])
for i, atom in enumerate(atoms):
if (atom.number ==78):
colors[i] =[0.1, 0.6, 0.6]
if (atom.number ==8 and i >= 135 and i <=149 ):
colors[i] =[0.1, 0.2, 0.5]
alp = [None] * colors.shape[0]
for i,a in enumerate(atoms):
if a.symbol == 'Al' or a.symbol == 'O':
if a.position[2] < 9.7:
alp[i] = 0.3
if rot:
atoms.rotate('x',pi/2)
plot_atoms(ax, atoms, [0,2,1], colors, alp, z=-1)
data=read('Pt13_O2_Al2O3_KRRfund9l_DFTrelaxedsorted.traj@:')
#for j in range(len(data)):
image = data[8] #* (2,2,1)
for i,a in enumerate(image):
# if a.position[1] >11.100:
# image.positions[i,1] =0.000
if i ==48 or i==3 :
image.positions[i,1] =image.positions[i,1]-12.429
image.positions[i,0] =image.positions[i,0]+7.176
# if i==148:
# image.positions[i,0] =image.positions[i,0]-14.352
#write('newimage.traj',image)
plt.figure(figsize=(6.0,7.0))
gs = gridspec.GridSpec(2, 1,
height_ratios=[7.77,9.090])
cell = image.get_cell()
# 0 0
ax = plt.subplot(gs[0, 0])
img = image.copy()
plot_conf(ax, img)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([10.0, 20.0])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
# 0 1
ax = plt.subplot(gs[1, 0])
image = image * (2,2,1)
write('newimage.traj',image)
cell = image.get_cell()
img = image.copy()
plot_conf(ax, img, rot=True)
ax.set_xlim([-2.8, 11.85])
ax.set_ylim([0.80, 12.50])
ax.set_yticks([])
ax.set_xticks([])
ax.set(aspect=1)
gs.update(wspace=0.00,hspace=0.00)
plt.tight_layout()
name ='Pt13_O2_Al2O3_0001_DFTopt_g{}'.format(8)
savefig(name,bbox_inches='tight')
show()
|
[
"[email protected]"
] | |
3dc1ada00afa0327b9f7befb7328a8b103da9b07
|
d7719b7b537a1484228d377d265ebeea8f76748a
|
/Robot Operating System (ROS)/ROS/Starter Code and Resources/ROS_Minimal_Projects/rospy_minimal_packages/modular_pub_sub/setup.py
|
f8270eaaf22d1aa84841ec98b72a858c7271b601
|
[
"BSD-2-Clause"
] |
permissive
|
OpenSUTD/coding-notes
|
9724ac9d35f585ff3140a43c8a10fcdcbaedfc79
|
f9b8c778f8494d0bf47bd816cfd77b88e78a5a1f
|
refs/heads/master
| 2022-07-16T22:17:21.930385 | 2019-07-03T10:11:30 | 2019-07-03T10:11:30 | 166,292,417 | 7 | 5 |
BSD-2-Clause
| 2019-07-03T11:20:16 | 2019-01-17T20:30:47 |
C++
|
UTF-8
|
Python
| false | false | 275 |
py
|
#!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
setup_args = generate_distutils_setup(
packages = ['modular_pub_sub'],
package_dir = {'': 'src'},
install_requires = ['']
)
setup(**setup_args)
|
[
"[email protected]"
] | |
c1f7a9a1be4dab922be13ef7a1acc032b4a8ac3b
|
c53adaefd26bda80cd7ce22ea0cae877e364c2c5
|
/lib/network.py
|
52ca885512d164a52547c7b33578191591958ef4
|
[
"MIT"
] |
permissive
|
Morningstarpayments/electrum
|
a59ac886b23ef5abcc940a5d51ea1ebef78908b7
|
a035bac8577d53a805dc111eb3ba89f48e96fe34
|
refs/heads/master
| 2021-07-06T08:59:44.926421 | 2017-10-03T00:21:10 | 2017-10-03T00:21:10 | 105,598,670 | 0 | 0 | null | 2017-10-03T00:19:17 | 2017-10-03T00:19:17 | null |
UTF-8
|
Python
| false | false | 33,849 |
py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import Queue
import os
import errno
import sys
import random
import select
import traceback
from collections import defaultdict, deque
from threading import Lock
import socks
import socket
import json
import util
import bitcoin
from bitcoin import *
from interface import Connection, Interface
from blockchain import Blockchain
from version import ELECTRUM_VERSION, PROTOCOL_VERSION
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
#There is a schedule to move the default list to e-x (electrumx) by Jan 2018
#Schedule is as follows:
#move ~3/4 to e-x by 1.4.17
#then gradually switch remaining nodes to e-x nodes
DEFAULT_SERVERS = {
'192.241.172.143': DEFAULT_PORTS,
}
def set_testnet():
global DEFAULT_PORTS, DEFAULT_SERVERS
DEFAULT_PORTS = {'t':'51001', 's':'51002'}
DEFAULT_SERVERS = {
}
def set_nolnet():
global DEFAULT_PORTS, DEFAULT_SERVERS
DEFAULT_PORTS = {'t':'52001', 's':'52002'}
DEFAULT_SERVERS = {
}
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
""" parse servers list into dict format"""
from version import PROTOCOL_VERSION
servers = {}
for item in result:
host = item[1]
out = {}
version = None
pruning_level = '-'
if len(item) > 2:
for v in item[2]:
if re.match("[st]\d*", v):
protocol, port = v[0], v[1:]
if port == '': port = DEFAULT_PORTS[protocol]
out[protocol] = port
elif re.match("v(.?)+", v):
version = v[1:]
elif re.match("p\d*", v):
pruning_level = v[1:]
if pruning_level == '': pruning_level = '0'
try:
is_recent = cmp(util.normalize_version(version), util.normalize_version(PROTOCOL_VERSION)) >= 0
except Exception:
is_recent = False
if out and is_recent:
out['pruning'] = pruning_level
servers[host] = out
return servers
def filter_protocol(hostmap, protocol = 's'):
'''Filters the hostmap for those implementing protocol.
The result is a list in serialized form.'''
eligible = []
for host, portmap in hostmap.items():
port = portmap.get(protocol)
if port:
eligible.append(serialize_server(host, port, protocol))
return eligible
def pick_random_server(hostmap = None, protocol = 's', exclude_set = set()):
if hostmap is None:
hostmap = DEFAULT_SERVERS
eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
return random.choice(eligible) if eligible else None
from simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
if type(p) != dict:
return None
return ':'.join([p.get('mode'),p.get('host'), p.get('port'), p.get('user'), p.get('password')])
def deserialize_proxy(s):
if type(s) not in [str, unicode]:
return None
if s.lower() == 'none':
return None
proxy = { "mode":"socks5", "host":"localhost" }
args = s.split(':')
n = 0
if proxy_modes.count(args[n]) == 1:
proxy["mode"] = args[n]
n += 1
if len(args) > n:
proxy["host"] = args[n]
n += 1
if len(args) > n:
proxy["port"] = args[n]
n += 1
else:
proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
if len(args) > n:
proxy["user"] = args[n]
n += 1
if len(args) > n:
proxy["password"] = args[n]
return proxy
def deserialize_server(server_str):
host, port, protocol = str(server_str).split(':')
assert protocol in 'st'
int(port) # Throw if cannot be converted to int
return host, port, protocol
def serialize_server(host, port, protocol):
return str(':'.join([host, port, protocol]))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
if config is None:
config = {} # Do not use mutables as default values!
util.DaemonThread.__init__(self)
self.config = SimpleConfig(config) if type(config) == type({}) else config
self.num_server = 8 if not self.config.get('oneserver') else 0
self.blockchain = Blockchain(self.config, self)
# A deque of interface header requests, processed left-to-right
self.bc_requests = deque()
# Server for addresses and transactions
self.default_server = self.config.get('server')
# Sanitize default server
try:
deserialize_server(self.default_server)
except:
self.default_server = None
if not self.default_server:
self.default_server = pick_random_server()
self.lock = Lock()
self.pending_sends = []
self.message_id = 0
self.debug = False
self.irc_servers = {} # returned by interface (list from irc)
self.recent_servers = self.read_recent_servers()
self.banner = ''
self.donation_address = ''
self.relay_fee = None
self.heights = {}
self.merkle_roots = {}
self.utxo_roots = {}
# callbacks passed with subscriptions
self.subscriptions = defaultdict(list)
self.sub_cache = {}
# callbacks set by the GUI
self.callbacks = defaultdict(list)
dir_path = os.path.join( self.config.path, 'certs')
if not os.path.exists(dir_path):
os.mkdir(dir_path)
# subscriptions and requests
self.subscribed_addresses = set()
# Requests from client we've not seen a response to
self.unanswered_requests = {}
# retry times
self.server_retry_time = time.time()
self.nodes_retry_time = time.time()
# kick off the network. interface is the main server we are currently
# communicating with. interfaces is the set of servers we are connecting
# to or have an ongoing connection with
self.interface = None
self.interfaces = {}
self.auto_connect = self.config.get('auto_connect', True)
self.connecting = set()
self.socket_queue = Queue.Queue()
self.start_network(deserialize_server(self.default_server)[2],
deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
with self.lock:
for event in events:
self.callbacks[event].append(callback)
def unregister_callback(self, callback):
with self.lock:
for callbacks in self.callbacks.values():
if callback in callbacks:
callbacks.remove(callback)
def trigger_callback(self, event, *args):
with self.lock:
callbacks = self.callbacks[event][:]
[callback(event, *args) for callback in callbacks]
def read_recent_servers(self):
if not self.config.path:
return []
path = os.path.join(self.config.path, "recent_servers")
try:
with open(path, "r") as f:
data = f.read()
return json.loads(data)
except:
return []
def save_recent_servers(self):
if not self.config.path:
return
path = os.path.join(self.config.path, "recent_servers")
s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
try:
with open(path, "w") as f:
f.write(s)
except:
pass
def get_server_height(self):
return self.heights.get(self.default_server, 0)
def server_is_lagging(self):
sh = self.get_server_height()
if not sh:
self.print_error('no height for main interface')
return True
lh = self.get_local_height()
result = (lh - sh) > 1
if result:
self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
return result
def set_status(self, status):
self.connection_status = status
self.notify('status')
def is_connected(self):
return self.interface is not None
def is_connecting(self):
return self.connection_status == 'connecting'
def is_up_to_date(self):
return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
# If you want to queue a request on any interface it must go
# through this function so message ids are properly tracked
if interface is None:
interface = self.interface
message_id = self.message_id
self.message_id += 1
if self.debug:
self.print_error(interface.host, "-->", method, params, message_id)
interface.queue_request(method, params, message_id)
return message_id
def send_subscriptions(self):
self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
self.sub_cache.clear()
# Resend unanswered requests
requests = self.unanswered_requests.values()
self.unanswered_requests = {}
for request in requests:
message_id = self.queue_request(request[0], request[1])
self.unanswered_requests[message_id] = request
self.queue_request('server.banner', [])
self.queue_request('server.donation_address', [])
self.queue_request('server.peers.subscribe', [])
for i in bitcoin.FEE_TARGETS:
self.queue_request('blockchain.estimatefee', [i])
self.queue_request('blockchain.relayfee', [])
for addr in self.subscribed_addresses:
self.queue_request('blockchain.address.subscribe', [addr])
def get_status_value(self, key):
if key == 'status':
value = self.connection_status
elif key == 'banner':
value = self.banner
elif key == 'fee':
value = self.config.fee_estimates
elif key == 'updated':
value = (self.get_local_height(), self.get_server_height())
elif key == 'servers':
value = self.get_servers()
elif key == 'interfaces':
value = self.get_interfaces()
return value
def notify(self, key):
if key in ['status', 'updated']:
self.trigger_callback(key)
else:
self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
host, port, protocol = deserialize_server(self.default_server)
return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
if self.is_connected():
return self.donation_address
def get_interfaces(self):
'''The interfaces that are in connected state'''
return self.interfaces.keys()
def get_servers(self):
if self.irc_servers:
out = self.irc_servers.copy()
out.update(DEFAULT_SERVERS)
else:
out = DEFAULT_SERVERS
for s in self.recent_servers:
try:
host, port, protocol = deserialize_server(s)
except:
continue
if host not in out:
out[host] = { protocol:port }
return out
def start_interface(self, server):
if (not server in self.interfaces and not server in self.connecting):
if server == self.default_server:
self.print_error("connecting to %s as new interface" % server)
self.set_status('connecting')
self.connecting.add(server)
c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
exclude_set = self.disconnected_servers.union(set(self.interfaces))
server = pick_random_server(self.get_servers(), self.protocol, exclude_set)
if server:
self.start_interface(server)
def start_interfaces(self):
self.start_interface(self.default_server)
for i in range(self.num_server - 1):
self.start_random_interface()
def set_proxy(self, proxy):
self.proxy = proxy
if proxy:
self.print_error('setting proxy', proxy)
proxy_mode = proxy_modes.index(proxy["mode"]) + 1
socks.setdefaultproxy(proxy_mode,
proxy["host"],
int(proxy["port"]),
# socks.py seems to want either None or a non-empty string
username=(proxy.get("user", "") or None),
password=(proxy.get("password", "") or None))
socket.socket = socks.socksocket
# prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
else:
socket.socket = socket._socketobject
socket.getaddrinfo = socket._socket.getaddrinfo
def start_network(self, protocol, proxy):
assert not self.interface and not self.interfaces
assert not self.connecting and self.socket_queue.empty()
self.print_error('starting network')
self.disconnected_servers = set([])
self.protocol = protocol
self.set_proxy(proxy)
self.start_interfaces()
def stop_network(self):
self.print_error("stopping network")
for interface in self.interfaces.values():
self.close_interface(interface)
if self.interface:
self.close_interface(self.interface)
assert self.interface is None
assert not self.interfaces
self.connecting = set()
# Get a new queue - no old pending connections thanks!
self.socket_queue = Queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
proxy_str = serialize_proxy(proxy)
server = serialize_server(host, port, protocol)
self.config.set_key('auto_connect', auto_connect, False)
self.config.set_key("proxy", proxy_str, False)
self.config.set_key("server", server, True)
# abort if changes were not allowed by config
if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
return
self.auto_connect = auto_connect
if self.proxy != proxy or self.protocol != protocol:
# Restart the network defaulting to the given server
self.stop_network()
self.default_server = server
self.start_network(protocol, proxy)
elif self.default_server != server:
self.switch_to_interface(server)
else:
self.switch_lagging_interface()
def switch_to_random_interface(self):
'''Switch to a random connected server other than the current one'''
servers = self.get_interfaces() # Those in connected state
if self.default_server in servers:
servers.remove(self.default_server)
if servers:
self.switch_to_interface(random.choice(servers))
def switch_lagging_interface(self, suggestion = None):
'''If auto_connect and lagging, switch interface'''
if self.server_is_lagging() and self.auto_connect:
if suggestion and self.protocol == deserialize_server(suggestion)[2]:
self.switch_to_interface(suggestion)
else:
self.switch_to_random_interface()
def switch_to_interface(self, server):
'''Switch to server as our interface. If no connection exists nor
being opened, start a thread to connect. The actual switch will
happen on receipt of the connection notification. Do nothing
if server already is our interface.'''
self.default_server = server
if server not in self.interfaces:
self.interface = None
self.start_interface(server)
return
i = self.interfaces[server]
if self.interface != i:
self.print_error("switching to", server)
# stop any current interface in order to terminate subscriptions
self.close_interface(self.interface)
self.interface = i
self.send_subscriptions()
self.set_status('connected')
self.notify('updated')
def close_interface(self, interface):
if interface:
if interface.server in self.interfaces:
self.interfaces.pop(interface.server)
if interface.server == self.default_server:
self.interface = None
interface.close()
def add_recent_server(self, server):
# list is ordered
if server in self.recent_servers:
self.recent_servers.remove(server)
self.recent_servers.insert(0, server)
self.recent_servers = self.recent_servers[0:20]
self.save_recent_servers()
def process_response(self, interface, response, callbacks):
if self.debug:
self.print_error("<--", response)
error = response.get('error')
result = response.get('result')
method = response.get('method')
params = response.get('params')
# We handle some responses; return the rest to the client.
if method == 'server.version':
interface.server_version = result
elif method == 'blockchain.headers.subscribe':
if error is None:
self.on_header(interface, result)
elif method == 'server.peers.subscribe':
if error is None:
self.irc_servers = parse_servers(result)
self.notify('servers')
elif method == 'server.banner':
if error is None:
self.banner = result
self.notify('banner')
elif method == 'server.donation_address':
if error is None:
self.donation_address = result
elif method == 'blockchain.estimatefee':
if error is None and result > 0:
i = params[0]
fee = int(result*COIN)
self.config.fee_estimates[i] = fee
self.print_error("fee_estimates[%d]" % i, fee)
self.notify('fee')
elif method == 'blockchain.relayfee':
if error is None:
self.relay_fee = int(result * COIN)
self.print_error("relayfee", self.relay_fee)
elif method == 'blockchain.block.get_chunk':
self.on_get_chunk(interface, response)
elif method == 'blockchain.block.get_header':
self.on_get_header(interface, response)
for callback in callbacks:
callback(response)
def get_index(self, method, params):
""" hashable index for subscriptions and cache"""
return str(method) + (':' + str(params[0]) if params else '')
def process_responses(self, interface):
responses = interface.get_responses()
for request, response in responses:
if request:
method, params, message_id = request
k = self.get_index(method, params)
# client requests go through self.send() with a
# callback, are only sent to the current interface,
# and are placed in the unanswered_requests dictionary
client_req = self.unanswered_requests.pop(message_id, None)
if client_req:
assert interface == self.interface
callbacks = [client_req[2]]
else:
callbacks = []
# Copy the request method and params to the response
response['method'] = method
response['params'] = params
# Only once we've received a response to an addr subscription
# add it to the list; avoids double-sends on reconnection
if method == 'blockchain.address.subscribe':
self.subscribed_addresses.add(params[0])
else:
if not response: # Closed remotely / misbehaving
self.connection_down(interface.server)
break
# Rewrite response shape to match subscription request response
method = response.get('method')
params = response.get('params')
k = self.get_index(method, params)
if method == 'blockchain.headers.subscribe':
response['result'] = params[0]
response['params'] = []
elif method == 'blockchain.address.subscribe':
response['params'] = [params[0]] # addr
response['result'] = params[1]
callbacks = self.subscriptions.get(k, [])
# update cache if it's a subscription
if method.endswith('.subscribe'):
self.sub_cache[k] = response
# Response is now in canonical form
self.process_response(interface, response, callbacks)
def send(self, messages, callback):
'''Messages is a list of (method, params) tuples'''
with self.lock:
self.pending_sends.append((messages, callback))
def process_pending_sends(self):
# Requests needs connectivity. If we don't have an interface,
# we cannot process them.
if not self.interface:
return
with self.lock:
sends = self.pending_sends
self.pending_sends = []
for messages, callback in sends:
for method, params in messages:
r = None
if method.endswith('.subscribe'):
k = self.get_index(method, params)
# add callback to list
l = self.subscriptions.get(k, [])
if callback not in l:
l.append(callback)
self.subscriptions[k] = l
# check cached response for subscriptions
r = self.sub_cache.get(k)
if r is not None:
util.print_error("cache hit", k)
callback(r)
else:
message_id = self.queue_request(method, params)
self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
'''Unsubscribe a callback to free object references to enable GC.'''
# Note: we can't unsubscribe from the server, so if we receive
# subsequent notifications process_response() will emit a harmless
# "received unexpected notification" warning
with self.lock:
for v in self.subscriptions.values():
if callback in v:
v.remove(callback)
def connection_down(self, server):
'''A connection to server either went down, or was never made.
We distinguish by whether it is in self.interfaces.'''
self.disconnected_servers.add(server)
if server == self.default_server:
self.set_status('disconnected')
if server in self.interfaces:
self.close_interface(self.interfaces[server])
self.heights.pop(server, None)
self.notify('interfaces')
def new_interface(self, server, socket):
self.add_recent_server(server)
self.interfaces[server] = interface = Interface(server, socket)
self.queue_request('blockchain.headers.subscribe', [], interface)
if server == self.default_server:
self.switch_to_interface(server)
self.notify('interfaces')
def maintain_sockets(self):
'''Socket maintenance.'''
# Responses to connection attempts?
while not self.socket_queue.empty():
server, socket = self.socket_queue.get()
if server in self.connecting:
self.connecting.remove(server)
if socket:
self.new_interface(server, socket)
else:
self.connection_down(server)
# Send pings and shut down stale interfaces
for interface in self.interfaces.values():
if interface.has_timed_out():
self.connection_down(interface.server)
elif interface.ping_required():
params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
self.queue_request('server.version', params, interface)
now = time.time()
# nodes
if len(self.interfaces) + len(self.connecting) < self.num_server:
self.start_random_interface()
if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
self.print_error('network: retrying connections')
self.disconnected_servers = set([])
self.nodes_retry_time = now
# main interface
if not self.is_connected():
if self.auto_connect:
if not self.is_connecting():
self.switch_to_random_interface()
else:
if self.default_server in self.disconnected_servers:
if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
self.disconnected_servers.remove(self.default_server)
self.server_retry_time = now
else:
self.switch_to_interface(self.default_server)
def request_chunk(self, interface, data, idx):
interface.print_error("requesting chunk %d" % idx)
self.queue_request('blockchain.block.get_chunk', [idx], interface)
data['chunk_idx'] = idx
data['req_time'] = time.time()
def on_get_chunk(self, interface, response):
'''Handle receiving a chunk of block headers'''
if self.bc_requests:
req_if, data = self.bc_requests[0]
req_idx = data.get('chunk_idx')
# Ignore unsolicited chunks
if req_if == interface and req_idx == response['params'][0]:
idx = self.blockchain.connect_chunk(req_idx, response['result'])
# If not finished, get the next chunk
if idx < 0 or self.get_local_height() >= data['if_height']:
self.bc_requests.popleft()
self.notify('updated')
else:
self.request_chunk(interface, data, idx)
def request_header(self, interface, data, height):
interface.print_error("requesting header %d" % height)
self.queue_request('blockchain.block.get_header', [height], interface)
data['header_height'] = height
data['req_time'] = time.time()
if not 'chain' in data:
data['chain'] = []
def on_get_header(self, interface, response):
'''Handle receiving a single block header'''
if self.blockchain.downloading_headers:
return
if self.bc_requests:
req_if, data = self.bc_requests[0]
req_height = data.get('header_height', -1)
# Ignore unsolicited headers
if req_if == interface and req_height == response['params'][0]:
next_height = self.blockchain.connect_header(data['chain'], response['result'])
# If not finished, get the next header
if next_height in [True, False]:
self.bc_requests.popleft()
if next_height:
self.switch_lagging_interface(interface.server)
self.notify('updated')
else:
interface.print_error("header didn't connect, dismissing interface")
interface.close()
else:
self.request_header(interface, data, next_height)
def bc_request_headers(self, interface, data):
'''Send a request for the next header, or a chunk of them,
if necessary.
'''
if self.blockchain.downloading_headers:
return False
local_height, if_height = self.get_local_height(), data['if_height']
if if_height <= local_height:
return False
elif if_height > local_height + 50:
self.request_chunk(interface, data, (local_height + 1) / 2016)
else:
self.request_header(interface, data, if_height)
return True
def handle_bc_requests(self):
'''Work through each interface that has notified us of a new header.
Send it requests if it is ahead of our blockchain object.
'''
while self.bc_requests:
interface, data = self.bc_requests.popleft()
# If the connection was lost move on
if not interface in self.interfaces.values():
continue
req_time = data.get('req_time')
if not req_time:
# No requests sent yet. This interface has a new height.
# Request headers if it is ahead of our blockchain
if not self.bc_request_headers(interface, data):
continue
elif time.time() - req_time > 20:
interface.print_error("blockchain request timed out")
self.connection_down(interface.server)
continue
# Put updated request state back at head of deque
self.bc_requests.appendleft((interface, data))
break
def wait_on_sockets(self):
# Python docs say Windows doesn't like empty selects.
# Sleep to prevent busy looping
if not self.interfaces:
time.sleep(0.1)
return
rin = [i for i in self.interfaces.values()]
win = [i for i in self.interfaces.values() if i.num_requests()]
try:
rout, wout, xout = select.select(rin, win, [], 0.1)
except socket.error as (code, msg):
if code == errno.EINTR:
return
raise
assert not xout
for interface in wout:
interface.send_requests()
for interface in rout:
self.process_responses(interface)
def run(self):
self.blockchain.init()
while self.is_running():
self.maintain_sockets()
self.wait_on_sockets()
self.handle_bc_requests()
self.run_jobs() # Synchronizer and Verifier
self.process_pending_sends()
self.stop_network()
self.on_stop()
def on_header(self, i, header):
height = header.get('block_height')
if not height:
return
self.heights[i.server] = height
self.merkle_roots[i.server] = header.get('merkle_root')
self.utxo_roots[i.server] = header.get('utxo_root')
# Queue this interface's height for asynchronous catch-up
self.bc_requests.append((i, {'if_height': height}))
if i == self.interface:
self.switch_lagging_interface()
self.notify('updated')
def get_header(self, tx_height):
return self.blockchain.read_header(tx_height)
def get_local_height(self):
return self.blockchain.height()
def synchronous_get(self, request, timeout=30):
queue = Queue.Queue()
self.send([request], queue.put)
try:
r = queue.get(True, timeout)
except Queue.Empty:
raise BaseException('Server did not answer')
if r.get('error'):
raise BaseException(r.get('error'))
return r.get('result')
def broadcast(self, tx, timeout=30):
tx_hash = tx.txid()
try:
out = self.synchronous_get(('blockchain.transaction.broadcast', [str(tx)]), timeout)
except BaseException as e:
return False, "error: " + str(e)
if out != tx_hash:
return False, "error: " + out
return True, out
|
[
"[email protected]"
] | |
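The server and proxy (de)serialisation helpers above are pure functions, so their string formats can be illustrated without a running network. The sketch below is Python 2 like the module itself, and the hostname is a placeholder rather than a real server.
# Sketch (placeholder hostname; assumes these helpers are importable from lib/network.py).
server = serialize_server('server.example', '50002', 's')
# server == 'server.example:50002:s'
host, port, protocol = deserialize_server(server)
# ('server.example', '50002', 's'); raises if the port is not an integer

proxy = deserialize_proxy('socks5:localhost:9050')
# {'mode': 'socks5', 'host': 'localhost', 'port': '9050'}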
47a76feedcb553cbdfed82f9a1800c56ebd293fe
|
a79519c0032ce5630710bef726886e226369ad1a
|
/bikeshed/refs/__init__.py
|
23d416226c7293aba42df293012d75de99a3883b
|
[
"CC0-1.0"
] |
permissive
|
ylafon/bikeshed
|
9598aee18ac5a9f5fc3094bc847f30f768d4ee07
|
5471ba1f1c21f5b90546909fa30891f0148a750c
|
refs/heads/main
| 2022-07-31T23:42:22.592066 | 2022-07-27T09:17:23 | 2022-07-27T09:17:28 | 311,297,114 | 0 | 0 |
CC0-1.0
| 2020-11-09T10:08:03 | 2020-11-09T10:08:02 | null |
UTF-8
|
Python
| false | false | 100 |
py
|
from .manager import ReferenceManager
from .source import RefSource
from .wrapper import RefWrapper
|
[
"[email protected]"
] | |
4f86503e9967ceaa9cb417c55dc2f4ceb6706b4e
|
a8595670862f9475050abf73399afe34faaa083b
|
/wb_api/urls.py
|
f2d8bb64d1f13e51f8b884542b8f6d173580934d
|
[] |
no_license
|
skiboorg/wb_api
|
14392df2da8569212c0ba05e527b46fcd9c30338
|
c45d8c340a45958bc6d380c2a431d13d0f1ebf37
|
refs/heads/master
| 2022-12-06T03:52:58.621255 | 2020-08-26T19:25:24 | 2020-08-26T19:25:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls')),
path('api/v1/', include('api.urls')),
path('ckeditor/', include('ckeditor_uploader.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
[
"[email protected]"
] | |
48217e37537211a9bfd2b671886a356efa6a7a8d
|
b047a32da65cc0fafe249160f57765ddbe80176e
|
/apps/support/templatetags/forum.py
|
20f193d4a3ba8af04bb65ae72f774e72f6431c3b
|
[
"MIT"
] |
permissive
|
fengjinqi/website
|
0568c679e7964bdbb637831a4f1dec7c5e8d767c
|
340eecec49ce0d66cd6a491d0ae9ad23ec9f841b
|
refs/heads/master
| 2023-02-18T10:12:52.158471 | 2023-02-16T08:40:13 | 2023-02-16T08:40:13 | 160,755,540 | 367 | 114 |
MIT
| 2022-12-08T01:42:40 | 2018-12-07T01:44:34 |
Python
|
UTF-8
|
Python
| false | false | 1,226 |
py
|
from datetime import datetime
from django.contrib.sessions.models import Session
from django.core.cache import cache
#from apps.forum.views import get_online_count
from apps.support.models import QQ
from apps.forum.models import Forum
from django import template
from django.utils.timezone import now, timedelta
from apps.user.models import User
register = template.Library()
@register.inclusion_tag('pc/aside/forum_side.html')
def get_fourm():
qq = QQ.objects.all()
fourm = Forum.objects.filter(hidden=False,category__name='求职招聘')[:10]
sessions = Session.objects.filter(expire_date__gte=datetime.now()).count()
#print(get_online_count())
user = User.objects.count()
cur_date = now().date() + timedelta(days=0)
days = Forum.objects.filter(hidden=False,add_time__gte=cur_date).count()
count = Forum.objects.filter(hidden=False).count()
Hottest = Forum.objects.filter(hidden=False).order_by('-click_nums')[:10]
return {'fourm':fourm,'qq':qq,'user':user,'sessions':sessions,'days':days,'count':count,'Hottest':Hottest}
@register.filter
def get_count(x):
return x.filter(hidden=False).count()
@register.filter
def get_counts(x):
return x.filter(is_show=True).count()
|
[
"[email protected]"
] | |
4a98ed5c35bc602fa3cf5522d5f85ab078bbcb92
|
009f9761767f93a2986f8b5a2ba61bac6f33dc59
|
/examples/intro/8/example.py
|
0fc03bff85a41b5054ceffe6fd6a14aa7ee9e136
|
[
"MIT"
] |
permissive
|
crasiak/ginkgo
|
8798d28d16732cc1c5b18f8e5df0d17f8866f999
|
2592de2c8acfe6e62f33e7ac1f79cc5613567908
|
refs/heads/master
| 2021-01-16T20:07:45.269511 | 2012-04-06T07:26:46 | 2012-04-06T07:26:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,345 |
py
|
import gevent
from gevent.pywsgi import WSGIServer
from gevent.server import StreamServer
from gevent.socket import create_connection
from ginkgo.core import Service
class TcpClient(Service):
def __init__(self, address, handler):
self.address = address
self.handler = handler
def do_start(self):
self.spawn(self.handler, self.address)
class MyApplication(Service):
def __init__(self, config):
self.add_service(WSGIServer(('127.0.0.1', config['http_port']), self.handle_http))
self.add_service(StreamServer(('127.0.0.1', config['tcp_port']), self.handle_tcp))
self.add_service(TcpClient(config['connect_address'], self.client_connect))
def client_connect(self, address):
sockfile = create_connection(address).makefile()
while True:
            line = sockfile.readline()  # returns '' (empty string) at EOF
            if line:
print "<<<", line,
else:
break
def handle_tcp(self, socket, address):
print 'new tcp connection!'
while True:
socket.send('hello\n')
gevent.sleep(1)
def handle_http(self, env, start_response):
start_response('200 OK', [('Content-Type', 'text/html')])
print 'new http request!'
return ["hello world"]
|
[
"[email protected]"
] | |
df8f34cabb73d62adf171eeae4b7788cbdfdf467
|
6db8aba817161dc573f16cde185f4a1c02c753e0
|
/XOR.py
|
0c07e2db55b43284b1baa73054a707acc0f131b8
|
[] |
no_license
|
Prakashchater/Leetcode-array-easy-questions
|
456153a13397c895acae6550dad8f1b1851ff854
|
7c5d40f9d68dbf61f4a61a33d9b54f769473b057
|
refs/heads/main
| 2023-06-19T14:01:52.483440 | 2021-07-22T19:44:40 | 2021-07-22T19:44:40 | 354,926,404 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 155 |
py
|
xor = 0
out = []
arr = [10,11,1,2,3]
n = arr[len(arr)-1]
for i in range(len(arr)-1):
out.append(arr[i]^arr[i+1])
out.append(arr[len(arr)-1])
print(out)
|
[
"[email protected]"
] | |
ac7f9a29b1083ff198275d312b01fecad5ed4fc3
|
039446516b188899e2fd21a41087ad20f06d666b
|
/src/server_code/game_logic.py
|
9934157d88fc8ff515d2b4ff8f39a8e5a2c028ab
|
[] |
no_license
|
Tyorat/TicTacToe
|
62ebbeee168568a0c590a5923127a3ac529ba134
|
0281f3c7b293256e2c73ac1530786308cea385af
|
refs/heads/main
| 2023-06-24T22:37:18.744669 | 2021-07-12T23:11:30 | 2021-07-12T23:11:30 | 377,521,858 | 0 | 0 | null | 2021-07-12T23:11:31 | 2021-06-16T14:20:21 |
Python
|
UTF-8
|
Python
| false | false | 2,001 |
py
|
import secrets
WIN_COMBO = ((1, 2, 3),
(4, 5, 6),
(7, 8, 9),
(1, 4, 7),
(2, 5, 8),
(3, 6, 9),
(1, 5, 9),
(7, 5, 3)
)
SWITCH_TURN = {"x": "o", "o": "x"}
class WrongMove(Exception): pass
class Game:
def __init__(self, player_one, player_two):
self.__field = list(range(1, 10))
self.switch_turn = {player_one: player_two, player_two: player_one}
self.__turn = None
self.choose_random_player()
turn = property()
@turn.getter
def turn(self):
return self.__turn
def check_end_game(self):
self.show_field()
for combo in WIN_COMBO:
if self.__field[combo[0] - 1] == self.__field[combo[1] - 1] == self.__field[combo[2] - 1]:
return{"endgame": True, "message": f"win {self.__field[combo[0] - 1]}"}
if not any(list(map(lambda x: str(x).isdigit(), self.__field))):
return {"endgame": True, "message": "draw"}
else:
return {"endgame": False, "message": "wait for opponent"}
def check_turn(self, index, who):
if self.__field[index - 1] != index:
raise WrongMove("The cell is already occupied")
elif who not in self.switch_turn.keys():
raise WrongMove("Wrong player")
elif who != self.__turn:
raise WrongMove("Not your turn")
self.__field[index - 1] = who
res = self.check_end_game()
self.__turn = self.switch_turn[self.__turn]
return res
def choose_random_player(self):
print(self.switch_turn.keys())
self.__turn = secrets.choice(list(self.switch_turn.keys()))
def show_field(self):
print("************")
print("|" + "|".join(map(str, self.__field[:3])) + "|")
print("|" + "|".join(map(str, self.__field[3:6])) + "|")
print("|" + "|".join(map(str, self.__field[6:])) + "|")
print("************")
|
[
"[email protected]"
] | |
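The Game class above can be driven directly in a REPL; the tokens below are arbitrary strings standing in for the two connected players.
# Sketch: one move on a fresh board (player tokens are arbitrary here).
game = Game('x', 'o')
current = game.turn                   # secrets.choice picked 'x' or 'o'
state = game.check_turn(5, current)   # claim the centre cell; also prints the board
print(state)                          # {'endgame': False, 'message': 'wait for opponent'}
print(game.turn != current)           # True: the turn switched to the other player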
c8df91551a44a334be5a4cd94d26220e4cc54a07
|
84e661d5d293ec0c544fedab7727767f01e7ddcf
|
/target/migrations/0011_auto_20201101_1147.py
|
9ca50acaa24915c420cbf92d31354e33dd6cdc7f
|
[
"BSD-3-Clause"
] |
permissive
|
groundupnews/gu
|
ea6734fcb9509efc407061e35724dfe8ba056044
|
4c036e79fd735dcb1e5a4f15322cdf87dc015a42
|
refs/heads/master
| 2023-08-31T13:13:47.178119 | 2023-08-18T11:42:58 | 2023-08-18T11:42:58 | 48,944,009 | 21 | 23 |
BSD-3-Clause
| 2023-09-14T13:06:42 | 2016-01-03T11:56:48 |
JavaScript
|
UTF-8
|
Python
| false | false | 882 |
py
|
# Generated by Django 3.0.10 on 2020-11-01 09:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('target', '0010_auto_20201101_1127'),
]
operations = [
migrations.AddField(
model_name='target',
name='tweet_notified_published',
field=models.BooleanField(default=False, editable=False),
),
migrations.AddField(
model_name='target',
name='tweet_notified_solution',
field=models.BooleanField(default=False, editable=False),
),
migrations.AlterField(
model_name='target',
name='publish_solution_after',
field=models.SmallIntegerField(default=24, help_text='Make solution available after this many hours', null=True, verbose_name='solution time'),
),
]
|
[
"[email protected]"
] | |
4f91db0c179ce5761c01cb598130a254ba26e16f
|
1e8d9a251b36f2e80a851d541321522ce4e812fa
|
/igmspec/scripts/plot_igmspec.py
|
c3e68e4c64d8f0b820e6ba8daf9c1882d20f2c30
|
[] |
no_license
|
Waelthus/igmspec
|
c81f31360e9528bd150a991ad96b8b4ca94962d0
|
8fdbb622360ca5263711f75d0f7571ed955f6e28
|
refs/heads/master
| 2020-12-25T21:55:50.001007 | 2016-07-17T21:17:08 | 2016-07-17T21:17:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,154 |
py
|
#!/usr/bin/env python
""" Loads and plots a requested spectrum
"""
import pdb
def parser(options=None):
import argparse
parser = argparse.ArgumentParser(description='plot_igmspec script v0.2')
parser.add_argument("coord", type=str, help="Coordinates, e.g. J081240+320808")
parser.add_argument("--tol", default=5., type=float, help="Maximum offset in arcsec [default=5.]")
parser.add_argument("--meta", default=True, help="Show meta data? [default: True]", action="store_true")
parser.add_argument("-s", "--survey", help="Name of Survey to use")
parser.add_argument("--select", default=0, type=int, help="Index of spectrum to plot (when multiple exist)")
parser.add_argument("--mplot", default=False, help="Use simple matplotlib plot [default: False]")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args, unit_test=False, **kwargs):
""" Run
"""
from astropy import units as u
from igmspec.igmspec import IgmSpec
from igmspec import cat_utils as icu
# init
igmsp = IgmSpec(**kwargs)
# Grab
all_spec, all_meta = igmsp.spec_from_coord(args.coord, tol=args.tol*u.arcsec, isurvey=args.survey)
# Outcome
if len(all_meta) == 0:
print("No source found, try another location or a larger tolerance.")
return
elif len(all_meta) == 1: # One survey hit
spec = all_spec[0]
        meta = all_meta[0]
else: # More than 1 survey
idx = 0
spec = all_spec[idx]
meta = all_meta[idx]
surveys = [meta.meta['survey'] for meta in all_meta]
print("Source located in more than one survey")
print("Using survey {:s}. You can choose from this list {}".format(surveys[idx], surveys))
#print("Choose another survey from this list (as you wish): {}".format(surveys))
if args.meta:
igmsp.idb.show_meta()
# Load spectra
spec.select = args.select
if unit_test:
return
# Show [may transition to xspec]
if args.mplot:
spec.plot()
else:
spec.plot(xspec=True)
|
[
"[email protected]"
] | |
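Because parser() accepts an argv-style list, the script above can also be driven programmatically. The call below is a sketch that reuses the coordinate from the help text; it still needs a local igmspec database to succeed.
# Sketch only; requires the igmspec data files to be installed locally.
args = parser(['J081240+320808', '--tol', '10', '--select', '0'])
main(args, unit_test=True)   # unit_test=True returns before any plotting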
c37ee59446bb0ce436a571312628fce8121b88a8
|
1905e5cece92e6cdc68dac3ebb0ee1d05bef35c8
|
/fuzzinator/tracker/base.py
|
84de8815bb65c1977f3e473d326d6eed14335b6c
|
[
"BSD-3-Clause"
] |
permissive
|
darrynza/fuzzinator
|
e876131d18c5f0a17ae8bdc2fb10f18d8b0084fb
|
e1642f75ba8c1b555f7e2557b52f43df4d17b89f
|
refs/heads/master
| 2020-04-29T04:39:36.453300 | 2018-12-06T17:15:35 | 2019-01-08T23:36:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 492 |
py
|
# Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
class BaseTracker(object):
@property
def logged_in(self):
return True
def find_issue(self, issue):
pass
def report_issue(self, **kwargs):
pass
def issue_url(self, issue):
return ''
|
[
"[email protected]"
] | |
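BaseTracker above only fixes the hook names (logged_in, find_issue, report_issue, issue_url); a concrete tracker fills them in. The subclass below is entirely hypothetical and just illustrates the shape of an implementation.
# Hypothetical in-memory tracker; class name and URL scheme are made up.
class InMemoryTracker(BaseTracker):

    def __init__(self):
        self._issues = {}

    def find_issue(self, issue):
        return self._issues.get(issue['id'])

    def report_issue(self, **kwargs):
        self._issues[kwargs['id']] = kwargs
        return kwargs

    def issue_url(self, issue):
        return 'memory://{id}'.format(**issue)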
007ee5b8228f8de322122564f9f44722684aa6cf
|
87a9706379670da62739b3c1fbbdd75edb5107b8
|
/Django/django_celery_test/django_celery_test/celeryconfig.py
|
cda60c880a754a04fa66f089f2be5f3d6b7e1eed
|
[] |
no_license
|
zxbzxb180/python_work
|
ba21ab74f842e0d560a8bb192bb8a874d356b9e1
|
6406024e011aa06d1bda78d97cfecc47f7f2058c
|
refs/heads/master
| 2022-12-12T23:53:36.887963 | 2020-03-04T07:20:29 | 2020-03-04T07:20:29 | 194,494,744 | 0 | 0 | null | 2022-11-22T03:54:47 | 2019-06-30T08:48:44 |
Python
|
UTF-8
|
Python
| false | false | 854 |
py
|
import djcelery
djcelery.setup_loader()
BROKER_BACKEND = 'redis'
BROKER_URL = 'redis://:6222580@localhost:6379/1'
CELERY_RESULT_BACKEND = 'redis://:6222580@localhost:6379/2'
CELERY_QUEUES = {
'beat_tasks': {
'exchange': 'beat_tasks',
'exchange_type': 'direct',
'binding_key': 'beat_tasks'
},
'work_queue': {
'exchange': 'work_queue',
'exchange_type': 'direct',
'binding_key': 'work_queue'
}
}
CELERY_DEFAULT_QUEUE = 'work_queue'
CELERY_IMPORTS = (
'course.tasks',
)
# Force execv in some cases to prevent deadlocks
CELERYD_FORCE_EXECV = True
# Number of concurrent worker processes
CELERYD_CONCURRENCY = 4
# Acknowledge tasks late so they can be retried if a worker dies
CELERY_ACKS_LATE = True
# Each worker handles at most 100 tasks, which helps guard against memory leaks
CELERYD_MAX_TASKS_PER_CHILD = 100
# Maximum execution time (seconds) for a single task
CELERYD_TASK_TIME_LIMIT = 12 * 30
|
[
"[email protected]"
] |
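Given the queue definitions above, a task from course.tasks can be routed explicitly at call time. The task name below is hypothetical; only the queue names and the default queue come from this config.
# Sketch; refresh_course_cache is a made-up task name inside the course.tasks module.
from course.tasks import refresh_course_cache

refresh_course_cache.apply_async(queue='beat_tasks')   # run on the beat_tasks queue
refresh_course_cache.delay()                           # uses CELERY_DEFAULT_QUEUE ('work_queue')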