blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3..616 chars) | content_id (string, 40 chars) | detected_licenses (sequence, 0..112 items) | license_type (2 classes) | repo_name (string, 5..115 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64, 4.92k..681M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3..10.2M) | extension (188 classes) | content (string, 3..10.2M chars) | authors (sequence, length 1) | author_id (string, 1..132 chars)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
734bf560f6432a6a310f7a443c030f24bb698856 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/358/usersdata/288/102525/submittedfiles/estatistica.py | 9fdd68752a0b6be1cd5aa630f88df85ada3bc87a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | # -*- coding: utf-8 -*-
def media(lista):
media=sum(lista)/len(lista)
return media
#Based on the function above, write the function to calculate the standard deviation of a list
def desvio_padrao(lista):
soma=0
for i in range (0,len(lista),1):
soma+=((media(lista)-lista[i])**2)
    desvio=(soma/(len(lista)-1))**0.5
return desvio
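# Worked example (added for illustration): for the list [1, 2, 3] the mean is 2,
# soma accumulates 1 + 0 + 1 = 2, and the sample standard deviation is (2/2)**0.5 = 1.0.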
#Finally, write the main program, which asks for the input and calls the functions created.
m=int(input("Digite a quantidade de colunas: "))
n=int(input("Digite a quantidade de linhas: "))
matriz=[]
for i in range (0,m,1):
linha=[]
for j in range (0,n,1):
linha.append(int(input("Digite o %d numero da matriz: "%(j+1))))
matriz.append(linha)
for i in range (0,len(matriz),1):
print (media(matriz[i]))
print ("%.2f"%(desvio_padrao(matriz[i]))) | [
"[email protected]"
] | |
6a8852a241aa4d0975a748a95537d55dd3466a75 | 5b0dcb0a1f578d04b0c195a30ff1a7ccfd29deaf | /tests/import/pkg9/mod2.py | f4b3e265fb51091766734e9cb7f4f9ca064b8b03 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | rk-exxec/micropython | c87ecd8743413c8a65e64fd82cd1910ccaed96c6 | d529c20674131b9ce36853b92784e901a1bc86f4 | refs/heads/master | 2023-08-18T01:18:34.511851 | 2023-08-09T03:19:18 | 2023-08-09T03:22:57 | 136,508,987 | 7 | 16 | MIT | 2023-01-13T20:48:50 | 2018-06-07T17:17:01 | C | UTF-8 | Python | false | false | 19 | py | from . import mod2
| [
"[email protected]"
] | |
2d527612149fb4de87f1e28e4faa947f02b7d21c | 407ca85cd6051a50884f38bb0514a6301f8e7101 | /Consolidated/POM/process_igd.py | 95e19baa709e2dfd1c4abcd641e5a4c6d49fe827 | [] | no_license | vivekaxl/MOLearner | 5ae4f40027b814ae5b20aaaeb255d6041505c0b9 | 236bf61e8ee1663eabcd73f355070022f908acfa | refs/heads/master | 2021-01-23T01:12:30.836318 | 2017-04-27T05:54:39 | 2017-04-27T05:54:39 | 85,847,238 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | from __future__ import division
import pickle
import os
from sk import rdivDemo
pickle_files = [f for f in os.listdir(".") if ".py" not in f]
content = pickle.load(open(pickle_files[0]))
problems = content.keys()
prob = {}
for problem in problems:
al2 = pickle.load(open('al2_POM.p'))
al = pickle.load(open('al_POM.p'))
mmre = pickle.load(open('mmre_POM.p'))
nsgaii = pickle.load(open('nsgaii_POM.p'))
rank = pickle.load(open('rank_POM.p'))
spea2 = pickle.load(open('spea2_POM.p'))
sway5 = pickle.load(open('SWAY5_POM.p'))
lists = list()
lists.append(['AL2'] + al2[problem]['igd'])
lists.append(['AL'] + al[problem]['igd'])
lists.append(['MMRE'] + mmre[problem]['igd'])
lists.append(['NSGAII'] + nsgaii[problem]['igd'])
lists.append(['Rank'] + rank[problem]['igd'])
lists.append(['SPEA2'] + spea2[problem]['igd'])
lists.append(['SWAY5'] + sway5[problem]['igd'])
rdivDemo( problem.replace('_', '\_'), "", lists, globalMinMax=False,
isLatex=True)
| [
"[email protected]"
] | |
161618add3f39c9fe876a8d7b56a02309bb09785 | 2c68f9156087d6d338373f9737fee1a014e4546b | /src/vmware/azext_vmware/vendored_sdks/models/tracked_resource_py3.py | 27620df2d70b14816bdf6d808642b7b81ad668c9 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | anpaz/azure-cli-extensions | 8b0d4071c49840da9883f13cb0fd1f4515246ee0 | 847fd487fe61e83f2a4163a9393edc9555267bc2 | refs/heads/master | 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 | MIT | 2021-01-28T10:31:07 | 2020-04-20T20:19:43 | Python | UTF-8 | Python | false | false | 1,447 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
| [
"[email protected]"
] | |
feecfc389496c8c1ebf9368e8be2da15764d4830 | 04741556cbaa4f98afea194bc0997723eb06fa49 | /Python/Group(), Groups() & Groupdict().py | 4be0ae3f083124a4cdca9efedf23be66f04f9837 | [] | no_license | maybeee18/HackerRank | e75f619654326e0deae83ee4fe818d20d606bdda | dce488916f643248ea86500e176322a3b9e3b671 | refs/heads/master | 2023-01-21T23:22:07.206264 | 2020-10-05T04:54:45 | 2020-10-05T04:54:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py |
import re
s = str(input())
m = re.search(r'([a-zA-Z\d])\1+', s)
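# Explanatory note (added): \1 is a backreference to the captured character, so the
# search finds the first alphanumeric character that occurs at least twice in a row;
# group(1) is that character, and -1 is printed when no such repetition exists.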
print(m.group(1) if m else -1)
| [
"[email protected]"
] | |
b15f982248a07bd81468603028e04993dab62e2c | 948f0a1ccee30084b5e6e9b1043bd1681d2ad38f | /app/1.2.py | 11b5df0c3846432a5436737d1486af287a9799af | [
"MIT"
] | permissive | filangelos/random-forest | 41454e934cf72cf1480cf5c001d569e629f578ac | 0fc7a4f74b1120f3e527e824abc1de1aa32f2b18 | refs/heads/master | 2021-09-09T13:53:30.982028 | 2018-03-16T18:38:36 | 2018-03-16T18:38:36 | 121,535,264 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | # EXECUTION TIME: 49s
# Python 3 ImportError
import sys
sys.path.append('.')
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# prettify plots
plt.rcParams['font.family'] = 'Times New Roman'
sns.set_style({"xtick.direction": "in", "ytick.direction": "in"})
b_sns, g_sns, r_sns, p_sns, y_sns, l_sns = sns.color_palette("muted")
import src as ya
from src.struct import SplitNodeParams
from src.struct import ForestParams
np.random.seed(0)
# fetch data
data_train, data_query = ya.data.getData('Toy_Spiral')
N, D = data_train.shape
###########################################################################
# Split functions Comparison and Sparsity
###########################################################################
# number of splits
numSplit = 10
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
for frac in [1.00, 0.50, 0.25, 0.10]:
# random dataset
idx = np.random.choice(range(N), int(N*frac), True)
# root node
root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
for kernel in kernels:
# reset seed
np.random.seed(0)
# get information gain
_ = ya.tree.splitNode(data_train,
root, SplitNodeParams(numSplit, kernel),
savefig_path='1.2/%s_%.2f' % (kernel, frac))
###########################################################################
# Kernel Complexity
###########################################################################
# number of experiments per kernel
M = 10
# execution time
runtime = pd.DataFrame(columns=kernels, index=range(M))
# memory
memory = pd.DataFrame(columns=kernels, index=range(M))
for kernel in kernels:
# repetitions
for j in range(M):
# start time
t0 = time.time()
_forest = ya.tree.growForest(data_train, ForestParams(
num_trees=10, max_depth=5, weak_learner=kernel
))
# end time
runtime.loc[j, kernel] = time.time() - t0
# object memory size
memory.loc[j, kernel] = sys.getsizeof(_forest)
# figure
fig, axes = plt.subplots(ncols=2, figsize=(12.0, 3.0))
# execution time
run = runtime.mean().values
axes[0].bar(range(len(runtime.columns)),
[run[i]*(1+0.15*i) for i in range(len(run))],
color=sns.color_palette("muted"))
axes[0].set_xticks(range(len(runtime.columns)))
axes[0].set_xticklabels(runtime.columns)
axes[0].set_title("Time Complexity of Weak Learners")
axes[0].set_xlabel("Weak Learner")
axes[0].set_ylabel("Training Time (s)")
# memory complexity
mem = memory.mean().values
axes[1].bar(range(len(memory.columns)),
[mem[i]*(1+0.1*i) for i in range(len(mem))],
color=sns.color_palette("muted"))
axes[1].set_xticks(range(len(memory.columns)))
axes[1].set_xticklabels(memory.columns)
axes[1].set_title("Memory Complexity of Weak Learners")
axes[1].set_xlabel("Weak Learner")
axes[1].set_ylabel("Memory Size (byte)")
fig.tight_layout()
fig.savefig('assets/1.2/complexity_kernel.pdf',
format='pdf',
dpi=300,
transparent=True,
bbox_inches='tight',
pad_inches=0.01)
###########################################################################
# `numSplit` vs weak-learners
###########################################################################
# random dataset
idx = np.random.choice(range(N), N, True)
# root node
root = ya.tree.Node(idx=idx, t=np.nan, dim=-2, prob=[])
# range of number of splits
numSplits = [1, 5, 10, 25, 50, 100, 1000]
# weak learners
kernels = ['axis-aligned', 'linear', 'quadratic', 'cubic']
IGS = pd.DataFrame(columns=kernels, index=numSplits)
for j, numSplit in enumerate(numSplits):
# weak-learners
for kernel in kernels:
# reset seed
np.random.seed(0)
# get information gain
_, _, _, ig = ya.tree.splitNode(data_train,
root,
SplitNodeParams(numSplit, kernel))
IGS.loc[numSplit, kernel] = ig
# table to be used for report
print('\n', IGS.to_latex(), '\n')
IGS.to_csv('assets/1.2/information_gain_vs_weak_learners.csv')
# we could also generate a qualitative comparison with a matrix
# of decision boundaries and IGs
# reference: Figure 4 from https://github.com/sagarpatel9410/mlcv/blob/master/CW1/report/mlcv.pdf
| [
"[email protected]"
] | |
45195cd9a511fdfd4e923e24cec6b203242b4440 | 5d5365a73e81ccf71c73b9d86eb070841f1e0001 | /backend/wallet/admin.py | bee6e39eb2e35074d6224625dcf762cc55d21246 | [] | no_license | crowdbotics-apps/yjjhffhg-22011 | 3c908901c5fa930df11d6af17471a39a7e3b1dd9 | c5649303aef6b69f515c4526df8b43ee82212c12 | refs/heads/master | 2023-01-12T02:01:16.200496 | 2020-10-27T15:36:39 | 2020-10-27T15:36:39 | 307,746,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | from django.contrib import admin
from .models import (
PaymentTransaction,
TaskerPaymentAccount,
TaskerWallet,
PaymentMethod,
CustomerWallet,
)
admin.site.register(TaskerWallet)
admin.site.register(PaymentMethod)
admin.site.register(TaskerPaymentAccount)
admin.site.register(CustomerWallet)
admin.site.register(PaymentTransaction)
# Register your models here.
| [
"[email protected]"
] | |
30e1a84c06ca00940832ccc37ecb9ec95c660bef | f1c0ce462b185f7b633acb04ee8a85fcda87c748 | /tests/ui/help/test_application_help.py | 937e44be50886b1b7c39936744091d0878f09a36 | [
"MIT"
] | permissive | cole/clikit | 056c5f388043a43971a633470122b291fb51d23f | bdb286672f93e1ff7df1d864fb0751476e034d57 | refs/heads/master | 2020-09-08T12:11:05.287042 | 2019-11-12T05:02:42 | 2020-02-22T22:19:49 | 221,129,623 | 1 | 1 | MIT | 2019-11-12T04:27:10 | 2019-11-12T04:27:09 | null | UTF-8 | Python | false | false | 5,056 | py | from clikit import ConsoleApplication
from clikit.api.args import Args
from clikit.api.args.format import ArgsFormat
from clikit.api.args.format import Option
from clikit.api.config import ApplicationConfig
from clikit.args import ArgvArgs
from clikit.ui.help import ApplicationHelp
def test_render(io):
config = ApplicationConfig("test-bin")
config.set_display_name("The Application")
config.add_argument(
"global-argument", description='Description of "global-argument"'
)
config.add_option("global-option", description='Description of "global-option"')
with config.command("command1") as c:
c.set_description('Description of "command1"')
with config.command("command2") as c:
c.set_description('Description of "command2"')
with config.command("longer-command3") as c:
c.set_description('Description of "longer-command3"')
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
The Application
USAGE
test-bin [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
--global-option Description of "global-option"
AVAILABLE COMMANDS
command1 Description of "command1"
command2 Description of "command2"
longer-command3 Description of "longer-command3"
"""
assert expected == io.fetch_output()
def test_sort_commands(io):
config = ApplicationConfig("test-bin")
config.set_display_name("The Application")
config.create_command("command3")
config.create_command("command1")
config.create_command("command2")
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
The Application
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
AVAILABLE COMMANDS
command1
command2
command3
"""
assert expected == io.fetch_output()
def test_render_version(io):
config = ApplicationConfig("test-bin", "1.2.3")
config.set_display_name("The Application")
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
The Application version 1.2.3
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
assert expected == io.fetch_output()
def test_render_default_display_name(io):
config = ApplicationConfig("test-bin")
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
Test Bin
USAGE
test-bin <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
assert expected == io.fetch_output()
def test_render_default_no_name(io):
config = ApplicationConfig()
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
Console Tool
USAGE
console <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
"""
assert expected == io.fetch_output()
def test_render_global_options_with_preferred_short_name(io):
config = ApplicationConfig()
config.add_option(
"global-option", "g", Option.PREFER_SHORT_NAME, 'Description of "global-option"'
)
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
Console Tool
USAGE
console [-g] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
-g (--global-option) Description of "global-option"
"""
assert expected == io.fetch_output()
def test_render_global_options_with_preferred_long_name(io):
config = ApplicationConfig()
config.add_option(
"global-option", "g", Option.PREFER_LONG_NAME, 'Description of "global-option"'
)
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
Console Tool
USAGE
console [--global-option] <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
GLOBAL OPTIONS
--global-option (-g) Description of "global-option"
"""
assert expected == io.fetch_output()
def test_render_description(io):
config = ApplicationConfig()
config.set_help("The help for {script_name}\n\nSecond paragraph")
app = ConsoleApplication(config)
help = ApplicationHelp(app)
help.render(io)
expected = """\
Console Tool
USAGE
console <command> [<arg1>] ... [<argN>]
ARGUMENTS
<command> The command to execute
<arg> The arguments of the command
DESCRIPTION
The help for console
Second paragraph
"""
assert expected == io.fetch_output()
| [
"[email protected]"
] | |
5ab2fec2b8f90755f0c2c41cd1c55b6a58f2d869 | ea02eb8c52ef66fe8399516dc0103b95ea1dd7c4 | /leo/lilac.py | 62a481a1484a09dabefd18f739923e60614fee7a | [] | no_license | y010204025/repo | 6c9d9601a14b8d003789bfe8266b1e10e9d41a49 | 074fef70cdccf3c62092a848e88bb27fbabea8d3 | refs/heads/master | 2020-03-23T03:38:57.191796 | 2018-07-15T14:51:38 | 2018-07-15T14:51:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,265 | py | #!/usr/bin/env python3
#
# This is a complex version of lilac.py for building
# a package from AUR.
#
# You can do something before/after building a package,
# including modify the 'pkgver' and 'md5sum' in PKBUILD.
#
# This is especially useful when a AUR package is
# out-of-date and you want to build a new one, or you
# want to build a package directly from sourceforge but
# using PKGBUILD from AUR.
#
# See also:
# [1] ruby-sass/lilac.py
# [2] aufs3-util-lily-git/lilac.py
# [3] octave-general/lilac.py
#
from lilaclib import *
build_prefix = 'extra-x86_64'
def pre_build():
aur_pre_build()
need_rebuild = False
for line in edit_file('PKGBUILD'):
# edit PKGBUILD
if line.strip().startswith("depends="):
words = line.split(" ")
words.insert(-1, "'python-setuptools'")
line = " ".join(words)
if line.strip().startswith("pkgver=5.1"):
need_rebuild = True
if need_rebuild and line.strip().startswith("pkgrel=1"):
line = "pkgrel=2"
print(line)
post_build = aur_post_build
# do some cleanup here after building the package, regardless of result
# def post_build_always(success):
# pass
if __name__ == '__main__':
single_main(build_prefix)
| [
"[email protected]"
] | |
afa2880ef7c9ad5d7d0c8b552c93ec596a1567aa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02712/s236119093.py | 71fea111b5f86f505bae1ae0688f57fdca6fed08 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | import sys
input = sys.stdin.readline
N = int(input())
ans = 0
for i in range(1,N+1):
if i%3 != 0 and i%5 !=0:
ans += i
print(ans)
| [
"[email protected]"
] | |
cbd4dff58fb534940486ad7a745bc32cfa732058 | e268832c9a5ecd465851347fc870ccf92e073309 | /Top_Interview_Questions/48._Rotate_Image/solution.py | 0c0c4d7037d1b48406cc887ceb7c12e888dc1f8c | [] | no_license | hkim150/Leetcode-Problems | a995e74ecca6b34213d9fa34b0d84eea649f57c2 | 1239b805a819e4512860a6507b332636941ff3e9 | refs/heads/master | 2020-12-04T08:06:42.981990 | 2020-10-03T00:20:29 | 2020-10-03T00:20:29 | 231,688,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | class Solution:
def rotate(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
# we can first transpose and then reverse the row
if not matrix:
return
l = len(matrix)
if l <= 1:
return
for row in range(l):
for col in range(row, l):
matrix[row][col], matrix[col][row] = matrix[col][row], matrix[row][col]
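        # Worked example (added, not part of the original submission): transposing
        # [[1, 2], [3, 4]] in place gives [[1, 3], [2, 4]]; reversing each row below
        # then yields [[3, 1], [4, 2]], i.e. the matrix rotated 90 degrees clockwise.
        # (Run standalone, `List` would come from `from typing import List`.)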
for row in range(l):
matrix[row].reverse() | [
"[email protected]"
] | |
3b7a1f1b87e2d782ff133914ab7fc7467d0a30b3 | fec00348ab62d9539955f91f707291f8b09ecbd8 | /chainer_/chainercv2/models/irevnet.py | 0a86f6a43e3e4bd058b0b08765e3a0d0aaa6b1b9 | [
"MIT"
] | permissive | raijinspecial/imgclsmob | 192cae6cd5021f8ff951371d641e5f222cfe3068 | c5d3ab207a6304f1343e4394f0467bdc7403a72a | refs/heads/master | 2020-04-08T03:27:05.031453 | 2019-03-08T04:28:13 | 2019-03-08T04:28:13 | 158,976,223 | 0 | 0 | MIT | 2019-03-08T04:28:14 | 2018-11-24T21:47:08 | Python | UTF-8 | Python | false | false | 15,483 | py | """
i-RevNet, implemented in Chainer.
Original paper: 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
"""
__all__ = ['IRevNet', 'irevnet301']
import os
import chainer.functions as F
import chainer.links as L
from chainer import Chain
from functools import partial
from chainer.serializers import load_npz
from .common import conv3x3, pre_conv3x3_block, DualPathSequential, SimpleSequential
class IRevDualPathSequential(DualPathSequential):
"""
An invertible sequential container for blocks with dual inputs/outputs.
Blocks will be executed in the order they are added.
Parameters:
----------
return_two : bool, default True
Whether to return two output after execution.
first_ordinals : int, default 0
Number of the first blocks with single input/output.
last_ordinals : int, default 0
Number of the final blocks with single input/output.
dual_path_scheme : function
Scheme of dual path response for a block.
dual_path_scheme_ordinal : function
Scheme of dual path response for an ordinal block.
last_noninvertible : int, default 0
Number of the final blocks skipped during inverse.
"""
def __init__(self,
return_two=True,
first_ordinals=0,
last_ordinals=0,
dual_path_scheme=(lambda module, x1, x2: module(x1, x2)),
dual_path_scheme_ordinal=(lambda module, x1, x2: (module(x1), x2)),
last_noninvertible=0):
super(IRevDualPathSequential, self).__init__(
return_two=return_two,
first_ordinals=first_ordinals,
last_ordinals=last_ordinals,
dual_path_scheme=dual_path_scheme,
dual_path_scheme_ordinal=dual_path_scheme_ordinal)
self.last_noninvertible = last_noninvertible
def inverse(self, x1, x2=None):
length = len(self.layer_names)
for i, block_name in enumerate(reversed(self.layer_names)):
block = self[block_name]
if i < self.last_noninvertible:
pass
elif (i < self.last_ordinals) or (i >= length - self.first_ordinals):
x1, x2 = self.dual_path_scheme_ordinal(block.inverse, x1, x2)
else:
x1, x2 = self.dual_path_scheme(block.inverse, x1, x2)
if self.return_two:
return x1, x2
else:
return x1
class IRevDownscale(Chain):
"""
i-RevNet specific downscale (so-called psi-block).
Parameters:
----------
scale : int
Scale (downscale) value.
"""
def __init__(self, scale):
super(IRevDownscale, self).__init__()
self.scale = scale
def __call__(self, x):
batch, x_channels, x_height, x_width = x.shape
y_channels = x_channels * self.scale * self.scale
assert (x_height % self.scale == 0)
y_height = x_height // self.scale
y = F.transpose(x, axes=(0, 2, 3, 1))
d2_split_seq = F.split_axis(y, indices_or_sections=(y.shape[2] // self.scale), axis=2)
d2_split_seq = [t.reshape(batch, y_height, y_channels) for t in d2_split_seq]
y = F.stack(d2_split_seq, axis=1)
y = F.transpose(y, axes=(0, 3, 2, 1))
return y
def inverse(self, y):
scale_sqr = self.scale * self.scale
batch, y_channels, y_height, y_width = y.shape
assert (y_channels % scale_sqr == 0)
x_channels = y_channels // scale_sqr
x_height = y_height * self.scale
x_width = y_width * self.scale
x = F.transpose(y, axes=(0, 2, 3, 1))
x = x.reshape(batch, y_height, y_width, scale_sqr, x_channels)
d3_split_seq = F.split_axis(x, indices_or_sections=(x.shape[3] // self.scale), axis=3)
d3_split_seq = [t.reshape(batch, y_height, x_width, x_channels) for t in d3_split_seq]
x = F.stack(d3_split_seq, axis=0)
x = F.transpose(F.swapaxes(x, axis1=0, axis2=1), axes=(0, 2, 1, 3, 4)).reshape(
batch, x_height, x_width, x_channels)
x = F.transpose(x, axes=(0, 3, 1, 2))
return x
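# Note on IRevDownscale (added for clarity): with scale=2 the forward pass maps an
# input of shape (N, C, H, W) to (N, 4*C, H/2, W/2) by moving spatial blocks into
# channels, and inverse() reverses that rearrangement exactly, so no information is lost.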
class IRevInjectivePad(Chain):
"""
i-RevNet channel zero padding block.
Parameters:
----------
padding : int
Size of the padding.
"""
def __init__(self, padding):
super(IRevInjectivePad, self).__init__()
self.padding = padding
def __call__(self, x):
return F.pad(x, pad_width=((0, 0), (0, self.padding), (0, 0), (0, 0)), mode="constant", constant_values=0)
def inverse(self, x):
        return x[:, :x.shape[1] - self.padding, :, :]
class IRevSplitBlock(Chain):
"""
iRevNet split block.
"""
def __init__(self):
super(IRevSplitBlock, self).__init__()
def __call__(self, x, _):
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
def inverse(self, x1, x2):
x = F.concat((x1, x2), axis=1)
return x, None
class IRevMergeBlock(Chain):
"""
iRevNet merge block.
"""
def __init__(self):
super(IRevMergeBlock, self).__init__()
def __call__(self, x1, x2):
x = F.concat((x1, x2), axis=1)
return x, x
def inverse(self, x, _):
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
class IRevBottleneck(Chain):
"""
iRevNet bottleneck block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the branch convolution layers.
preactivate : bool
Whether use pre-activation for the first convolution block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
preactivate):
super(IRevBottleneck, self).__init__()
mid_channels = out_channels // 4
with self.init_scope():
if preactivate:
self.conv1 = pre_conv3x3_block(
in_channels=in_channels,
out_channels=mid_channels,
stride=stride)
else:
self.conv1 = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
stride=stride)
self.conv2 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=mid_channels)
self.conv3 = pre_conv3x3_block(
in_channels=mid_channels,
out_channels=out_channels)
def __call__(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
return x
class IRevUnit(Chain):
"""
iRevNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Stride of the branch convolution layers.
preactivate : bool
Whether use pre-activation for the first convolution block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
preactivate):
super(IRevUnit, self).__init__()
if not preactivate:
in_channels = in_channels // 2
padding = 2 * (out_channels - in_channels)
self.do_padding = (padding != 0) and (stride == 1)
self.do_downscale = (stride != 1)
with self.init_scope():
if self.do_padding:
self.pad = IRevInjectivePad(padding)
self.bottleneck = IRevBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
preactivate=preactivate)
if self.do_downscale:
self.psi = IRevDownscale(stride)
def __call__(self, x1, x2):
if self.do_padding:
x = F.concat((x1, x2), axis=1)
x = self.pad(x)
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
fx2 = self.bottleneck(x2)
if self.do_downscale:
x1 = self.psi(x1)
x2 = self.psi(x2)
y1 = fx2 + x1
return x2, y1
def inverse(self, x2, y1):
if self.do_downscale:
x2 = self.psi.inverse(x2)
fx2 = - self.bottleneck(x2)
x1 = fx2 + y1
if self.do_downscale:
x1 = self.psi.inverse(x1)
if self.do_padding:
x = F.concat((x1, x2), axis=1)
x = self.pad.inverse(x)
x1, x2 = F.split_axis(x, indices_or_sections=2, axis=1)
return x1, x2
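# Note on IRevUnit invertibility (added): the unit is an additive coupling, the
# forward pass returns (x2, y1) with y1 = x1 + F(x2), so inverse() recovers
# x1 = y1 - F(x2) from the outputs alone; the optional psi downscale and the
# injective padding used above are likewise reversible.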
class IRevPostActivation(Chain):
"""
iRevNet specific post-activation block.
Parameters:
----------
in_channels : int
Number of input channels.
"""
def __init__(self,
in_channels):
super(IRevPostActivation, self).__init__()
with self.init_scope():
self.bn = L.BatchNormalization(
size=in_channels,
eps=1e-5)
self.activ = F.relu
def __call__(self, x):
x = self.bn(x)
x = self.activ(x)
return x
class IRevNet(Chain):
"""
i-RevNet model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final unit.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
in_channels=3,
in_size=(224, 224),
classes=1000):
super(IRevNet, self).__init__()
assert (in_channels > 0)
self.in_size = in_size
self.classes = classes
with self.init_scope():
self.features = IRevDualPathSequential(
first_ordinals=1,
last_ordinals=2,
last_noninvertible=2)
with self.features.init_scope():
setattr(self.features, "init_block", IRevDownscale(scale=2))
in_channels = init_block_channels
setattr(self.features, "init_split", IRevSplitBlock())
for i, channels_per_stage in enumerate(channels):
stage = IRevDualPathSequential()
with stage.init_scope():
for j, out_channels in enumerate(channels_per_stage):
stride = 2 if (j == 0) else 1
preactivate = not ((i == 0) and (j == 0))
setattr(stage, "unit{}".format(j + 1), IRevUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
preactivate=preactivate))
in_channels = out_channels
setattr(self.features, "stage{}".format(i + 1), stage)
in_channels = final_block_channels
setattr(self.features, "final_merge", IRevMergeBlock())
setattr(self.features, "final_postactiv", IRevPostActivation(in_channels=in_channels))
setattr(self.features, "final_pool", partial(
F.average_pooling_2d,
ksize=7,
stride=1))
self.output = SimpleSequential()
with self.output.init_scope():
setattr(self.output, "flatten", partial(
F.reshape,
shape=(-1, in_channels)))
setattr(self.output, "fc", L.Linear(
in_size=in_channels,
out_size=classes))
def __call__(self, x, return_out_bij=False):
x, out_bij = self.features(x)
x = self.output(x)
if return_out_bij:
return x, out_bij
else:
return x
def inverse(self, out_bij):
x, _ = self.features.inverse(out_bij)
return x
def get_irevnet(blocks,
model_name=None,
pretrained=False,
root=os.path.join('~', '.chainer', 'models'),
**kwargs):
"""
Create i-RevNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
    root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
if blocks == 301:
layers = [6, 16, 72, 6]
else:
raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks))
assert (sum(layers) * 3 + 1 == blocks)
channels_per_layers = [24, 96, 384, 1536]
init_block_channels = 12
final_block_channels = 3072
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = IRevNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
load_npz(
file=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
obj=net)
return net
def irevnet301(**kwargs):
"""
i-RevNet-301 model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.chainer/models'
Location for keeping the model parameters.
"""
return get_irevnet(blocks=301, model_name="irevnet301", **kwargs)
def _test():
import numpy as np
import chainer
chainer.global_config.train = False
pretrained = False
models = [
irevnet301,
]
for model in models:
net = model(pretrained=pretrained)
weight_count = net.count_params()
print("m={}, {}".format(model.__name__, weight_count))
assert (model != irevnet301 or weight_count == 125120356)
x = np.random.rand(2, 3, 224, 224).astype(np.float32)
y = net(x)
assert (y.shape == (2, 1000))
y, out_bij = net(x, return_out_bij=True)
x_ = net.inverse(out_bij)
assert (x_.shape == (2, 3, 224, 224))
assert (np.max(np.abs(x - x_.array)) < 1e-3)
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
11a6fcb57a8d8be2f2d2ef039795233e246976d1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03346/s839224624.py | 607b47a18949a8482e6d9a712cdca3e6c434dd8e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | import bisect
import heapq
import itertools
import math
import operator
import os
import re
import string
import sys
from collections import Counter, deque, defaultdict
from copy import deepcopy
from decimal import Decimal
from fractions import gcd
from functools import lru_cache, reduce
from operator import itemgetter, mul, add, xor
import numpy as np
if os.getenv("LOCAL"):
sys.stdin = open("_in.txt", "r")
sys.setrecursionlimit(2147483647)
INF = float("inf")
IINF = 10 ** 18
MOD = 10 ** 9 + 7
N = int(sys.stdin.readline())
P = [int(sys.stdin.readline()) for _ in range(N)]
# dp[i]: i より前に何個連続してるか
dp = np.zeros(N + 1, dtype=int)
for i in range(N):
p = P[i]
dp[p] = dp[p - 1] + 1
print(N - dp.max())
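# Explanatory note (added): P is a permutation read in position order, so dp[p] is the
# length of the run of consecutive values ..., p-1, p whose positions already increase;
# every value outside the longest such run needs one move, giving the answer N - dp.max().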
| [
"[email protected]"
] | |
fd423a0b542b090db684de0a6a6f97329d80eeda | 129ea5d4b576639da63cf94dd3d1adb27422aa03 | /ceshi.py | 77dff77147c176733aea9a6bd577b26228b2cbda | [] | no_license | lianzhang132/bookroom | 55392db40bdf4bfd4d49c33d4dfb60947f954061 | 2bebdbd90be3fc356efdb6514688d1b6c7cb3c48 | refs/heads/master | 2020-07-11T01:29:03.630781 | 2019-08-26T07:21:45 | 2019-08-26T07:21:45 | 204,419,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | import unittest
from run import app
import json
# # Requests could also be sent with the help of urllib or requests
# import urllib
# import requests
# Login tests
class LoginTest(unittest.TestCase):
    # Test method for an empty username and password
def setUp(self):
app.testing = True
        # Always executed before each test method runs
        # put initialization code here
self.client = app.test_client()
def test_empty_username_password(self):
        # Built-in way of sending requests through the app test client: argument one is the route, argument two the data
response = self.client.post('/login',data={})
json_dict = json.loads(response.data)
# print(json_dict)
        # Assertions
self.assertIn('errcode',json_dict,'数据格式返回错误')
self.assertEqual(1,json_dict['errcode'],'状态码返回错误')
# requests.post('/login')
def test_username_password(self):
        response = self.client.post('/login', data={'uname':'xiaoming','upass':'abc'})
json_dict = json.loads(response.data)
# print(json_dict)
        # Assertions
self.assertIn('errcode', json_dict, '数据格式返回错误')
self.assertEqual(2, json_dict['errcode'], '用户名或者密码不正确')
# Registration tests
# class RegisterTest(unittest.TestCase):
# pass
# Tests for the order, membership and shopping cart modules
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
901fa07849d22ed8b30cf47e067a33598d238cf6 | 916480ae24345193efa95df013f637e0a115653b | /web/transiq/api/management/commands/save_blackbuck_data.py | ef02a5eccb3223c25f6cb6c0b3b3b085eb722b2e | [
"Apache-2.0"
] | permissive | manibhushan05/tms | 50e289c670e1615a067c61a051c498cdc54958df | 763fafb271ce07d13ac8ce575f2fee653cf39343 | refs/heads/master | 2022-12-11T07:59:30.297259 | 2021-09-08T03:24:59 | 2021-09-08T03:24:59 | 210,017,184 | 0 | 0 | Apache-2.0 | 2022-12-08T02:35:01 | 2019-09-21T16:23:57 | Python | UTF-8 | Python | false | false | 303 | py | from django.core.management.base import BaseCommand
from api.blackbuck import fetch_blackbuck_data
class Command(BaseCommand):
args = 'Arguments not needed'
help = 'Django admin command to save blackbuck data'
def handle(self, *args, **options):
fetch_blackbuck_data(clean=True)
| [
"[email protected]"
] | |
a1ea1cd0c4454fea650614ef561225696796a60d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-organizations/huaweicloudsdkorganizations/v1/model/tag_resource_req_body.py | f43bc9df67f8aa35a1f1ec41372a38258bee7053 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,188 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class TagResourceReqBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[TagDto]'
}
attribute_map = {
'tags': 'tags'
}
def __init__(self, tags=None):
"""TagResourceReqBody
The model defined in huaweicloud sdk
        :param tags: List of tags to be added to the specified resource.
:type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
self._tags = None
self.discriminator = None
self.tags = tags
@property
def tags(self):
"""Gets the tags of this TagResourceReqBody.
        List of tags to be added to the specified resource.
:return: The tags of this TagResourceReqBody.
:rtype: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this TagResourceReqBody.
        List of tags to be added to the specified resource.
:param tags: The tags of this TagResourceReqBody.
:type tags: list[:class:`huaweicloudsdkorganizations.v1.TagDto`]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagResourceReqBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
28764e6985747a21ef890844cef47baaea2ac122 | 9d77c2434b0e804af2ecaf4a4ea6d99f6ac6d96d | /preprocessing/tf_image.py | b84b4284cb4af7ec6da652d92872d7f69efdf48a | [
"MIT"
] | permissive | UpCoder/ISBI_LiverLesionDetection | cce7c118601548eef8bfeeedfe4f50d88411a7df | e21ce360e55092a16acc4d147a1418a53545d562 | refs/heads/master | 2022-12-12T12:57:50.299718 | 2019-04-03T03:49:43 | 2019-04-03T03:49:43 | 179,206,267 | 8 | 1 | MIT | 2022-12-08T16:02:31 | 2019-04-03T03:49:00 | Python | UTF-8 | Python | false | false | 17,125 | py | # Copyright 2015 The TensorFlow Authors and Paul Balanca. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom image operations.
Most of the following methods extend TensorFlow image library, and part of
the code is shameless copy-paste of the former!
"""
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_image_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import util
# =========================================================================== #
# Modification of TensorFlow image routines.
# =========================================================================== #
def _assert(cond, ex_type, msg):
"""A polymorphic assert, works with tensors and boolean expressions.
If `cond` is not a tensor, behave like an ordinary assert statement, except
that a empty list is returned. If `cond` is a tensor, return a list
containing a single TensorFlow assert op.
Args:
cond: Something evaluates to a boolean value. May be a tensor.
ex_type: The exception class to use.
msg: The error message.
Returns:
A list, containing at most one assert op.
"""
if _is_tensor(cond):
return [control_flow_ops.Assert(cond, [msg])]
else:
if not cond:
raise ex_type(msg)
else:
return []
def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (ops.Tensor, variables.Variable))
def _ImageDimensions(image):
"""Returns the dimensions of an image tensor.
Args:
image: A 3-D Tensor of shape `[height, width, channels]`.
Returns:
A list of `[height, width, channels]` corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(3).as_list()
dynamic_shape = array_ops.unstack(array_ops.shape(image), 3)
return [s if s is not None else d
for s, d in zip(static_shape, dynamic_shape)]
def _Check3DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
Args:
image: 3-D Tensor of shape [height, width, channels]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if `image.shape` is not a 3-vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
image_shape = image.get_shape().with_rank(3)
except ValueError:
raise ValueError("'image' must be three-dimensional.")
if require_static and not image_shape.is_fully_defined():
raise ValueError("'image' must be fully defined.")
if any(x == 0 for x in image_shape):
raise ValueError("all dims of 'image.shape' must be > 0: %s" %
image_shape)
if not image_shape.is_fully_defined():
return [check_ops.assert_positive(array_ops.shape(image),
["all dims of 'image.shape' "
"must be > 0."])]
else:
return []
def fix_image_flip_shape(image, result):
"""Set the shape to 3 dimensional if we don't know anything else.
Args:
image: original image size
result: flipped or transformed image
Returns:
An image whose shape is at least None,None,None.
"""
image_shape = image.get_shape()
if image_shape == tensor_shape.unknown_shape():
result.set_shape([None, None, None])
else:
result.set_shape(image_shape)
return result
# =========================================================================== #
# Image + BBoxes methods: cropping, resizing, flipping, ...
# =========================================================================== #
def bboxes_crop_or_pad(bboxes, xs, ys,
height, width,
offset_y, offset_x,
target_height, target_width):
"""Adapt bounding boxes to crop or pad operations.
Coordinates are always supposed to be relative to the image.
Arguments:
bboxes: Tensor Nx4 with bboxes coordinates [y_min, x_min, y_max, x_max];
height, width: Original image dimension;
offset_y, offset_x: Offset to apply,
negative if cropping, positive if padding;
target_height, target_width: Target dimension after cropping / padding.
"""
with tf.name_scope('bboxes_crop_or_pad'):
# Rescale bounding boxes in pixels.
scale = tf.cast(tf.stack([height, width, height, width]), bboxes.dtype)
bboxes = bboxes * scale
xs *= tf.cast(width, bboxes.dtype)
ys *= tf.cast(height, bboxes.dtype)
# Add offset.
offset = tf.cast(tf.stack([offset_y, offset_x, offset_y, offset_x]), bboxes.dtype)
bboxes = bboxes + offset
xs += tf.cast(offset_x, bboxes.dtype)
ys += tf.cast(offset_y, bboxes.dtype)
# Rescale to target dimension.
scale = tf.cast(tf.stack([target_height, target_width,
target_height, target_width]), bboxes.dtype)
bboxes = bboxes / scale
xs = xs / tf.cast(target_width, xs.dtype)
ys = ys / tf.cast(target_height, ys.dtype)
return bboxes, xs, ys
def resize_image_bboxes_with_crop_or_pad(image, bboxes, xs, ys,
target_height, target_width, mask_image=None):
"""Crops and/or pads an image to a target width and height.
Resizes an image to a target width and height by either centrally
cropping the image or padding it evenly with zeros.
If `width` or `height` is greater than the specified `target_width` or
`target_height` respectively, this op centrally crops along that dimension.
If `width` or `height` is smaller than the specified `target_width` or
`target_height` respectively, this op centrally pads with 0 along that
dimension.
Args:
image: 3-D tensor of shape `[height, width, channels]`
target_height: Target height.
target_width: Target width.
Raises:
ValueError: if `target_height` or `target_width` are zero or negative.
Returns:
Cropped and/or padded image of shape
`[target_height, target_width, channels]`
"""
with tf.name_scope('resize_with_crop_or_pad'):
image = ops.convert_to_tensor(image, name='image')
if mask_image is not None:
print('Image: ', image)
print('MaskImage: ', mask_image)
mask_image = ops.convert_to_tensor(mask_image, name='image')
assert_ops = []
assert_ops += _Check3DImage(image, require_static=False)
assert_ops += _assert(target_width > 0, ValueError,
'target_width must be > 0.')
assert_ops += _assert(target_height > 0, ValueError,
'target_height must be > 0.')
image = control_flow_ops.with_dependencies(assert_ops, image)
# `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks.
# Make sure our checks come first, so that error messages are clearer.
if _is_tensor(target_height):
target_height = control_flow_ops.with_dependencies(
assert_ops, target_height)
if _is_tensor(target_width):
target_width = control_flow_ops.with_dependencies(assert_ops, target_width)
def max_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.maximum(x, y)
else:
return max(x, y)
def min_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.minimum(x, y)
else:
return min(x, y)
def equal_(x, y):
if _is_tensor(x) or _is_tensor(y):
return math_ops.equal(x, y)
else:
return x == y
height, width, _ = _ImageDimensions(image)
width_diff = target_width - width
offset_crop_width = max_(-width_diff // 2, 0)
offset_pad_width = max_(width_diff // 2, 0)
height_diff = target_height - height
offset_crop_height = max_(-height_diff // 2, 0)
offset_pad_height = max_(height_diff // 2, 0)
# Maybe crop if needed.
height_crop = min_(target_height, height)
width_crop = min_(target_width, width)
cropped = tf.image.crop_to_bounding_box(image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
if mask_image is not None:
cropped_mask_image = tf.image.crop_to_bounding_box(mask_image, offset_crop_height, offset_crop_width,
height_crop, width_crop)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height, width,
-offset_crop_height, -offset_crop_width,
height_crop, width_crop)
# Maybe pad if needed.
resized = tf.image.pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width,
target_height, target_width)
if mask_image is not None:
resized_mask_image = tf.image.pad_to_bounding_box(cropped_mask_image, offset_pad_height, offset_pad_width,
target_height, target_width)
bboxes, xs, ys = bboxes_crop_or_pad(bboxes, xs, ys,
height_crop, width_crop,
offset_pad_height, offset_pad_width,
target_height, target_width)
# In theory all the checks below are redundant.
if resized.get_shape().ndims is None:
raise ValueError('resized contains no shape.')
resized_height, resized_width, _ = _ImageDimensions(resized)
assert_ops = []
assert_ops += _assert(equal_(resized_height, target_height), ValueError,
'resized height is not correct.')
assert_ops += _assert(equal_(resized_width, target_width), ValueError,
'resized width is not correct.')
resized = control_flow_ops.with_dependencies(assert_ops, resized)
if mask_image is None:
return resized, None, bboxes, xs, ys
else:
return resized, resized_mask_image, bboxes, xs, ys
def resize_image(image, size,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False):
"""Resize an image and bounding boxes.
"""
# Resize image.
with tf.name_scope('resize_image'):
height, width, channels = _ImageDimensions(image)
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(image, size,
method, align_corners)
image = tf.reshape(image, tf.stack([size[0], size[1], channels]))
return image
def random_flip_left_right(image, bboxes, seed=None):
"""Random flip left-right of an image and its bounding boxes.
"""
def flip_bboxes(bboxes):
"""Flip bounding boxes coordinates.
"""
bboxes = tf.stack([bboxes[:, 0], 1 - bboxes[:, 3],
bboxes[:, 2], 1 - bboxes[:, 1]], axis=-1)
return bboxes
# Random flip. Tensorflow implementation.
with tf.name_scope('random_flip_left_right'):
image = ops.convert_to_tensor(image, name='image')
_Check3DImage(image, require_static=False)
uniform_random = random_ops.random_uniform([], 0, 1.0, seed=seed)
mirror_cond = math_ops.less(uniform_random, .5)
# Flip image.
result = control_flow_ops.cond(mirror_cond,
lambda: array_ops.reverse_v2(image, [1]),
lambda: image)
# Flip bboxes.
bboxes = control_flow_ops.cond(mirror_cond,
lambda: flip_bboxes(bboxes),
lambda: bboxes)
return fix_image_flip_shape(image, result), bboxes
def random_rotate90(image, bboxes, xs, ys, mask_image=None):
with tf.name_scope('random_rotate90'):
k = random_ops.random_uniform([], 0, 10000)
k = tf.cast(k, tf.int32)
image_shape = tf.shape(image)
h, w = image_shape[0], image_shape[1]
image = tf.image.rot90(image, k = k)
if mask_image is not None:
mask_image = tf.image.rot90(mask_image, k=k)
bboxes, xs, ys = rotate90(bboxes, xs, ys, k)
if mask_image is None:
return image, bboxes, xs, ys
else:
return image, mask_image, bboxes, xs, ys
def tf_rotate_point_by_90(x, y, k):
return tf.py_func(util.img.rotate_point_by_90, [x, y, k],
[tf.float32, tf.float32])
def rotate90(bboxes, xs, ys, k):
# bboxes = tf.Print(bboxes, [bboxes], 'before rotate',summarize = 100)
ymin, xmin, ymax, xmax = [bboxes[:, i] for i in range(4)]
xmin, ymin = tf_rotate_point_by_90(xmin, ymin, k)
xmax, ymax = tf_rotate_point_by_90(xmax, ymax, k)
new_xmin = tf.minimum(xmin, xmax)
new_xmax = tf.maximum(xmin, xmax)
new_ymin = tf.minimum(ymin, ymax)
new_ymax = tf.maximum(ymin, ymax)
bboxes = tf.stack([new_ymin, new_xmin, new_ymax, new_xmax])
bboxes = tf.transpose(bboxes)
xs, ys = tf_rotate_point_by_90(xs, ys, k)
return bboxes, xs, ys
if __name__ == "__main__":
import util
image_path = '~/Pictures/img_1.jpg'
image_data = util.img.imread(image_path, rgb = True)
bbox_data = [[100, 100, 300, 300], [400, 400, 500, 500]]
def draw_bbox(img, bbox):
xmin, ymin, xmax, ymax = bbox
util.img.rectangle(img, left_up = (xmin, ymin),
right_bottom = (xmax, ymax),
color = util.img.COLOR_RGB_RED,
border_width = 10)
image = tf.placeholder(dtype = tf.uint8)
bboxes = tf.placeholder(dtype = tf.int32)
bboxes_float32 = tf.cast(bboxes, dtype = tf.float32)
image_shape = tf.cast(tf.shape(image), dtype = tf.float32)
image_h, image_w = image_shape[0], image_shape[1]
xmin, ymin, xmax, ymax = [bboxes_float32[:, i] for i in range(4)]
bboxes_normed = tf.stack([xmin / image_w, ymin / image_h,
xmax / image_w, ymax / image_h])
bboxes_normed = tf.transpose(bboxes_normed)
target_height = image_h * 2
target_width = image_w * 2
target_height = tf.cast(target_height, tf.int32)
target_width = tf.cast(target_width, tf.int32)
processed_image, processed_bboxes = resize_image_bboxes_with_crop_or_pad(image, bboxes_normed,
target_height, target_width)
with tf.Session() as sess:
resized_image, resized_bboxes = sess.run(
[processed_image, processed_bboxes],
feed_dict = {image: image_data, bboxes: bbox_data})
for _bbox in bbox_data:
draw_bbox(image_data, _bbox)
util.plt.imshow('image_data', image_data)
h, w = resized_image.shape[0:2]
for _bbox in resized_bboxes:
_bbox *= [w, h, w, h]
draw_bbox(resized_image, _bbox)
util.plt.imshow('resized_image', resized_image)
| [
"[email protected]"
] | |
3fb518dd7c6ff02dc92ef2aab9f585d679d2e42e | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/HUAWEI-MIRROR-MIB.py | aa7dce312e3994d2b844631d194be929f967ddd7 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 26,682 | py | #
# PySNMP MIB module HUAWEI-MIRROR-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HUAWEI-MIRROR-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:35:05 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
huaweiMgmt, = mibBuilder.importSymbols("HUAWEI-MIB", "huaweiMgmt")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter64, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, TimeTicks, Gauge32, ObjectIdentity, MibIdentifier, Counter32, NotificationType, Unsigned32, IpAddress, ModuleIdentity, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "TimeTicks", "Gauge32", "ObjectIdentity", "MibIdentifier", "Counter32", "NotificationType", "Unsigned32", "IpAddress", "ModuleIdentity", "iso")
TruthValue, RowStatus, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "RowStatus", "TextualConvention", "DisplayString")
hwMirrorMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 5, 162))
if mibBuilder.loadTexts: hwMirrorMIB.setLastUpdated('200801012030Z')
if mibBuilder.loadTexts: hwMirrorMIB.setOrganization('Huawei Technologies co.,Ltd.')
hwMirrorMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1))
hwLocalMirror = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1))
hwLocalObserveTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1), )
if mibBuilder.loadTexts: hwLocalObserveTable.setStatus('current')
hwLocalObserveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalObservePort"))
if mibBuilder.loadTexts: hwLocalObserveEntry.setStatus('current')
hwLocalObservePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwLocalObservePort.setStatus('current')
hwLocalObserveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveIndex.setStatus('current')
hwLocalObserveWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveWithLinkLayerHeader.setStatus('current')
hwLocalObserveRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 1, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalObserveRowStatus.setStatus('current')
hwLocalPortMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2), )
if mibBuilder.loadTexts: hwLocalPortMirrorTable.setStatus('current')
hwLocalPortMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalMirrorPort"))
if mibBuilder.loadTexts: hwLocalPortMirrorEntry.setStatus('current')
hwLocalMirrorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwLocalMirrorPort.setStatus('current')
hwLocalMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalMirrorBearing.setStatus('current')
hwLocalCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalCpuPacketFlag.setStatus('current')
hwLocalPortMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalPortMirrorCar.setStatus('current')
hwLocalPortMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 2, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalPortMirrorRowStatus.setStatus('current')
hwLocalFlowMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3), )
if mibBuilder.loadTexts: hwLocalFlowMirrorTable.setStatus('current')
hwLocalFlowMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalBehaviorName"))
if mibBuilder.loadTexts: hwLocalFlowMirrorEntry.setStatus('current')
hwLocalBehaviorName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwLocalBehaviorName.setStatus('current')
hwLocalFlowMirrorEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 2), EnabledStatus().clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorEnable.setStatus('current')
hwLocalFlowMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorCar.setStatus('current')
hwLocalFlowMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalFlowMirrorRowStatus.setStatus('current')
hwLocalSlotMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4), )
if mibBuilder.loadTexts: hwLocalSlotMirrorTable.setStatus('current')
hwLocalSlotMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwLocalSlotNo"))
if mibBuilder.loadTexts: hwLocalSlotMirrorEntry.setStatus('current')
hwLocalSlotNo = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128)))
if mibBuilder.loadTexts: hwLocalSlotNo.setStatus('current')
hwSlotObserveIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwSlotObserveIndex.setStatus('current')
hwLocalSlotMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 4, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwLocalSlotMirrorRowStatus.setStatus('current')
hwPortMirrorInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5), )
if mibBuilder.loadTexts: hwPortMirrorInfoTable.setStatus('current')
hwPortMirrorInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwMirrorPortIndex"))
if mibBuilder.loadTexts: hwPortMirrorInfoEntry.setStatus('current')
hwMirrorPortIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwMirrorPortIndex.setStatus('current')
hwMirrorType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("local", 1), ("remote", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorType.setStatus('current')
hwMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorCar.setStatus('current')
hwMirrorClass = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("port", 1), ("policy", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorClass.setStatus('current')
hwMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorBearing.setStatus('current')
hwMirrorCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 6), TruthValue().clone('false')).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorCpuPacketFlag.setStatus('current')
hwMirrorWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorWithLinkLayerHeader.setStatus('current')
hwRemoteMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 1, 5, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwRemoteMirrorInstanceName.setStatus('current')
hwRemoteMirror = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2))
hwRemoteObserveTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1), )
if mibBuilder.loadTexts: hwRemoteObserveTable.setStatus('current')
hwRemoteObserveEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteObservePort"))
if mibBuilder.loadTexts: hwRemoteObserveEntry.setStatus('current')
hwRemoteObservePort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwRemoteObservePort.setStatus('current')
hwRemoteIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteIdentifier.setStatus('current')
hwRemoteDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteDescription.setStatus('current')
hwRemoteObserveWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObserveWithLinkLayerHeader.setStatus('current')
hwRemoteObserveRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObserveRowStatus.setStatus('current')
hwRemotePortMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2), )
if mibBuilder.loadTexts: hwRemotePortMirrorTable.setStatus('current')
hwRemotePortMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteMirrorPort"))
if mibBuilder.loadTexts: hwRemotePortMirrorEntry.setStatus('current')
hwRemoteMirrorPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: hwRemoteMirrorPort.setStatus('current')
hwRemoteMirrorBearing = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("inout", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorBearing.setStatus('current')
hwRemoteCpuPacketFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteCpuPacketFlag.setStatus('current')
hwPortMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 4), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwPortMirrorInstanceName.setStatus('current')
hwRemotePortMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemotePortMirrorCar.setStatus('current')
hwRemotePortMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 2, 1, 6), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemotePortMirrorRowStatus.setStatus('current')
hwRemoteFlowMirrorTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3), )
if mibBuilder.loadTexts: hwRemoteFlowMirrorTable.setStatus('current')
hwRemoteFlowMirrorEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwRemoteBehaviorName"))
if mibBuilder.loadTexts: hwRemoteFlowMirrorEntry.setStatus('current')
hwRemoteBehaviorName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwRemoteBehaviorName.setStatus('current')
hwFlowMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwFlowMirrorInstanceName.setStatus('current')
hwRemoteFlowMirrorCar = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(100, 2500000), ))).setUnits('Kbps').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteFlowMirrorCar.setStatus('current')
hwRemoteFlowMirrorRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 3, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteFlowMirrorRowStatus.setStatus('current')
hwRemoteMirrorInstanceTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4), )
if mibBuilder.loadTexts: hwRemoteMirrorInstanceTable.setStatus('current')
hwRemoteMirrorInstanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1), ).setIndexNames((0, "HUAWEI-MIRROR-MIB", "hwMirrorInstanceName"))
if mibBuilder.loadTexts: hwRemoteMirrorInstanceEntry.setStatus('current')
hwMirrorInstanceName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 31)))
if mibBuilder.loadTexts: hwMirrorInstanceName.setStatus('current')
hwRemoteObservePortIp = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 2), IpAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteObservePortIp.setStatus('current')
hwRemoteMirrorIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1, 64), ))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorIdentifier.setStatus('current')
hwRemoteMirrorWithLinkLayerHeader = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwRemoteMirrorWithLinkLayerHeader.setStatus('current')
hwMirrorFlowClass = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("be", 0), ("af1", 1), ("af2", 2), ("af3", 3), ("af4", 4), ("ef", 5), ("cs6", 6), ("cs7", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorFlowClass.setStatus('current')
hwMirrorSliceSize = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(64, 9600), ))).setUnits('Byte').setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorSliceSize.setStatus('current')
hwMirrorTunnelIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelIndex.setStatus('current')
hwMirrorTunnelType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("lspTunnel", 1), ("teTunnel", 2), ("greTunnel", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelType.setStatus('current')
hwMirrorTunnelStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 9), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hwMirrorTunnelStatus.setStatus('current')
hwMirrorTunnelPolicy = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 19))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorTunnelPolicy.setStatus('current')
hwMirrorInstanceRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 5, 162, 1, 2, 4, 1, 11), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hwMirrorInstanceRowStatus.setStatus('current')
hwMirrorConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11))
hwMirrorCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 1))
hwMirrorCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 1, 1)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalObserveGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalSlotMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorInfoGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveGroup"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorGroup"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorInstanceGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwMirrorCompliance = hwMirrorCompliance.setStatus('current')
hwBaseMirrorGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2))
hwLocalObserveGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 1)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalObserveIndex"), ("HUAWEI-MIRROR-MIB", "hwLocalObserveWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwLocalObserveRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwLocalObserveGroup = hwLocalObserveGroup.setStatus('current')
hwLocalPortMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 2)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwLocalCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwLocalPortMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwLocalPortMirrorGroup = hwLocalPortMirrorGroup.setStatus('current')
hwLocalFlowMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 3)).setObjects(("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorEnable"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwLocalFlowMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwLocalFlowMirrorGroup = hwLocalFlowMirrorGroup.setStatus('current')
hwLocalSlotMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 4)).setObjects(("HUAWEI-MIRROR-MIB", "hwSlotObserveIndex"), ("HUAWEI-MIRROR-MIB", "hwLocalSlotMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwLocalSlotMirrorGroup = hwLocalSlotMirrorGroup.setStatus('current')
hwLocalPortMirrorInfoGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 5)).setObjects(("HUAWEI-MIRROR-MIB", "hwMirrorType"), ("HUAWEI-MIRROR-MIB", "hwMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwMirrorClass"), ("HUAWEI-MIRROR-MIB", "hwMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwMirrorCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwMirrorWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorInstanceName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwLocalPortMirrorInfoGroup = hwLocalPortMirrorInfoGroup.setStatus('current')
hwRemoteObserveGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 6)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteIdentifier"), ("HUAWEI-MIRROR-MIB", "hwRemoteDescription"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwRemoteObserveRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRemoteObserveGroup = hwRemoteObserveGroup.setStatus('current')
hwRemotePortMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 7)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteMirrorBearing"), ("HUAWEI-MIRROR-MIB", "hwRemoteCpuPacketFlag"), ("HUAWEI-MIRROR-MIB", "hwPortMirrorInstanceName"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwRemotePortMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRemotePortMirrorGroup = hwRemotePortMirrorGroup.setStatus('current')
hwRemoteFlowMirrorGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 8)).setObjects(("HUAWEI-MIRROR-MIB", "hwFlowMirrorInstanceName"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorCar"), ("HUAWEI-MIRROR-MIB", "hwRemoteFlowMirrorRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRemoteFlowMirrorGroup = hwRemoteFlowMirrorGroup.setStatus('current')
hwRemoteMirrorInstanceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 2011, 5, 162, 11, 2, 9)).setObjects(("HUAWEI-MIRROR-MIB", "hwRemoteObservePortIp"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorIdentifier"), ("HUAWEI-MIRROR-MIB", "hwRemoteMirrorWithLinkLayerHeader"), ("HUAWEI-MIRROR-MIB", "hwMirrorFlowClass"), ("HUAWEI-MIRROR-MIB", "hwMirrorSliceSize"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelIndex"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelType"), ("HUAWEI-MIRROR-MIB", "hwMirrorTunnelStatus"), ("HUAWEI-MIRROR-MIB", "hwMirrorInstanceRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hwRemoteMirrorInstanceGroup = hwRemoteMirrorInstanceGroup.setStatus('current')
mibBuilder.exportSymbols("HUAWEI-MIRROR-MIB", hwLocalFlowMirrorCar=hwLocalFlowMirrorCar, hwLocalObserveTable=hwLocalObserveTable, hwRemoteFlowMirrorRowStatus=hwRemoteFlowMirrorRowStatus, hwLocalPortMirrorTable=hwLocalPortMirrorTable, hwLocalPortMirrorCar=hwLocalPortMirrorCar, hwPortMirrorInstanceName=hwPortMirrorInstanceName, hwMirrorFlowClass=hwMirrorFlowClass, hwMirrorBearing=hwMirrorBearing, hwRemoteObserveGroup=hwRemoteObserveGroup, hwRemoteFlowMirrorTable=hwRemoteFlowMirrorTable, hwRemotePortMirrorCar=hwRemotePortMirrorCar, hwMirrorPortIndex=hwMirrorPortIndex, hwMirrorCar=hwMirrorCar, hwRemoteMirrorInstanceName=hwRemoteMirrorInstanceName, hwRemoteMirrorInstanceTable=hwRemoteMirrorInstanceTable, hwRemoteMirrorBearing=hwRemoteMirrorBearing, hwLocalObserveRowStatus=hwLocalObserveRowStatus, hwRemoteObserveRowStatus=hwRemoteObserveRowStatus, hwLocalMirror=hwLocalMirror, hwMirrorCompliance=hwMirrorCompliance, hwLocalFlowMirrorEntry=hwLocalFlowMirrorEntry, hwMirrorTunnelStatus=hwMirrorTunnelStatus, hwLocalMirrorPort=hwLocalMirrorPort, hwMirrorType=hwMirrorType, hwLocalSlotNo=hwLocalSlotNo, hwRemoteFlowMirrorEntry=hwRemoteFlowMirrorEntry, hwMirrorWithLinkLayerHeader=hwMirrorWithLinkLayerHeader, hwLocalBehaviorName=hwLocalBehaviorName, hwRemoteObserveWithLinkLayerHeader=hwRemoteObserveWithLinkLayerHeader, hwMirrorMIB=hwMirrorMIB, hwRemotePortMirrorTable=hwRemotePortMirrorTable, hwMirrorConformance=hwMirrorConformance, hwLocalObserveGroup=hwLocalObserveGroup, hwRemoteDescription=hwRemoteDescription, hwLocalPortMirrorGroup=hwLocalPortMirrorGroup, hwLocalCpuPacketFlag=hwLocalCpuPacketFlag, hwRemoteMirrorWithLinkLayerHeader=hwRemoteMirrorWithLinkLayerHeader, hwSlotObserveIndex=hwSlotObserveIndex, hwLocalFlowMirrorTable=hwLocalFlowMirrorTable, hwLocalObservePort=hwLocalObservePort, hwLocalFlowMirrorEnable=hwLocalFlowMirrorEnable, hwMirrorTunnelPolicy=hwMirrorTunnelPolicy, hwLocalPortMirrorInfoGroup=hwLocalPortMirrorInfoGroup, hwLocalFlowMirrorRowStatus=hwLocalFlowMirrorRowStatus, hwMirrorCompliances=hwMirrorCompliances, hwRemoteFlowMirrorGroup=hwRemoteFlowMirrorGroup, hwLocalObserveWithLinkLayerHeader=hwLocalObserveWithLinkLayerHeader, hwRemotePortMirrorGroup=hwRemotePortMirrorGroup, hwRemoteMirrorPort=hwRemoteMirrorPort, hwLocalMirrorBearing=hwLocalMirrorBearing, hwPortMirrorInfoEntry=hwPortMirrorInfoEntry, hwMirrorTunnelIndex=hwMirrorTunnelIndex, hwMirrorCpuPacketFlag=hwMirrorCpuPacketFlag, hwMirrorClass=hwMirrorClass, hwLocalPortMirrorRowStatus=hwLocalPortMirrorRowStatus, hwLocalFlowMirrorGroup=hwLocalFlowMirrorGroup, hwLocalPortMirrorEntry=hwLocalPortMirrorEntry, hwFlowMirrorInstanceName=hwFlowMirrorInstanceName, hwLocalObserveEntry=hwLocalObserveEntry, hwRemoteFlowMirrorCar=hwRemoteFlowMirrorCar, hwPortMirrorInfoTable=hwPortMirrorInfoTable, hwMirrorSliceSize=hwMirrorSliceSize, hwRemoteBehaviorName=hwRemoteBehaviorName, hwLocalSlotMirrorEntry=hwLocalSlotMirrorEntry, hwRemoteMirrorIdentifier=hwRemoteMirrorIdentifier, hwLocalSlotMirrorTable=hwLocalSlotMirrorTable, hwMirrorTunnelType=hwMirrorTunnelType, hwRemoteMirror=hwRemoteMirror, hwMirrorInstanceName=hwMirrorInstanceName, hwMirrorMIBObjects=hwMirrorMIBObjects, hwRemoteObserveEntry=hwRemoteObserveEntry, hwRemoteObserveTable=hwRemoteObserveTable, hwLocalSlotMirrorRowStatus=hwLocalSlotMirrorRowStatus, hwBaseMirrorGroup=hwBaseMirrorGroup, hwRemoteIdentifier=hwRemoteIdentifier, hwRemoteObservePortIp=hwRemoteObservePortIp, hwRemotePortMirrorEntry=hwRemotePortMirrorEntry, hwLocalSlotMirrorGroup=hwLocalSlotMirrorGroup, 
hwRemoteCpuPacketFlag=hwRemoteCpuPacketFlag, hwRemoteMirrorInstanceGroup=hwRemoteMirrorInstanceGroup, hwLocalObserveIndex=hwLocalObserveIndex, hwRemotePortMirrorRowStatus=hwRemotePortMirrorRowStatus, PYSNMP_MODULE_ID=hwMirrorMIB, hwMirrorInstanceRowStatus=hwMirrorInstanceRowStatus, hwRemoteObservePort=hwRemoteObservePort, hwRemoteMirrorInstanceEntry=hwRemoteMirrorInstanceEntry)
| [
"[email protected]"
] | |
dd54c49f97d052f3d01d460d92b0d7b59506280b | 8dc84558f0058d90dfc4955e905dab1b22d12c08 | /tools/cygprofile/patch_orderfile.py | 071861e53b11cf764d6c45f7196291be77777555 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"LGPL-2.1-only",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown"
] | permissive | meniossin/src | 42a95cc6c4a9c71d43d62bc4311224ca1fd61e03 | 44f73f7e76119e5ab415d4593ac66485e65d700a | refs/heads/master | 2022-12-16T20:17:03.747113 | 2020-09-03T10:43:12 | 2020-09-03T10:43:12 | 263,710,168 | 1 | 0 | BSD-3-Clause | 2020-05-13T18:20:09 | 2020-05-13T18:20:08 | null | UTF-8 | Python | false | false | 16,496 | py | #!/usr/bin/env vpython
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Patch an orderfile.
Starting with a list of symbols in a binary and an orderfile (ordered list of
sections), matches the symbols in the orderfile and augments each symbol with
the symbols residing at the same address (due to having identical code). The
output is a list of section or symbols matching rules appropriate for the linker
option -section-ordering-file for gold and --symbol-ordering-file for lld. Both
linkers are fine with extra directives that aren't matched in the binary, so we
construct a file suitable for both, concatenating sections and symbols. We
assume that the unpatched orderfile is built for gold, that is, it only contains
sections.
Note: It is possible to have:
- Several symbols mapping to the same offset in the binary.
- Several offsets for a given symbol (because we strip the ".clone." and other
suffixes)
The general pipeline is:
1. Get the symbol infos (name, offset, size, section) from the binary
2. Get the symbol names from the orderfile
3. Find the orderfile symbol names in the symbols coming from the binary
4. For each symbol found, get all the symbols at the same address
5. Output them to an updated orderfile suitable for gold and lld
6. Output catch-all section matching rules for unprofiled methods. This is
ineffective for lld, as it doesn't handle wildcards, but puts unordered
symbols after the ordered ones.
"""
import argparse
import collections
import logging
import sys
import cyglog_to_orderfile
import cygprofile_utils
import symbol_extractor
# Prefixes for the symbols. We strip them from the incoming symbols, and add
# them back in the output file.
# Output sections are constructed as prefix + symbol_name, hence the empty
# prefix is used to generate the symbol entry for lld.
_PREFIXES = ('.text.hot.', '.text.unlikely.', '.text.', '')
# Suffixes for the symbols. These are due to method splitting for inlining and
# method cloning for various reasons including constant propagation and
# inter-procedural optimization.
_SUFFIXES = ('.clone.', '.part.', '.isra.', '.constprop.')
def RemoveSuffixes(name):
"""Strips method name suffixes from cloning and splitting.
.clone. comes from cloning in -O3.
.part. comes from partial method splitting for inlining.
.isra. comes from inter-procedural optimizations.
.constprop. is cloning for constant propagation.
"""
for suffix in _SUFFIXES:
name = name.split(suffix)[0]
return name
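# For illustration (hypothetical mangled name): RemoveSuffixes cuts the name at
# the cloning/splitting suffixes above, so '_ZN3fooEv.part.1.isra.2' collapses
# to '_ZN3fooEv', while a name without any of these suffixes is returned
# unchanged.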
def _UniqueGenerator(generator):
"""Converts a generator to skip yielding elements already seen.
Example:
@_UniqueGenerator
def Foo():
yield 1
yield 2
yield 1
yield 3
Foo() yields 1,2,3.
"""
def _FilteringFunction(*args, **kwargs):
returned = set()
for item in generator(*args, **kwargs):
if item in returned:
continue
returned.add(item)
yield item
return _FilteringFunction
def _GroupSymbolInfosFromBinary(binary_filename):
"""Group all the symbols from a binary by name and offset.
Args:
binary_filename: path to the binary.
Returns:
A tuple of dict:
(offset_to_symbol_infos, name_to_symbol_infos):
- offset_to_symbol_infos: {offset: [symbol_info1, ...]}
- name_to_symbol_infos: {name: [symbol_info1, ...]}
"""
symbol_infos = symbol_extractor.SymbolInfosFromBinary(binary_filename)
symbol_infos_no_suffixes = [
s._replace(name=RemoveSuffixes(s.name)) for s in symbol_infos]
return (symbol_extractor.GroupSymbolInfosByOffset(symbol_infos_no_suffixes),
symbol_extractor.GroupSymbolInfosByName(symbol_infos_no_suffixes))
def _StripPrefix(line):
"""Strips the linker section name prefix from a symbol line.
Args:
line: a line from an orderfile, usually in the form:
.text.SymbolName
Returns:
The symbol, SymbolName in the example above.
"""
# Went away with GCC, make sure it doesn't come back, as the orderfile
# no longer contains it.
assert not line.startswith('.text.startup.')
for prefix in _PREFIXES:
if prefix and line.startswith(prefix):
return line[len(prefix):]
return line # Unprefixed case
def _SectionNameToSymbols(section_name, section_to_symbols_map):
"""Yields all symbols which could be referred to by section_name.
If the section name is present in the map, the names in the map are returned.
Otherwise, any clone annotations and prefixes are stripped from the section
name and the remainder is returned.
"""
if (not section_name or
section_name == '.text' or
section_name.endswith('*')):
return # Don't return anything for catch-all sections
if section_name in section_to_symbols_map:
for symbol in section_to_symbols_map[section_name]:
yield symbol
else:
name = _StripPrefix(section_name)
if name:
yield name
def GetSectionsFromOrderfile(filename):
"""Yields the sections from an orderfile.
Args:
filename: The name of the orderfile.
Yields:
A list of symbol names.
"""
with open(filename, 'r') as f:
for line in f.xreadlines():
line = line.rstrip('\n')
if line:
yield line
@_UniqueGenerator
def GetSymbolsFromOrderfile(filename, section_to_symbols_map):
"""Yields the symbols from an orderfile. Output elements do not repeat.
Args:
filename: The name of the orderfile.
section_to_symbols_map: The mapping from section to symbol names. If a
section name is missing from the mapping, the
symbol name is assumed to be the section name with
prefixes and suffixes stripped.
Yields:
A list of symbol names.
"""
# TODO(lizeb,pasko): Move this method to symbol_extractor.py
for section in GetSectionsFromOrderfile(filename):
for symbol in _SectionNameToSymbols(RemoveSuffixes(section),
section_to_symbols_map):
yield symbol
def _SymbolsWithSameOffset(profiled_symbol, name_to_symbol_info,
offset_to_symbol_info):
"""Expands a symbol to include all symbols with the same offset.
Args:
profiled_symbol: the string symbol name to be expanded.
name_to_symbol_info: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary
offset_to_symbol_info: {offset: [symbol_info1, ...], ...}
Returns:
A list of symbol names, or an empty list if profiled_symbol was not in
name_to_symbol_info.
"""
if profiled_symbol not in name_to_symbol_info:
return []
symbol_infos = name_to_symbol_info[profiled_symbol]
expanded = []
for symbol_info in symbol_infos:
expanded += (s.name for s in offset_to_symbol_info[symbol_info.offset])
return expanded
@_UniqueGenerator
def _SectionMatchingRules(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets the set of section matching rules for section_name.
These rules will include section_name, but also any sections which may
contain the same code due to cloning, splitting, or identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section names including at least section_name.
"""
for name in _ExpandSection(section_name, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map):
yield name
# Since only a subset of methods (mostly those compiled with O2) ever get
# suffixes, don't emit the wildcards for ones where it won't be helpful.
# Otherwise linking takes too long.
if name in suffixed_sections:
# TODO(lizeb,pasko): instead of just appending .*, append .suffix.* for
# _SUFFIXES. We can't do this right now because that many wildcards
# seems to kill the linker (linking libchrome takes 3 hours). This gets
# almost all the benefit at a much lower link-time cost, but could cause
# problems with unexpected suffixes.
yield name + '.*'
def _ExpandSection(section_name, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map):
"""Yields the set of section names for section_name.
This set will include section_name, but also any sections which may contain
the same code due to identical code folding.
Args:
section_name: The section to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
GetSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol name. Missing
section names are treated as per _SectionNameToSymbols.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol. If a symbol isn't in the mapping, the
section names are generated from the set of _PREFIXES with the symbol
name.
Yields:
Section names including at least section_name.
"""
yield section_name
for first_sym in _SectionNameToSymbols(section_name,
section_to_symbols_map):
for symbol in _SymbolsWithSameOffset(first_sym, name_to_symbol_infos,
offset_to_symbol_infos):
if symbol in symbol_to_sections_map:
for section in symbol_to_sections_map[symbol]:
yield section
for prefix in _PREFIXES:
yield prefix + symbol
@_UniqueGenerator
def _ExpandSections(section_names, name_to_symbol_infos,
offset_to_symbol_infos, section_to_symbols_map,
symbol_to_sections_map, suffixed_sections):
"""Gets an ordered set of section matching rules for a list of sections.
Rules will not be repeated.
Args:
section_names: The sections to expand.
name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by
_GroupSymbolInfosFromBinary.
offset_to_symbol_infos: {offset: [symbol_info1, ...], ...}
section_to_symbols_map: The mapping from section to symbol names.
symbol_to_sections_map: The mapping from symbol name to names of linker
sections containing the symbol.
suffixed_sections: A set of sections which can have suffixes.
Yields:
Section matching rules including at least section_names.
"""
for profiled_section in section_names:
for section in _SectionMatchingRules(
profiled_section, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed_sections):
yield section
def _CombineSectionListsByPrimaryName(symbol_to_sections_map):
"""Combines values of the symbol_to_sections_map by stripping suffixes.
Example:
{foo: [.text.foo, .text.bar.part.1],
foo.constprop.4: [.text.baz.constprop.3]} ->
{foo: [.text.foo, .text.bar, .text.baz]}
Args:
symbol_to_sections_map: Mapping from symbol name to list of section names
Returns:
The same mapping, but with symbol and section names suffix-stripped.
"""
simplified = {}
for suffixed_symbol, suffixed_sections in symbol_to_sections_map.iteritems():
symbol = RemoveSuffixes(suffixed_symbol)
sections = [RemoveSuffixes(section) for section in suffixed_sections]
simplified.setdefault(symbol, []).extend(sections)
return simplified
def _SectionsWithSuffixes(symbol_to_sections_map):
"""Finds sections which have suffixes applied.
Args:
symbol_to_sections_map: a map where the values are lists of section names.
Returns:
A set containing all section names which were seen with suffixes applied.
"""
sections_with_suffixes = set()
for suffixed_sections in symbol_to_sections_map.itervalues():
for suffixed_section in suffixed_sections:
section = RemoveSuffixes(suffixed_section)
if section != suffixed_section:
sections_with_suffixes.add(section)
return sections_with_suffixes
def _StripSuffixes(section_list):
"""Remove all suffixes on items in a list of sections or symbols."""
return [RemoveSuffixes(section) for section in section_list]
def GeneratePatchedOrderfile(unpatched_orderfile, native_lib_filename,
output_filename):
"""Writes a patched orderfile.
Args:
unpatched_orderfile: (str) Path to the unpatched orderfile.
native_lib_filename: (str) Path to the native library.
output_filename: (str) Path to the patched orderfile.
"""
(offset_to_symbol_infos, name_to_symbol_infos) = _GroupSymbolInfosFromBinary(
native_lib_filename)
obj_dir = cygprofile_utils.GetObjDir(native_lib_filename)
raw_symbol_map = cyglog_to_orderfile.ObjectFileProcessor(
obj_dir).GetSymbolToSectionsMap()
suffixed = _SectionsWithSuffixes(raw_symbol_map)
symbol_to_sections_map = _CombineSectionListsByPrimaryName(raw_symbol_map)
section_to_symbols_map = cygprofile_utils.InvertMapping(
symbol_to_sections_map)
profiled_sections = _StripSuffixes(
GetSectionsFromOrderfile(unpatched_orderfile))
expanded_sections = _ExpandSections(
profiled_sections, name_to_symbol_infos, offset_to_symbol_infos,
section_to_symbols_map, symbol_to_sections_map, suffixed)
with open(output_filename, 'w') as f:
# Make sure the anchor functions are located in the right place, here and
# after everything else.
# See the comment in //base/android/library_loader/anchor_functions.cc.
#
# __cxx_global_var_init is one of the largest symbols (~38kB as of May
# 2018), called extremely early, and not instrumented.
first_sections = ('dummy_function_start_of_ordered_text',
'__cxx_global_var_init')
for section in first_sections:
for prefix in _PREFIXES:
f.write(prefix + section + '\n')
for section in expanded_sections:
f.write(section + '\n')
for prefix in _PREFIXES:
f.write(prefix + 'dummy_function_end_of_ordered_text\n')
# The following is needed otherwise Gold only applies a partial sort.
f.write('.text\n') # gets methods not in a section, such as assembly
f.write('.text.*\n') # gets everything else
# Since wildcards are not supported by lld, the "end of text" anchor symbol
# is not emitted, a different mechanism is used instead. See comments in the
# file above.
for prefix in _PREFIXES:
if prefix:
f.write(prefix + 'dummy_function_at_the_end_of_text\n')
def _CreateArgumentParser():
"""Creates and returns the argument parser."""
parser = argparse.ArgumentParser()
parser.add_argument('--target-arch', action='store',
choices=['arm', 'arm64', 'x86', 'x86_64', 'x64', 'mips'],
help='The target architecture for the library.')
parser.add_argument('--unpatched-orderfile', required=True,
help='Path to the unpatched orderfile')
parser.add_argument('--native-library', required=True,
help='Path to the native library')
parser.add_argument('--output-file', required=True, help='Output filename')
return parser
def main():
parser = _CreateArgumentParser()
options = parser.parse_args()
if not options.target_arch:
    options.target_arch = cygprofile_utils.DetectArchitecture()
symbol_extractor.SetArchitecture(options.target_arch)
GeneratePatchedOrderfile(options.unpatched_orderfile, options.native_library,
options.output_file)
return 0
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
| [
"[email protected]"
] | |
f453a36bcee504e1fc87eb4a16f5709e38556740 | 415a8a4315e6331b2a157de8a1429fe0562729f8 | /python/TryCatch.py | 75b958f68079d3ef596723ff8107b476c5de2643 | [] | no_license | alfaceor/programming-examples | 784690dd1104e4adbdf958e4163b3b462f635881 | abea970a54cfab0eacc5280ae62383495e9e6eeb | refs/heads/master | 2022-05-04T23:14:30.503114 | 2022-04-29T10:11:45 | 2022-04-29T10:11:45 | 36,015,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | #!/usr/bin/python
import numpy as np
for i in range(4):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "Oops!"
pass
for i in range(5):
try:
data = np.loadtxt("NoExiste.dat")
except IOError as e:
print "NO PASS!"
break
print "Fuck U!!!"
| [
"alfaceor"
] | alfaceor |
afe52020807529d3d426f1f80748977c241334c4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_137/548.py | 6499377835513cc99a4e2110302f4abf9f61a149 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | data = open('minesweeper.txt')
mines = {}
while True:
line = data.readline()
line = line.split()
if len(line) == 0:
break
R = int(line[0])
C = int(line[1])
M = int(line[2])
line = data.readline().strip()
if line == 'Impossible':
mines[(R, C, M)] = 'Impossible'
else:
mine = line + '\n'
for r in range(1, R):
mine += data.readline()
mines[(R, C, M)] = mine
test_data = open('C-small-attempt0.in')
num_tests = int(test_data.readline().strip())
for test in range(num_tests):
line = test_data.readline().split()
R = int(line[0])
C = int(line[1])
M = int(line[2])
output = mines[(R, C, M)]
print('Case #{0}:'.format(test + 1))
print(output)
| [
"[email protected]"
] | |
4b270fa9d701f65ef4e79353a53e22d43df8424f | ad9782856ec2f860fccbefa5e75a896691b8e1cc | /MonteCarlo/test/opt6s3l/crab_step2_VBF_HToBB_OT613_200_IT4025_opt6s3l.py | 8031794a20b9cb961ae352984ee3b6e5b3a772d7 | [] | no_license | OSU-CMS/VFPix | 7fe092fc5a973b4f9edc29dbfdf44907664683e5 | 4c9fd903219742a4eba1321dc4181da125616e4c | refs/heads/master | 2020-04-09T05:52:05.644653 | 2019-01-09T13:44:22 | 2019-01-09T13:44:22 | 30,070,948 | 0 | 0 | null | 2018-11-30T13:15:54 | 2015-01-30T12:26:20 | Python | UTF-8 | Python | false | false | 944 | py | from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = config()
config.General.requestName = 'VBF_HToBB_14TeV_step2_923_PU200_OT613_200_IT4025_opt6s3l'
config.General.workArea = 'crab'
config.General.transferOutputs = True
config.General.transferLogs = True
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'step2_DIGI_L1_L1TrackTrigger_DIGI2RAW_HLT_PU200_OT613_200_IT4025_opt6s3l.py'
config.JobType.maxMemoryMB = 4000
config.Data.inputDataset = '/VBF_HToBB_14TeV_923_OT613_200_IT4025_opt6s3l/jalimena-LheGenSim_RAWSIMoutput-efeae19cc3c320703c0b5144577e0f10/USER'
config.Data.outputDatasetTag = 'step2_PU200'
config.Data.inputDBS = 'phys03'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob = 1
config.Data.outLFNDirBase = '/store/group/lpcfpix'
config.Data.publication = True
config.Data.ignoreLocality = True
config.Site.whitelist = ["T1_US_FNAL"]
config.Site.storageSite = 'T3_US_FNALLPC'
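# Typically submitted with the CRAB3 client (general sketch, not a command taken
# from this repository):
#   crab submit -c crab_step2_VBF_HToBB_OT613_200_IT4025_opt6s3l.py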
| [
"[email protected]"
] | |
5a308f6b7f9ceacdf803dead7dbd5a2dfc85628e | 9aa1885bfd666b5d3719c29334c9769bbe88d3e0 | /bin/cache-purge-consumer.py | d1bd99303ae097493edde7eadcd860165b207716 | [] | permissive | praekelt/django-ultracache | 9c240cfad4660afdb7e679192ca0f4b05bab1831 | 476eb8a4935043f4fc6901ed3541ececed1664bf | refs/heads/develop | 2022-01-27T18:20:00.062349 | 2020-05-29T09:58:01 | 2020-05-29T09:58:01 | 38,880,711 | 32 | 4 | BSD-3-Clause | 2022-01-06T22:24:32 | 2015-07-10T13:02:45 | Python | UTF-8 | Python | false | false | 3,973 | py | """Subscribe to RabbitMQ and listen for purge instructions continuously. Manage
this script through e.g. supervisor."""
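# A minimal example of the YAML configuration read via -c/--config. Only the
# keys below are consulted by this script; the values shown are hypothetical
# placeholders:
#
#   rabbit-url: amqp://guest:guest@localhost:5672/%2F
#   proxy-address: 127.0.0.1
#   host: www.example.com
#   logfile: stdout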
import json
import traceback
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
from time import sleep
import pika
import requests
import yaml
class Consumer:
channel = None
connection = None
def __init__(self):
self.pool = ThreadPool()
parser = OptionParser()
parser.add_option("-c", "--config", dest="config",
help="Configuration file", metavar="FILE")
(options, args) = parser.parse_args()
config_file = options.config
self.config = {}
if config_file:
self.config = yaml.load(open(config_file)) or {}
def log(self, msg):
name = self.config.get("logfile", None)
if not name:
return
if name == "stdout":
print(msg)
return
fp = open(name, "a")
try:
fp.write(msg + "\n")
finally:
fp.close()
def connect(self):
parameters = pika.URLParameters(
self.config.get(
"rabbit-url",
"amqp://guest:[email protected]:5672/%2F"
)
)
self.connection = pika.BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange="purgatory", exchange_type="fanout"
)
queue = self.channel.queue_declare(exclusive=True)
queue_name = queue.method.queue
self.channel.queue_bind(exchange="purgatory", queue=queue_name)
self.channel.basic_qos(prefetch_count=1)
self.channel.basic_consume(
self.on_message, queue=queue_name, no_ack=False, exclusive=True
)
def on_message(self, channel, method_frame, header_frame, body):
self.pool.apply_async(self.handle_message, (body,))
channel.basic_ack(delivery_tag=method_frame.delivery_tag)
def handle_message(self, body):
if body:
try:
di = json.loads(body)
except ValueError:
path = body
headers = {}
else:
path = di["path"]
headers = di["headers"]
self.log("Purging %s with headers %s" % (path, str(headers)))
host = self.config.get("host", None)
try:
if host:
final_headers = {"Host": host}
final_headers.update(headers)
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
headers=final_headers,
timeout=10
)
else:
response = requests.request(
"PURGE", "http://" \
+ self.config.get("proxy-address", "127.0.0.1") + path,
timeout=10,
headers=headers
)
except Exception as exception:
msg = traceback.format_exc()
self.log("Error purging %s: %s" % (path, msg))
else:
content = response.content
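    # Example message bodies handle_message accepts (paths and header names are
    # hypothetical): a bare path such as "/some/page/" or a JSON object like
    # {"path": "/some/page/", "headers": {"X-Example": "1"}}.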
def consume(self):
loop = True
while loop:
try:
if self.channel is None:
raise pika.exceptions.ConnectionClosed()
self.channel.start_consuming()
except KeyboardInterrupt:
loop = False
self.channel.stop_consuming()
except pika.exceptions.ConnectionClosed:
try:
self.connect()
except pika.exceptions.ConnectionClosed:
sleep(1)
self.connection.close()
consumer = Consumer()
consumer.consume()
| [
"[email protected]"
] | |
0e224f6a0ff6149cf70f6a426a50cdc40b769be9 | 8d1ceed7720e374691829d78007ea146a9030e4f | /arkestra_clinical_studies/lister.py | 5905346c3c39dba5232bfa745f6c1a2ba387225d | [
"BSD-2-Clause"
] | permissive | gonff/arkestra-clinical-studies | 25ef186207781bbc979f7f12bdef194802d9c71c | d75540e006a5d8b1ccb6d05a8253eba9c9fb0a79 | refs/heads/master | 2021-01-18T05:10:23.067652 | 2014-05-21T11:19:03 | 2014-05-21T11:19:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,978 | py | from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from arkestra_utilities.generic_lister import (
ArkestraGenericLister, ArkestraGenericList, ArkestraGenericFilterList,
ArkestraGenericFilterSet
)
from arkestra_utilities.settings import MULTIPLE_ENTITY_MODE
from .models import Study
# we're going to have a list of Studies that we can search, filter and paginate
# the ArkestraGenericFilterSet provides us with some of that machinery
class StudiesFilterSet(ArkestraGenericFilterSet):
# the fields we want to be able to filter on
fields = ["date", "status", "studytype"]
class StudiesListMixin(object):
def set_items_for_entity(self):
# if we're not in MULTIPLE_ENTITY_MODE, just leave self.items alone
if MULTIPLE_ENTITY_MODE and self.entity:
# we want to include any item that has any relationship with any
# of the descendants of the entity we're looking at
# get a list of all those entities
entities = self.entity.get_descendants(
include_self=True
).values_list('id', flat=True)
# get the Studies that have a relationship with any item in that list
self.items = self.items.filter(
Q(hosted_by__in=entities) | Q(publish_to__in=entities) |
Q(funding_body__in=entities) | Q(sponsor__in=entities) |
Q(clinical_centre__in=entities)
).distinct()
# the class that produces the list of items, based on ArkestraGenericFilterList
class StudiesList(StudiesListMixin, ArkestraGenericFilterList):
# it must have a filter_set class
filter_set = StudiesFilterSet
# the model we're listing
model = Study
# the text search fields - each one is a dictionary
search_fields = [
{
# the field as its name appears in the URL: ?text=
"field_name": "text",
# a label for the field
"field_label": "Search title/summary",
# the placeholder text in the search widget
"placeholder": "Search",
# the model fields we want to search through
"search_keys": [
"title__icontains",
"summary__icontains",
],
},
]
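    # For illustration: a request such as /studies/?text=diabetes (URL and query
    # value hypothetical) would be matched case-insensitively against the title
    # and summary fields listed in search_keys above.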
# we want to override the generic list item template
item_template = "clinical_studies/study_list_item.html"
# we need our own build() method to override the generic one
def build(self):
# get the listable (by default, published and shown in lists) items
self.items = self.model.objects.listable_objects()
# we'll limit the items according to the appropriate entity - the
# method that does this is defined in the StudiesListMixin
self.set_items_for_entity()
# and limit by search terms
self.filter_on_search_terms()
# and set up the filter for rendering
self.itemfilter = self.filter_set(self.items, self.request.GET)
# the Lister class is the one that determines which lists to display, along
# with the surrounding furniture - in the case of Studies, it's just one List,
# but we could have more
class StudiesLister(ArkestraGenericLister):
# a list of available List classes
listkinds = [("studies", StudiesList)]
# the List classes we want to use
display = "studies"
class StudiesMenuList(StudiesListMixin, ArkestraGenericList):
model = Study
heading_text = _(u"News")
def build(self):
# get the listable (by default, published and shown in lists) items
self.items = self.model.objects.listable_objects()
# we'll limit the items according to the appropriate entity - the
# method that does this is defined in the StudiesListMixin
self.set_items_for_entity()
class StudiesMenuLister(ArkestraGenericLister):
listkinds = [("studies", StudiesMenuList)]
display = "studies"
| [
"[email protected]"
] | |
3b15efcd4c58e73f9d4c0135da5f36a883347fa3 | d170efa06e6e682c71961fe1213298e5a68193c3 | /python/python/rotate/test_rotate.py | fc39d304949523ec047f2d1eddf13bc3a777fc50 | [
"MIT"
] | permissive | iggy18/data-structures-and-algorithms | 45b9ebf3c0820968bda62c0ebd90a9cfd65b3902 | 700ef727ca7656724120a1873af4bd4bce5962f4 | refs/heads/main | 2023-02-27T04:45:12.535801 | 2021-02-08T22:41:28 | 2021-02-08T22:41:28 | 300,975,693 | 0 | 0 | MIT | 2021-02-12T18:39:18 | 2020-10-03T20:42:08 | JavaScript | UTF-8 | Python | false | false | 232 | py | from rotate import rotate
def test_rotate():
assert rotate
def test_rotate_works_properly():
x = [[1,2,3], [1,2,3], [1,2,3]]
actual = rotate(x)
expected = [[1,1,1], [2,2,2], [3,3,3,]]
assert actual == expected
| [
"[email protected]"
] | |
5b5318e9339850b6265dc415340e362ff7e63894 | 8f3336bbf7cd12485a4c52daa831b5d39749cf9b | /Python/diameter-of-binary-tree.py | 2f8d44152f1d1d7bc911f4df55398ee39e93ccf0 | [] | no_license | black-shadows/LeetCode-Topicwise-Solutions | 9487de1f9a1da79558287b2bc2c6b28d3d27db07 | b1692583f7b710943ffb19b392b8bf64845b5d7a | refs/heads/master | 2022-05-30T22:16:38.536678 | 2022-05-18T09:18:32 | 2022-05-18T09:18:32 | 188,701,704 | 240 | 110 | null | 2020-05-08T13:04:36 | 2019-05-26T15:41:03 | C++ | UTF-8 | Python | false | false | 501 | py | # Time: O(n)
# Space: O(h)
class Solution(object):
def diameterOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: int
"""
return self.depth(root, 0)[1]
def depth(self, root, diameter):
if not root:
return 0, diameter
left, diameter = self.depth(root.left, diameter)
right, diameter = self.depth(root.right, diameter)
return 1 + max(left, right), max(diameter, left + right)
| [
"[email protected]"
] | |
ca8bebbb101f6f0a7ea337d1d0365016720fb6b6 | cf09d6430e37b5460d7208d6cae6d3af0fa15925 | /jsonbot/jsb/lib/config.py | 87dc5bc6d9858b7446409e090713d466ea5d113e | [
"MIT"
] | permissive | Lujeni/old-projects | 2bbf0ff89852a3e4a9677475a615d2ee4b07d635 | 657304c8b017a98935de9728fc695abe8be7cc4f | refs/heads/master | 2021-03-12T23:08:34.054777 | 2014-10-16T23:10:15 | 2014-10-16T23:10:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,266 | py | # jsb/lib/config.py
#
#
""" config module. config is stored as item = JSON pairs. """
## jsb imports
from jsb.utils.trace import whichmodule, calledfrom
from jsb.utils.lazydict import LazyDict
from jsb.utils.exception import handle_exception
from jsb.utils.name import stripname
from datadir import getdatadir
from errors import CantSaveConfig, NoSuchFile
from jsb.utils.locking import lockdec
## simplejson imports
from jsb.imports import getjson
json = getjson()
## basic imports
import sys
import os
import types
import thread
import logging
import uuid
import thread
import getpass
import copy
import time
## locks
savelock = thread.allocate_lock()
savelocked = lockdec(savelock)
## defines
cpy = copy.deepcopy
## classes
class Config(LazyDict):
"""
config class is a dict containing json strings. is writable to file
and human editable.
"""
def __init__(self, filename, verbose=False, input={}, ddir=None, nolog=False, *args, **kw):
assert filename
LazyDict.__init__(self, input, *args, **kw)
self.origname = filename
self.origdir = ddir or getdatadir()
self.setcfile(ddir, filename)
self.jsondb = None
if not self._comments: self._comments = {}
try:
import waveapi
self.isdb = True
self.isgae = True
except ImportError:
self.isgae = False
self.isdb = False
dodb = False
try:
logging.info("fromfile - %s from %s" % (self.origname, whichmodule(2)))
self.fromfile(self.cfile)
except IOError, ex: handle_exception() ; dodb = True
if dodb or (self.isgae and not "mainconfig" in filename):
try:
from persist import Persist
self.jsondb = Persist(self.cfile)
if self.jsondb: self.merge(self.jsondb.data)
logging.warn("fromdb - %s" % self.cfile)
except ImportError:
logging.warn("can't read config from %s - %s" % (self.cfile, str(ex)))
self.init()
if self.owner: logging.info("owner is %s" % self.owner)
if not self.has_key("uuid"): self.setuuid()
if not self.has_key("cfile"): self.cfile = self.setcfile(self.origdir, self.origname)
assert self.cfile
def setcfile(self, ddir, filename):
self.filename = filename or 'mainconfig'
self.datadir = ddir or getdatadir()
self.dir = self.datadir + os.sep + 'config'
self.cfile = self.dir + os.sep + filename
def setuuid(self, save=True):
logging.debug("setting uuid")
self.uuid = str(uuid.uuid4())
if save: self.save()
def __deepcopy__(self, a):
""" accessor function. """
cfg = Config(self.filename, input=self, nolog=True)
return cfg
def __getitem__(self, item):
""" accessor function. """
if not self.has_key(item): return None
else: return LazyDict.__getitem__(self, item)
def merge(self, cfg):
""" merge in another cfg. """
self.update(cfg)
def set(self, item, value):
""" set item to value. """
LazyDict.__setitem__(self, item, value)
def fromdb(self):
""" read config from database. """
from jsb.lib.persist import Persist
tmp = Persist(self.cfile)
logging.debug("fromdb - %s - %s" % (self.cfile, tmp.data.tojson()))
self.update(tmp.data)
def todb(self):
""" save config to database. """
cp = dict(self)
del cp['jsondb']
if not self.jsondb:
from jsb.lib.persist import Persist
self.jsondb = Persist(self.cfile)
self.jsondb.data = cp
self.jsondb.save()
def fromfile(self, filename=None):
""" read config object from filename. """
curline = ""
fname = filename or self.cfile
if not fname: raise Exception(" %s - %s" % (self.cfile, self.dump()))
if not os.path.exists(fname): logging.warn("config file %s doesn't exist yet" % fname) ; return False
comment = ""
for line in open(fname, 'r'):
curline = line
curline = curline.strip()
if curline == "": continue
if curline.startswith('#'): comment = curline; continue
if True:
try:
key, value = curline.split('=', 1)
kkey = key.strip()
self[kkey] = json.loads(unicode(value.strip()))
if comment: self._comments[kkey] = comment
comment = ""
except ValueError: logging.error("skipping line - unable to parse: %s" % line)
#self.cfile = fname
return
def tofile(self, filename=None, stdout=False):
""" save config object to file. """
if not filename: filename = self.cfile
if not filename: raise Exception("no cfile found - %s" % whichmodule(3))
if self.isgae: logging.warn("can't save config file %s on GAE" % filename) ; return
logging.warn("saving %s" % filename)
if filename.startswith(os.sep): d = [os.sep,]
else: d = []
for p in filename.split(os.sep)[:-1]:
if not p: continue
d.append(p)
ddir = os.sep.join(d)
if not os.path.isdir(ddir):
logging.debug("persist - creating %s dir" % ddir)
try: os.mkdir(ddir)
except OSError, ex:
logging.error("persist - not saving - failed to make %s - %s" % (ddir, str(ex)))
return
written = []
curitem = None
later = []
try:
if stdout: configtmp = sys.stdout
else: configtmp = open(filename + '.tmp', 'w')
configtmp.write('# ===========================================================\n#\n')
configtmp.write("# JSONBOT CONFIGURATION FILE - %s\n" % filename)
configtmp.write("#\n")
configtmp.write('# last changed on %s\n#\n' % time.ctime(time.time()))
configtmp.write("# This file contains configration data for the JSONBOT.\n")
configtmp.write('# Variables are defined by "name = json value" pairs.\n')
configtmp.write('# Make sure to use " in strings.\n#\n')
configtmp.write('# The bot can edit this file!.\n#\n')
configtmp.write('# ===========================================================\n\n')
teller = 0
keywords = self.keys()
keywords.sort()
for keyword in keywords:
value = self[keyword]
if keyword in written: continue
if keyword in ['isgae', 'origdir', 'origname', 'issaved', 'blacklist', 'whitelist', 'followlist', 'uuid', 'whitelist', 'datadir', 'name', 'createdfrom', 'cfile', 'filename', 'dir', 'isdb']: later.append(keyword) ; continue
if keyword == 'jsondb': continue
if keyword == 'optionslist': continue
if keyword == 'gatekeeper': continue
if keyword == "_comments": continue
if self._comments and self._comments.has_key(keyword):
configtmp.write(self._comments[keyword] + u"\n")
curitem = keyword
try: configtmp.write('%s = %s\n' % (keyword, json.dumps(value)))
except TypeError: logging.error("%s - can't serialize %s" % (filename, keyword)) ; continue
teller += 1
#configtmp.write("\n")
configtmp.write('\n\n# ============================================================\n#\n')
configtmp.write("# bot generated stuff.\n#\n")
configtmp.write('# ============================================================\n\n')
for keyword in later:
if self._comments and self._comments.has_key(keyword):
configtmp.write(self._comments[keyword] + u"\n")
curitem = keyword
value = self[keyword]
try: configtmp.write(keyword + " = " + json.dumps(value) + "\n")
except TypeError: logging.error("%s - can't serialize %s" % (filename, keyword)) ; continue
teller += 1
#configtmp.write("\n")
if not "mainconfig" in filename and self._comments:
try:
configtmp.write('\n\n# ============================================================\n#\n')
configtmp.write("# possible other config variables.\n#\n")
configtmp.write('# ============================================================\n\n')
items = self._comments.keys()
keys = self.keys()
do = []
for var in items:
if var not in keys: do.append(var)
do.sort()
for var in do:
configtmp.write(u"# %s -=- %s\n" % (var, self._comments[var]))
configtmp.write("\n\n")
except Exception, ex: handle_exception()
else: configtmp.write("\n\n# jsonbot can run multiple bots at once. see %s/config/fleet for their configurations.\n\n" % self.origdir)
if not stdout:
configtmp.close()
os.rename(filename + '.tmp', filename)
return teller
except Exception, ex:
handle_exception()
logging.error("ERROR WRITING %s CONFIG FILE: %s .. %s" % (self.cfile, str(ex), curitem))
@savelocked
def save(self):
""" save the config. """
logging.info("save called from %s" % calledfrom(sys._getframe(2)))
self.issaved = True
if self.isdb: self.todb()
else: self.tofile(self.cfile)
def load_config(self, verbose=False):
""" load the config file. """
if self.isdb: self.fromdb()
else: self.fromfile(self.filename)
self.init()
if verbose: logging.debug('%s' % self.dump())
def init(self):
""" initialize the config object. """
if not self._comments: self._comments = {}
if self.filename == 'mainconfig':
self._comments["whitelist"] = "# - whitelist used to allow ips .. bot maintains this"
self.setdefault("whitelist", [])
self._comments["blacklist"] = "# - blacklist used to deny ips .. bot maintains this"
self.setdefault("blacklist", [])
self.setdefault('owner', [])
self._comments["loglist"] = "# - loglist .. maintained by the bot."
self.setdefault('loglist', [])
self._comments["loglevel"] = "# - loglevel of all bots"
self.setdefault('loglevel', "warn")
self._comments["loadlist"] = "# - loadlist .. not used yet."
self.setdefault('loadlist', [])
self._comments["quitmsg"] = "# - message to send on quit"
self.setdefault('quitmsg', "http://jsonbot.googlecode.com")
self._comments["dotchars"] = "# - characters to used as seperator."
self.setdefault('dotchars', ", ")
self._comments["floodallow"] = "# - whether the bot is allowed to flood."
self.setdefault('floodallow', 1)
self._comments["auto_register"] = "# - enable automatic registration of new users."
self.setdefault('auto_register', 0)
self._comments["guestasuser"] = "# - enable this to give new users the USER permission besides GUEST."
self.setdefault('guestasuser', 0)
self._comments["globalcc"] = "# - global control character"
self.setdefault('globalcc', "")
self._comments["app_id"] = "# - application id used by appengine."
self.setdefault('app_id', "jsonbot")
self._comments["appname"] = "# - application name as used by the bot."
self.setdefault('appname', "JSONBOT")
self._comments["domain"] = "# - domain .. used for WAVE."
self.setdefault('domain', "")
self._comments["color"] = "# - color used in the webconsole."
self.setdefault('color', "")
self._comments["colors"] = "# - enable colors in logging."
self.setdefault('colors', "")
self._comments["memcached"] = "# - enable memcached."
self.setdefault('memcached', 0)
self._comments["allowrc"] = "# - allow execution of rc files."
self.setdefault('allowrc', 0)
self._comments["allowremoterc"] = "# - allow execution of remote rc files."
self.setdefault('allowremoterc', 0)
self._comments['dbenable'] = "# - enable database support"
self.setdefault('dbenable', 0)
self._comments['dbtype'] = "# - type of database .. sqlite or mysql at this time."
self.setdefault('dbtype', 'sqlite')
self._comments['dbname'] = "# - database name"
self.setdefault('dbname', "main.db")
self._comments['dbhost'] = "# - database hostname"
self.setdefault('dbhost', "localhost")
self._comments['dbuser'] = "# - database user"
self.setdefault('dbuser', "bart")
self._comments['dbpasswd'] = "# - database password"
self.setdefault('dbpasswd', "mekker2")
self._comments['ticksleep'] = "# - nr of seconds to sleep before creating a TICK event."
self.setdefault('ticksleep', 1)
self._comments['bindhost'] = "# - host to bind to"
self.setdefault("bindhost", "")
            self._comments['defaultcc'] = "# - default control character."
self.setdefault("defaultcc", ".")
self['createdfrom'] = whichmodule()
if 'xmpp' in self.cfile: self.setdefault('fulljids', 1)
if 'fleet' in self.cfile:
self.setdefault('disable', 1)
self.setdefault("owner", [])
self.setdefault("user", "")
self.setdefault("host", "")
self.setdefault("server", "")
self.setdefault("ssl", 0)
self.setdefault("ipv6", 0)
self.setdefault("channels", [])
self.setdefault("port", "")
self.setdefault("password", "")
self._comments['datadir'] = "# - directory to store bot data in."
self._comments["owner"] = "# - owner of the bot."
self._comments["uuid"] = "# - bot generated uuid for this config file."
self._comments["user"] = "# - user used to login on xmpp networks."
self._comments["host"] = "# - host part of the user, derived from user var."
self._comments["server"] = "# - server to connect to (only when different from users host)."
self._comments["password"] = "# - password to use in authing the bot."
self._comments["port"] = "# - port to connect to (IRC)."
self._comments["ssl"] = "# - whether to enable ssl (set to 1 to enable)."
self._comments["ipv6"] = "# - whether to enable ssl (set to 1 to enable)."
self._comments["name"] = "# - the name of the bot."
self._comments["disable"] = "# - set this to 0 to enable the bot."
self._comments["followlist"] = "# - who to follow on the bot .. bot maintains this list."
self._comments["networkname"] = "# - networkname .. not used right now."
self._comments["type"] = "# - the bot's type."
self._comments["nick"] = "# - the bot's nick."
self._comments["channels"] = "# - channels to join."
self._comments["cfile"] = "# - filename of this config file. edit this when you move this file."
self._comments["createdfrom"] = "# - function that created this config file. bot generated"
self._comments["dir"] = "# - directory in which this config file lives."
self._comments["isdb"] = "# - whether this config file lives in the database and not on file."
self._comments["filename"] = "# - filename of this config file."
self._comments["username"] = "# - username of the bot."
self._comments["fulljids"] = "# - use fulljids of bot users (used in non anonymous conferences."
self._comments["servermodes"] = "# - string of modes to send to the server after connect."
self._comments["realname"] = "# - name used in the ident of the bot."
self._comments["onconnect"] = "# - string to send to server after connect."
self._comments["onconnectmode"] = "# - MODE string to send to server after connect."
self._comments["realname"] = "# - mode string to send to the server after connect."
self._comments["issaved"] = "# - whether this config file has been saved. "
self._comments["origdir"] = "# - original datadir for this configfile. "
self._comments["origname"] = "# - displayable name of the config file name. "
return self
def reload(self):
""" reload the config file. """
self.load_config()
return self
def ownercheck(userhost):
""" check whether userhost is a owner. """
if not userhost: return False
if userhost in cfg['owner']: return True
return False
mainconfig = None
def getmainconfig(ddir=None):
global mainconfig
if not mainconfig: mainconfig = Config("mainconfig", ddir=ddir)
if not mainconfig.has_key("issaved"): mainconfig.save()
return mainconfig
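# Illustrative usage sketch (added for clarity; only names defined above are used):
#
#   cfg = getmainconfig()
#   cfg.set("loglevel", "warn")
#   cfg.save()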
irctemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configuration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - port to connect to (IRC).
port = 6667
# - server to connect to (on jabber only when different than the host).
server = "localhost"
# - the bot's type.
type = "irc"
# - username of the bot.
username = "jsonbot"
# - ssl enabled or not
ssl = 0
# - ipv6 enabled or not
ipv6 = 0
# - name used in the ident of the bot
realname = "jsonbot"
# - string of modes sent to the server on connect
servermodes = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
xmpptemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configuration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - channels to join
channels = []
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - use fulljids of bot users (used in non anonymous conferences.
fulljids = 1
# password used to auth on the server.
password = ""
# - server to connect to (on jabber only when different than the user's host).
server = ""
# - the bot's type.
type = "sxmpp"
# - user used to login on xmpp networks.
user = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
sleektemplate = """# =====================================================
#
# JSONBOT CONFIGURATION FILE -
#
# last changed on
#
# This file contains configuration data for the JSONBOT.
# Variables are defined by "name = json value" pairs.
# Make sure to use " in strings.
# The bot can edit this file!
#
# =====================================================
# - channels to join
channels = []
# - to enable put this to 0
disable = 1
# - the bot's nick.
nick = "jsb"
# - owner of the bot.
owner = []
# - use fulljids of bot users (used in non anonymous conferences.
fulljids = 1
# password used to auth on the server.
password = ""
# - server to connect to (on jabber only when different than the user's host).
server = ""
# - the bot's type.
type = "sleek"
# - user used to login on xmpp networks.
user = ""
# =====================================================
#
# bot generated stuff.
#
# =====================================================
"""
def makedefaultconfig(type, ddir=None):
filename = 'config'
datadir = ddir or getdatadir()
dir = datadir + os.sep + 'config'
ttype = "default-%s" % type
cfile = dir + os.sep + "fleet" + os.sep + ttype + os.sep + filename
logging.warn("creating default config for type %s in %s" % (type, cfile))
splitted = cfile.split(os.sep)
mdir = ""
for i in splitted[:-1]:
mdir += "%s%s" % (i, os.sep)
if not os.path.isdir(mdir): os.mkdir(mdir)
logging.debug("filename is %s" % cfile)
f = open(cfile, "w")
if type == "irc": f.write(irctemplate) ; f.close()
elif type == "sxmpp": f.write(xmpptemplate) ; f.close()
elif type == "sleek": f.write(sleektemplate) ; f.close()
else: raise Exception("no such bot type: %s" % type)
| [
"[email protected]"
] | |
5d0ee6b0dc39b9f92bdb2eef54ed35b8d54a32c9 | 696e35ccdf167c3f6b1a7f5458406d3bb81987c9 | /content/test/gpu/gpu_tests/gpu_integration_test.py | 9f991d50206a63aaaaa763e1e5b49a5a4108a461 | [
"BSD-3-Clause"
] | permissive | mgh3326/iridium-browser | 064e91a5e37f4e8501ea971483bd1c76297261c3 | e7de6a434d2659f02e94917be364a904a442d2d0 | refs/heads/master | 2023-03-30T16:18:27.391772 | 2019-04-24T02:14:32 | 2019-04-24T02:14:32 | 183,128,065 | 0 | 0 | BSD-3-Clause | 2019-11-30T06:06:02 | 2019-04-24T02:04:51 | null | UTF-8 | Python | false | false | 10,996 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from telemetry.testing import serially_executed_browser_test_case
from telemetry.util import screenshot
from gpu_tests import exception_formatter
from gpu_tests import gpu_test_expectations
class GpuIntegrationTest(
serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
_cached_expectations = None
_also_run_disabled_tests = False
# Several of the tests in this directory need to be able to relaunch
  # the browser on demand with a different set of command line arguments
  # than originally specified. To enable this, the necessary
# static state is hoisted here.
# We store a deep copy of the original browser finder options in
# order to be able to restart the browser multiple times, with a
# different set of command line arguments each time.
_original_finder_options = None
# We keep track of the set of command line arguments used to launch
# the browser most recently in order to figure out whether we need
# to relaunch it, if a new pixel test requires a different set of
# arguments.
_last_launched_browser_args = set()
@classmethod
def SetUpProcess(cls):
super(GpuIntegrationTest, cls).SetUpProcess()
cls._original_finder_options = cls._finder_options.Copy()
@classmethod
def AddCommandlineArgs(cls, parser):
"""Adds command line arguments understood by the test harness.
Subclasses overriding this method must invoke the superclass's
version!"""
parser.add_option(
'--also-run-disabled-tests',
dest='also_run_disabled_tests',
action='store_true', default=False,
help='Run disabled tests, ignoring Skip and Fail expectations')
@classmethod
def CustomizeBrowserArgs(cls, browser_args):
"""Customizes the browser's command line arguments.
NOTE that redefining this method in subclasses will NOT do what
you expect! Do not attempt to redefine this method!
"""
if not browser_args:
browser_args = []
cls._finder_options = cls._original_finder_options.Copy()
browser_options = cls._finder_options.browser_options
# A non-sandboxed, 15-seconds-delayed gpu process is currently running in
# the browser to collect gpu info. A command line switch is added here to
# skip this gpu process for all gpu integration tests to prevent any
# interference with the test results.
browser_args.append(
'--disable-gpu-process-for-dx12-vulkan-info-collection')
# Append the new arguments.
browser_options.AppendExtraBrowserArgs(browser_args)
cls._last_launched_browser_args = set(browser_args)
cls.SetBrowserOptions(cls._finder_options)
@classmethod
def RestartBrowserIfNecessaryWithArgs(cls, browser_args, force_restart=False):
if not browser_args:
browser_args = []
elif '--disable-gpu' in browser_args:
# Some platforms require GPU process, so browser fails to launch with
# --disable-gpu mode, therefore, even test expectations fail to evaluate.
browser_args = list(browser_args)
os_name = cls.browser.platform.GetOSName()
if os_name == 'android' or os_name == 'chromeos':
browser_args.remove('--disable-gpu')
if force_restart or set(browser_args) != cls._last_launched_browser_args:
logging.info('Restarting browser with arguments: ' + str(browser_args))
cls.StopBrowser()
cls.CustomizeBrowserArgs(browser_args)
cls.StartBrowser()
@classmethod
def RestartBrowserWithArgs(cls, browser_args):
cls.RestartBrowserIfNecessaryWithArgs(browser_args, force_restart=True)
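  # Illustrative sketch (not part of the original file): a subclass test would
  # typically relaunch the browser with the flags it needs before driving the page,
  # e.g.:
  #
  #   self.RestartBrowserIfNecessaryWithArgs(['--enable-gpu-benchmarking'])
  #   self.tab.Navigate(url)
  #
  # The flag and the Navigate() call above are examples only.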
# The following is the rest of the framework for the GPU integration tests.
@classmethod
def GenerateTestCases__RunGpuTest(cls, options):
cls._also_run_disabled_tests = options.also_run_disabled_tests
for test_name, url, args in cls.GenerateGpuTests(options):
yield test_name, (url, test_name, args)
@classmethod
def StartBrowser(cls):
# We still need to retry the browser's launch even though
# desktop_browser_finder does so too, because it wasn't possible
# to push the fetch of the first tab into the lower retry loop
# without breaking Telemetry's unit tests, and that hook is used
# to implement the gpu_integration_test_unittests.
for x in range(0, 3):
try:
super(GpuIntegrationTest, cls).StartBrowser()
cls.tab = cls.browser.tabs[0]
return
except Exception:
logging.warning('Browser start failed (attempt %d of 3)', (x + 1))
# If we are on the last try and there is an exception take a screenshot
# to try and capture more about the browser failure and raise
if x == 2:
url = screenshot.TryCaptureScreenShotAndUploadToCloudStorage(
cls.platform)
if url is not None:
logging.info("GpuIntegrationTest screenshot of browser failure " +
"located at " + url)
else:
logging.warning("GpuIntegrationTest unable to take screenshot")
raise
# Otherwise, stop the browser to make sure it's in an
# acceptable state to try restarting it.
if cls.browser:
cls.StopBrowser()
@classmethod
def _RestartBrowser(cls, reason):
logging.warning('Restarting browser due to '+ reason)
cls.StopBrowser()
cls.SetBrowserOptions(cls._finder_options)
cls.StartBrowser()
def _RunGpuTest(self, url, test_name, *args):
expectations = self.__class__.GetExpectations()
expectation = expectations.GetExpectationForTest(
self.browser, url, test_name)
if self.__class__._also_run_disabled_tests:
# Ignore test expectations if the user has requested it.
expectation = 'pass'
if expectation == 'skip':
# skipTest in Python's unittest harness raises an exception, so
# aborts the control flow here.
self.skipTest('SKIPPING TEST due to test expectations')
try:
# TODO(nednguyen): For some reason the arguments are getting wrapped
# in another tuple sometimes (like in the WebGL extension tests).
# Perhaps only if multiple arguments are yielded in the test
# generator?
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
self.RunActualGpuTest(url, *args)
except Exception:
if expectation == 'pass':
# This is not an expected exception or test failure, so print
# the detail to the console.
exception_formatter.PrintFormattedException()
# Symbolize any crash dump (like from the GPU process) that
# might have happened but wasn't detected above. Note we don't
# do this for either 'fail' or 'flaky' expectations because
# there are still quite a few flaky failures in the WebGL test
# expectations, and since minidump symbolization is slow
# (upwards of one minute on a fast laptop), symbolizing all the
# stacks could slow down the tests' running time unacceptably.
self.browser.LogSymbolizedUnsymbolizedMinidumps(logging.ERROR)
# This failure might have been caused by a browser or renderer
# crash, so restart the browser to make sure any state doesn't
# propagate to the next test iteration.
self._RestartBrowser('unexpected test failure')
raise
elif expectation == 'fail':
msg = 'Expected exception while running %s' % test_name
exception_formatter.PrintFormattedException(msg=msg)
# Even though this is a known failure, the browser might still
# be in a bad state; for example, certain kinds of timeouts
# will affect the next test. Restart the browser to prevent
# these kinds of failures propagating to the next test.
self._RestartBrowser('expected test failure')
return
if expectation != 'flaky':
logging.warning(
'Unknown expectation %s while handling exception for %s',
expectation, test_name)
raise
# Flaky tests are handled here.
num_retries = expectations.GetFlakyRetriesForTest(
self.browser, url, test_name)
if not num_retries:
# Re-raise the exception.
raise
# Re-run the test up to |num_retries| times.
for ii in xrange(0, num_retries):
print 'FLAKY TEST FAILURE, retrying: ' + test_name
try:
# For robustness, shut down the browser and restart it
# between flaky test failures, to make sure any state
# doesn't propagate to the next iteration.
self._RestartBrowser('flaky test failure')
self.RunActualGpuTest(url, *args)
break
except Exception:
# Squelch any exceptions from any but the last retry.
if ii == num_retries - 1:
# Restart the browser after the last failure to make sure
# any state doesn't propagate to the next iteration.
self._RestartBrowser('excessive flaky test failures')
raise
else:
if expectation == 'fail':
logging.warning(
'%s was expected to fail, but passed.\n', test_name)
@classmethod
def GenerateGpuTests(cls, options):
"""Subclasses must implement this to yield (test_name, url, args)
tuples of tests to run."""
raise NotImplementedError
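  # Illustrative sketch of a hypothetical subclass (not part of this file):
  #
  #   @classmethod
  #   def GenerateGpuTests(cls, options):
  #     yield ('Example_basic', 'example.html', ())
  #
  # Each yielded tuple becomes one test case via GenerateTestCases__RunGpuTest above.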
def RunActualGpuTest(self, file_path, *args):
"""Subclasses must override this to run the actual test at the given
URL. file_path is a path on the local file system that may need to
be resolved via UrlOfStaticFilePath.
"""
raise NotImplementedError
@classmethod
def GetExpectations(cls):
if not cls._cached_expectations:
cls._cached_expectations = cls._CreateExpectations()
if not isinstance(cls._cached_expectations,
gpu_test_expectations.GpuTestExpectations):
raise Exception(
'gpu_integration_test requires use of GpuTestExpectations')
return cls._cached_expectations
@classmethod
def _CreateExpectations(cls):
# Subclasses **must** override this in order to provide their test
# expectations to the harness.
#
# Do not call this directly. Call GetExpectations where necessary.
raise NotImplementedError
@classmethod
def _EnsureTabIsAvailable(cls):
try:
cls.tab = cls.browser.tabs[0]
except Exception:
# restart the browser to make sure a failure in a test doesn't
# propagate to the next test iteration.
logging.exception("Failure during browser startup")
cls._RestartBrowser('failure in setup')
raise
def setUp(self):
self._EnsureTabIsAvailable()
def LoadAllTestsInModule(module):
# Just delegates to serially_executed_browser_test_case to reduce the
# number of imports in other files.
return serially_executed_browser_test_case.LoadAllTestsInModule(module)
| [
"[email protected]"
] | |
96fd3506464c392a8fd723e5f4d4aeaf7d0ba1cc | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/libtbx_refresh.py | b1206d46a3f922f73066f670596a7b7a0ef8f24f | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,790 | py | from __future__ import absolute_import, division, print_function
import os
from libtbx.utils import warn_if_unexpected_md5_hexdigest
if self.env.is_ready_for_build():
message_template = ' Generating C++ files in:\n "%s"'
# eltbx
from cctbx.source_generators.eltbx import generate_henke_cpp
from cctbx.source_generators.eltbx import generate_sasaki_cpp
target_dir = self.env.under_build("cctbx/eltbx")
print(message_template % target_dir)
for label,generator_module in [("Henke", generate_henke_cpp),
("Sasaki", generate_sasaki_cpp)]:
if os.path.isdir(generator_module.reference_tables_directory):
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
generator_module.run(target_dir=target_dir)
else:
print("*"*79)
print("Warning: directory with %s tables is missing:" % label)
print(" ", repr(generator_module.reference_tables_directory))
print("*"*79)
# flex_fwd.h
from cctbx.source_generators import flex_fwd_h
target_dir = self.env.under_build("include/cctbx/boost_python")
print(message_template % target_dir)
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
flex_fwd_h.run(target_dir)
# reference_table.cpp : checking that it is up-to-date
for f,sig in [
("reference_table.py", "b4d948c292357b90c8b4d5716d607bb9"),
("short_cuts.py", "18e5b9d93962d70711497de1d6dbebbb"),
("proto/generate_cpp_asu_table.py", "0f19e51b469650aa23e81483051eeb10")]:
fn = "sgtbx/direct_space_asu/" + f
warn_if_unexpected_md5_hexdigest(
path=self.env.under_dist( module_name="cctbx", path=fn),
expected_md5_hexdigests=[ sig ],
hints=[
" Files to review:",
" "+fn,
" cctbx/libtbx_refresh.py"])
| [
"[email protected]"
] | |
264b62e7d0d2651cf9ec655cdfb6fafd32babdd4 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_201/2701.py | 4dfd25441bfd296e14ceefcf2861b262d56462e9 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | import math
def get_stall(arg):
l = []
arg.sort()
for p1, p2 in zip(arg, arg[1:]):
diff = abs(p1 - p2)
if not l:
l.append(diff)
l.append(p1+(diff//2))
l.append(p1)
elif l[0] < diff:
l.clear()
l.append(diff)
l.append(p1 + (diff//2))
l.append(p1)
else:
pass
return l
t = int(input()) # read a line with a single integer
for x in range(1, t + 1):
n, k = [int(s) for s in input().split(" ")] # read a list of integers, 2 in this case
if n == k:
print("Case #{}: {} {}".format(x, 0, 0))
else:
ls = [0, n+1]
blank_list = []
for i in range(k):
mee = get_stall(ls)
# print(mee)
ls.append(mee[1])
ls.sort()
# print("***", ls)
stall = ls.index(mee[1])
val1 = ls[stall-1]
val2 = ls[stall+1]
z = mee[1]-val1 - 1
y = val2 - mee[1] - 1
# y = max(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
# z = min(([abs(t - s)//2 for s, t in zip(ls, ls[1:])]))
# print("Case #{}: {} {}".format(x, max(abs(mee[1]-mee[0])-1, y), max(abs(mee[2]-mee[1]), abs(z))-1))
print("Case #{}: {} {}".format(x, max(y, z), min(y, z)))
| [
"[email protected]"
] | |
a28f99427e7b585a4de577169e2d4afd3ab4e90e | 618522a8ffed585e27701b9acb1a1171e3c5c924 | /salience_sum/module/encoder.py | 845e220e59a1dd02f3abb3eeec33d31e13a09aba | [] | no_license | blodstone/Salience_Sum | 9795c2a1c03c86218a8c4560ba65f7d1ff5f65e8 | ce2e9e316a68c18bd523ba9e3d1e3ea286bbf068 | refs/heads/master | 2020-08-29T11:49:40.695618 | 2020-01-21T16:17:18 | 2020-01-21T16:17:18 | 218,023,295 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,495 | py | import torch
from allennlp.modules import Seq2SeqEncoder
from allennlp.nn.util import get_lengths_from_binary_sequence_mask
from torch.nn import LSTM, Linear, Sequential, ReLU
from typing import Dict, Tuple
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
@Seq2SeqEncoder.register('salience_encoder')
class Encoder(Seq2SeqEncoder):
"""
    A standard LSTM encoder with optional bidirectionality. If bidirectional is True,
    the two directions are concatenated in the resulting encoder states and the final
    hidden state is reduced back to a single hidden size.
    All tensors are batch-first.
"""
def __init__(self, input_size,
hidden_size,
num_layers,
bidirectional,
stateful: bool = False) -> None:
super().__init__(stateful)
self.hidden_size = hidden_size
self.bidirectional = bidirectional
self.num_layers = num_layers
self.input_size = input_size
self._rnn = LSTM(input_size=self.input_size,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
bidirectional=self.bidirectional,
batch_first=True)
self._reduce = Linear(self.hidden_size * 2, self.hidden_size)
def forward(self, embedded_src: torch.Tensor, source_mask: torch.Tensor) \
-> Tuple[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
source_lengths = get_lengths_from_binary_sequence_mask(source_mask)
packed_src = pack_padded_sequence(embedded_src, source_lengths,
batch_first=True, enforce_sorted=False)
# states = (B x L X 2*H)
packed_states, final = self._rnn(packed_src)
states, _ = pad_packed_sequence(packed_states, batch_first=True)
batch_size = states.size(0)
# final_states and context = (B x 2*num_layer x H)
final_state, context = final
# Reducing the dual hidden size to one hidden size
if self.bidirectional:
final_state = self._reduce(final_state.view(batch_size, self.num_layers, -1))
context = self._reduce(context.view(batch_size, self.num_layers, -1))
return states, (final_state, context)
def get_input_dim(self) -> int:
return self.input_size
def get_output_dim(self) -> int:
return self.hidden_size
def is_bidirectional(self) -> bool:
return self.bidirectional
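# Illustrative usage sketch (added for clarity; shapes follow the comments above):
#
#   encoder = Encoder(input_size=300, hidden_size=256, num_layers=1, bidirectional=True)
#   states, (final_state, context) = encoder(embedded_src, source_mask)
#   # states: (batch, seq_len, 2 * hidden); final_state, context: (batch, num_layers, hidden)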
| [
"[email protected]"
] | |
2a9bf81297de2e77ef26a208ce9dd789aafb71d2 | 40248f9e5ed813fbb966df515ece9193cebf889d | /exapi/request_creators/hitbtc/market_data/interface.py | a1dc6f761c714843d5db467ba83515a00f5001a5 | [
"MIT"
] | permissive | astsu-dev/exapi1 | 29bc22e0949e835d6ea6887e9c52288584a095eb | 1ef39ccdd77e9ddb60ec6eaa16a2cc26e1ac3e12 | refs/heads/main | 2023-05-08T20:08:18.435247 | 2021-06-02T11:25:11 | 2021-06-02T11:25:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,161 | py | """Has interface for hitbtc market data request creator."""
from typing import Optional, Protocol
from exapi.request_creators.request import Request
from exapi.typedefs.hitbtc import (CandlesPeriod, Currencies, Currency,
IntervalValue, SortBy, SortDirection,
Symbol, Symbols)
class IHitbtcMarketDataRequestCreator(Protocol):
"""Has methods for creating requests for hitbtc market data endpoints."""
def create_get_currencies_request(self, currencies: Optional[Currencies] = None) -> Request:
"""Creates request for /public/currency endpoint.
Requires no API key Access Rights.
Args:
currencies (Optional[Currencies], optional): specified currencies.
If not passed, then will create for all currencies.
Returns:
Request
"""
def create_get_certain_currency_request(self, currency: Currency) -> Request:
"""Creates request for /public/currency/`currency` endpoint.
Requires no API key Access Rights.
Args:
currency (Currency)
Returns:
Request
"""
def create_get_symbols_request(self, symbols: Optional[Symbols] = None) -> Request:
"""Creates request for /public/symbol endpoint.
Args:
symbols (Optional[Symbols], optional): list of symbols.
If not passed, then will create for all symbols.
Returns:
Request
"""
def create_get_certain_symbol_request(self, symbol: Symbol) -> Request:
"""Creates request for /public/symbol/`symbol` endpoint.
Requires no API key Access Rights.
Args:
symbol (Symbol): certain symbol
Returns:
Request
"""
def create_get_tickers_request(self, symbols: Optional[Symbols] = None) -> Request:
"""Creates request for /public/ticker endpoint.
Requires no API key Access Rights.
Args:
symbols (Optional[Symbols], optional): list of symbols.
If not passed, then will create for all symbols.
Returns:
Request
"""
def create_get_certain_ticker_request(self, symbol: Symbol) -> Request:
"""Creates request for /public/ticker/`symbol` endpoint.
Requires no API key Access Rights.
Args:
symbol (Symbol): certain symbol
Returns:
Request
"""
def create_get_trades_request(self, symbols: Optional[Symbols] = None,
sort: Optional[SortDirection] = None,
from_: Optional[IntervalValue] = None,
till: Optional[IntervalValue] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> Request:
"""Creates request for /public/trades endpoint.
Requires no API key Access Rights.
Args:
symbols (Optional[Symbols], optional): list of symbols.
If not passed, then will create for all symbols.
            sort (Optional[SortDirection], optional): sort direction.
Accepted values: ASC, DESC. Default value: DESC.
from_ (Optional[IntervalValue], optional): Interval initial value.
If sorting by timestamp is used, then Datetime,
otherwise int of index value.
till (Optional[IntervalValue], optional): Interval end value.
If sorting by timestamp is used, then Datetime,
otherwise int of index value.
limit (Optional[int], optional): Default value: 100. Max value: 1000.
offset (Optional[int], optional): Default value: 0. Max value: 100000.
Returns:
Request
"""
def create_get_certain_trades_request(self, symbol: Symbol,
sort: Optional[SortDirection] = None,
by: Optional[SortBy] = None,
from_: Optional[IntervalValue] = None,
till: Optional[IntervalValue] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> Request:
"""Creates request for /public/trades/`symbol` endpoint.
Requires no API key Access Rights.
Args:
symbol (Symbol): certain symbol.
            sort (Optional[SortDirection], optional): sort direction.
Accepted values: ASC, DESC. Default value: DESC.
by (Optional[SortBy], optional): Defines sort type.
Accepted values: id, timestamp. Default value: timestamp.
from_ (Optional[IntervalValue], optional): Interval initial value.
If sorting by timestamp is used, then Datetime,
otherwise int of index value.
till (Optional[IntervalValue], optional): Interval end value.
If sorting by timestamp is used, then Datetime,
otherwise int of index value.
limit (Optional[int], optional): Default value: 100. Max value: 1000.
offset (Optional[int], optional): Default value: 0. Max value: 100000.
Returns:
Request
"""
def create_get_orderbooks_request(self, symbols: Optional[Symbols] = None,
limit: Optional[int] = None
) -> Request:
"""Creates request for /public/orderbook endpoint.
Requires no API key Access Rights.
Args:
symbols (Optional[Symbols], optional): list of symbols.
If not passed, then will create for all symbols.
limit (Optional[int], optional): limit of order book levels.
Default value: 100. Set 0 to view full list of levels.
Returns:
Request
"""
def create_get_certain_orderbook_request(self, symbol: Symbol,
limit: Optional[int] = None,
volume: Optional[int] = None
) -> Request:
"""Creates request for /public/orderbook/`symbol` endpoint.
Requires no API key Access Rights.
Please note that if the volume is specified,
the limit will be ignored, askAveragePrice and bidAveragePrice
are returned in response.
Args:
symbol (Symbol): certain symbol.
limit (Optional[int], optional): Limit of Order Book levels.
Default value: 100. Set 0 to view full list of levels.
volume (Optional[int], optional): Desired volume for market depth search.
Returns:
Request
"""
def create_get_candles_request(self, symbols: Optional[Symbols] = None,
period: Optional[CandlesPeriod] = None,
sort: Optional[SortDirection] = None,
from_: Optional[IntervalValue] = None,
till: Optional[IntervalValue] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> Request:
"""Creates request for /public/candles endpoint.
Requires no API key Access Rights.
Args:
symbols (Optional[Symbols], optional): list of symbols.
If not passed, then will create for all symbols.
period (Optional[CandlesPeriod], optional): accepted values: M1 (one minute),
M3, M5, M15, M30, H1 (one hour), H4,
D1 (one day), D7, 1M (one month).
Default value: M30
sort (Optional[SortDirection], optional): sort direction.
Accepted values: ASC, DESC. Default value: DESC.
from_ (Optional[IntervalValue], optional): interval initial value.
till (Optional[IntervalValue], optional): interval end value.
limit (Optional[int], optional): limit of candles. Default value: 100. Max value: 1000.
offset (Optional[int], optional): Default value: 0. Max value: 100000.
Returns:
Request
"""
def create_get_certain_candles_request(self, symbol: Symbol,
period: Optional[CandlesPeriod] = None,
sort: Optional[SortDirection] = None,
from_: Optional[IntervalValue] = None,
till: Optional[IntervalValue] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> Request:
"""Creates request for /public/candles/`symbol` endpoint.
Requires no API key Access Rights.
Args:
symbol (Symbol): certain symbol.
period (Optional[CandlesPeriod], optional): accepted values: M1 (one minute),
M3, M5, M15, M30, H1 (one hour), H4,
D1 (one day), D7, 1M (one month).
Default value: M30
sort (Optional[SortDirection], optional): sort direction.
Accepted values: ASC, DESC. Default value: DESC.
from_ (Optional[IntervalValue], optional): interval initial value.
till (Optional[IntervalValue], optional): interval end value.
limit (Optional[int], optional): limit of candles. Default value: 100. Max value: 1000.
offset (Optional[int], optional): Default value: 0. Max value: 100000.
Returns:
Request
"""
| [
"[email protected]"
] | |
ad33747c00bc3429bacdc1bf31667c00daab67fc | 5f09c2581c28751589871068d1faa9297859d2f3 | /insert_banco.py | 737b50ad7286cc87824d9969603d863b81f055e2 | [] | no_license | fandrefh/curso-python-e-django-senac | f68b4b4ce7071ac78034afdaf63251ed0422fa56 | 8a418a7d9acd12c3ca8820c5589d5d02476d3d0c | refs/heads/master | 2021-01-20T20:28:53.311346 | 2016-08-27T20:48:44 | 2016-08-27T20:48:44 | 65,097,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | import sqlite3
conn = sqlite3.connect("clientes.db")
cursor = conn.cursor()
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Regis', 35);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Aloisio', 87);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Bruna', 21);
""")
cursor.execute("""
INSERT INTO cad_clientes (nome, idade) VALUES ('Matheus', 19);
""")
conn.commit()
print('Dados inseridos com sucesso.')
conn.close() | [
"[email protected]"
] | |
47ec0b8daf1be246726bb38689c9967a2047b1d3 | 76050b0002dac757866a9fb95dc199918da665bb | /acme/utils/iterator_utils_test.py | ebe21f3a602dbf5b91ce2fc5ab468a73080be58f | [
"Apache-2.0"
] | permissive | RaoulDrake/acme | 2829f41688db68d694da2461d301fd6f9f27edff | 97c50eaa62c039d8f4b9efa3e80c4d80e6f40c4c | refs/heads/master | 2022-12-29T01:16:44.806891 | 2022-12-21T14:09:38 | 2022-12-21T14:10:06 | 300,250,466 | 0 | 0 | Apache-2.0 | 2020-10-01T11:13:03 | 2020-10-01T11:13:02 | null | UTF-8 | Python | false | false | 1,249 | py | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterator_utils."""
from acme.utils import iterator_utils
import numpy as np
from absl.testing import absltest
class IteratorUtilsTest(absltest.TestCase):
def test_iterator_zipping(self):
def get_iters():
x = iter(range(0, 10))
y = iter(range(20, 30))
return [x, y]
zipped = zip(*get_iters())
unzipped = iterator_utils.unzip_iterators(zipped, num_sub_iterators=2)
expected_x, expected_y = get_iters()
np.testing.assert_equal(list(unzipped[0]), list(expected_x))
np.testing.assert_equal(list(unzipped[1]), list(expected_y))
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
e0484f2e58aab4de9e567907b0778dc57f18cc34 | 574d7955a32116e2fa315b5f75f124863ca70614 | /blog/admin.py | ee30581a5a79496780dd1cb38aa3d14fd815c3c0 | [] | no_license | harunurkst/django_course_04 | b15cb8e52a821b1157e1ac4dbe56b89fdebce848 | 5d93290cbee0f47795b6c9ecef8d33d8afe859d1 | refs/heads/master | 2022-11-22T20:48:36.196279 | 2020-07-26T17:20:37 | 2020-07-26T17:20:37 | 278,904,995 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | from django.contrib import admin
from .models import Post, Author, Category, Comment
class PostAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("title",)}
admin.site.register(Post, PostAdmin)
admin.site.register(Author)
admin.site.register(Category)
admin.site.register(Comment) | [
"[email protected]"
] | |
2c4021079c1f87e0901a6b63792636763cca4222 | 71fbc701cf090716b213b4025e0d96e73f452aa1 | /thonny/plugins/micropython/bare_metal_backend.py | e2c05ea6a3bf2167e2930a0585828115101b1047 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | bosige/thonny | 678f3ab05185dbead41b0a04c2db025834008fc9 | 78317670d1ec8e8cd39f4bb2e5c6a2927fedd7b3 | refs/heads/master | 2022-12-11T12:25:49.484079 | 2020-09-16T12:48:55 | 2020-09-16T12:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,084 | py | import binascii
import datetime
import logging
import os
import queue
import re
import sys
import time
from textwrap import dedent, indent
from typing import BinaryIO, Callable, Optional, Tuple, Union
from thonny.backend import UploadDownloadMixin
from thonny.common import (
BackendEvent,
InlineResponse,
ToplevelResponse,
UserError,
execute_system_command,
serialize_message,
)
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
from thonny.plugins.micropython.backend import (
WAIT_OR_CRASH_TIMEOUT,
MicroPythonBackend,
ManagementError,
ReadOnlyFilesystemError,
_report_internal_error,
ends_overlap,
unix_dirname_basename,
Y2000_EPOCH_OFFSET,
)
from thonny.common import ConnectionFailedException
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because reader thread reads continuously
# and writer (SerialConnection) has it's own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands
RAW_MODE_CMD = b"\x01"
NORMAL_MODE_CMD = b"\x02"
INTERRUPT_CMD = b"\x03"
SOFT_REBOOT_CMD = b"\x04"
# Output tokens
VALUE_REPR_START = b"<repr>"
VALUE_REPR_END = b"</repr>"
STX = b"\x02"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
# https://forum.micropython.org/viewtopic.php?f=12&t=7652&hilit=w600#p43640
W600_FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\r\n>"
FIRST_RAW_PROMPT_SUFFIX = b"\r\n>"
RAW_PROMPT = b">"
FALLBACK_BUILTIN_MODULES = [
"cmath",
"gc",
"math",
"sys",
"array",
# "binascii", # don't include it, as it may give false signal for reader/writer
"collections",
"errno",
"hashlib",
"heapq",
"io",
"json",
"os",
"re",
"select",
"socket",
"ssl",
"struct",
"time",
"zlib",
"_thread",
"btree",
"framebuf",
"machine",
"micropython",
"network",
"bluetooth",
"cryptolib",
"ctypes",
"pyb",
"esp",
"esp32",
]
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
return
# print(msg, file=sys.stderr)
class BareMetalMicroPythonBackend(MicroPythonBackend, UploadDownloadMixin):
def __init__(self, connection, clean, args):
self._connection = connection
self._startup_time = time.time()
self._interrupt_suggestion_given = False
self._raw_prompt_ensured = False
MicroPythonBackend.__init__(self, clean, args)
def _get_custom_helpers(self):
return dedent(
"""
@classmethod
def getcwd(cls):
if hasattr(cls, "getcwd"):
return cls.os.getcwd()
else:
# micro:bit
return ""
@classmethod
def chdir(cls, x):
return cls.os.chdir(x)
@classmethod
def rmdir(cls, x):
return cls.os.rmdir(x)
"""
)
def _process_until_initial_prompt(self, clean):
if clean:
self._interrupt_to_raw_prompt()
else:
# Order first raw prompt to be output when the code is done.
# If the device is already at raw prompt then it gets repeated as first raw prompt.
# If it is at normal prompt then outputs first raw prompt
self._connection.write(RAW_MODE_CMD)
self._forward_output_until_active_prompt(self._send_output)
def _fetch_welcome_text(self) -> str:
self._connection.write(NORMAL_MODE_CMD)
self._raw_prompt_ensured = False
welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
if os.name != "nt":
welcome_text = welcome_text.replace(b"\r\n", b"\n")
return self._decode(welcome_text)
def _fetch_builtin_modules(self):
script = "help('modules')"
out, err = self._execute(script, capture_output=True)
if err or not out:
self._send_error_message(
"Could not query builtin modules. Code completion may not work properly."
)
return FALLBACK_BUILTIN_MODULES
modules_str_lines = out.strip().splitlines()
last_line = modules_str_lines[-1].strip()
if last_line.count(" ") > 0 and " " not in last_line and "\t" not in last_line:
# probably something like "plus any modules on the filesystem"
# (can be in different languages)
modules_str_lines = modules_str_lines[:-1]
modules_str = (
" ".join(modules_str_lines)
.replace("/__init__", "")
.replace("__main__", "")
.replace("/", ".")
)
return modules_str.split()
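    # Illustrative sketch of the output parsed above (exact text varies by port):
    # help('modules') prints a whitespace-separated table of module names and
    # typically ends with a line like "Plus any modules on the filesystem", which
    # the heuristic above drops before splitting.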
def _resolve_unknown_epoch(self) -> int:
if self._connected_to_circuitpython() or self._connected_to_pycom():
return 1970
else:
return 2000
def _sync_time(self):
"""Sets the time on the pyboard to match the time on the host."""
# RTC works on UTC
now = datetime.datetime.now(tz=datetime.timezone.utc).timetuple()
if self._connected_to_microbit():
return
elif self._connected_to_circuitpython():
specific_script = dedent(
"""
from rtc import RTC as __thonny_RTC
__thonny_RTC().datetime = {ts}
del __thonny_RTC
"""
).format(ts=tuple(now))
else:
specific_script = dedent(
"""
from machine import RTC as __thonny_RTC
try:
__thonny_RTC().datetime({datetime_ts})
except:
__thonny_RTC().init({init_ts})
del __thonny_RTC
"""
).format(
datetime_ts=(
now.tm_year,
now.tm_mon,
now.tm_mday,
now.tm_wday + 1,
now.tm_hour,
now.tm_min,
now.tm_sec,
0,
),
init_ts=tuple(now)[:6] + (0, 0),
)
script = (
dedent(
"""
try:
%s
__thonny_helper.print_mgmt_value(True)
except Exception as e:
__thonny_helper.print_mgmt_value(str(e))
"""
)
% indent(specific_script, " ")
)
val = self._evaluate(script)
if isinstance(val, str):
print("WARNING: Could not sync device's clock: " + val)
def _get_utc_timetuple_from_device(self) -> Union[tuple, str]:
if self._connected_to_microbit():
return "This device does not have a real-time clock"
elif self._connected_to_circuitpython():
specific_script = dedent(
"""
from rtc import RTC as __thonny_RTC
__thonny_helper.print_mgmt_value(tuple(__thonny_RTC().datetime)[:6])
del __thonny_RTC
"""
)
else:
specific_script = dedent(
"""
from machine import RTC as __thonny_RTC
try:
# now() on some devices also gives weekday, so prefer datetime
__thonny_temp = tuple(__thonny_RTC().datetime())
# remove weekday from index 3
__thonny_helper.print_mgmt_value(__thonny_temp[0:3] + __thonny_temp[4:7])
del __thonny_temp
except:
__thonny_helper.print_mgmt_value(tuple(__thonny_RTC().now())[:6])
del __thonny_RTC
"""
)
script = (
dedent(
"""
try:
%s
except Exception as e:
__thonny_helper.print_mgmt_value(str(e))
"""
)
% indent(specific_script, " ")
)
val = self._evaluate(script)
return val
def _get_actual_time_tuple_on_device(self):
script = dedent(
"""
try:
try:
from time import localtime as __thonny_localtime
__thonny_helper.print_mgmt_value(tuple(__thonny_localtime()))
del __thonny_localtime
except:
# some CP boards
from rtc import RTC as __thonny_RTC
__thonny_helper.print_mgmt_value(tuple(__thonny_RTC().datetime))
del __thonny_RTC
except Exception as e:
__thonny_helper.print_mgmt_value(str(e))
"""
)
return self._evaluate(script)
def _update_cwd(self):
if self._connected_to_microbit():
self._cwd = ""
else:
super()._update_cwd()
def _interrupt_to_raw_prompt(self):
# NB! Sometimes disconnecting and reconnecting (on macOS?)
# too quickly causes anomalies. See CalliopeMiniProxy for more details
discarded_bytes = b""
for delay in [0.05, 0.5, 0.1, 1.0, 3.0, 5.0]:
# Interrupt several times, because with some drivers first interrupts seem to vanish
if delay >= 1:
self._show_error(
"Could not enter REPL. Trying again with %d second waiting time..." % delay
)
self._connection.reset_output_buffer() # cancels previous writes
self._connection.write(INTERRUPT_CMD)
self._connection.write(RAW_MODE_CMD)
time.sleep(delay)
discarded_bytes += self._connection.read_all()
if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(
W600_FIRST_RAW_PROMPT
):
self._soft_reboot_after_interrupting_to_raw_prompt()
self._raw_prompt_ensured = True
break
else:
max_tail_length = 500
if len(discarded_bytes) > max_tail_length:
discarded_bytes_str = (
"[skipping %d bytes] ..." % (len(discarded_bytes) - max_tail_length)
) + repr(discarded_bytes[:-max_tail_length])
else:
discarded_bytes_str = repr(discarded_bytes)
self._show_error(
"Could not enter REPL. Giving up. Read bytes:\n"
+ discarded_bytes_str
+ "\n\nYour options:\n\n"
+ " - check connection properties;\n"
+ " - make sure the device has suitable firmware;\n"
+ " - make sure the device is not in bootloader mode;\n"
+ " - reset the device and try again;\n"
+ " - try other serial clients (Putty, TeraTerm, screen, ...);\n"
+ " - ask for help in Thonny's forum or issue tracker."
)
sys.exit()
def _soft_reboot_after_interrupting_to_raw_prompt(self):
self._connection.write(SOFT_REBOOT_CMD)
# CP runs code.py after soft-reboot even in raw repl, so I'll send some Ctrl-C to intervene
        # (they don't do anything in raw repl)
self._connection.write(INTERRUPT_CMD)
self._connection.write(INTERRUPT_CMD)
output = self._connection.soft_read_until(FIRST_RAW_PROMPT, timeout=2)
if not output.endswith(FIRST_RAW_PROMPT):
self._show_error("Could not soft-reboot after reaching raw prompt. Got %s" % output)
def _soft_reboot(self):
# Need to go to normal mode. MP doesn't run user code in raw mode
# (CP does, but it doesn't hurt to do it there as well)
self._connection.write(NORMAL_MODE_CMD)
self._raw_prompt_ensured = False
self._connection.read_until(NORMAL_PROMPT)
self._connection.write(SOFT_REBOOT_CMD)
self._forward_output_until_active_prompt(self._send_output)
self._ensure_raw_prompt()
self.send_message(ToplevelResponse(cwd=self._cwd))
def _transform_output(self, data, stream_name):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
def _write(self, data):
self._connection.write(data)
def _submit_input(self, cdata: str) -> None:
# TODO: what if there is a previous unused data waiting
assert self._connection.outgoing_is_empty()
assert cdata.endswith("\n")
if not cdata.endswith("\r\n"):
# submission is done with CRLF
cdata = cdata[:-1] + "\r\n"
bdata = cdata.encode(ENCODING)
with self._interrupt_lock:
self._write(bdata)
# Try to consume the echo
try:
echo = self._connection.read(len(bdata))
except queue.Empty:
# leave it.
logging.warning("Timeout when reading input echo")
return
if echo != bdata:
# because of autoreload? timing problems? interruption?
# Leave it.
logging.warning("Unexpected echo. Expected %r, got %r" % (bdata, echo))
self._connection.unread(echo)
def _submit_code(self, script):
assert script # otherwise EOT produces soft reboot
# assuming we are already in a prompt
self._forward_unexpected_output()
self._ensure_raw_prompt()
# send command
with self._interrupt_lock:
self._connection.write(script.encode(ENCODING) + EOT)
debug("Wrote " + script + "\n--------\n")
# fetch command confirmation
confirmation = self._connection.soft_read(2, timeout=WAIT_OR_CRASH_TIMEOUT)
if confirmation != OK:
data = confirmation + self._connection.read_all()
data += self._connection.read(1, timeout=1, timeout_is_soft=True)
data += self._connection.read_all()
self._report_internal_error(
"Could not read command confirmation. Got " + repr(data) + "\n\nSCRIPT:\n" + script
)
else:
debug("GOTOK")
def _ensure_raw_prompt(self):
if self._raw_prompt_ensured:
return
debug("Ensuring raw prompt")
self._connection.write(RAW_MODE_CMD)
prompt = (
self._connection.read_until(
FIRST_RAW_PROMPT_SUFFIX, timeout=WAIT_OR_CRASH_TIMEOUT, timeout_is_soft=True
)
+ self._connection.read_all()
)
if not prompt.endswith(FIRST_RAW_PROMPT_SUFFIX):
self._send_output(prompt, "stdout")
raise TimeoutError("Could not ensure raw prompt")
self._raw_prompt_ensured = True
debug("Restoring helpers")
self._prepare_helpers()
self._update_cwd()
def _execute_with_consumer(self, script, output_consumer: Callable[[str, str], None]):
"""Expected output after submitting the command and reading the confirmation is following:
stdout
EOT
stderr
EOT
RAW_PROMPT
"""
self._submit_code(script)
terminator = self._forward_output_until_eot_or_active_propmt(output_consumer, "stdout")
if terminator != EOT:
# an unexpected prompt
return
terminator = self._forward_output_until_eot_or_active_propmt(output_consumer, "stderr")
if terminator != EOT:
# an unexpected prompt
return
data = self._connection.read(1) + self._connection.read_all()
if data == RAW_PROMPT:
# happy path
self._raw_prompt_ensured = True
return
else:
self._connection.unread(data)
self._forward_output_until_active_prompt(output_consumer, "stdout")
def _forward_output_until_active_prompt(
self, output_consumer: Callable[[str, str], None], stream_name="stdout"
):
"""Used for finding initial prompt or forwarding problematic output
in case of parse errors"""
while True:
terminator = self._forward_output_until_eot_or_active_propmt(
output_consumer, stream_name
)
if terminator in (NORMAL_PROMPT, RAW_PROMPT, FIRST_RAW_PROMPT):
self._raw_prompt_ensured = terminator in (RAW_PROMPT, FIRST_RAW_PROMPT)
return terminator
else:
output_consumer(self._decode(terminator), "stdout")
def _forward_output_until_eot_or_active_propmt(self, output_consumer, stream_name="stdout"):
"""Meant for incrementally forwarding stdout from user statements,
scripts and soft-reboots. Also used for forwarding side-effect output from
expression evaluations and for capturing help("modules") output.
In these cases it is expected to arrive to an EOT.
Also used for initial prompt searching or for recovering from a protocol error.
In this case it must work until active normal prompt or first raw prompt.
The code may have been submitted in any of the REPL modes or
automatically via (soft-)reset.
NB! The processing may end in normal mode even if the command was started
in raw mode (eg. when user presses reset during processing in some devices)!
The processing may also end in FIRST_RAW_REPL, when it was started in
normal REPL and Ctrl+A was issued during processing (ie. before Ctrl+C in
this example):
6
7
8
9
10
Traceback (most recent call last):
File "main.py", line 5, in <module>
KeyboardInterrupt:
MicroPython v1.11-624-g210d05328 on 2019-12-09; ESP32 module with ESP32
Type "help()" for more information.
>>>
raw REPL; CTRL-B to exit
>
(Preceding output does not contain EOT)
Note that this Ctrl+A may have been issued even before Thonny connected to
the device.
Note that interrupt does not affect the structure of the output -- it is
presented just like any other exception.
The method returns EOT, RAW_PROMPT or NORMAL_PROMPT, depending on which terminator
ended the processing.
The terminating EOT may be either the first EOT from normal raw-REPL
output or the starting EOT from Thonny expression (or, in principle, even
the second raw-REPL EOT or terminating Thonny expression EOT)
-- the caller will do the interpretation.
        Because of the special role of EOT and NORMAL_PROMPT, we assume user code
        will not output these. If it does, processing may break.
        It may succeed if the prompt is followed by something (quickly enough)
        -- that's why we look for an *active* prompt, ie. a prompt without following text.
TODO: Experiment with this!
        Output produced by background threads (eg. in WiPy ESP32) causes even more difficulties,
        because it becomes impossible to say whether we are at a prompt and the output
        is from another thread, or the main thread is still running.
For now I'm ignoring these problems and assume all output comes from the main thread.
"""
INCREMENTAL_OUTPUT_BLOCK_CLOSERS = re.compile(
b"|".join(map(re.escape, [FIRST_RAW_PROMPT, NORMAL_PROMPT, LF, EOT]))
)
pending = b""
while True:
# There may be an input submission waiting
# and we can't progress without resolving it first
self._check_for_side_commands()
# Prefer whole lines, but allow also incremental output to single line
# Note that here I'm not looking for non-first raw prompt, because this
# is always preceded by EOT.
new_data = self._connection.soft_read_until(
INCREMENTAL_OUTPUT_BLOCK_CLOSERS, timeout=0.05
)
if not new_data:
# In case we are still waiting for the first bits after connecting ...
# TODO: this suggestion should be implemented in Shell
if (
self._connection.num_bytes_received == 0
and not self._interrupt_suggestion_given
and time.time() - self._startup_time > 1.5
):
self._show_error(
"\n"
+ "Device is busy or does not respond. Your options:\n\n"
+ " - wait until it completes current work;\n"
+ " - use Ctrl+C to interrupt current work;\n"
+ " - use Stop/Restart to interrupt more and enter REPL.\n"
)
self._interrupt_suggestion_given = True
if not pending:
# nothing to parse
continue
pending += new_data
if pending.endswith(EOT):
output_consumer(self._decode(pending[: -len(EOT)]), stream_name)
return EOT
elif pending.endswith(LF) and not pending.endswith(FIRST_RAW_PROMPT[:-1]):
output_consumer(self._decode(pending), stream_name)
pending = b""
elif pending.endswith(NORMAL_PROMPT) or pending.endswith(FIRST_RAW_PROMPT):
# This looks like prompt.
# Make sure it is not followed by anything.
# Note that in this context the prompt usually means something is wrong
# (EOT would have been the happy path), so no need to hurry.
# The only case where this path is happy path is just after connecting.
follow_up = self._connection.soft_read(1, timeout=0.5)
if follow_up:
# Nope, the prompt is not active.
# (Actually it may be that a background thread has produced this follow up,
# but this would be too hard to consider.)
# Don't output yet, because the follow up may turn into another prompt
# and they can be captured all together.
self._connection.unread(follow_up)
                    # read prompt must remain in pending
else:
# let's hope it is an active prompt
if pending.endswith(NORMAL_PROMPT):
terminator = NORMAL_PROMPT
else:
terminator = FIRST_RAW_PROMPT
# Strip all trailing prompts
out = pending
while True:
if out.endswith(NORMAL_PROMPT):
out = out[: -len(NORMAL_PROMPT)]
elif out.endswith(FIRST_RAW_PROMPT):
out = out[: -len(FIRST_RAW_PROMPT)]
else:
break
output_consumer(self._decode(out), stream_name)
return terminator
elif ends_overlap(pending, NORMAL_PROMPT) or ends_overlap(pending, FIRST_RAW_PROMPT):
# Maybe we have a prefix of the prompt and the rest is still coming?
# (it's OK to wait a bit, as the user output usually ends with a newline, ie not
# with a prompt prefix)
follow_up = self._connection.soft_read(1, timeout=0.3)
if not follow_up:
# most likely not a Python prompt, let's forget about it
output_consumer(self._decode(pending), stream_name)
pending = b""
else:
# Let's try the possible prefix again in the next iteration
# (I'm unreading otherwise the read_until won't see the whole prompt
# and needs to wait for the timeout)
if ends_overlap(pending, NORMAL_PROMPT):
n = ends_overlap(pending, NORMAL_PROMPT)
else:
n = ends_overlap(pending, FIRST_RAW_PROMPT)
try_again = pending[-n:]
pending = pending[:-n]
self._connection.unread(try_again + follow_up)
else:
# No EOT or prompt in sight.
# Output and keep working.
output_consumer(self._decode(pending), stream_name)
pending = b""
def _forward_unexpected_output(self, stream_name="stdout"):
"Invoked between commands"
data = self._connection.read_all()
if data:
self._raw_prompt_ensured = data.endswith(FIRST_RAW_PROMPT)
met_prompt = False
while data.endswith(NORMAL_PROMPT) or data.endswith(FIRST_RAW_PROMPT):
                # looks like the device was reset
met_prompt = True
if data.endswith(NORMAL_PROMPT):
terminator = NORMAL_PROMPT
else:
terminator = FIRST_RAW_PROMPT
# hide the prompt from the output ...
data = data[: -len(terminator)]
self._send_output(data.decode(ENCODING, "replace"), stream_name)
if met_prompt:
# ... and recreate Thonny prompt
self.send_message(ToplevelResponse())
def _cmd_execute_system_command(self, cmd):
# Can't use stdin, because a thread is draining it
execute_system_command(cmd, cwd=self._local_cwd, disconnect_stdin=True)
def _cmd_get_fs_info(self, cmd):
result = self._evaluate(
dedent(
"""
try:
from os import statvfs as __thonny_statvfs
__thonny_stat = __thonny_statvfs(%r)
__thonny_total = __thonny_stat[2] * __thonny_stat[0]
__thonny_free = __thonny_stat[3] * __thonny_stat[0]
__thonny_used = __thonny_total - __thonny_free
__thonny_sizes = None
del __thonny_statvfs
del __thonny_stat
except ImportError:
__thonny_sizes = [__thonny_helper.os.size(name) for name in __thonny_helper.listdir()]
__thonny_used = None
__thonny_total = None
__thonny_free = None
__thonny_helper.print_mgmt_value({
"total" : __thonny_total,
"used" : __thonny_used,
"free": __thonny_free,
"sizes": __thonny_sizes
})
del __thonny_total
del __thonny_free
del __thonny_used
del __thonny_sizes
"""
)
% cmd.path
)
if result["sizes"] is not None:
if self._connected_to_microbit():
comment = "Assuming around 30 kB of storage space for user files."
else:
comment = "Don't know the size of storage space on this device."
files_total_size = sum(result["sizes"])
# TODO: compute number of used blocks
if files_total_size > 0:
comment += "\n\n" + "At least %s of it is used by %d file(s)." % (
sizeof_fmt(files_total_size),
len(result["sizes"]),
)
result["comment"] = comment
del result["sizes"]
return result
def _cmd_upload(self, cmd):
self._check_sync_time()
return super(BareMetalMicroPythonBackend, self)._cmd_upload(cmd)
def _cmd_write_file(self, cmd):
self._check_sync_time()
return super(BareMetalMicroPythonBackend, self)._cmd_write_file(cmd)
def _delete_sorted_paths(self, paths):
if not self._supports_directories():
# micro:bit
self._execute_without_output(
dedent(
"""
for __thonny_path in %r:
__thonny_helper.os.remove(__thonny_path)
del __thonny_path
"""
)
% paths
)
else:
try:
super()._delete_sorted_paths(paths)
except Exception as e:
if "read-only" in str(e).lower():
self._delete_via_mount(paths)
self._sync_all_filesystems()
def _internal_path_to_mounted_path(self, path):
mount_path = self._get_fs_mount()
if mount_path is None:
return None
flash_prefix = self._get_flash_prefix()
if not path.startswith(flash_prefix):
return None
path_suffix = path[len(flash_prefix) :]
return os.path.join(mount_path, os.path.normpath(path_suffix))
def _get_stat_mode_for_upload(self, path: str) -> Optional[int]:
return self._get_stat_mode(path)
def _mkdir_for_upload(self, path: str) -> None:
self._mkdir(path)
def _read_file(
self, source_path: str, target_fp: BinaryIO, callback: Callable[[int, int], None]
) -> None:
# TODO: Is it better to read from mount when possible? Is the mount up to date when the file
# is written via serial? Does the MP API give up to date bytes when the file is written via mount?
hex_mode = self._should_hexlify(source_path)
self._execute_without_output("__thonny_fp = open(%r, 'rb')" % source_path)
if hex_mode:
self._execute_without_output("from binascii import hexlify as __temp_hexlify")
block_size = 1024
file_size = self._get_file_size(source_path)
num_bytes_read = 0
while True:
callback(num_bytes_read, file_size)
if hex_mode:
block = binascii.unhexlify(
self._evaluate("__temp_hexlify(__thonny_fp.read(%s))" % block_size)
)
else:
block = self._evaluate("__thonny_fp.read(%s)" % block_size)
if block:
target_fp.write(block)
num_bytes_read += len(block)
if len(block) < block_size:
break
self._execute_without_output(
dedent(
"""
__thonny_fp.close()
del __thonny_fp
try:
del __temp_hexlify
except:
pass
"""
)
)
def _write_file(
self,
source_fp: BinaryIO,
target_path: str,
file_size: int,
callback: Callable[[int, int], None],
) -> None:
try:
self._write_file_via_serial(source_fp, target_path, file_size, callback)
except ReadOnlyFilesystemError:
self._write_file_via_mount(source_fp, target_path, file_size, callback)
# self._sync_all_filesystems()
def _write_file_via_mount(
self,
source: BinaryIO,
target_path: str,
file_size: int,
callback: Callable[[int, int], None],
) -> None:
mounted_target_path = self._internal_path_to_mounted_path(target_path)
with open(mounted_target_path, "wb") as f:
bytes_written = 0
block_size = 4 * 1024
while True:
callback(bytes_written, file_size)
block = source.read(block_size)
if block:
bytes_written += f.write(block)
f.flush()
os.fsync(f)
if len(block) < block_size:
break
assert bytes_written == file_size
return bytes_written
def _write_file_via_serial(
self,
source_fp: BinaryIO,
target_path: str,
file_size: int,
callback: Callable[[int, int], None],
) -> None:
out, err = self._execute(
dedent(
"""
try:
__thonny_path = '{path}'
__thonny_written = 0
__thonny_fp = open(__thonny_path, 'wb')
except Exception as e:
print(str(e))
"""
).format(path=target_path),
capture_output=True,
)
if "readonly" in (out + err).replace("-", "").lower():
raise ReadOnlyFilesystemError()
elif out + err:
raise RuntimeError(
"Could not open file %s for writing, output:\n%s" % (target_path, out + err)
)
# Define function to allow shorter write commands
hex_mode = self._should_hexlify(target_path)
if hex_mode:
self._execute_without_output(
dedent(
"""
from binascii import unhexlify as __thonny_unhex
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(__thonny_unhex(x))
__thonny_fp.flush()
"""
)
)
else:
self._execute_without_output(
dedent(
"""
def __W(x):
global __thonny_written
__thonny_written += __thonny_fp.write(x)
"""
)
)
bytes_sent = 0
block_size = 512
while True:
callback(bytes_sent, file_size)
block = source_fp.read(block_size)
if block:
if hex_mode:
script = "__W(%r)" % binascii.hexlify(block)
else:
script = "__W(%r)" % block
out, err = self._execute(script, capture_output=True)
if out or err:
self._show_error(
"\nCould not write next block after having written %d bytes to %s"
% (bytes_sent, target_path)
)
if bytes_sent > 0:
self._show_error(
"Make sure your device's filesystem has enough free space. "
+ "(When overwriting a file, the old content may occupy space "
"until the end of the operation.)\n"
)
raise ManagementError(script, out, err)
bytes_sent += len(block)
if len(block) < block_size:
break
bytes_received = self._evaluate("__thonny_written")
if bytes_received != bytes_sent:
raise UserError("Expected %d written bytes but wrote %d" % (bytes_sent, bytes_received))
# clean up
self._execute_without_output(
dedent(
"""
try:
del __W
del __thonny_written
del __thonny_path
__thonny_fp.close()
del __thonny_fp
del __thonny_result
del __thonny_unhex
except:
pass
"""
)
)
return bytes_sent
def _sync_all_filesystems(self):
self._execute_without_output(
dedent(
"""
try:
from os import sync as __thonny_sync
__thonny_sync()
del __thonny_sync
except ImportError:
pass
"""
)
)
def _makedirs(self, path):
if path == "/":
return
try:
super()._makedirs(path)
except Exception as e:
if "read-only" in str(e).lower():
self._makedirs_via_mount(path)
self._sync_all_filesystems()
def _makedirs_via_mount(self, path):
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None, "Couldn't find mounted path for " + path
os.makedirs(mounted_path, exist_ok=True)
def _delete_via_mount(self, paths):
for path in paths:
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None
import shutil
shutil.rmtree(mounted_path)
def _get_fs_mount_label(self):
# This method is most likely required with CircuitPython,
# so try its approach first
# https://learn.adafruit.com/welcome-to-circuitpython/the-circuitpy-drive
result = self._evaluate(
dedent(
"""
try:
from storage import getmount as __thonny_getmount
try:
__thonny_result = __thonny_getmount("/").label
finally:
del __thonny_getmount
except ImportError:
__thonny_result = None
except OSError:
__thonny_result = None
__thonny_helper.print_mgmt_value(__thonny_result)
del __thonny_result
"""
)
)
if result is not None:
return result
if self._welcome_text is None:
return None
"""
# following is not reliable and probably not needed
markers_by_name = {"PYBFLASH": {"pyb"}, "CIRCUITPY": {"circuitpython"}}
for name in markers_by_name:
for marker in markers_by_name[name]:
if marker.lower() in self._welcome_text.lower():
return name
"""
return None
def _get_flash_prefix(self):
if not self._supports_directories():
return ""
elif (
"LoBo" in self._welcome_text
or "WiPy with ESP32" in self._welcome_text
or "PYBLITE" in self._welcome_text
or "PYBv" in self._welcome_text
or "PYBOARD" in self._welcome_text.upper()
):
return "/flash/"
else:
return "/"
def _get_fs_mount(self):
label = self._get_fs_mount_label()
if label is None:
return None
else:
candidates = find_volumes_by_name(
self._get_fs_mount_label(),
# querying A can be very slow
skip_letters="A",
)
if len(candidates) == 0:
raise RuntimeError("Could not find volume " + self._get_fs_mount_label())
elif len(candidates) > 1:
raise RuntimeError("Found several possible mount points: %s" % candidates)
else:
return candidates[0]
def _should_hexlify(self, path):
if "binascii" not in self._builtin_modules:
return False
for ext in (".py", ".txt", ".csv"):
if path.lower().endswith(ext):
return False
return True
def _is_connected(self):
return self._connection._error is None
def _get_epoch_offset(self) -> int:
# https://docs.micropython.org/en/latest/library/utime.html
# NB! Some boards (eg Pycom) may use Posix epoch!
try:
return super()._get_epoch_offset()
except NotImplementedError:
return Y2000_EPOCH_OFFSET
def _get_sep(self):
if self._supports_directories():
return "/"
else:
return ""
def _decode(self, data: bytes) -> str:
return data.decode(ENCODING, errors="replace")
if __name__ == "__main__":
THONNY_USER_DIR = os.environ["THONNY_USER_DIR"]
logger = logging.getLogger("thonny.micropython.backend")
logger.propagate = False
logFormatter = logging.Formatter("%(levelname)s: %(message)s")
file_handler = logging.FileHandler(
os.path.join(THONNY_USER_DIR, "micropython-backend.log"), encoding="UTF-8", mode="w"
)
file_handler.setFormatter(logFormatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
import ast
import sys
args = ast.literal_eval(sys.argv[1])
try:
if args["port"] is None:
# remain busy
while True:
time.sleep(1000)
elif args["port"] == "webrepl":
from thonny.plugins.micropython.webrepl_connection import WebReplConnection
connection = WebReplConnection(args["url"], args["password"], args["min_write_delay"])
else:
from thonny.plugins.micropython.serial_connection import (
DifficultSerialConnection,
SerialConnection,
)
connection = SerialConnection(args["port"], BAUDRATE)
# connection = DifficultSerialConnection(args["port"], BAUDRATE)
backend = BareMetalMicroPythonBackend(connection, clean=args["clean"], args=args)
except ConnectionFailedException as e:
text = "\n" + str(e) + "\n"
msg = BackendEvent(event_type="ProgramOutput", stream_name="stderr", data=text)
sys.stdout.write(serialize_message(msg) + "\n")
sys.stdout.flush()
| [
"[email protected]"
] | |
e7f7098869934c8e98d694a44382c6cb60479ac5 | 717e0190612c20b9f5d26fec77e293c9b53f0e17 | /numpywren/binops.py | cc74dd808794dfddd673758ef5304f701452777c | [
"Apache-2.0"
] | permissive | cloudbutton/lithops-array | 8336e16cf9b80e8745ba9c63256294d2d7206a1c | 5e74b881c7db95eccdccf986f1e3b0dc44603889 | refs/heads/main | 2023-04-23T08:15:42.450676 | 2021-04-06T13:40:15 | 2021-04-06T13:40:15 | 344,418,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,962 | py | import numpy as np
from .matrix import BigMatrix
from .matrix_utils import chunk, generate_key_name_binop
from . import matrix_utils
import concurrent.futures as fs
import os
import lithops
import time
from . import lambdapack as lp
from . import job_runner
def _gemm_remote_0(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
print('block_pairs: ', block_pairs)
print('reduce_idxs: ', reduce_idxs)
for bp in block_pairs:
bidx_0, bidx_1 = bp
XY_block = None
X.dtype = dtype
Y.dtype = dtype
for r in reduce_idxs:
block1 = X.get_block(bidx_0, r)
block2 = Y.get_block(r, bidx_1)
if XY_block is None:
XY_block = block1.dot(block2)
else:
XY_block += block1.dot(block2)
XY.put_block(XY_block, bidx_0, bidx_1)
def _gemm_remote_1(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
os.system("sudo mount -o remount,size=50g /dev/shm")
X.dtype = dtype
Y.dtype = dtype
for bp in block_pairs:
bidx_0, bidx_1 = bp
block0 = matrix_utils.get_row(X, bidx_0, mmap_loc="/dev/shm/block_0")
block1 = matrix_utils.get_col(Y, bidx_1, mmap_loc="/dev/shm/block_1")
XY_block = block0.dot(block1)
XY.put_block(XY_block, bidx_0, bidx_1)
def _gemm_remote_2(block_pairs, XY, X, Y, reduce_idxs=[0], dtype=np.float64, **kwargs):
os.system("sudo mount -o remount,size=50g /dev/shm")
    X.dtype = dtype
Y.dtype = dtype
block_chunk_size = kwargs.get("block_chunk_size")
for bp in block_pairs:
bidx_0, bidx_1 = bp
result = gemm_with_prefetch(X, Y, bidx_0, bidx_1, block_chunk_size=block_chunk_size)
XY.put_block(result, bidx_0, bidx_1)
_gemms = [_gemm_remote_0, _gemm_remote_1, _gemm_remote_2]
def gemm_with_prefetch(X, Y, bidx0, bidx1, block_chunk_size=16):
# prefetch first 16 columns
parity = 0
executor = fs.ProcessPoolExecutor(32)
block_chunk_size = min(block_chunk_size, len(X._block_idxs(1)))
chunked_blocks = list(matrix_utils.chunk(X._block_idxs(1), block_chunk_size))
assert(chunked_blocks[0] == list(range(block_chunk_size)))
futures0 = matrix_utils.get_matrix_blocks_full_async(X, "/dev/shm/block0_{0}".format(parity), [bidx0], list(range(block_chunk_size)), big_axis=1, executor=executor)
futures1 = matrix_utils.get_matrix_blocks_full_async(Y, "/dev/shm/block1_{0}".format(parity), list(range(block_chunk_size)), [bidx1], big_axis=0, executor=executor)
assert X._block_idxs(1) == Y._block_idxs(0)
chunked_blocks = chunked_blocks[1:]
start_x, end_x = X._blocks(0)[bidx0]
start_y, end_y = Y._blocks(1)[bidx1]
result = np.zeros((end_x - start_x, end_y - start_y), dtype=X.dtype)
for blocks in chunked_blocks:
t = time.time()
fs.wait(futures0)
fs.wait(futures1)
e = time.time()
print("Block Download took effectively {0}".format(e - t))
results = [f.result() for f in futures0]
b1 = matrix_utils.load_mmap(*results[0])
results = [f.result() for f in futures1]
b2 = matrix_utils.load_mmap(*results[0])
parity = (parity + 1) % 2
futures0 = matrix_utils.get_matrix_blocks_full_async(X, "/dev/shm/block0_{0}".format(parity), [bidx0], blocks, big_axis=1, executor=executor)
futures1 = matrix_utils.get_matrix_blocks_full_async(Y, "/dev/shm/block1_{0}".format(parity), blocks, [bidx1], big_axis=0, executor=executor)
t = time.time()
result += b1.dot(b2)
e = time.time()
print("Block Matmul took effectively {0}".format(e - t))
t = time.time()
fs.wait(futures0)
fs.wait(futures1)
e = time.time()
print("Block Download took effectively {0}".format(e - t))
results = [f.result() for f in futures0]
b1 = matrix_utils.load_mmap(*results[0])
results = [f.result() for f in futures1]
b2 = matrix_utils.load_mmap(*results[0])
t = time.time()
result += b1.dot(b2)
e = time.time()
print("Block Matmul took effectively {0}".format(e - t))
return result
def gemm(fexec, X, Y, out_bucket=None, tasks_per_job=1, local=False,
dtype=np.float64, overwrite=True, gemm_impl=0, gemm_chunk_size=16):
'''
    Compute XY and return the result
    @param fexec - Lithops execution context
    @param X - lhs matrix
    @param Y - rhs matrix
@param tasks_per_job - number of tasks per job
@param out_bucket - bucket job writes to
@param num_jobs - how many lambdas to run
@param local - run locally? #TODO remove once local lithops executor is provided
'''
reduce_idxs = Y._block_idxs(axis=0)
if out_bucket is None:
out_bucket = X.bucket
root_key = generate_key_name_binop(X, Y, "gemm")
if (Y.shard_sizes[0] != X.shard_sizes[1]):
raise Exception("X dim 1 shard size must match Y dim 0 shard size")
XY = BigMatrix(root_key, shape=(X.shape[0], Y.shape[1]),
bucket=out_bucket, shard_sizes=[X.shard_sizes[0], Y.shard_sizes[1]],
dtype=dtype, write_header=True, storage=X.storage)
num_out_blocks = len(XY.blocks)
if (tasks_per_job > num_out_blocks):
tasks_per_job = 1
num_jobs = int(num_out_blocks/float(tasks_per_job))
print("Out Shape", XY.shape)
print("Total number of output blocks", len(XY.block_idxs))
print("Total number of output blocks that exist", len(XY.blocks_exist))
if (overwrite):
block_idxs_to_map = list(set(XY.block_idxs))
else:
block_idxs_to_map = list(set(XY.block_idxs_not_exist))
print("block_idxs_to_map: ", block_idxs_to_map)
print("Number of output blocks to generate ", len(block_idxs_to_map))
print("Tasks per job: ", tasks_per_job)
print("Num Jobs: ", num_jobs)
print('GEMM impl: ', gemm_impl, _gemms[gemm_impl])
chunked_blocks = list(chunk(block_idxs_to_map, tasks_per_job))
chunked_blocks = [(cb, ) for cb in chunked_blocks]
#if (not isinstance(fexec.invoker, fexec.queues.SQSInvoker) and gemm_impl > 0):
# raise Exception("GEMM IMPL > 0 only supported for standalone mode pywren")
    # Josep: Storage class is not picklable, so detach it before invoking Lithops
    saved_storage = X.storage
XY.storage = Y.storage = X.storage = None
def lithops_run(block_pairs, storage):
XY.storage = storage
X.storage = storage
Y.storage = storage
return _gemms[gemm_impl](block_pairs, XY, X, Y, reduce_idxs=reduce_idxs,
dtype=dtype, block_chunk_size=gemm_chunk_size)
if (local):
list(map(lithops_run, chunked_blocks))
return XY
else:
fexec.map(lithops_run, chunked_blocks, include_modules=['numpywren'])
fexec.wait()
    Y.storage = X.storage = saved_storage
return XY
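# Illustrative usage sketch (added for clarity; not part of the original module and
# the names are hypothetical): gemm() is normally driven by a Lithops executor and
# two sharded BigMatrix inputs whose inner shard sizes match, roughly as follows:
#
#   import lithops
#   fexec = lithops.FunctionExecutor()
#   X = BigMatrix("X", shape=(4096, 4096), shard_sizes=[1024, 1024])
#   Y = BigMatrix("Y", shape=(4096, 4096), shard_sizes=[1024, 1024])
#   # ... populate the blocks, e.g. via X.put_block(block, i, j) ...
#   XY = gemm(fexec, X, Y, tasks_per_job=1, gemm_impl=0)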
# matrix vector multiply
# hard
def gemv(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# symmetric rank k update
# hard
def syrk(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# very hard
def posv(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# easy
def add(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# easy
def sub(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# easy
def mul(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
# easy
def div(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
def logical_and(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
def logical_or(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
def xor(pwex, X, Y, out_bucket=None, tasks_per_job=1):
raise NotImplementedError
def elemwise_binop_func(pwex, X, Y, f, out_bucket=None, tasks_per_job=1, local=False):
raise NotImplementedError
def trisolve(pwex, A, B, out_bucket=None, tasks_per_job=1, lower=False):
if out_bucket is None:
out_bucket = A.bucket
root_key = generate_key_name_binop(A, B, "trisolve")
instructions, X, scratch = lp._trisolve(A, B, out_bucket=out_bucket, lower=lower)
config = pwex.config
# if (isinstance(pwex.invoker, pywren.queues.SQSInvoker)):
# executor = pywren.standalone_executor
# else:
fexec = lithops.FunctionExecutor()
program = lp.LambdaPackProgram(instructions, executor=fexec, pywren_config=config)
print(program)
#assert False
program.start()
job_runner.lambdapack_run(program)
program.wait()
if program.program_status() != lp.PS.SUCCESS:
program.unwind()
raise Exception("Lambdapack Exception : {0}".format(program.program_status()))
program.free()
# delete all intermediate information
[M.free() for M in scratch]
return X
| [
"[email protected]"
] | |
bb17b14f9cc0eaaeb740793ec62035edb8637a1f | 71f00ed87cd980bb2f92c08b085c5abe40a317fb | /Data/GoogleCloud/google-cloud-sdk/lib/surface/privateca/subordinates/activate.py | f9f73fb40b0bb8a564338a2a28bed7e1e5cf84c6 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | factoryofthesun/Rao-NLP | 2bd8269a8eed1cb352c14c8fde88e3111ccca088 | 87f9723f5ee51bd21310d58c3425a2a7271ec3c5 | refs/heads/master | 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 | Python | UTF-8 | Python | false | false | 3,547 | py | # Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Activate a pending Certificate Authority."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.privateca import operations
from googlecloudsdk.command_lib.privateca import pem_utils
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.core.util import files
class Activate(base.SilentCommand):
r"""Activate a subordinate certificate authority in a pending state.
## EXAMPLES
To activate a subordinate CA named 'server-tls-1' in the location 'us' using
a PEM certificate
chain in 'chain.crt':
$ {command} server-tls-1 \
--location us \
--pem-chain ./chain.crt
"""
@staticmethod
def Args(parser):
resource_args.AddCertificateAuthorityPositionalResourceArg(
parser, 'to activate')
base.Argument(
'--pem-chain',
required=True,
help='A file containing a list of PEM-encoded certificates, starting '
'with the current CA certificate and ending with the root CA '
'certificate.').AddToParser(parser)
def _ParsePemChainFromFile(self, pem_chain_file):
"""Parses a pem chain from a file, splitting the leaf cert and chain.
Args:
pem_chain_file: file containing the pem_chain.
Raises:
exceptions.InvalidArgumentException if not enough certificates are
included.
Returns:
A tuple with (leaf_cert, rest_of_chain)
"""
try:
pem_chain_input = files.ReadFileContents(pem_chain_file)
except (files.Error, OSError, IOError):
raise exceptions.BadFileException(
"Could not read provided PEM chain file '{}'.".format(pem_chain_file))
certs = pem_utils.ValidateAndParsePemChain(pem_chain_input)
if len(certs) < 2:
raise exceptions.InvalidArgumentException(
'pem-chain',
'The pem_chain must include at least two certificates - the subordinate CA certificate and an issuer certificate.'
)
return certs[0], certs[1:]
def Run(self, args):
client = privateca_base.GetClientInstance()
messages = privateca_base.GetMessagesModule()
ca_ref = args.CONCEPTS.certificate_authority.Parse()
pem_cert, pem_chain = self._ParsePemChainFromFile(args.pem_chain)
operation = client.projects_locations_certificateAuthorities.Activate(
messages
.PrivatecaProjectsLocationsCertificateAuthoritiesActivateRequest(
name=ca_ref.RelativeName(),
activateCertificateAuthorityRequest=messages
.ActivateCertificateAuthorityRequest(
pemCaCertificate=pem_cert, pemCaCertificateChain=pem_chain)))
operations.Await(operation, 'Activating Certificate Authority.')
| [
"[email protected]"
] | |
0b06190e016241e069caff14b930d190e7d5f83f | 00d1856dbceb6cef7f92d5ad7d3b2363a62446ca | /djexample/images/forms.py | dce42d13b42c6f5dec509f69a49c66092513e4b3 | [] | no_license | lafabo/django_by_example | 0b05d2b62117f70681c5fc5108b4072c097bc119 | 3cf569f3e6ead9c6b0199d150adf528bd0b2a7c5 | refs/heads/master | 2020-12-29T17:54:12.894125 | 2016-06-04T10:35:22 | 2016-06-04T10:35:22 | 58,313,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | from django import forms
from .models import Image
from urllib import request
from django.core.files.base import ContentFile
from django.utils.text import slugify
class ImageCreateForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title', 'url', 'description')
widgets = {
'url': forms.HiddenInput
}
def clean_url(self):
url = self.cleaned_data['url']
valid_extensions = ['jpg', 'jpeg']
extension = url.rsplit('.', 1)[1].lower()
if extension not in valid_extensions:
raise forms.ValidationError('The given url does not match valid image extensions')
return url
def save(self, force_insert=False, force_update=False, commit=True):
image = super(ImageCreateForm, self).save(commit=False)
image_url = self.cleaned_data['url']
image_name = '%s.%s' % (slugify(image.title), image_url.rsplit('.', 1)[1].lower())
# download image from url
response = request.urlopen(image_url)
image.image.save(image_name, ContentFile(response.read()), save=False)
if commit:
image.save()
return image
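# Illustrative usage sketch (added for clarity; not part of the original module and
# the names are hypothetical): a view would typically bind this form to request data,
# let clean_url() validate the extension, and let save() download the remote image:
#
#   form = ImageCreateForm(data=request.GET)
#   if form.is_valid():
#       new_image = form.save(commit=False)
#       new_image.user = request.user  # assumes the Image model has a user field
#       new_image.save()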
| [
"[email protected]"
] | |
548053fb510f44628c5bba5b2b7d3b962e5a86e1 | b0b87924d07101e25fa56754ceaa2f22edc10208 | /workspace/python_study/python_gspark/15-2.py | 88ec8fdb1e8e43f9901bf9017a64fa128a312bad | [] | no_license | SoheeKwak/Python | 2295dd03e5f235315d07355cbe72998f8b86c147 | e1a5f0ecf31e926f2320c5df0e3416306b8ce316 | refs/heads/master | 2020-04-02T13:49:58.367361 | 2018-11-23T09:33:23 | 2018-11-23T09:33:23 | 154,499,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | import numpy as np
a1 = np.arange(24)
a2 = np.arange(24).reshape((4,6))
a3 = np.arange(24).reshape((2,4,3))
a1[5]=1000
a2[0,1]=1000
a3[1,0,1]=1000 # 2nd row, 1st column, 2nd depth
print(a1)
print(a2)
print(a2[1:3,1:5])
print(a2[1:-1,1:-1])
print(a2[:,1:3])
a2[:,1:3]=99
print(a2)
a1 = np.arange(1,25).reshape(4,6)
even_a = a1%2==0
print(a1[even_a])
print("="*50)
import pandas as pd
rain = pd.read_csv("seattle.csv")
print(rain)
print("="*50)
rain_r = rain['PRCP']
print(rain_r)
print(type(rain_r)) #<class 'pandas.core.series.Series'>
print("="*50)
rain_r = rain['PRCP'].values
print(rain_r)
print(type(rain_r)) #<class 'numpy.ndarray'>
print("데이터 크기:",len(rain_r))
days_a = np.arange(0,365)
con_jan = days_a < 31 # True: 31 values, False: 334 values
print(con_jan[:40]) # precipitation data for the first 40 days from January 1
print("="*50)
print(con_jan) # precipitation data for January (31 days)
print(np.sum(rain_r[con_jan])) # total precipitation in January
print(np.mean(rain_r[con_jan])) # average precipitation in January
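# Additional illustration (not in the original script): the same boolean mask can
# also count how many January days recorded any precipitation at all.
print(np.sum(rain_r[con_jan] > 0)) # number of January days with nonzero precipitation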
a = np.arange(1,25).reshape((4,6))
# Fancy indexing: access data by passing an index array into the array
print(a)
print(a[0,0],a[1,1],a[2,2],a[3,3])
print(a[[0,1,2,3],[0,1,2,3]])
print(a[:,[1,2]]) # with an index list (no colon) inside the brackets, only those columns are selected, not a range
print(a[:,[1,3]])
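# Added illustration: an index list can also select (or reorder) whole rows.
print(a[[3,1,0],:]) # rows 3, 1 and 0 of a, in that order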
print("="*50)
# ravel: flatten an array to 1-D
a = np.random.randint(1,10,(2,3))
print(a)
print(a.ravel())
# resize: change the array size (the number of elements may change); reshape: change the shape (the number of elements stays the same)
print(a.shape)
a.resize((2,2))
print(a)
print("="*50)
a = np.random.randint(1,10,(2,6))
print(a)
a.resize((2,10)) # if the size grows, the added elements are filled with 0
print(a)
a.resize((3,3)) # if the size shrinks, elements are kept in order and the rest are dropped
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
b = np.arange(10,19).reshape(3,3)
res = np.append(a,b)
print(res) # printed as a 1-D array
print(a)
print(b)
print("="*50)
res = np.append(a,b, axis=0) # row direction, 2-D array
print(res)
print("="*50)
a = np.arange(1,10).reshape(3,3)
res = np.arange(10,20).reshape(2,5)
b = np.arange(10,19).reshape(3,3)
# np.append(a,res,axis=0) # append raises an error if the shapes don't match along the reference axis
# print(res)
print(a)
res = np.append(a,b,axis=1) # column direction, 2-D array
print(res)
print(b)
res = np.append(a,b,axis=0) # row direction, 2-D array
print(res)
# x = np.arange(10,20).reshape(2,5)
# np.append(res,x,axis=1) # error because the shapes differ
a = np.arange(1,10).reshape(3,3)
print(a)
a = np.insert(a,3,99) # 1-D: insert 99 at index 3
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,2,99, axis=0) # insert a row of 99s at index 2, along the rows
print(a)
a = np.arange(1,10).reshape(3,3)
a = np.insert(a,1,99, axis=1) # insert a column of 99s at index 1, along the columns
print(a)
print("="*50)
a = np.arange(1,10).reshape(3,3)
print(a)
print(np.delete(a,3)) # 1-D: delete the element at index 3
# print a with the row at index 1 removed
print(np.delete(a,1,axis=0))
# print a with the column at index 1 removed
print(np.delete(a,1,axis=1))
print("="*50)
# joining arrays (concatenate, vstack, hstack)
a = np.arange(1,7).reshape(2,3)
print(a)
b = np.arange(7,13).reshape(2,3)
print(b)
res = np.concatenate((a,b))
print(res)
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.vstack((a,b)))
print(np.vstack((a,b,a,b))) # vertical: stacked top to bottom
print("="*50)
a = np.arange(1,7).reshape(2,3)
b = np.arange(7,13).reshape(2,3)
print(np.hstack((a,b))) # horizontal: stacked side by side
print(np.hstack((a,b,a,b,a,b)))
print("="*50)
a = np.arange(1,25).reshape(4,6)
print(a)
res = np.hsplit(a,2) # split a into two groups, left and right
print(res)
res = np.hsplit(a,3)
print(res)
res = np.vsplit(a,2) # split a into two groups, top and bottom
print(res)
#
print("="*50)
x = np.array([1,2])
print(x)
print(x.dtype)
x = np.array([1.,2.])
print(x.dtype)
x = np.array([1,2],dtype=np.int64)
print(x.dtype)
x = np.array([[1,2],[3,4]])
y = np.array([[5,6],[7,8]])
v = np.array([9,10])
w = np.array([11,12])
# dot product of two vectors
print(np.dot(v,w)) #9*11+10*12=219
print(v.dot(w))
# matrix-vector product
print(x.dot(v)) #[1,2]*[9,10]+[3,4]*[9,10]=[29,67]
# matrix-matrix product
print(x)
print(y)
print(np.dot(x,y)) #1*5+2*7, 1*6+2*8, 3*5+4*7, 3*6+4*8
x = np.array([[1,2],[3,4]])
print(x)
print(x.T) # transpose: rows and columns are swapped
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
print(x)
v = np.array([1,0,1])
y = np.empty_like(x) # creates an array with the same shape as x
print(y)
print("="*50)
for i in range(4):
y[i,:] = x[i,:]+v #[2,2,4]=[1,2,3]+[1,0,1]
print(y)
print("="*50)
x = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
v = np.array([1,0,1])
vv = np.tile(v,(4,1)) # repeat v 4 times, stacking the copies as rows
print(vv)
vv = np.tile(v,(4,2))
print(vv)
vv = np.tile(v,(4,5))
print(vv)
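# Note added for illustration: NumPy broadcasting performs the same row-wise
# addition directly, without building the tiled matrix explicitly.
print(x + v) # v is broadcast across all 4 rows of x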
a = np.array([[1,2],[4,5]])
s = np.prod(a) # product over all elements
print(s)
s = np.prod(a,axis=0)
print(s)
s = np.prod(a,axis=1)
print(s)
s = np.max(np.prod(a,axis=1))
print(s)
| [
"[email protected]"
] | |
79062eb15e440d5eabf1579ae5b439589bb6db1b | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/statsmodels/2015/8/ar_model.py | 087a9e037a3426c2ccc54e0b8158c690ff99e06c | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 34,034 | py | from __future__ import division
from statsmodels.compat.python import iteritems, range, string_types, lmap
import numpy as np
from numpy import dot, identity
from numpy.linalg import inv, slogdet
from scipy.stats import norm
from statsmodels.regression.linear_model import OLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.model as base
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly, cache_writable)
from statsmodels.tools.numdiff import approx_fprime, approx_hess
from statsmodels.tsa.kalmanf.kalmanfilter import KalmanFilter
import statsmodels.base.wrapper as wrap
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.base.datetools import _index_date
__all__ = ['AR']
def sumofsq(x, axis=0):
"""Helper function to calculate sum of squares along first axis"""
return np.sum(x**2, axis=0)
def _check_ar_start(start, k_ar, method, dynamic):
if (method == 'cmle' or dynamic) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _validate(start, k_ar, dates, method):
"""
Checks the date and then returns an integer
"""
from datetime import datetime
if isinstance(start, (string_types, datetime)):
start_date = start
start = _index_date(start, dates)
if 'mle' not in method and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE or "
"dynamic forecast. Got %s" % start_date)
return start
def _ar_predict_out_of_sample(y, params, p, k_trend, steps, start=0):
mu = params[:k_trend] or 0 # only have to worry about constant
arparams = params[k_trend:][::-1] # reverse for dot
# dynamic endogenous variable
endog = np.zeros(p + steps) # this is one too big but doesn't matter
if start:
endog[:p] = y[start-p:start]
else:
endog[:p] = y[-p:]
forecast = np.zeros(steps)
for i in range(steps):
fcast = mu + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i + p] = fcast
return forecast
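# Worked illustration (added comment only, nothing is executed): for an AR(1) with a
# constant, params = [1.0, 0.5], p = 1, k_trend = 1 and a series ending in 4.0,
# _ar_predict_out_of_sample(y, np.array([1.0, 0.5]), 1, 1, steps=2) recurses as
#   f1 = 1.0 + 0.5*4.0 = 3.0,   f2 = 1.0 + 0.5*3.0 = 2.5
# i.e. each forecast is fed back in as the lagged value for the next step.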
class AR(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : "Autoregressive AR(p) model",
"params" : """endog : array-like
        1-d endogenous response variable. The dependent variable.""",
"extra_params" : base._missing_param_doc,
"extra_sections" : ""}
def __init__(self, endog, dates=None, freq=None, missing='none'):
super(AR, self).__init__(endog, None, dates, freq, missing=missing)
endog = self.endog # original might not have been an ndarray
if endog.ndim == 1:
endog = endog[:, None]
self.endog = endog # to get shapes right
elif endog.ndim > 1 and endog.shape[1] != 1:
raise ValueError("Only the univariate case is implemented")
def initialize(self):
pass
def _transparams(self, params):
"""
        Transforms params to induce stationarity/invertibility.
Reference
---------
Jones(1980)
"""
p = self.k_ar
k = self.k_trend
newparams = params.copy()
newparams[k:k+p] = _ar_transparams(params[k:k+p].copy())
return newparams
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
p = self.k_ar
k = self.k_trend
newparams = start_params.copy()
newparams[k:k+p] = _ar_invtransparams(start_params[k:k+p].copy())
return newparams
def _presample_fit(self, params, start, p, end, y, predictedvalues):
"""
Return the pre-sample predicted values using the Kalman Filter
Notes
-----
See predict method for how to use start and p.
"""
k = self.k_trend
# build system matrices
T_mat = KalmanFilter.T(params, p, k, p)
R_mat = KalmanFilter.R(params, p, k, 0, p)
# Initial State mean and variance
alpha = np.zeros((p, 1))
Q_0 = dot(inv(identity(p**2)-np.kron(T_mat, T_mat)),
dot(R_mat, R_mat.T).ravel('F'))
Q_0 = Q_0.reshape(p, p, order='F') # TODO: order might need to be p+k
P = Q_0
Z_mat = KalmanFilter.Z(p)
for i in range(end): # iterate p-1 times to fit presample
v_mat = y[i] - dot(Z_mat, alpha)
F_mat = dot(dot(Z_mat, P), Z_mat.T)
Finv = 1./F_mat # inv. always scalar
K = dot(dot(dot(T_mat, P), Z_mat.T), Finv)
# update state
alpha = dot(T_mat, alpha) + dot(K, v_mat)
L = T_mat - dot(K, Z_mat)
P = dot(dot(T_mat, P), L.T) + dot(R_mat, R_mat.T)
#P[0,0] += 1 # for MA part, R_mat.R_mat.T above
if i >= start - 1: # only record if we ask for it
predictedvalues[i + 1 - start] = dot(Z_mat, alpha)
def _get_predict_start(self, start, dynamic):
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if method == 'mle' and not dynamic:
start = 0
else: # can't do presample fit for cmle or dynamic
start = k_ar
elif isinstance(start, int):
start = super(AR, self)._get_predict_start(start)
else: # should be a date
start = _validate(start, k_ar, self.data.dates, method)
start = super(AR, self)._get_predict_start(start)
_check_ar_start(start, k_ar, method, dynamic)
self._set_predict_start_date(start)
return start
def predict(self, params, start=None, end=None, dynamic=False):
"""
Returns in-sample and out-of-sample prediction.
Parameters
----------
params : array
The fitted model parameters.
start : int, str, or datetime
Zero-indexed observation number at which to start forecasting, ie.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
Zero-indexed observation number at which to end forecasting, ie.,
            the last forecast is end. Can also be a date string to
parse or a datetime type.
dynamic : bool
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
Returns
-------
predicted values : array
Notes
-----
The linear Gaussian Kalman filter is used to return pre-sample fitted
values. The exact initial Kalman Filter is used. See Durbin and Koopman
in the references for more information.
"""
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
if start - end > 1:
raise ValueError("end is before start")
k_ar = self.k_ar
k_trend = self.k_trend
method = self.method
endog = self.endog.squeeze()
if dynamic:
out_of_sample += end - start + 1
return _ar_predict_out_of_sample(endog, params, k_ar,
k_trend, out_of_sample, start)
predictedvalues = np.zeros(end + 1 - start)
# fit pre-sample
if method == 'mle': # use Kalman Filter to get initial values
if k_trend:
mu = params[0]/(1-np.sum(params[k_trend:]))
# modifies predictedvalues in place
if start < k_ar:
self._presample_fit(params, start, k_ar, min(k_ar-1, end),
endog[:k_ar] - mu, predictedvalues)
predictedvalues[:k_ar-start] += mu
if end < k_ar:
return predictedvalues
# just do the whole thing and truncate
fittedvalues = dot(self.X, params)
pv_start = max(k_ar - start, 0)
fv_start = max(start - k_ar, 0)
fv_end = min(len(fittedvalues), end-k_ar+1)
predictedvalues[pv_start:] = fittedvalues[fv_start:fv_end]
if out_of_sample:
forecastvalues = _ar_predict_out_of_sample(endog, params,
k_ar, k_trend,
out_of_sample)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
def _presample_varcov(self, params):
"""
Returns the inverse of the presample variance-covariance.
Notes
-----
See Hamilton p. 125
"""
k = self.k_trend
p = self.k_ar
p1 = p+1
# get inv(Vp) Hamilton 5.3.7
params0 = np.r_[-1, params[k:]]
Vpinv = np.zeros((p, p), dtype=params.dtype)
for i in range(1, p1):
Vpinv[i-1, i-1:] = np.correlate(params0, params0[:i],
old_behavior=False)[:-1]
Vpinv[i-1, i-1:] -= np.correlate(params0[-i:], params0,
old_behavior=False)[:-1]
Vpinv = Vpinv + Vpinv.T - np.diag(Vpinv.diagonal())
return Vpinv
def _loglike_css(self, params):
"""
Loglikelihood of AR(p) process using conditional sum of squares
"""
nobs = self.nobs
Y = self.Y
X = self.X
ssr = sumofsq(Y.squeeze() - np.dot(X, params))
sigma2 = ssr/nobs
return (-nobs/2 * (np.log(2 * np.pi) + np.log(sigma2)) -
ssr/(2 * sigma2))
def _loglike_mle(self, params):
"""
Loglikelihood of AR(p) process using exact maximum likelihood
"""
nobs = self.nobs
X = self.X
endog = self.endog
k_ar = self.k_ar
k_trend = self.k_trend
# reparameterize according to Jones (1980) like in ARMA/Kalman Filter
if self.transparams:
params = self._transparams(params)
# get mean and variance for pre-sample lags
yp = endog[:k_ar].copy()
if k_trend:
c = [params[0]] * k_ar
else:
c = [0]
mup = np.asarray(c / (1 - np.sum(params[k_trend:])))
diffp = yp - mup[:, None]
# get inv(Vp) Hamilton 5.3.7
Vpinv = self._presample_varcov(params)
diffpVpinv = np.dot(np.dot(diffp.T, Vpinv), diffp).item()
ssr = sumofsq(endog[k_ar:].squeeze() - np.dot(X, params))
# concentrating the likelihood means that sigma2 is given by
sigma2 = 1./nobs * (diffpVpinv + ssr)
self.sigma2 = sigma2
logdet = slogdet(Vpinv)[1] # TODO: add check for singularity
loglike = -1/2. * (nobs * (np.log(2 * np.pi) + np.log(sigma2)) -
logdet + diffpVpinv / sigma2 + ssr / sigma2)
return loglike
def loglike(self, params):
"""
The loglikelihood of an AR(p) process
Parameters
----------
params : array
The fitted parameters of the AR model
Returns
-------
llf : float
The loglikelihood evaluated at `params`
Notes
-----
Contains constant term. If the model is fit by OLS then this returns
        the conditional maximum likelihood.
.. math:: \\frac{\\left(n-p\\right)}{2}\\left(\\log\\left(2\\pi\\right)+\\log\\left(\\sigma^{2}\\right)\\right)-\\frac{1}{\\sigma^{2}}\\sum_{i}\\epsilon_{i}^{2}
If it is fit by MLE then the (exact) unconditional maximum likelihood
is returned.
.. math:: -\\frac{n}{2}log\\left(2\\pi\\right)-\\frac{n}{2}\\log\\left(\\sigma^{2}\\right)+\\frac{1}{2}\\left|V_{p}^{-1}\\right|-\\frac{1}{2\\sigma^{2}}\\left(y_{p}-\\mu_{p}\\right)^{\\prime}V_{p}^{-1}\\left(y_{p}-\\mu_{p}\\right)-\\frac{1}{2\\sigma^{2}}\\sum_{t=p+1}^{n}\\epsilon_{i}^{2}
where
:math:`\\mu_{p}` is a (`p` x 1) vector with each element equal to the
mean of the AR process and :math:`\\sigma^{2}V_{p}` is the (`p` x `p`)
variance-covariance matrix of the first `p` observations.
"""
#TODO: Math is on Hamilton ~pp 124-5
if self.method == "cmle":
return self._loglike_css(params)
else:
return self._loglike_mle(params)
def score(self, params):
"""
Return the gradient of the loglikelihood at params.
Parameters
----------
params : array-like
The parameter values at which to evaluate the score function.
Notes
-----
Returns numerical gradient.
"""
loglike = self.loglike
return approx_fprime(params, loglike, epsilon=1e-8)
def information(self, params):
"""
Not Implemented Yet
"""
return
def hessian(self, params):
"""
Returns numerical hessian for now.
"""
loglike = self.loglike
return approx_hess(params, loglike)
def _stackX(self, k_ar, trend):
"""
Private method to build the RHS matrix for estimation.
Columns are trend terms then lags.
"""
endog = self.endog
X = lagmat(endog, maxlag=k_ar, trim='both')
k_trend = util.get_trendorder(trend)
if k_trend:
X = add_trend(X, prepend=True, trend=trend)
self.k_trend = k_trend
return X
def select_order(self, maxlag, ic, trend='c', method='mle'):
"""
Select the lag order according to the information criterion.
Parameters
----------
maxlag : int
The highest lag length tried. See `AR.fit`.
ic : str {'aic','bic','hqic','t-stat'}
Criterion used for selecting the optimal lag length.
See `AR.fit`.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
Returns
-------
bestlag : int
Best lag according to IC.
"""
endog = self.endog
# make Y and X with same nobs to compare ICs
Y = endog[maxlag:]
self.Y = Y # attach to get correct fit stats
X = self._stackX(maxlag, trend) # sets k_trend
self.X = X
k = self.k_trend # k_trend set in _stackX
k = max(1, k) # handle if startlag is 0
results = {}
if ic != 't-stat':
for lag in range(k, maxlag+1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag-lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=100, disp=0)
results[lag] = eval('fit.'+ic)
bestic, bestlag = min((res, k) for k, res in iteritems(results))
else: # choose by last t-stat.
stop = 1.6448536269514722 # for t-stat, norm.ppf(.95)
for lag in range(maxlag, k - 1, -1):
# have to reinstantiate the model to keep comparable models
endog_tmp = endog[maxlag - lag:]
fit = AR(endog_tmp).fit(maxlag=lag, method=method,
full_output=0, trend=trend,
maxiter=35, disp=-1)
if np.abs(fit.tvalues[-1]) >= stop:
bestlag = lag
break
return bestlag
def fit(self, maxlag=None, method='cmle', ic=None, trend='c',
transparams=True, start_params=None, solver='lbfgs', maxiter=35,
full_output=1, disp=1, callback=None, **kwargs):
"""
Fit the unconditional maximum likelihood of an AR(p) process.
Parameters
----------
maxlag : int
If `ic` is None, then maxlag is the lag length used in fit. If
`ic` is specified then maxlag is the highest lag order used to
select the correct lag order. If maxlag is None, the default is
round(12*(nobs/100.)**(1/4.))
method : str {'cmle', 'mle'}, optional
cmle - Conditional maximum likelihood using OLS
mle - Unconditional (exact) maximum likelihood. See `solver`
and the Notes.
ic : str {'aic','bic','hic','t-stat'}
Criterion used for selecting the optimal lag length.
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
hqic - Hannan-Quinn Information Criterion
If any of the information criteria are selected, the lag length
which results in the lowest value is selected. If t-stat, the
model starts with maxlag and drops a lag until the highest lag
has a t-stat that is significant at the 95 % level.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' - include constant.
'nc' - no constant.
The below can be specified if method is 'mle'
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980).
start_params : array-like, optional
A first guess on the parameters. Default is cmle estimates.
solver : str or None, optional
Solver to be used if method is 'mle'. The default is 'lbfgs'
(limited memory Broyden-Fletcher-Goldfarb-Shanno). Other choices
are 'bfgs', 'newton' (Newton-Raphson), 'nm' (Nelder-Mead),
'cg' - (conjugate gradient), 'ncg' (non-conjugate gradient),
and 'powell'.
maxiter : int, optional
The maximum number of function evaluations. Default is 35.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is output.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
References
----------
Jones, R.H. 1980 "Maximum likelihood fitting of ARMA models to time
series with missing observations." `Technometrics`. 22.3.
389-95.
See also
--------
statsmodels.base.model.LikelihoodModel.fit
"""
method = method.lower()
if method not in ['cmle', 'yw', 'mle']:
raise ValueError("Method %s not recognized" % method)
self.method = method
self.trend = trend
self.transparams = transparams
nobs = len(self.endog) # overwritten if method is 'cmle'
endog = self.endog
if maxlag is None:
maxlag = int(round(12*(nobs/100.)**(1/4.)))
k_ar = maxlag # stays this if ic is None
# select lag length
if ic is not None:
ic = ic.lower()
if ic not in ['aic', 'bic', 'hqic', 't-stat']:
raise ValueError("ic option %s not understood" % ic)
k_ar = self.select_order(k_ar, ic, trend, method)
self.k_ar = k_ar # change to what was chosen by ic
# redo estimation for best lag
# make LHS
Y = endog[k_ar:, :]
# make lagged RHS
X = self._stackX(k_ar, trend) # sets self.k_trend
k_trend = self.k_trend
self.exog_names = util.make_lag_names(self.endog_names, k_ar, k_trend)
self.Y = Y
self.X = X
if method == "cmle": # do OLS
arfit = OLS(Y, X).fit()
params = arfit.params
self.nobs = nobs - k_ar
self.sigma2 = arfit.ssr/arfit.nobs # needed for predict fcasterr
elif method == "mle":
solver = solver.lower()
self.nobs = nobs
if start_params is None:
start_params = OLS(Y, X).fit().params
else:
if len(start_params) != k_trend + k_ar:
raise ValueError("Length of start params is %d. There"
" are %d parameters." %
(len(start_params), k_trend + k_ar))
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(AR, self).fit(start_params=start_params,
method=solver, maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if self.transparams:
params = self._transparams(params)
self.transparams = False # turn off now for other results
# don't use yw, because we can't estimate the constant
#elif method == "yw":
# params, omega = yule_walker(endog, order=maxlag,
# method="mle", demean=False)
# how to handle inference after Yule-Walker?
# self.params = params #TODO: don't attach here
# self.omega = omega
pinv_exog = np.linalg.pinv(X)
normalized_cov_params = np.dot(pinv_exog, pinv_exog.T)
arfit = ARResults(self, params, normalized_cov_params)
if method == 'mle' and full_output:
arfit.mle_retvals = mlefit.mle_retvals
arfit.mle_settings = mlefit.mle_settings
return ARResultsWrapper(arfit)
class ARResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an AR model.
Parameters
----------
model : AR Model instance
Reference to the model that is fit.
params : array
The fitted parameters from the AR Model.
normalized_cov_params : array
inv(dot(X.T,X)) where X is the lagged values.
scale : float, optional
An estimate of the scale of the model.
Returns
-------
**Attributes**
aic : float
        Akaike Information Criterion using Lutkepohl's definition.
:math:`log(sigma) + 2*(1 + k_ar + k_trend)/nobs`
bic : float
Bayes Information Criterion
:math:`\\log(\\sigma) + (1 + k_ar + k_trend)*\\log(nobs)/nobs`
bse : array
The standard errors of the estimated parameters. If `method` is 'cmle',
then the standard errors that are returned are the OLS standard errors
of the coefficients. If the `method` is 'mle' then they are computed
using the numerical Hessian.
fittedvalues : array
The in-sample predicted values of the fitted AR model. The `k_ar`
initial values are computed via the Kalman Filter if the model is
fit by `mle`.
fpe : float
Final prediction error using Lutkepohl's definition
((n_totobs+k_trend)/(n_totobs-k_ar-k_trend))*sigma
hqic : float
Hannan-Quinn Information Criterion.
k_ar : float
Lag length. Sometimes used as `p` in the docs.
k_trend : float
The number of trend terms included. 'nc'=0, 'c'=1.
llf : float
The loglikelihood of the model evaluated at `params`. See `AR.loglike`
model : AR model instance
A reference to the fitted AR model.
nobs : float
The number of available observations `nobs` - `k_ar`
n_totobs : float
The number of total observations in `endog`. Sometimes `n` in the docs.
params : array
The fitted parameters of the model.
pvalues : array
The p values associated with the standard errors.
resid : array
The residuals of the model. If the model is fit by 'mle' then the
pre-sample residuals are calculated using fittedvalues from the Kalman
Filter.
roots : array
The roots of the AR process are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 -...- arparams[p-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
scale : float
Same as sigma2
sigma2 : float
The variance of the innovations (residuals).
trendorder : int
The polynomial order of the trend. 'nc' = None, 'c' or 't' = 0,
'ct' = 1, etc.
tvalues : array
The t-values associated with `params`.
"""
_cache = {} # for scale setter
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARResults, self).__init__(model, params, normalized_cov_params,
scale)
self._cache = resettable_cache()
self.nobs = model.nobs
n_totobs = len(model.endog)
self.n_totobs = n_totobs
self.X = model.X # copy?
self.Y = model.Y
k_ar = model.k_ar
self.k_ar = k_ar
k_trend = model.k_trend
self.k_trend = k_trend
trendorder = None
if k_trend > 0:
trendorder = k_trend - 1
self.trendorder = trendorder
#TODO: cmle vs mle?
self.df_model = k_ar + k_trend
self.df_resid = self.model.df_resid = n_totobs - self.df_model
@cache_writable()
def sigma2(self):
model = self.model
if model.method == "cmle": # do DOF correction
return 1. / self.nobs * sumofsq(self.resid)
else:
return self.model.sigma2
    @cache_writable() # for compatibility with RegressionResults
def scale(self):
return self.sigma2
@cache_readonly
def bse(self): # allow user to specify?
if self.model.method == "cmle": # uses different scale/sigma def.
resid = self.resid
ssr = np.dot(resid, resid)
ols_scale = ssr / (self.nobs - self.k_ar - self.k_trend)
return np.sqrt(np.diag(self.cov_params(scale=ols_scale)))
else:
hess = approx_hess(self.params, self.model.loglike)
return np.sqrt(np.diag(-np.linalg.inv(hess)))
@cache_readonly
def pvalues(self):
return norm.sf(np.abs(self.tvalues))*2
@cache_readonly
def aic(self):
#JP: this is based on loglike with dropped constant terms ?
# Lutkepohl
#return np.log(self.sigma2) + 1./self.model.nobs * self.k_ar
# Include constant as estimated free parameter and double the loss
return np.log(self.sigma2) + 2 * (1 + self.df_model)/self.nobs
        # Stata definition
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * (self.k_ar+self.k_trend)/nobs
@cache_readonly
def hqic(self):
nobs = self.nobs
# Lutkepohl
# return np.log(self.sigma2)+ 2 * np.log(np.log(nobs))/nobs * self.k_ar
# R uses all estimated parameters rather than just lags
return (np.log(self.sigma2) + 2 * np.log(np.log(nobs))/nobs *
(1 + self.df_model))
# Stata
#nobs = self.nobs
#return -2 * self.llf/nobs + 2 * np.log(np.log(nobs))/nobs * \
# (self.k_ar + self.k_trend)
@cache_readonly
def fpe(self):
nobs = self.nobs
df_model = self.df_model
#Lutkepohl
return ((nobs+df_model)/(nobs-df_model))*self.sigma2
@cache_readonly
def bic(self):
nobs = self.nobs
# Lutkepohl
#return np.log(self.sigma2) + np.log(nobs)/nobs * self.k_ar
# Include constant as est. free parameter
return np.log(self.sigma2) + (1 + self.df_model) * np.log(nobs)/nobs
# Stata
# return -2 * self.llf/nobs + np.log(nobs)/nobs * (self.k_ar + \
# self.k_trend)
@cache_readonly
def resid(self):
#NOTE: uses fittedvalues because it calculate presample values for mle
model = self.model
endog = model.endog.squeeze()
if model.method == "cmle": # elimate pre-sample
return endog[self.k_ar:] - self.fittedvalues
else:
return model.endog.squeeze() - self.fittedvalues
#def ssr(self):
# resid = self.resid
# return np.dot(resid, resid)
@cache_readonly
def roots(self):
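        # Added note: self.params[k:] are the AR coefficients (the first k
        # entries are trend terms). np.roots solves the reversed lag
        # polynomial and the trailing ** -1 maps those roots onto the roots
        # of 1 - a_1*z - ... - a_p*z**p described in the class docstring,
        # so stability means every returned root lies outside the unit circle.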
k = self.k_trend
return np.roots(np.r_[1, -self.params[k:]]) ** -1
@cache_readonly
def fittedvalues(self):
return self.model.predict(self.params)
def predict(self, start=None, end=None, dynamic=False):
params = self.params
predictedvalues = self.model.predict(params, start, end, dynamic)
return predictedvalues
#start = self.model._get_predict_start(start)
#end, out_of_sample = self.model._get_predict_end(end)
##TODO: return forecast errors and confidence intervals
#from statsmodels.tsa.arima_process import arma2ma
#ma_rep = arma2ma(np.r_[1,-params[::-1]], [1], out_of_sample)
#fcasterr = np.sqrt(self.sigma2 * np.cumsum(ma_rep**2))
preddoc = AR.predict.__doc__.split('\n')
extra_doc = (""" confint : bool, float
Whether to return confidence intervals. If `confint` == True,
95 % confidence intervals are returned. Else if `confint` is a
float, then it is assumed to be the alpha value of the confidence
interval. That is confint == .05 returns a 95% confidence
interval, and .10 would return a 90% confidence interval."""
).split('\n')
#ret_doc = """
# fcasterr : array-like
# confint : array-like
#"""
predict.__doc__ = '\n'.join(preddoc[:5] + preddoc[7:20] + extra_doc +
preddoc[20:])
class ARResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARResultsWrapper, ARResults)
if __name__ == "__main__":
import statsmodels.api as sm
sunspots = sm.datasets.sunspots.load()
    # Why does R demean the data by default?
ar_ols = AR(sunspots.endog)
res_ols = ar_ols.fit(maxlag=9)
ar_mle = AR(sunspots.endog)
res_mle_bfgs = ar_mle.fit(maxlag=9, method="mle", solver="bfgs",
maxiter=500, gtol=1e-10)
# res_mle2 = ar_mle.fit(maxlag=1, method="mle", maxiter=500, penalty=True,
# tol=1e-13)
# ar_yw = AR(sunspots.endog)
# res_yw = ar_yw.fit(maxlag=4, method="yw")
# # Timings versus talkbox
# from timeit import default_timer as timer
# print "Time AR fit vs. talkbox"
# # generate a long series of AR(2) data
#
# nobs = 1000000
# y = np.empty(nobs)
# y[0:2] = 0
# for i in range(2,nobs):
# y[i] = .25 * y[i-1] - .75 * y[i-2] + np.random.rand()
#
# mod_sm = AR(y)
# t = timer()
# res_sm = mod_sm.fit(method="yw", trend="nc", demean=False, maxlag=2)
# t_end = timer()
# print str(t_end - t) + " seconds for sm.AR with yule-walker, 2 lags"
# try:
# import scikits.talkbox as tb
# except:
# raise ImportError("You need scikits.talkbox installed for timings")
# t = timer()
# mod_tb = tb.lpc(y, 2)
# t_end = timer()
# print str(t_end - t) + " seconds for talkbox.lpc"
# print """For higher lag lengths ours quickly fills up memory and starts
#thrashing the swap. Should we include talkbox C code or Cythonize the
#Levinson recursion algorithm?"""
## Try with a pandas series
import pandas
import scikits.timeseries as ts
d1 = ts.Date(year=1700, freq='A')
#NOTE: have to have yearBegin offset for annual data until parser rewrite
#should this be up to the user, or should it be done in TSM init?
#NOTE: not anymore, it's end of year now
ts_dr = ts.date_array(start_date=d1, length=len(sunspots.endog))
pandas_dr = pandas.DateRange(start=d1.datetime,
periods=len(sunspots.endog), timeRule='A@DEC')
#pandas_dr = pandas_dr.shift(-1, pandas.datetools.yearBegin)
dates = np.arange(1700, 1700 + len(sunspots.endog))
dates = ts.date_array(dates, freq='A')
#sunspots = pandas.TimeSeries(sunspots.endog, index=dates)
#NOTE: pandas only does business days for dates it looks like
import datetime
dt_dates = np.asarray(lmap(datetime.datetime.fromordinal,
ts_dr.toordinal().astype(int)))
sunspots = pandas.TimeSeries(sunspots.endog, index=dt_dates)
#NOTE: pandas can't handle pre-1900 dates
mod = AR(sunspots, freq='A')
res = mod.fit(method='mle', maxlag=9)
# some data for an example in Box Jenkins
IBM = np.asarray([460, 457, 452, 459, 462, 459, 463, 479, 493, 490.])
w = np.diff(IBM)
theta = .5
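    # Added illustration (not in the original script): using the `roots`
    # attribute documented above to check stationarity of the fitted model.
    # Stability requires every root to lie outside the unit circle in
    # modulus, so the check below is a direct translation of that condition.
    ar_roots = res_mle_bfgs.roots
    print("AR roots:", ar_roots)
    print("looks stationary:", np.all(np.abs(ar_roots) > 1))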
| [
"[email protected]"
] | |
2a267560a422f7c6eff4da4d5177892beb9c99f9 | abeec076f89231c4dd589e84def8301e653d6e20 | /orders/views.DEP.py | 9ac624bc2133c17490ffaf2dc25abdf9178452e3 | [] | no_license | gibil5/pcm_restaurant | 1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a | a56ec01c533ed2b6e198de9813f9518a3eca2d14 | refs/heads/master | 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | def add_order(request, employee_id):
print()
print('Add order')
title = 'Add Order'
cook = get_object_or_404(Employee, pk=employee_id) # Get Object
print(cook)
table = Table.objects.first()
# Create and populate
if request.method == 'POST':
print('Create and populate')
form = lib.NewOrderForm(request.POST)
if form.is_valid():
form_instance = lib.NewOrderForm(request.POST)
form_instance.cook_id = 1
new_order = form_instance.save()
return HttpResponseRedirect('/orders/thanks/')
# Create a blank form
else:
order = Order()
#order = Order.objects.create(cook=cook)
#order.save()
#form = lib.NewOrderForm(instance=order)
form = lib.NewOrderForm(
instance=order,
initial={
'cook': cook,
'table': table,
},
)
#form.cook = cook
ctx = {
'title': title,
'form': form,
}
output = render(request, 'orders/add.html', ctx)
return HttpResponse(output)
| [
"[email protected]"
] | |
0302d39e78724531b2f09e38788aa5f669609958 | 82ca891008793f570668a7f2c760ae0f22d40494 | /src/VAMPzero/Component/Wing/Aileron/Geometry/tipYLocation.py | 078b55d940fb68c2cb44be5e6bcef8800cd0f839 | [
"Apache-2.0"
] | permissive | p-chambers/VAMPzero | 22f20415e83140496b1c5702b6acbb76a5b7bf52 | 4b11d059b1c7a963ec7e7962fa12681825bc2f93 | refs/heads/master | 2021-01-19T10:49:06.393888 | 2015-06-24T10:33:41 | 2015-06-24T10:33:41 | 82,208,448 | 1 | 0 | null | 2017-02-16T17:42:55 | 2017-02-16T17:42:55 | null | UTF-8 | Python | false | false | 2,598 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: [email protected] and [email protected]
'''
from cmath import pi
from VAMPzero.Handler.Parameter import parameter
rad = pi / 180.
class tipYLocation(parameter):
'''
Calculates the spanwise tip location of the aileron, measured from the fuselage center line
(note: only one aileron is considered).
:Unit: [m]
:Source:
:Author: Lisanne van Veen
'''
def __init__(self, value=0., unit='m', parent='', cpacsPath=''):
super(tipYLocation, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
cpacsPath=cpacsPath)
def calc(self):
'''
The function is a statistical relation obtained by analyzing data of large passenger aircraft.
The design space of this equation is:
* refAreaWing 72.72 - 845 m2
* spanWing 26 - 79.80 m
:Source:
'''
refAreaWing = self.parent.wing.refArea.getValue()
spanWing = self.parent.wing.span.getValue()
tipYLocationAileron = - 2.103872236 + 0.5286847608 * spanWing + 0.00004371791524 * (refAreaWing ** 2) \
- 0.0007899727342 * spanWing * refAreaWing + 0.002586029039 * (spanWing ** 2)
# if the spanwise tip location of the aileron is larger than half of the wing span
# set the location of the spanwise tip location equal to 95% of the half wing span
if tipYLocationAileron > (spanWing / 2.):
tipYLocationAileron = (spanWing / 2.) * 0.95
return self.setValueCalc(tipYLocationAileron)
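# Added illustration only (not part of VAMPzero): evaluating the statistical
# relation from calc() above for one assumed wing that lies inside the stated
# design space (refAreaWing 72.72 - 845 m2, spanWing 26 - 79.80 m). The
# numeric inputs below are assumptions chosen just to show the formula in use.
if __name__ == '__main__':
    refAreaWing = 122.4  # assumed reference wing area [m2]
    spanWing = 34.1      # assumed wing span [m]
    tipY = (- 2.103872236 + 0.5286847608 * spanWing
            + 0.00004371791524 * (refAreaWing ** 2)
            - 0.0007899727342 * spanWing * refAreaWing
            + 0.002586029039 * (spanWing ** 2))
    if tipY > (spanWing / 2.):
        tipY = (spanWing / 2.) * 0.95
    print('aileron tip y location [m]:', tipY)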
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
################################################################################################### | [
"[email protected]"
] | |
b8e22c9f0854b5dda5191d086ca45baaa3e98d35 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/twelve-days/5d8ab06a7a6b4acdb6be11d098786e90.py | 8c8955c509d158350c07858c3b2a1c0d850b89cb | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 2,893 | py | #twelve-days
def verse(day):
day = day
if day == 1:
return "On the first day of Christmas my true love gave to me, a Partridge in a Pear Tree.\n"
elif day == 2:
return "On the second day of Christmas my true love gave to me, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==3:
return "On the third day of Christmas my true love gave to me, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==4:
return "On the fourth day of Christmas my true love gave to me, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==5:
return "On the fifth day of Christmas my true love gave to me, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==6:
return "On the sixth day of Christmas my true love gave to me, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==7:
return "On the seventh day of Christmas my true love gave to me, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==8:
return "On the eighth day of Christmas my true love gave to me, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==9:
return "On the ninth day of Christmas my true love gave to me, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==10:
return "On the tenth day of Christmas my true love gave to me, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
elif day ==11:
return "On the eleventh day of Christmas my true love gave to me, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
else:
return "On the twelfth day of Christmas my true love gave to me, twelve Drummers Drumming, eleven Pipers Piping, ten Lords-a-Leaping, nine Ladies Dancing, eight Maids-a-Milking, seven Swans-a-Swimming, six Geese-a-Laying, five Gold Rings, four Calling Birds, three French Hens, two Turtle Doves, and a Partridge in a Pear Tree.\n"
def verses(start, end):
result = ''
for i in range (start,end+1):
result += verse(i)+'\n'
return result
def sing():
return verses(1,12)
| [
"[email protected]"
] | |
6b0c2baf8952bfe46d3c9ac541be5644748044b9 | e6a48a7d5ee2df232355f5d5488fa1cd3c53ce89 | /tests.py | 7f298b580cf0a2453c734408872a2479a954b2cd | [] | no_license | charleycodes/testing-py | f1e07cb678d52e26cd1cdb6bc34dcf7a3c2b331f | 963a50d0074083cf02a253ef77cef46db5c7ff7a | refs/heads/master | 2021-06-05T11:10:35.950108 | 2016-10-14T19:57:53 | 2016-10-14T19:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | import unittest
import party
class PartyTests(unittest.TestCase):
"""Tests for my party site."""
def setUp(self):
self.client = party.app.test_client()
party.app.config['TESTING'] = True
def test_homepage(self):
result = self.client.get("/")
self.assertIn("I'm having a party", result.data)
def test_no_rsvp_yet(self):
# FIXME: Add a test to show we haven't RSVP'd yet
result = self.client.get("/")
self.assertNotIn('<h2>Party Details</h2>', result.data)
self.assertIn('<h2>Please RSVP</h2>', result.data)
def test_rsvp(self):
result = self.client.post("/rsvp",
data={'name': "Jane",
'email': "[email protected]"},
follow_redirects=True)
self.assertIn('<h2>Party Details</h2>', result.data)
self.assertNotIn('<h2>Please RSVP</h2>', result.data)
def test_rsvp_mel(self):
result = self.client.post("/rsvp",
data={'name': "Mel Melitpolski",
'email': "[email protected]"},
follow_redirects=True)
self.assertNotIn('<h2>Party Details</h2>', result.data)
self.assertIn('<h2>Please RSVP</h2>', result.data)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
4f5bb79b857664488c0166556416293328d76651 | b45230162af7ea65416f61cbbbcf1011a422692b | /tests/test_pygrade.py | 3508d0f3021904959e65229d569c26efa43bef25 | [
"ISC"
] | permissive | Joaron4/pygrade | 47a12ce4e8925d20e0d4384f4f39a102bf149f97 | 68416ba92afd3ef634a83560935941d03265df8f | refs/heads/master | 2023-03-16T18:47:48.576434 | 2020-12-01T02:52:15 | 2020-12-01T02:52:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pygrade
----------------------------------
Tests for `pygrade` module.
"""
import unittest
from pygrade import pygrade
class TestPygrade(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_000_something(self):
pass
if __name__ == '__main__':
import sys
sys.exit(unittest.main())
| [
"[email protected]"
] | |
93d2a93c3766b10060f1163b0518cd03a037d4de | e2468c60810764971f2dae2b959650b553042810 | /1859_sortingTheSentence.py | e35fc60b5dd3422f73069456f2c324e9ddef7fc4 | [] | no_license | awesome-liuxiao/leetcodesolution | 9a01b6f36266149ae7fe00625785d1ada41f190a | 3637cd1347b5153daeeb855ebc44cfea5649fc90 | refs/heads/master | 2023-06-08T13:42:14.653688 | 2023-06-01T08:39:35 | 2023-06-01T08:39:35 | 213,380,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | class Solution:
def sortSentence(self, s: str) -> str:
ls = s.split()
res = [""]*len(ls)
for word in ls:
res[int(word[-1])-1] = word[0:len(word)-1]
return ' '.join(res)
X = Solution()
s = "is2 sentence4 This1 a3"
print(X.sortSentence(s))
s = "Myself2 Me1 I4 and3"
print(X.sortSentence(s)) | [
"[email protected]"
] | |
ce03108274b37dc8809c8883264cd853956d525c | 17f918c06ca476f79d28d712abfa356b2dcfb6c7 | /koishi/plugins/automation_touhou_feed/events.py | c0284afe3effa51079eda45d6079ea30d3d6ee10 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | HuyaneMatsu/Koishi | eb87693ad34da2483efe2b6bdaa4f3fae417e491 | 74f92b598e86606ea3a269311316cddd84a5215f | refs/heads/master | 2023-08-23T22:54:37.006530 | 2023-08-23T20:26:49 | 2023-08-23T20:26:49 | 163,678,458 | 17 | 6 | NOASSERTION | 2023-06-14T14:18:27 | 2018-12-31T15:18:31 | Python | UTF-8 | Python | false | false | 1,167 | py | __all__ = ()
from ...bots import SLASH_CLIENT
from ..automation_core import get_touhou_feed_enabled
from .logic import (
reset_touhou_feeders, reset_channel, should_touhou_feed_in_channel, try_remove_channel, try_remove_guild,
try_update_channel, try_update_guild
)
@SLASH_CLIENT.events
async def channel_create(client, channel):
if get_touhou_feed_enabled(channel.guild_id):
if should_touhou_feed_in_channel(client, channel):
try_update_channel(channel)
@SLASH_CLIENT.events
async def channel_delete(client, channel):
try_remove_channel(channel)
@SLASH_CLIENT.events
async def channel_edit(client, channel, old_parameters):
if get_touhou_feed_enabled(channel.guild_id):
reset_channel(client, channel)
@SLASH_CLIENT.events
async def guild_create(client, guild):
if get_touhou_feed_enabled(guild.id):
try_update_guild(client, guild)
@SLASH_CLIENT.events
async def guild_delete(client, guild, guild_profile):
if get_touhou_feed_enabled(guild.id):
try_remove_guild(guild)
@SLASH_CLIENT.events
async def ready(client):
client.events.remove(ready)
reset_touhou_feeders(client)
| [
"[email protected]"
] | |
33c67722dbb06879c8fb968c5865e09c1513e5c0 | 21fc3622bb7a3a89a8ed9dec932919936fb1ce36 | /buildout-cache/eggs/plone.app.dexterity-2.1.20-py2.7.egg/plone/app/dexterity/browser/types.py | deb8a418a6eb1ee8ec737110deb355f2e9dceb54 | [] | no_license | erlantostes/plone | 4bc1ccba9e0ab77ce5370489f6b47b806c889c29 | 3a5fb7574cee269a99b148eef695256805ce1a45 | refs/heads/master | 2020-04-01T18:04:32.927641 | 2018-10-17T11:22:59 | 2018-10-17T11:22:59 | 153,469,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,233 | py | # -*- coding: utf-8 -*-
from OFS.SimpleItem import SimpleItem
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile \
import ViewPageTemplateFile as FiveViewPageTemplateFile
from ZPublisher.BaseRequest import DefaultPublishTraverse
from plone.app.dexterity import _
from plone.app.dexterity.browser.utils import UTF8Property
from plone.app.dexterity.interfaces import ITypeSchemaContext
from plone.app.dexterity.interfaces import ITypesContext
from plone.app.dexterity.interfaces import ITypeSettings
from plone.app.dexterity.interfaces import ITypeStats
from plone.dexterity.interfaces import IDexterityFTI
from plone.dexterity.utils import getAdditionalSchemata
from plone.schemaeditor.browser.schema.traversal import SchemaContext
from plone.z3cform import layout
from plone.z3cform.crud import crud
from plone.z3cform.layout import FormWrapper
from z3c.form import button
from z3c.form import field
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.component import adapter
from zope.component import ComponentLookupError
from zope.component import getAllUtilitiesRegisteredFor
from zope.component import getUtility
from zope.interface import implementer
from zope.publisher.interfaces.browser import IBrowserPublisher
import urllib
HELP = """<p>Content types show up on Plone's 'Add Item' menu and allow
you to store custom data in your site.</p>
<p>Click the "Add Content Type" button to begin creating
a new content type with its own fields.</p>"""
ALLOWED_FIELDS = [
u'plone.app.textfield.RichText',
u'plone.namedfile.field.NamedBlobImage',
u'plone.namedfile.field.NamedBlobFile',
u'plone.schema.email.Email',
u'z3c.relationfield.schema.RelationChoice',
u'z3c.relationfield.schema.RelationList',
u'zope.schema._bootstrapfields.Bool',
u'zope.schema._bootstrapfields.Int',
u'zope.schema._bootstrapfields.Password',
u'zope.schema._bootstrapfields.Text',
u'zope.schema._bootstrapfields.TextLine',
u'zope.schema._field.Choice',
u'zope.schema._field.Date',
u'zope.schema._field.Datetime',
u'zope.schema._field.Float',
u'zope.schema._field.Set',
u'zope.schema._field.URI',
]
class TypeEditSubForm(crud.EditSubForm):
""" Content type edit subform. Just here to use a custom template.
"""
template = ViewPageTemplateFile('types_listing_row.pt')
class TypeEditForm(crud.EditForm):
"""Content type edit form.
Just a normal CRUD form without the form title or edit button.
"""
label = None
editsubform_factory = TypeEditSubForm
buttons = crud.EditForm.buttons.copy().omit('edit')
handlers = crud.EditForm.handlers.copy()
@button.buttonAndHandler(_(u'Clone'))
def handleClone(self, action):
selected = self.selected_items()
if len(selected) > 1:
self.status = _(u'Please select a single type to clone.')
elif len(selected) == 1:
id = selected[0][0]
url = '%s/%s/@@clone' % (self.context.context.absolute_url(), id)
self.request.response.redirect(url)
else:
self.status = _(u'Please select a type to clone.')
@button.buttonAndHandler(_(u'Export Type Profiles'))
def handleExport(self, action):
selected = ",".join([items[0] for items in self.selected_items()])
if len(selected) == 0:
self.status = _(u'Please select types to export.')
elif len(selected) > 0:
url = '%s/@@types-export?selected=%s' % \
(self.context.context.absolute_url(),
urllib.quote(selected))
self.request.response.redirect(url)
@button.buttonAndHandler(_(u'Export Schema Models'))
def handleExportModels(self, action):
selected = ",".join([items[0] for items in self.selected_items()])
if len(selected) == 0:
self.status = _(u'Please select types to export.')
elif len(selected) > 0:
url = '%s/@@models-export?selected=%s' % \
(self.context.context.absolute_url(),
urllib.quote(selected))
self.request.response.redirect(url)
class TypesEditFormWrapper(FormWrapper):
""" Render Plone frame around our form with little modifications """
form = TypeEditForm
index = FiveViewPageTemplateFile("typesformwrapper.pt")
@adapter(IDexterityFTI)
@implementer(ITypeSettings)
class TypeSettingsAdapter(object):
def __init__(self, context):
self.context = context
@property
def id(self):
return self.context.getId()
title = UTF8Property('title')
description = UTF8Property('description')
@property
def container(self):
return self.context.container
def _get_allowed_content_types(self):
return set(self.context.allowed_content_types)
def _set_allowed_content_types(self, value):
if not value:
value = ()
self.context.allowed_content_types = tuple(value)
if value:
self.context.filter_content_types = True
allowed_content_types = property(
_get_allowed_content_types, _set_allowed_content_types)
def _get_filter_content_types(self):
value = self.context.filter_content_types
if not value:
return 'all'
elif value and not self.allowed_content_types:
return 'none'
else:
return 'some'
def _set_filter_content_types(self, value):
if value == 'none':
self.context.filter_content_types = True
self.context.allowed_content_types = ()
elif value == 'all':
self.context.filter_content_types = False
elif value == 'some':
self.context.filter_content_types = True
filter_content_types = property(
_get_filter_content_types, _set_filter_content_types)
@adapter(IDexterityFTI)
@implementer(ITypeStats)
class TypeStatsAdapter(object):
def __init__(self, context):
self.context = context
@property
def item_count(self):
catalog = getToolByName(self.context, 'portal_catalog')
lengths = dict(
catalog.Indexes['portal_type'].uniqueValues(withLengths=True))
return lengths.get(self.context.getId(), 0)
class TypesListing(crud.CrudForm):
""" The combined content type edit + add forms.
"""
@lazy_property
def description(self):
if self.get_items():
return _(u'The following custom content types are available for '
u'your site.')
else:
return _('help_addcontenttype_button',
default=HELP)
template = ViewPageTemplateFile('types_listing.pt')
view_schema = field.Fields(ITypeSettings).select('title', 'description')
view_schema += field.Fields(ITypeStats)
addform_factory = crud.NullForm
editform_factory = TypeEditForm
def get_items(self):
"""Look up all Dexterity FTIs via the component registry.
(These utilities are created via an IObjectCreated handler for the
DexterityFTI class, configured in plone.dexterity.)
"""
ftis = getAllUtilitiesRegisteredFor(IDexterityFTI)
return [(fti.__name__, fti) for fti in ftis]
def remove(self, (id, item)):
""" Remove a content type.
"""
ttool = getToolByName(self.context, 'portal_types')
ttool.manage_delObjects([id])
def link(self, item, field):
"""Generate links to the edit page for each type.
(But only for types with schemata that can be edited through the web.)
"""
if field == 'title':
return '{0}/{1}'.format(
self.context.absolute_url(),
urllib.quote(item.__name__)
)
# Create a form wrapper so the form gets layout.
TypesListingPage = layout.wrap_form(
TypesListing, __wrapper_class=TypesEditFormWrapper,
label=_(u'Dexterity Content Types'))
@implementer(ITypeSchemaContext)
class TypeSchemaContext(SchemaContext):
fti = None
schemaName = u''
schemaEditorView = 'fields'
allowedFields = ALLOWED_FIELDS
def browserDefault(self, request):
return self, ('@@overview',)
@property
def additionalSchemata(self):
return getAdditionalSchemata(portal_type=self.fti.getId())
# IBrowserPublisher tells the Zope 2 traverser to pay attention to the
# publishTraverse and browserDefault methods.
@implementer(ITypesContext, IBrowserPublisher)
class TypesContext(SimpleItem):
"""This class represents the types configlet.
It allows us to traverse through it to (a wrapper of) the schema
of a particular type.
"""
def __init__(self, context, request):
super(TypesContext, self).__init__(context, request)
# make sure that breadcrumbs will be correct
self.id = None
self.Title = lambda: _(u'Dexterity Content Types')
# turn off green edit border for anything in the type control panel
request.set('disable_border', 1)
def publishTraverse(self, request, name):
"""Traverse to a schema context.
1. Try to find a content type whose name matches the next URL path
element.
2. Look up its schema.
3. Return a schema context (an acquisition-aware wrapper of the
schema).
"""
try:
fti = getUtility(IDexterityFTI, name=name)
except ComponentLookupError:
return DefaultPublishTraverse(self, request).publishTraverse(
request,
name
)
schema = fti.lookupSchema()
schema_context = TypeSchemaContext(
schema, request, name=name, title=fti.title).__of__(self)
schema_context.fti = fti
schema_context.schemaName = u''
return schema_context
def browserDefault(self, request):
"""Show the 'edit' view by default.
If we aren't traversing to a schema beneath the types configlet,
we actually want to see the TypesListingPage.
"""
return self, ('@@edit',)
| [
"[email protected]"
] | |
55d1af3af949c3e159d60b095ce259600e812de8 | 156f5362e7381b96f3b2839f94de8778b005274d | /tests/bindings/test_bindings.py | 99e0ca3c3d74647d7e7e35d5cb0769064383656b | [
"MIT",
"CC-BY-3.0"
] | permissive | sairam4123/godot-python | 3f8bfcd989ae1b06ec6bf5e01462895b9f5f5fe0 | a95ed14f6e53ae4eb59e6bd03efb0db90b070bc6 | refs/heads/master | 2021-05-20T20:43:18.764693 | 2020-03-02T13:49:06 | 2020-03-02T13:49:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,157 | py | import pytest
import godot
from godot import Vector3, Object, Node, Node2D, PluginScript, OK
def test_free_node():
v = Node.new()
v.free()
# `check_memory_leak` auto fixture will do the bookkeeping
def test_expose_contains_constant():
assert "OK" in dir(godot)
assert OK is not None
def test_expose_contains_class():
assert "Node" in dir(godot)
assert Node is not None
def test_expose_contains_builtins():
assert "Vector3" in dir(godot)
assert Vector3 is not None
def test_call_one_arg_short(current_node):
with pytest.raises(TypeError) as exc:
current_node.get_child()
assert str(exc.value) == "get_child() takes exactly one argument (0 given)"
def test_call_too_few_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.move_child()
assert (
str(exc.value) == "move_child() takes exactly 2 positional arguments (0 given)"
)
def test_call_with_defaults_and_too_few_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.add_child()
assert (
str(exc.value) == "add_child() takes at least 1 positional argument (0 given)"
)
def test_call_none_in_base_type_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_child(self, godot_int idx)
current_node.get_child(None)
assert str(exc.value) == "an integer is required"
def test_call_none_in_builtin_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_node(self, NodePath path not None)
current_node.get_node(None)
assert str(exc.value) == "Invalid value None, must be str or NodePath"
def test_call_none_in_bindings_args(current_node):
with pytest.raises(TypeError) as exc:
# signature: def get_path_to(self, Node node not None)
current_node.get_path_to(None)
assert (
str(exc.value)
== "Argument 'node' has incorrect type (expected godot.bindings.Node, got NoneType)"
)
def test_call_too_many_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.get_child(1, 2)
assert str(exc.value) == "get_child() takes exactly one argument (2 given)"
def test_call_with_default_and_too_many_args(current_node):
with pytest.raises(TypeError) as exc:
current_node.add_child(1, 2, 3)
assert (
str(exc.value) == "add_child() takes at most 2 positional arguments (3 given)"
)
def test_call_with_defaults(generate_obj):
node = generate_obj(Node)
child = generate_obj(Node)
# signature: void add_child(Node node, bool legible_unique_name=false)
node.add_child(child)
# legible_unique_name is False by default, check name is not human-redable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["@@2"]
def test_call_with_kwargs(generate_obj):
node = generate_obj(Node)
child = generate_obj(Node)
new_child = generate_obj(Node)
node.add_child(child, legible_unique_name=True)
# Check name is readable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["Node"]
# Kwargs are passed out of order
node.add_child_below_node(legible_unique_name=True, child_node=new_child, node=node)
# Check names are still readable
children_names = [str(x.name) for x in node.get_children()]
assert children_names == ["Node", "Node2"]
def test_inheritance(generate_obj):
node = generate_obj(Node)
node2d = generate_obj(Node2D)
isinstance(node, Object)
isinstance(node2d, Object)
isinstance(node2d, Node)
def test_call_with_refcounted_return_value(current_node):
script = current_node.get_script()
assert isinstance(script, PluginScript)
def test_call_with_refcounted_param_value(generate_obj):
node = generate_obj(Node)
script = PluginScript.new()
node.set_script(script)
def test_create_refcounted_value(current_node):
script1_ref1 = PluginScript.new()
script2_ref1 = PluginScript.new()
script1_ref2 = script1_ref1
script2_ref2 = script2_ref1
del script1_ref1
| [
"[email protected]"
] | |
43d0b28b0b29cb0bb324df2d02c8001c4efe022f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_sting.py | bafade4da0ff73720d1509ad0c570e87d50fe446 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py |
#class header
class _STING():
def __init__(self,):
self.name = "STING"
self.definitions = [u'If an insect, plant, or animal stings, it produces a small but painful injury, usually with a poison, by brushing against the skin or making a very small hole in the skin: ', u'to cause sharp but usually temporary pain: ', u"If someone's unkind remarks sting, they make you feel upset and annoyed: ", u'to charge someone a surprisingly large amount of money for something: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
8ff0cf9f0326d522054d28a689a09a1bba5d292a | 1c8ea05ed65d76ab0e7bf8e642e0573e34d880ab | /BOJ/continue_number.py | fd2990487b7814cfe23effa2dc2d61bb8c9f9285 | [] | no_license | PARKJUHONG123/turbo-doodle | 1d259c88544d5e52ed375f119792363f5c1b4377 | 6b073281236af50949042c1a6b269752037cb829 | refs/heads/master | 2023-01-14T13:41:58.299164 | 2020-11-23T12:12:30 | 2020-11-23T12:12:30 | 259,669,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 336 | py | # 1~9 : 9
# 10~99 : 90
# 100~999 : 900
# 1000~9999 : 9000
import sys
def nine_num(size):
num = pow(10, size - 1)
return num * 9
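# Added worked note (illustration only): nine_num(d) counts the d-digit
# numbers, e.g. nine_num(2) == 90 for 10..99. So for N = 11 the total of
# written digits is 9 * 1 (for 1..9) + (11 - 10 + 1) * 2 (for 10 and 11) = 13,
# which is exactly what the loop and the final correction below compute.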
N = sys.stdin.readline().split()[0]
length = len(N)
answer = 0
for i in range(1, length):
answer += nine_num(i) * i
num = pow(10, length - 1)
answer += (int(N) - num + 1) * length
print(answer) | [
"[email protected]"
] | |
f5282b92dfb6af26a78d6b27d683a125d46c98c4 | 5196d8aeeded2cb8f3414d14b9a0050f69cb809e | /tools/train_nsfw.py | 99da01a0b02cff24860ef3151884daca10ce4c5d | [] | no_license | SethWen/nsfw-classification-tensorflow | 0a81ad422f762eedf188f4f8e7ec2161d6428276 | 6dfcb16fd655e66b9dd83237bbe89e84aa5322b9 | refs/heads/master | 2022-12-19T04:30:57.035744 | 2020-09-26T05:53:04 | 2020-09-26T05:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,848 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 19-2-15 8:53 PM
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/CRNN_Tensorflow
# @File : train_nsfw.py
# @IDE: PyCharm
"""
Train nsfw model script
"""
import argparse
import os
import os.path as ops
import time
import math
import numpy as np
import tensorflow as tf
import glog as log
from config import global_config
from data_provider import nsfw_data_feed_pipline
from nsfw_model import nsfw_classification_net
CFG = global_config.cfg
def init_args():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_dir', type=str, help='The dataset_dir')
parser.add_argument('--use_multi_gpu', type=bool, default=False, help='If use multiple gpu devices')
parser.add_argument('--weights_path', type=str, default=None, help='The pretrained weights path')
return parser.parse_args()
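# Example invocation (added for illustration; the dataset path and checkpoint
# below are placeholders, only the flags defined in init_args() are real):
#
#   python tools/train_nsfw.py --dataset_dir /path/to/nsfw_tfrecords
#   python tools/train_nsfw.py --dataset_dir /path/to/nsfw_tfrecords \
#       --use_multi_gpu True --weights_path model/nsfw_cls/nsfw_cls.ckpt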
def calculate_top_k_error(predictions, labels, k=1):
"""
Calculate the top-k error
:param predictions: 2D tensor with shape [batch_size, num_labels]
:param labels: 1D tensor with shape [batch_size, 1]
:param k: int
:return: tensor with shape [1]
"""
batch_size = CFG.TRAIN.BATCH_SIZE
in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=k))
num_correct = tf.reduce_sum(in_top1)
return (batch_size - num_correct) / float(batch_size)
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):
"""
Calculate gradients for single GPU
:param images: images for training
:param labels: labels corresponding to images
:param net: classification model
:param optimizer: network optimizer
:param is_net_first_initialized: if the network is initialized
:return:
"""
net_loss = net.compute_loss(input_tensor=images,
labels=labels,
name='nsfw_cls_model',
reuse=is_net_first_initialized)
net_logits = net.inference(input_tensor=images,
name='nsfw_cls_model',
reuse=True)
net_predictions = tf.nn.softmax(net_logits)
net_top1_error = calculate_top_k_error(net_predictions, labels, 1)
if optimizer is not None:
grads = optimizer.compute_gradients(net_loss)
else:
grads = None
return net_loss, net_top1_error, grads
def train_net(dataset_dir, weights_path=None):
"""
:param dataset_dir:
:param weights_path:
:return:
"""
# set nsfw data feed pipline
train_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
flags='train')
val_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
flags='val')
with tf.device('/gpu:1'):
# set nsfw net
nsfw_net = nsfw_classification_net.NSFWNet(phase=tf.constant('train', dtype=tf.string),
resnet_size=CFG.NET.RESNET_SIZE)
nsfw_net_val = nsfw_classification_net.NSFWNet(phase=tf.constant('test', dtype=tf.string),
resnet_size=CFG.NET.RESNET_SIZE)
# compute train loss
train_images, train_labels = train_dataset.inputs(batch_size=CFG.TRAIN.BATCH_SIZE,
num_epochs=1)
train_loss = nsfw_net.compute_loss(input_tensor=train_images,
labels=train_labels,
name='nsfw_cls_model',
reuse=False)
train_logits = nsfw_net.inference(input_tensor=train_images,
name='nsfw_cls_model',
reuse=True)
train_predictions = tf.nn.softmax(train_logits)
train_top1_error = calculate_top_k_error(train_predictions, train_labels, 1)
# compute val loss
val_images, val_labels = val_dataset.inputs(batch_size=CFG.TRAIN.VAL_BATCH_SIZE,
num_epochs=1)
# val_images = tf.reshape(val_images, example_tensor_shape)
val_loss = nsfw_net_val.compute_loss(input_tensor=val_images,
labels=val_labels,
name='nsfw_cls_model',
reuse=True)
val_logits = nsfw_net_val.inference(input_tensor=val_images,
name='nsfw_cls_model',
reuse=True)
val_predictions = tf.nn.softmax(val_logits)
val_top1_error = calculate_top_k_error(val_predictions, val_labels, 1)
# set tensorflow summary
tboard_save_path = 'tboard/nsfw_cls'
os.makedirs(tboard_save_path, exist_ok=True)
summary_writer = tf.summary.FileWriter(tboard_save_path)
train_loss_scalar = tf.summary.scalar(name='train_loss',
tensor=train_loss)
train_top1_err_scalar = tf.summary.scalar(name='train_top1_error',
tensor=train_top1_error)
val_loss_scalar = tf.summary.scalar(name='val_loss',
tensor=val_loss)
val_top1_err_scalar = tf.summary.scalar(name='val_top1_error',
tensor=val_top1_error)
train_merge_summary_op = tf.summary.merge([train_loss_scalar, train_top1_err_scalar])
val_merge_summary_op = tf.summary.merge([val_loss_scalar, val_top1_err_scalar])
# Set tf saver
saver = tf.train.Saver()
model_save_dir = 'model/nsfw_cls'
os.makedirs(model_save_dir, exist_ok=True)
train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
model_name = 'nsfw_cls_{:s}.ckpt'.format(str(train_start_time))
model_save_path = ops.join(model_save_dir, model_name)
# set optimizer
with tf.device('/gpu:1'):
# set learning rate
global_step = tf.Variable(0, trainable=False)
decay_steps = [CFG.TRAIN.LR_DECAY_STEPS_1, CFG.TRAIN.LR_DECAY_STEPS_2]
decay_values = []
init_lr = CFG.TRAIN.LEARNING_RATE
for step in range(len(decay_steps) + 1):
decay_values.append(init_lr)
init_lr = init_lr * CFG.TRAIN.LR_DECAY_RATE
learning_rate = tf.train.piecewise_constant(
x=global_step,
boundaries=decay_steps,
values=decay_values,
name='learning_rate'
)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=0.9).minimize(
loss=train_loss,
var_list=tf.trainable_variables(),
global_step=global_step)
# Set sess configuration
sess_config = tf.ConfigProto(allow_soft_placement=True)
sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
sess_config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=sess_config)
summary_writer.add_graph(sess.graph)
# Set the training parameters
train_epochs = CFG.TRAIN.EPOCHS
log.info('Global configuration is as follows:')
log.info(CFG)
with sess.as_default():
tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
name='{:s}/nsfw_cls_model.pb'.format(model_save_dir))
if weights_path is None:
log.info('Training from scratch')
init = tf.global_variables_initializer()
sess.run(init)
else:
log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
saver.restore(sess=sess, save_path=weights_path)
train_cost_time_mean = []
val_cost_time_mean = []
for epoch in range(train_epochs):
# training part
t_start = time.time()
_, train_loss_value, train_top1_err_value, train_summary, lr = \
sess.run(fetches=[optimizer,
train_loss,
train_top1_error,
train_merge_summary_op,
learning_rate])
if math.isnan(train_loss_value):
log.error('Train loss is nan')
return
cost_time = time.time() - t_start
train_cost_time_mean.append(cost_time)
summary_writer.add_summary(summary=train_summary,
global_step=epoch)
# validation part
t_start_val = time.time()
val_loss_value, val_top1_err_value, val_summary = \
sess.run(fetches=[val_loss,
val_top1_error,
val_merge_summary_op])
summary_writer.add_summary(val_summary, global_step=epoch)
cost_time_val = time.time() - t_start_val
val_cost_time_mean.append(cost_time_val)
if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
log.info('Epoch_Train: {:d} total_loss= {:6f} top1_error= {:6f} '
'lr= {:6f} mean_cost_time= {:5f}s '.
format(epoch + 1,
train_loss_value,
train_top1_err_value,
lr,
np.mean(train_cost_time_mean)))
train_cost_time_mean.clear()
if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
log.info('Epoch_Val: {:d} total_loss= {:6f} top1_error= {:6f}'
' mean_cost_time= {:5f}s '.
format(epoch + 1,
val_loss_value,
val_top1_err_value,
np.mean(val_cost_time_mean)))
val_cost_time_mean.clear()
if epoch % 2000 == 0:
saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
sess.close()
return
def train_net_multi_gpu(dataset_dir, weights_path=None):
"""
:param dataset_dir:
:param weights_path:
:return:
"""
# set nsfw data feed pipline
train_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
flags='train')
val_dataset = nsfw_data_feed_pipline.NsfwDataFeeder(dataset_dir=dataset_dir,
flags='val')
# set nsfw net
nsfw_net = nsfw_classification_net.NSFWNet(phase=tf.constant('train', dtype=tf.string),
resnet_size=CFG.NET.RESNET_SIZE)
nsfw_net_val = nsfw_classification_net.NSFWNet(phase=tf.constant('test', dtype=tf.string),
resnet_size=CFG.NET.RESNET_SIZE)
# fetch train and validation data
train_images, train_labels = train_dataset.inputs(
batch_size=CFG.TRAIN.BATCH_SIZE, num_epochs=1)
val_images, val_labels = val_dataset.inputs(
batch_size=CFG.TRAIN.BATCH_SIZE, num_epochs=1)
# set average container
tower_grads = []
train_tower_loss = []
train_tower_top1_error = []
val_tower_loss = []
val_tower_top1_error = []
batchnorm_updates = None
train_summary_op_updates = None
# set learning rate
global_step = tf.Variable(0, trainable=False)
decay_steps = [CFG.TRAIN.LR_DECAY_STEPS_1, CFG.TRAIN.LR_DECAY_STEPS_2]
decay_values = []
init_lr = CFG.TRAIN.LEARNING_RATE
for step in range(len(decay_steps) + 1):
decay_values.append(init_lr)
init_lr = init_lr * CFG.TRAIN.LR_DECAY_RATE
learning_rate = tf.train.piecewise_constant(
x=global_step,
boundaries=decay_steps,
values=decay_values,
name='learning_rate'
)
# set optimizer
optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
# set distributed train op
with tf.variable_scope(tf.get_variable_scope()):
is_network_initialized = False
for i in range(CFG.TRAIN.GPU_NUM):
with tf.device('/gpu:{:d}'.format(i)):
with tf.name_scope('tower_{:d}'.format(i)) as scope:
train_loss, train_top1_error, grads = compute_net_gradients(
train_images, train_labels, nsfw_net, optimizer,
is_net_first_initialized=is_network_initialized)
is_network_initialized = True
# Only use the mean and var in the first gpu tower to update the parameter
# TODO implement batch normalization for distributed device ([email protected])
if i == 0:
batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_summary_op_updates = tf.get_collection(tf.GraphKeys.SUMMARIES)
tower_grads.append(grads)
train_tower_loss.append(train_loss)
train_tower_top1_error.append(train_top1_error)
with tf.name_scope('validation_{:d}'.format(i)) as scope:
val_loss, val_top1_error, _ = compute_net_gradients(
val_images, val_labels, nsfw_net_val, optimizer,
is_net_first_initialized=is_network_initialized)
val_tower_loss.append(val_loss)
val_tower_top1_error.append(val_top1_error)
grads = average_gradients(tower_grads)
avg_train_loss = tf.reduce_mean(train_tower_loss)
avg_train_top1_error = tf.reduce_mean(train_tower_top1_error)
avg_val_loss = tf.reduce_mean(val_tower_loss)
avg_val_top1_error = tf.reduce_mean(val_tower_top1_error)
# Track the moving averages of all trainable variables
variable_averages = tf.train.ExponentialMovingAverage(
CFG.TRAIN.MOVING_AVERAGE_DECAY, num_updates=global_step)
variables_to_average = tf.trainable_variables() + tf.moving_average_variables()
variables_averages_op = variable_averages.apply(variables_to_average)
# Group all the op needed for training
batchnorm_updates_op = tf.group(*batchnorm_updates)
apply_gradient_op = optimizer.apply_gradients(grads, global_step=global_step)
train_op = tf.group(apply_gradient_op, variables_averages_op,
batchnorm_updates_op)
# set tensorflow summary
tboard_save_path = 'tboard/nsfw_cls'
os.makedirs(tboard_save_path, exist_ok=True)
summary_writer = tf.summary.FileWriter(tboard_save_path)
avg_train_loss_scalar = tf.summary.scalar(name='average_train_loss',
tensor=avg_train_loss)
avg_train_top1_err_scalar = tf.summary.scalar(name='average_train_top1_error',
tensor=avg_train_top1_error)
avg_val_loss_scalar = tf.summary.scalar(name='average_val_loss',
tensor=avg_val_loss)
avg_val_top1_err_scalar = tf.summary.scalar(name='average_val_top1_error',
tensor=avg_val_top1_error)
learning_rate_scalar = tf.summary.scalar(name='learning_rate_scalar',
tensor=learning_rate)
train_merge_summary_op = tf.summary.merge([avg_train_loss_scalar,
avg_train_top1_err_scalar,
learning_rate_scalar] + train_summary_op_updates)
val_merge_summary_op = tf.summary.merge([avg_val_loss_scalar, avg_val_top1_err_scalar])
# set tensorflow saver
saver = tf.train.Saver()
model_save_dir = 'model/nsfw_cls'
os.makedirs(model_save_dir, exist_ok=True)
train_start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
model_name = 'nsfw_cls_{:s}.ckpt'.format(str(train_start_time))
model_save_path = ops.join(model_save_dir, model_name)
# set sess config
sess_config = tf.ConfigProto(device_count={'GPU': CFG.TRAIN.GPU_NUM}, allow_soft_placement=True)
sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.TRAIN.GPU_MEMORY_FRACTION
sess_config.gpu_options.allow_growth = CFG.TRAIN.TF_ALLOW_GROWTH
sess_config.gpu_options.allocator_type = 'BFC'
# Set the training parameters
train_epochs = CFG.TRAIN.EPOCHS
log.info('Global configuration is as follows:')
log.info(CFG)
sess = tf.Session(config=sess_config)
summary_writer.add_graph(sess.graph)
with sess.as_default():
tf.train.write_graph(graph_or_graph_def=sess.graph, logdir='',
name='{:s}/nsfw_cls_model.pb'.format(model_save_dir))
if weights_path is None:
log.info('Training from scratch')
init = tf.global_variables_initializer()
sess.run(init)
else:
log.info('Restore model from last model checkpoint {:s}'.format(weights_path))
saver.restore(sess=sess, save_path=weights_path)
train_cost_time_mean = []
val_cost_time_mean = []
for epoch in range(train_epochs):
# training part
t_start = time.time()
_, train_loss_value, train_top1_err_value, train_summary, lr = \
sess.run(fetches=[train_op,
avg_train_loss,
avg_train_top1_error,
train_merge_summary_op,
learning_rate])
if math.isnan(train_loss_value):
log.error('Train loss is nan')
return
cost_time = time.time() - t_start
train_cost_time_mean.append(cost_time)
summary_writer.add_summary(summary=train_summary,
global_step=epoch)
# validation part
t_start_val = time.time()
val_loss_value, val_top1_err_value, val_summary = \
sess.run(fetches=[avg_val_loss,
avg_val_top1_error,
val_merge_summary_op])
summary_writer.add_summary(val_summary, global_step=epoch)
cost_time_val = time.time() - t_start_val
val_cost_time_mean.append(cost_time_val)
if epoch % CFG.TRAIN.DISPLAY_STEP == 0:
log.info('Epoch_Train: {:d} total_loss= {:6f} top1_error= {:6f} '
'lr= {:6f} mean_cost_time= {:5f}s '.
format(epoch + 1,
train_loss_value,
train_top1_err_value,
lr,
np.mean(train_cost_time_mean)))
train_cost_time_mean.clear()
if epoch % CFG.TRAIN.VAL_DISPLAY_STEP == 0:
log.info('Epoch_Val: {:d} total_loss= {:6f} top1_error= {:6f}'
' mean_cost_time= {:5f}s '.
format(epoch + 1,
val_loss_value,
val_top1_err_value,
np.mean(val_cost_time_mean)))
val_cost_time_mean.clear()
if epoch % 2000 == 0:
saver.save(sess=sess, save_path=model_save_path, global_step=epoch)
sess.close()
return
if __name__ == '__main__':
# init args
args = init_args()
if CFG.TRAIN.GPU_NUM < 2:
args.use_multi_gpu = False
# train lanenet
if not args.use_multi_gpu:
train_net(args.dataset_dir, args.weights_path)
else:
train_net_multi_gpu(args.dataset_dir, args.weights_path)
| [
"[email protected]"
] | |
c016beb5d996c1ca1390e35753f3e429fdebd5a6 | 4ec6ed4ebcb9346042669e6aa03be0e502ed48b3 | /leetcode/convert-sorted-array-to-binary-search-tree.py | 84e69232dd85203daae4a1d75c1f376e113add3f | [] | no_license | shonihei/road-to-mastery | 79ed41cb1ad0dc2d0b454db2ccc7dd9567b03801 | 312bdf5101c3c1fc9a4d0b6762b5749ca57efe08 | refs/heads/master | 2021-01-22T19:59:17.038641 | 2017-11-16T15:21:55 | 2017-11-16T15:21:55 | 85,266,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | """
Given an array where elements are sorted in ascending order, convert it to a height balanced BST.
"""
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def sortedArrayToBST(nums):
if not nums:
return None
mid = len(nums) // 2
root = TreeNode(nums[mid])
root.left = sortedArrayToBST(nums[:mid])
root.right = sortedArrayToBST(nums[mid+1:])
return root | [
"[email protected]"
] | |
146ac52c155f6a21ab8f406bde451d1ce53f6925 | 0d464df42f5cc3c9a3b992ae9ff6160e5da5701d | /CHAPTER 12 (sorting and selection)/decorated_merge_sort.py | 1ac0479db866b540143a43557da8834d51e73996 | [
"MIT"
] | permissive | ahammadshawki8/DSA-Implementations-in-Python | 6b61d44e638bfb7f6cf3a8b1fc57d15777313420 | fc18b54128cd5bc7639a14999d8f990190b524eb | refs/heads/master | 2022-12-26T03:54:16.229935 | 2020-10-07T05:17:55 | 2020-10-07T05:17:55 | 267,899,551 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from linked_queue_class import *
from merge_sort_linked import *
def decorated_merge_sort(data,key=None):
"""Demonstration of the decorate-sort-undercorate pattern."""
if key is not None:
for j in range(len(data)):
data[j] = _Item(key(data[j]), data[j]) # decorate each element
merge_sort(data) # sort with existing algorithm
if key is not None:
for j in range(len(data)):
data[j] = data[j]._value # undercoat each element | [
"[email protected]"
] | |
ad1b85e24bddffba1588102c18d19e9a7f5c4a35 | ca75f7099b93d8083d5b2e9c6db2e8821e63f83b | /z2/part2/batch/jm/parser_errors_2/925229167.py | e0ca9ce450c7ef5983d536f9dd4a53f2584448db | [
"MIT"
] | permissive | kozakusek/ipp-2020-testy | 210ed201eaea3c86933266bd57ee284c9fbc1b96 | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | refs/heads/master | 2022-10-04T18:55:37.875713 | 2020-06-09T21:15:37 | 2020-06-09T21:15:37 | 262,290,632 | 0 | 0 | MIT | 2020-06-09T21:15:38 | 2020-05-08T10:10:47 | C | UTF-8 | Python | false | false | 1,170 | py | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 925229167
"""
"""
random actions, total chaos
"""
board = gamma_new(2, 3, 3, 3)
assert board is not None
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_free_fields(board, 3) == 3
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_move(board, 3, 1, 2) == 1
assert gamma_golden_move(board, 3, 1, 1) == 1
gamma_delete(board)
| [
"[email protected]"
] | |
dff782445f083c8852c95a14f37f05b290a8043b | ba6921a268198bc0af433622c021533905f5d462 | /scripts/in_container/run_migration_reference.py | 43692b2c458d2e0a50e097a818144f39bdf31553 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | potiuk/airflow | b6447765b1a7b586a3d6c8d7ba9262f6bf68fbfd | ca2f3013bcb123c4b3973a5b85de77094bf2c459 | refs/heads/main | 2023-08-30T13:05:50.698888 | 2023-05-21T21:08:14 | 2023-05-21T21:26:14 | 173,467,275 | 8 | 7 | Apache-2.0 | 2023-05-21T21:58:40 | 2019-03-02T15:50:53 | Python | UTF-8 | Python | false | false | 6,272 | py | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Module to update db migration information in Airflow
"""
from __future__ import annotations
import os
import re
from pathlib import Path
from textwrap import wrap
from typing import TYPE_CHECKING, Iterable
from alembic.script import ScriptDirectory
from tabulate import tabulate
from airflow import __version__ as airflow_version
from airflow.utils.db import _get_alembic_config
if TYPE_CHECKING:
from alembic.script import Script
airflow_version = re.match(r"(\d+\.\d+\.\d+).*", airflow_version).group(1) # type: ignore
project_root = Path(__file__).parents[2].resolve()
def replace_text_between(file: Path, start: str, end: str, replacement_text: str):
original_text = file.read_text()
leading_text = original_text.split(start)[0]
trailing_text = original_text.split(end)[1]
file.write_text(leading_text + start + replacement_text + end + trailing_text)
def wrap_backticks(val):
def _wrap_backticks(x):
return f"``{x}``"
return ",\n".join(map(_wrap_backticks, val)) if isinstance(val, (tuple, list)) else _wrap_backticks(val)
def update_doc(file, data):
replace_text_between(
file=file,
start=" .. Beginning of auto-generated table\n",
end=" .. End of auto-generated table\n",
replacement_text="\n"
+ tabulate(
headers={
"revision": "Revision ID",
"down_revision": "Revises ID",
"version": "Airflow Version",
"description": "Description",
},
tabular_data=data,
tablefmt="grid",
stralign="left",
disable_numparse=True,
)
+ "\n\n",
)
def has_version(content):
return re.search(r"^airflow_version\s*=.*", content, flags=re.MULTILINE) is not None
def insert_version(old_content, file):
new_content = re.sub(
r"(^depends_on.*)",
lambda x: f"{x.group(1)}\nairflow_version = '{airflow_version}'",
old_content,
flags=re.MULTILINE,
)
file.write_text(new_content)
def revision_suffix(rev: Script):
if rev.is_head:
return " (head)"
if rev.is_base:
return " (base)"
if rev.is_merge_point:
return " (merge_point)"
if rev.is_branch_point:
return " (branch_point)"
return ""
def ensure_airflow_version(revisions: Iterable[Script]):
for rev in revisions:
assert rev.module.__file__ is not None # For Mypy.
file = Path(rev.module.__file__)
content = file.read_text()
if not has_version(content):
insert_version(content, file)
def get_revisions() -> Iterable[Script]:
config = _get_alembic_config()
script = ScriptDirectory.from_config(config)
yield from script.walk_revisions()
def update_docs(revisions: Iterable[Script]):
doc_data = []
for rev in revisions:
doc_data.append(
dict(
revision=wrap_backticks(rev.revision) + revision_suffix(rev),
down_revision=wrap_backticks(rev.down_revision),
version=wrap_backticks(rev.module.airflow_version), # type: ignore
description="\n".join(wrap(rev.doc, width=60)),
)
)
update_doc(
file=project_root / "docs" / "apache-airflow" / "migrations-ref.rst",
data=doc_data,
)
def num_to_prefix(idx: int) -> str:
return f"000{idx+1}"[-4:] + "_"
def ensure_mod_prefix(mod_name, idx, version):
prefix = num_to_prefix(idx) + "_".join(version) + "_"
match = re.match(r"([0-9]+)_([0-9]+)_([0-9]+)_([0-9]+)_(.+)", mod_name)
if match:
# previously standardized file, rebuild the name
mod_name = match.group(5)
else:
# new migration file, standard format
match = re.match(r"([a-z0-9]+)_(.+)", mod_name)
if match:
mod_name = match.group(2)
return prefix + mod_name
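# Worked example (hypothetical filenames): with idx=4 and version=["2", "3", "0"],
# "98ae134e6fff_add_dag_table.py" becomes "0005_2_3_0_add_dag_table.py", while an already
# standardized "0005_2_2_5_add_dag_table.py" is first stripped back to "add_dag_table.py"
# and then re-prefixed the same way.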
def ensure_filenames_are_sorted(revisions):
renames = []
is_branched = False
unmerged_heads = []
for idx, rev in enumerate(revisions):
mod_path = Path(rev.module.__file__)
version = rev.module.airflow_version.split(".")[0:3] # only first 3 tokens
correct_mod_basename = ensure_mod_prefix(mod_path.name, idx, version)
if mod_path.name != correct_mod_basename:
renames.append((mod_path, Path(mod_path.parent, correct_mod_basename)))
if is_branched and rev.is_merge_point:
is_branched = False
if rev.is_branch_point:
is_branched = True
elif rev.is_head:
unmerged_heads.append(rev.revision)
if is_branched:
head_prefixes = [x[0:4] for x in unmerged_heads]
alembic_command = (
"alembic merge -m 'merge heads " + ", ".join(head_prefixes) + "' " + " ".join(unmerged_heads)
)
raise SystemExit(
"You have multiple alembic heads; please merge them with the `alembic merge` command "
f"and re-run pre-commit. It should fail once more before succeeding. "
f"\nhint: `{alembic_command}`"
)
for old, new in renames:
os.rename(old, new)
if __name__ == "__main__":
revisions = list(reversed(list(get_revisions())))
ensure_airflow_version(revisions=revisions)
revisions = list(reversed(list(get_revisions())))
ensure_filenames_are_sorted(revisions)
revisions = list(get_revisions())
update_docs(revisions)
| [
"[email protected]"
] | |
40a42af680a63e3a17bb18fe661dc09bb9d98b56 | 6ef8abce322da7a6acf8b940801d7c2286b55f42 | /Programmers/compressString.py | fccb357227a2eb76df7a508063b6c1f0361d23a2 | [] | no_license | 702criticcal/1Day1Commit | 747a61308e2fae87bad6369cd0bc481bdc89b29a | aec375b8b41de1ed5366c714cc6a204905fb2763 | refs/heads/master | 2023-01-31T16:47:24.457584 | 2020-12-18T03:42:28 | 2020-12-18T03:42:28 | 287,663,502 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | def solution(s):
if len(s) == 1:
return 1
answer = len(s)
    # Chunks longer than half the string can never repeat, so only cut sizes up to len(s) // 2 are tried.
for cut in range(1, len(s) // 2 + 1):
result = ''
cnt = 1
temp_str = s[:cut]
        # Walk the string in chunks of size `cut`, comparing each chunk with the previous one.
for i in range(cut, len(s) + cut, cut):
            # If this chunk matches the previous one, increase the repeat count.
if s[i:i + cut] == temp_str:
cnt += 1
            # Otherwise flush the previous chunk: the chunk alone if it appeared once, or the count followed by the chunk.
else:
if cnt == 1:
result += temp_str
else:
result += str(cnt) + temp_str
                # Move the comparison window forward to this chunk.
temp_str = s[i:i + cut]
                # Reset the repeat count.
cnt = 1
        # After trying this chunk size, keep the smallest compressed length found so far.
answer = min(answer, len(result))
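    # For example, solution("aabbaccc") returns 7 ("2a2ba3c") and
    # solution("ababcdcdababcdcd") returns 9 ("2ababcdcd", best chunk size 8).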
return answer | [
"[email protected]"
] | |
b86e21edd60be743de7e055ffd942d0674c17b3d | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/GstGL/GLRenderbufferAllocationParams.py | 23d2ebdb5969b2516f3587d02945af8e1fa95f4d | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 6,772 | py | # encoding: utf-8
# module gi.repository.GstGL
# from /usr/lib64/girepository-1.0/GstGL-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.repository.Gst as __gi_repository_Gst
import gi.repository.GstBase as __gi_repository_GstBase
import gobject as __gobject
class GLRenderbufferAllocationParams(__gi.Boxed):
"""
:Constructors:
::
GLRenderbufferAllocationParams()
new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams
new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams
"""
def copy(self, *args, **kwargs): # real signature unknown
pass
    def new(self, context, alloc_params=None, renderbuffer_format=None, width=None, height=None): # real signature unknown; restored from __doc__ (renderbuffer_format, width and height are required in practice)
""" new(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int) -> GstGL.GLRenderbufferAllocationParams """
pass
    def new_wrapped(self, context, alloc_params=None, renderbuffer_format=None, width=None, height=None, gl_handle=None, user_data=None, notify=None): # real signature unknown; restored from __doc__ (renderbuffer_format, width and height are required in practice)
""" new_wrapped(context:GstGL.GLContext, alloc_params:Gst.AllocationParams=None, renderbuffer_format:GstGL.GLFormat, width:int, height:int, gl_handle=None, user_data=None, notify:GLib.DestroyNotify=None) -> GstGL.GLRenderbufferAllocationParams """
pass
def _clear_boxed(self, *args, **kwargs): # real signature unknown
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
height = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
parent = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
renderbuffer_format = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
width = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
_padding = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(GLRenderbufferAllocationParams), '__module__': 'gi.repository.GstGL', '__gtype__': <GType GstGLRenderbufferAllocationParams (93979012457472)>, '__dict__': <attribute '__dict__' of 'GLRenderbufferAllocationParams' objects>, '__weakref__': <attribute '__weakref__' of 'GLRenderbufferAllocationParams' objects>, '__doc__': None, 'parent': <property object at 0x7f56a3a28310>, 'renderbuffer_format': <property object at 0x7f56a3a28450>, 'width': <property object at 0x7f56a3a284f0>, 'height': <property object at 0x7f56a3a285e0>, '_padding': <property object at 0x7f56a3a286d0>, 'new': gi.FunctionInfo(new), 'new_wrapped': gi.FunctionInfo(new_wrapped)})"
__gtype__ = None # (!) real value is '<GType GstGLRenderbufferAllocationParams (93979012457472)>'
__info__ = StructInfo(GLRenderbufferAllocationParams)
| [
"[email protected]"
] | |
54e634a91b6785558588740c3c225ac1aa4c252c | bdf3c98e33c6bc9c8e0332043cc5a3a3f5e8ea42 | /src/mantis_model/catkin_generated/pkg.develspace.context.pc.py | c4db2075151b9a2c10d3a4400eb7142cca173946 | [] | no_license | jstestsuite/ros-test | c33e244794287e2665cace847a9faf1b596ddf92 | b0bb5b06c1c18b804c0f0a4e2deb4d88be894d1f | refs/heads/master | 2020-04-11T16:54:41.401198 | 2019-02-13T00:44:14 | 2019-02-13T00:44:14 | 161,938,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mantis_model"
PROJECT_SPACE_DIR = "/home/jman/ros/src/ugv_course/mantis_model/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
11673e6ceffc1b11d4fa20923c84c7444b026e7c | f559b4d607cfdd3f192daed155ed8b0d263c71b2 | /env/bin/easy_install | 8340023df64d2030d9cc8e363b11ec8e070ff138 | [] | no_license | chris-baby/WbOnline | 6270015e9a7897b413a3fe97e2aca8a33f744995 | 91425f677d2e7c2a0ac9aeb8c1ee47d75f9b9321 | refs/heads/master | 2022-07-14T23:57:56.162760 | 2020-05-11T13:56:22 | 2020-05-11T13:56:22 | 263,056,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | #!/Users/tongtong/Desktop/root/bwOnline/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
4f8956babeaeca36bcee259d46ecb8ec16dbe067 | dc084a369e0f767bc5296739b24813470869522f | /main.py | ad9e07dfe136e778903fe0b117e3877fc9bb1631 | [] | no_license | JakeRoggenbuck/player_data_finder | 4a539ac7963f1f5025eda89c96c75e76a8268574 | 0ba87a5511810ac10d3f40049b21541b9a8be1bb | refs/heads/master | 2022-12-02T06:59:34.054775 | 2020-08-19T05:56:55 | 2020-08-19T05:56:55 | 288,645,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,609 | py | from position_reader import PositionReader
from config import Config
from ftplib import FTP
import inquirer
import json
import os
class FTPConnection:
def __init__(self):
self.server = FTP()
def connect(self):
self.server.connect(Config.host, Config.port)
def login(self):
self.server.login(Config.username, Config.password)
def start(self):
self.connect()
self.login()
class UsernameCache:
def __init__(self):
self.ftp = FTPConnection()
self.ftp.start()
self.path = "/"
self.name = "usernamecache.json"
self.data = []
def handle_binary(self, more_data):
self.data.append(more_data.decode('utf-8'))
def get_usernames(self):
self.ftp.server.cwd(self.path)
self.ftp.server.retrbinary(f"RETR {self.name}",
callback=self.handle_binary)
def return_file(self):
return "".join(self.data)
def get_json(self):
return json.loads(self.return_file())
class Username:
def __init__(self):
self.usernamecache = UsernameCache()
self.usernamecache.get_usernames()
self.json = self.usernamecache.get_json()
self.usernames = []
self.new_json = self.get_new_json()
def get_new_json(self):
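        # The cache file maps UUID -> username; invert it so a player can be looked up by name (see get_uuid below).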
return dict((y, x) for x, y in self.json.items())
class AskUsername:
def __init__(self):
self.username = Username()
self.json = self.username.new_json
def get_username(self):
usernames = self.json.keys()
questions = [inquirer.Checkbox(
"Username",
message="Select username",
choices=usernames
)]
answers = inquirer.prompt(questions)
return answers
def get_uuid(self):
username = self.get_username()["Username"][0]
return self.json[username]
class GetDataFile:
def __init__(self):
self.ftp = FTPConnection()
self.ftp.start()
self.path = "/RAT STANDO/playerdata/"
self.username = AskUsername()
self.uuid = self.username.get_uuid()
self.filename = f"{self.uuid}.dat"
self.full_path = os.path.join("data/", self.filename)
def save_file(self):
self.ftp.server.cwd(self.path)
self.ftp.server.retrbinary(f"RETR {self.filename}",
open(self.full_path, 'wb').write)
if __name__ == "__main__":
datafile = GetDataFile()
datafile.save_file()
path = datafile.full_path
pr = PositionReader(path)
pos = pr.get_pos()
print(pos)
| [
"[email protected]"
] | |
d80b410b5348e0c0627858a462910f433012546d | 53be3b2a18d4df2e675525a610f4e7dab8c0de6f | /myems-api/core/version.py | 0b884d96063823d791417aec31bff4eac4345132 | [
"MIT"
] | permissive | yxw027/myems | 16dc82fa26328e00adbba4e09301c4cf363ad28d | ea58d50c436feafb1a51627aa4d84caf8f3aee08 | refs/heads/master | 2023-09-03T17:57:18.728606 | 2021-11-04T14:13:42 | 2021-11-04T14:13:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | import falcon
import simplejson as json
class VersionItem:
@staticmethod
def __init__():
""""Initializes VersionItem"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
result = {"version": 'MyEMS v1.3.3',
"release-date": '2021-10-30',
"website": "https://myems.io"}
resp.body = json.dumps(result)
| [
"[email protected]"
] | |
0e541e32d749a1302541ce19ccdf2b6c8d442a16 | bdc4ae3d691fcb50405235d963012d84ea8b8a06 | /src/b2r2b_youbot/message.py | d8d3c2a392182dba8f26573558c936ec091c3489 | [] | no_license | AndreaCensi/rosstream2boot | 53552a256979f072c7bf4abb9bc01eed3c960e97 | 7cce8cf270b67e8c9e5abe6cdfed9d5969a82c00 | refs/heads/master | 2021-01-01T19:11:04.669798 | 2013-07-13T14:11:28 | 2013-07-13T14:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,658 | py | from b2r2b_youbot import get_JointVelocities, get_JointValue, get_JointPositions
__all__ = ['get_joint_position_msg', 'get_joint_velocity_msg']
def get_joint_velocity_msg(array, timeStamp=None):
'''
:param array: float array with values to the joints
'''
JointVelocities = get_JointVelocities()
JointValue = get_JointValue()
num_joints = len(array)
msg = JointVelocities()
msg.poisonStamp.description = 'Joint velocities generated by b2r2b.'
for i in range(num_joints):
joint_value = JointValue()
joint_value.joint_uri = 'arm_joint_' + str(i + 1)
if timeStamp is not None:
joint_value.timeStamp = timeStamp
joint_value.unit = 's^-1 rad'
joint_value.value = array[i]
msg.velocities.append(joint_value)
assert len(msg.velocities) == num_joints
return msg
def get_joint_position_msg(array, timeStamp=None):
'''
:param array: float array with values to the joints
'''
JointPositions = get_JointPositions()
JointValue = get_JointValue()
num_joints = len(array)
msg = JointPositions()
    msg.poisonStamp.description = 'Joint positions generated by b2r2b'
for i in range(num_joints):
joint_value = JointValue()
joint_value.joint_uri = 'arm_joint_' + str(i + 1)
if timeStamp is not None:
joint_value.timeStamp = timeStamp
joint_value.unit = 'rad'
joint_value.value = array[i]
msg.positions.append(joint_value)
assert len(msg.positions) == num_joints
return msg
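# Hypothetical usage sketch for a 5-joint youBot arm (values are placeholders):
#   vel_msg = get_joint_velocity_msg([0.1, 0.0, -0.2, 0.05, 0.0])
#   pos_msg = get_joint_position_msg([0.0, 0.5, -1.0, 0.3, 1.2])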
| [
"[email protected]"
] | |
5fa21fadd207f6922a41ad01fad7d3295d852e5d | de358ba57518d65393c810da20c53e1c41494bff | /ALGOPYTHON/array2.py | 616580064c661e80fb9915007e10e09ad9a0cb0f | [] | no_license | avirupdandapat/ALGOPROJECT | 43eef94b13e38452cdc6a506b17b6fee581a07e1 | 55b60a0c6e51cae900e243505f6a4557ad4d7069 | refs/heads/master | 2022-12-29T13:02:54.655976 | 2020-10-18T12:23:57 | 2020-10-18T12:23:57 | 305,095,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | import subprocess
subprocess.run('dir', shell=True)
if __name__ == '__main__':
t = int(input())
for t_itr in range(t):
n = int(input())
q = list(map(int, input().rstrip().split()))
# arr = minimumBribes(a)
print(q)
| [
"[email protected]"
] | |
eadaff38311d9b7fd22fb7f22162f2934b0a0a94 | 87bb2b9258c887e8fbcaca08d18e5d95ae96462d | /Codewars/Python/7kyu/7kyu_Largest pair in array.py | aec94dec63fcfa2b5e935b6ac65a4134e7f49ecf | [] | no_license | KonradMarzec1991/Codewars-LeetCode | a9e4d09f4271fecb3a7fc1ee436358ac1bbec5e4 | 442113532158f5a3ee7051a42e911afa5373bb5f | refs/heads/master | 2023-04-21T17:04:37.434876 | 2021-05-11T21:47:14 | 2021-05-11T21:47:14 | 166,555,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | def largest_pair_sum(numbers):
from heapq import nlargest
return sum(nlargest(2, numbers))
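# Example: largest_pair_sum([10, 14, 2, 23, 19]) == 42 (23 + 19).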
| [
"[email protected]"
] | |
0b7b8cc2114aca9d05671b6b132a1a909f63ca55 | f97a267b066f64177e382346e36cc06c25a3a6b1 | /src/quart/typing.py | a1be3a52dbb6ec3db1d23f2cf3688f19f97a56fe | [
"MIT"
] | permissive | p-unity-lineage/quart | a54ec9a1e6f61159c5c2688e24a2b54462bcd231 | 14efcd92f37bb4ef78d463d6d145f71c61665470 | refs/heads/master | 2023-03-31T23:56:56.881288 | 2021-04-11T09:28:28 | 2021-04-11T09:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,067 | py | from __future__ import annotations
import os
from datetime import datetime, timedelta
from types import TracebackType
from typing import (
Any,
AnyStr,
AsyncContextManager,
AsyncGenerator,
Awaitable,
Callable,
Dict,
Generator,
List,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
from hypercorn.typing import (
ASGIReceiveCallable,
ASGISendCallable,
HTTPScope,
LifespanScope,
WebsocketScope,
)
try:
from typing import Protocol
except ImportError:
from typing_extensions import Protocol # type: ignore
if TYPE_CHECKING:
from werkzeug.datastructures import Headers # noqa: F401
from werkzeug.wrappers import Response as WerkzeugResponse
from .app import Quart
from .sessions import Session
from .wrappers.response import Response # noqa: F401
FilePath = Union[bytes, str, os.PathLike]
# The possible types that are directly convertible or are a Response object.
ResponseValue = Union[
"Response",
"WerkzeugResponse",
AnyStr,
Dict[str, Any], # any jsonify-able dict
AsyncGenerator[bytes, None],
Generator[bytes, None, None],
]
StatusCode = int
# the possible types for an individual HTTP header
HeaderName = str
HeaderValue = Union[str, List[str], Tuple[str, ...]]
# the possible types for HTTP headers
HeadersValue = Union["Headers", Dict[HeaderName, HeaderValue], List[Tuple[HeaderName, HeaderValue]]]
# The possible types returned by a route function.
ResponseReturnValue = Union[
ResponseValue,
Tuple[ResponseValue, HeadersValue],
Tuple[ResponseValue, StatusCode],
Tuple[ResponseValue, StatusCode, HeadersValue],
]
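# For illustration (not part of the public API): a view returning ("created", 201, {"X-Id": "1"})
# satisfies ResponseReturnValue, as does a plain jsonify-able dict on its own.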
AppOrBlueprintKey = Optional[str] # The App key is None, whereas blueprints are named
AfterRequestCallable = Callable[["Response"], Awaitable["Response"]]
AfterWebsocketCallable = Callable[["Response"], Awaitable[Optional["Response"]]]
BeforeRequestCallable = Callable[[], Awaitable[None]]
BeforeWebsocketCallable = Callable[[], Awaitable[None]]
ErrorHandlerCallable = Callable[[Exception], Awaitable[None]]
TeardownCallable = Callable[[Optional[BaseException]], Awaitable["Response"]]
TemplateContextProcessorCallable = Callable[[], Awaitable[Dict[str, Any]]]
URLDefaultCallable = Callable[[str, dict], None]
URLValuePreprocessorCallable = Callable[[str, dict], None]
class ASGIHTTPProtocol(Protocol):
def __init__(self, app: Quart, scope: HTTPScope) -> None:
...
async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
...
class ASGILifespanProtocol(Protocol):
def __init__(self, app: Quart, scope: LifespanScope) -> None:
...
async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
...
class ASGIWebsocketProtocol(Protocol):
def __init__(self, app: Quart, scope: WebsocketScope) -> None:
...
async def __call__(self, receive: ASGIReceiveCallable, send: ASGISendCallable) -> None:
...
class TestHTTPConnectionProtocol(Protocol):
push_promises: List[Tuple[str, Headers]]
def __init__(self, app: Quart, scope: HTTPScope, _preserve_context: bool = False) -> None:
...
async def send(self, data: bytes) -> None:
...
async def send_complete(self) -> None:
...
async def receive(self) -> bytes:
...
async def disconnect(self) -> None:
...
async def __aenter__(self) -> TestHTTPConnectionProtocol:
...
async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
...
async def as_response(self) -> Response:
...
class TestWebsocketConnectionProtocol(Protocol):
def __init__(self, app: Quart, scope: WebsocketScope) -> None:
...
async def __aenter__(self) -> TestWebsocketConnectionProtocol:
...
async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
...
async def receive(self) -> AnyStr:
...
async def send(self, data: AnyStr) -> None:
...
async def receive_json(self) -> Any:
...
async def send_json(self, data: Any) -> None:
...
async def close(self, code: int) -> None:
...
async def disconnect(self) -> None:
...
class TestClientProtocol(Protocol):
http_connection_class: Type[TestHTTPConnectionProtocol]
push_promises: List[Tuple[str, Headers]]
websocket_connection_class: Type[TestWebsocketConnectionProtocol]
def __init__(self, app: Quart, use_cookies: bool = True) -> None:
...
async def open(
self,
path: str,
*,
method: str = "GET",
headers: Optional[Union[dict, Headers]] = None,
data: Optional[AnyStr] = None,
form: Optional[dict] = None,
query_string: Optional[dict] = None,
json: Any = None,
scheme: str = "http",
follow_redirects: bool = False,
root_path: str = "",
http_version: str = "1.1",
) -> Response:
...
def request(
self,
path: str,
*,
method: str = "GET",
headers: Optional[Union[dict, Headers]] = None,
query_string: Optional[dict] = None,
scheme: str = "http",
root_path: str = "",
http_version: str = "1.1",
) -> TestHTTPConnectionProtocol:
...
def websocket(
self,
path: str,
*,
headers: Optional[Union[dict, Headers]] = None,
query_string: Optional[dict] = None,
scheme: str = "ws",
subprotocols: Optional[List[str]] = None,
root_path: str = "",
http_version: str = "1.1",
) -> TestWebsocketConnectionProtocol:
...
async def delete(self, *args: Any, **kwargs: Any) -> Response:
...
async def get(self, *args: Any, **kwargs: Any) -> Response:
...
async def head(self, *args: Any, **kwargs: Any) -> Response:
...
async def options(self, *args: Any, **kwargs: Any) -> Response:
...
async def patch(self, *args: Any, **kwargs: Any) -> Response:
...
async def post(self, *args: Any, **kwargs: Any) -> Response:
...
async def put(self, *args: Any, **kwargs: Any) -> Response:
...
async def trace(self, *args: Any, **kwargs: Any) -> Response:
...
def set_cookie(
self,
server_name: str,
key: str,
value: str = "",
max_age: Optional[Union[int, timedelta]] = None,
expires: Optional[Union[int, float, datetime]] = None,
path: str = "/",
domain: Optional[str] = None,
secure: bool = False,
httponly: bool = False,
samesite: str = None,
charset: str = "utf-8",
) -> None:
...
def delete_cookie(
self, server_name: str, key: str, path: str = "/", domain: Optional[str] = None
) -> None:
...
def session_transaction(
self,
path: str = "/",
*,
method: str = "GET",
headers: Optional[Union[dict, Headers]] = None,
query_string: Optional[dict] = None,
scheme: str = "http",
data: Optional[AnyStr] = None,
form: Optional[dict] = None,
json: Any = None,
root_path: str = "",
http_version: str = "1.1",
) -> AsyncContextManager[Session]:
...
async def __aenter__(self) -> TestClientProtocol:
...
async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
...
class TestAppProtocol(Protocol):
def __init__(self, app: Quart) -> None:
...
def test_client(self) -> TestClientProtocol:
...
async def startup(self) -> None:
...
async def shutdown(self) -> None:
...
async def __aenter__(self) -> TestAppProtocol:
...
async def __aexit__(self, exc_type: type, exc_value: BaseException, tb: TracebackType) -> None:
...
| [
"[email protected]"
] | |
e2d70ea6765a4d2275d7fe4d41337de6d45d5449 | 8f6aa9ac9c8c2e409875bbf36fbc49b3eb37d88b | /enthought/traits/ui/qt4/key_binding_editor.py | 12c440438d2c61dd566c494f787af54b63d61666 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/etsproxy | 5660cf562c810db2ceb6b592b6c12274bce96d73 | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | refs/heads/master | 2023-03-27T04:51:29.297305 | 2020-12-02T09:05:18 | 2020-12-02T09:05:18 | 1,632,969 | 3 | 1 | NOASSERTION | 2020-12-02T09:05:20 | 2011-04-18T22:29:56 | Python | UTF-8 | Python | false | false | 61 | py | # proxy module
from traitsui.qt4.key_binding_editor import *
| [
"[email protected]"
] | |
f2db845f6ed0455eb72a111058ae787634caf967 | 181e9cc9cf4e52fcc6e9979890cc5b41e7beb756 | /Module 1/02_Codes/miscellaneous/4-TenSecondCameraCapture.py | ffe0427cfa69b339ad2ddf078ae4c70f2d0dfbde | [
"MIT"
] | permissive | PacktPublishing/OpenCV-Computer-Vision-Projects-with-Python | ace8576dce8d5f5db6992b3e5880a717996f78cc | 45a9c695e5bb29fa3354487e52f29a565d700d5c | refs/heads/master | 2023-02-09T14:10:42.767047 | 2023-01-30T09:02:09 | 2023-01-30T09:02:09 | 71,112,659 | 96 | 72 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | import cv2
cameraCapture = cv2.VideoCapture(0)
fps = 30 # an assumption
size = (int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
int(cameraCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter(
'MyOutputVid.avi', cv2.cv.CV_FOURCC('I','4','2','0'), fps, size)
success, frame = cameraCapture.read()
numFramesRemaining = 10 * fps - 1
while success and numFramesRemaining > 0:
videoWriter.write(frame)
success, frame = cameraCapture.read()
numFramesRemaining -= 1 | [
"[email protected]"
] | |
07008e999f02c35e6a8c414d9321c124e90b441c | 1127be36a03677a9bf7226c87c4f6aec0b5a4fd7 | /pocdb.py | df443d329c92b598262c214df108ab1d4563081d | [] | no_license | chriskcl/AngelSword | b44de0194502a6a3e3d38b7fd0f949747f697b63 | f2dc09ee41422ef07c43b64a0aba523378cb7667 | refs/heads/master | 2021-05-14T00:44:28.175262 | 2018-01-04T08:11:32 | 2018-01-04T08:11:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,189 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
name: vulnerability PoC dictionary
referer: unknown
author: Lucifer
description: pulls in the main interfaces of each PoC category (cms, system, industrial, hardware, information)
'''
from cms.cmsmain import *
from system.systemmain import *
from industrial.industrialmain import *
from hardware.hardwaremain import *
from information.informationmain import *
class pocdb_pocs:
def __init__(self, url):
self.url = url
self.informationpocdict = {
"git源码泄露扫描":git_check_BaseVerify(url),
"java配置文件文件发现":jsp_conf_find_BaseVerify(url),
"robots文件发现":robots_find_BaseVerify(url),
"svn源码泄露扫描":svn_check_BaseVerify(url),
"JetBrains IDE workspace.xml文件泄露":jetbrains_ide_workspace_disclosure_BaseVerify(url),
"apache server-status信息泄露":apache_server_status_disclosure_BaseVerify(url),
"crossdomain.xml文件发现":crossdomain_find_BaseVerify(url),
}
self.cmspocdict = {
"泛微OA filedownaction SQL注入":weaver_oa_download_sqli_BaseVerify(url),
"泛微OA 数据库配置泄露":weaver_oa_db_disclosure_BaseVerify(url),
"phpok res_action_control.php 任意文件下载(需要cookies文件)":phpok_res_action_control_filedownload_BaseVerify(url),
"phpok api.php SQL注入漏洞":phpok_api_param_sqli_BaseVerify(url),
"phpok remote_image getshell漏洞":phpok_remote_image_getshell_BaseVerify(url),
"jeecg 重置admin密码":jeecg_pwd_reset_BaseVerify(url),
"typecho install.php反序列化命令执行":typecho_install_code_exec_BaseVerify(url),
"Dotnetcms(风讯cms)SQL注入漏洞":foosun_City_ajax_sqli_BaseVerify(url),
"韩国autoset建站程序phpmyadmin任意登录漏洞":autoset_phpmyadmin_unauth_BaseVerify(url),
"phpstudy探针":phpstudy_probe_BaseVerify(url),
"phpstudy phpmyadmin默认密码漏洞":phpstudy_phpmyadmin_defaultpwd_BaseVerify(url),
"Discuz论坛forum.php参数message SSRF漏洞":discuz_forum_message_ssrf_BaseVerify(url),
"Discuz X3 focus.swf flashxss漏洞":discuz_focus_flashxss_BaseVerify(url),
"Discuz! X2.5 物理路径泄露漏洞":discuz_x25_path_disclosure_BaseVerify(url),
"Discuz问卷调查参数orderby注入漏洞":discuz_plugin_ques_sqli_BaseVerify(url),
"Hishop系统productlist.aspx SQL注入":hishop_productlist_sqli_BaseVerify(url),
"亿邮邮箱弱口令列表泄露":eyou_weakpass_BaseVerify(url),
"亿邮Email Defender系统免登陆DBA注入":eyou_admin_id_sqli_BaseVerify(url),
"亿邮邮件系统重置密码问题暴力破解":eyou_resetpw_BaseVerify(url),
"亿邮mail5 user 参数kw SQL注入":eyou_user_kw_sqli_BaseVerify(url),
"金蝶办公系统任意文件下载":kingdee_filedownload_BaseVerify(url),
"金蝶协同平台远程信息泄露漏洞":kingdee_resin_dir_path_disclosure_BaseVerify(url),
"金蝶AES系统Java web配置文件泄露":kingdee_conf_disclosure_BaseVerify(url),
"金蝶EAS任意文件读取":kingdee_logoImgServlet_fileread_BaseVerify(url),
"乐语客服系统任意文件下载漏洞":looyu_down_filedownload_BaseVerify(url),
"smartoa 多处任意文件下载漏洞":smartoa_multi_filedownload_BaseVerify(url),
"urp查询接口曝露":urp_query_BaseVerify(url),
"URP越权查看任意学生课表、成绩(需登录)":urp_query2_BaseVerify(url),
"URP综合教务系统任意文件读取":urp_ReadJavaScriptServlet_fileread_BaseVerify(url),
"pkpmbs工程质量监督站信息管理系统SQL注入":pkpmbs_guestbook_sqli_BaseVerify(url),
"pkpmbs建设工程质量监督系统注入":pkpmbs_addresslist_keyword_sqli_BaseVerify(url),
"pkpmbs建设工程质量监督系统SQL注入":pkpmbs_MsgList_sqli_BaseVerify(url),
"帝友P2P借贷系统无需登录SQL注入漏洞":dyp2p_latesindex_sqli_BaseVerify(url),
"帝友P2P借贷系统任意文件读取漏洞":dyp2p_url_fileread_BaseVerify(url),
"iGenus邮件系统一处无需登录的任意代码执行":igenus_code_exec_BaseVerify(url),
"iGenus邮箱系统login.php 参数Lang任意文件读取":igenus_login_Lang_fileread_BaseVerify(url),
"iGenus邮箱系统管理中心sys/login.php 参数Lang任意文件读取":igenus_syslogin_Lang_fileread_BaseVerify(url),
"live800客服系统downlog任意文件下载":live800_downlog_filedownload_BaseVerify(url),
"live800在线客服系统loginAction SQL注入漏洞":live800_loginAction_sqli_BaseVerify(url),
"live800在线客服系统多处SQL注入GETSHELL漏洞":live800_sta_export_sqli_BaseVerify(url),
"live800在线客服系统XML实体注入漏洞":live800_services_xxe_BaseVerify(url),
"Onethink 参数category SQL注入":onethink_category_sqli_BaseVerify(url),
"ThinkPHP 代码执行漏洞":thinkphp_code_exec_BaseVerify(url),
"汇思学习管理系统任意文件下载":wizbank_download_filedownload_BaseVerify(url),
"Cyberwisdom wizBank学习管理平台SQL注入漏洞":wizbank_usr_id_sqli_BaseVerify(url),
"domino_unauth未授权漏洞":domino_unauth_BaseVerify(url),
"宏景EHR系统多处SQL注入":hjsoft_sqli_BaseVerify(url),
"汇能群管理系统SQL注入":hnkj_researchinfo_dan_sqli_BaseVerify(url),
"汇文软件图书管理系统ajax_asyn_link.old.php任意文件读取":libsys_ajax_asyn_link_old_fileread_BaseVerify(url),
"汇文软件图书管理系统ajax_asyn_link.php任意文件读取":libsys_ajax_asyn_link_fileread_BaseVerify(url),
"汇文软件图书管理系统ajax_get_file.php任意文件读取":libsys_ajax_get_file_fileread_BaseVerify(url),
"通元建站系统用户名泄露漏洞":gpower_users_disclosure_BaseVerify(url),
"metinfo5.0 getpassword.php两处时间盲注漏洞":metinfo_getpassword_sqli_BaseVerify(url),
"用友ICC struts2远程命令执行":yonyou_icc_struts2_BaseVerify(url),
"V2视频会议系统某处SQL注射、XXE漏洞(可getshell)":v2Conference_sqli_xxe_BaseVerify(url),
"政府采购系统eweb编辑器默认口令Getshell漏洞":gpcsoft_ewebeditor_weak_BaseVerify(url),
"RAP接口平台struts远程代码执行":rap_interface_struts_exec_BaseVerify(url),
"虹安DLP数据泄露防护平台struts2远程命令执行":hongan_dlp_struts_exec_BaseVerify(url),
"九羽数字图书馆struts远程命令执行":jiuyu_library_struts_exec_BaseVerify(url),
"垚捷电商平台通用struts命令执行":yaojie_steel_struts_exec_BaseVerify(url),
"Digital-Campus数字校园平台LOG文件泄露":digital_campus_log_disclosure_BaseVerify(url),
"Digital-Campus2.0数字校园平台Sql注射":digital_campus_systemcodelist_sqli_BaseVerify(url),
"jeecms download.jsp 参数fpath任意文件下载":jeecms_fpath_filedownload_BaseVerify(url),
"shopex敏感信息泄露":shopex_phpinfo_disclosure_BaseVerify(url),
"动科(dkcms)默认数据库漏洞":dkcms_database_disclosure_BaseVerify(url),
"FineCMS免费版文件上传漏洞":finecms_uploadfile_BaseVerify(url),
"DaMall商城系统sql注入":damall_selloffer_sqli_BaseVerify(url),
"大汉版通JCMS数据库配置文件读取漏洞":hanweb_readxml_fileread_BaseVerify(url),
"大汉downfile.jsp 任意文件下载":hanweb_downfile_filedownload_BaseVerify(url),
"大汉VerfiyCodeServlet越权漏洞":hanweb_VerifyCodeServlet_install_BaseVerify(url),
"PHP168 login.php GETSHELL漏洞":php168_login_getshell_BaseVerify(url),
"dedecms版本探测":dedecms_version_BaseVerify(url),
"dedecms search.php SQL注入漏洞":dedecms_search_typeArr_sqli_BaseVerify(url),
"dedecms trace爆路径漏洞":dedecms_error_trace_disclosure_BaseVerify(url),
"dedecms download.php重定向漏洞":dedecms_download_redirect_BaseVerify(url),
"dedecms recommend.php SQL注入":dedecms_recommend_sqli_BaseVerify(url),
"umail物理路径泄露":umail_physical_path_BaseVerify(url),
"U-Mail邮件系统sessionid访问":umail_sessionid_access_BaseVerify(url),
"metinfo v5.3sql注入漏洞":metinfo_login_check_sqli_BaseVerify(url),
"用友致远A6协同系统SQL注射union可shell":yonyou_user_ids_sqli_BaseVerify(url),
"用友致远A6协同系统多处SQL注入":yonyou_multi_union_sqli_BaseVerify(url),
"用友致远A6协同系统敏感信息泄露&SQL注射":yonyou_initData_disclosure_BaseVerify(url),
"用友致远A6协同系统数据库账号泄露":yonyou_createMysql_disclosure_BaseVerify(url),
"用友致远A6 test.jsp SQL注入":yonyou_test_sqli_BaseVerify(url),
"用友CRM系统任意文件读取":yonyou_getemaildata_fileread_BaseVerify(url),
"用友EHR 任意文件读取":yonyou_ehr_ELTextFile_BaseVerify(url),
"用友优普a8 CmxUserSQL时间盲注入":yonyou_a8_CmxUser_sqli_BaseVerify(url),
"用友a8 log泄露":yonyou_a8_logs_disclosure_BaseVerify(url),
"用友a8监控后台默认密码漏洞":yonyou_status_default_pwd_BaseVerify(url),
"用友致远A8协同系统 blind XML实体注入":yonyou_a8_personService_xxe_BaseVerify(url),
"用友GRP-U8 sql注入漏洞":yonyou_cm_info_content_sqli_BaseVerify(url),
"用友u8 CmxItem.php SQL注入":yonyou_u8_CmxItem_sqli_BaseVerify(url),
"用友FE协作办公平台5.5 SQL注入":yonyou_fe_treeXml_sqli_BaseVerify(url),
"用友EHR系统 ResetPwd.jsp SQL注入":yonyou_ehr_resetpwd_sqli_BaseVerify(url),
"用友nc NCFindWeb 任意文件下载漏洞":yonyou_nc_NCFindWeb_fileread_BaseVerify(url),
"fsmcms p_replydetail.jsp注入漏洞":fsmcms_p_replydetail_sqli_BaseVerify(url),
"FSMCMS网站重装漏洞":fsmcms_setup_reinstall_BaseVerify(url),
"FSMCMS columninfo.jsp文件参数ColumnID SQL注入":fsmcms_columninfo_sqli_BaseVerify(url),
"qibocms知道系统SQL注入":qibocms_search_sqli_BaseVerify(url),
"qibo分类系统search.php 代码执行":qibocms_search_code_exec_BaseVerify(url),
"qibocms news/js.php文件参数f_idSQL注入":qibocms_js_f_id_sqli_BaseVerify(url),
"qibocms s.php文件参数fids SQL注入":qibocms_s_fids_sqli_BaseVerify(url),
"依友POS系统登陆信息泄露":yeu_disclosure_uid_BaseVerify(url),
"浪潮行政审批系统十八处注入":inspur_multi_sqli_BaseVerify(url),
"浪潮ECGAP政务审批系统SQL注入漏洞":inspur_ecgap_displayNewsPic_sqli_BaseVerify(url),
"五车图书管系统任意下载":clib_kinweblistaction_download_BaseVerify(url),
"五车图书管系统kindaction任意文件遍历":clib_kindaction_fileread_BaseVerify(url),
"Gobetters视频会议系统SQL注入漏洞":gobetters_multi_sqli_BaseVerify(url),
"LBCMS多处SQL注入漏洞":lbcms_webwsfw_bssh_sqli_BaseVerify(url),
"Euse TMS存在多处DBA权限SQL注入":euse_study_multi_sqli_BaseVerify(url),
"suntown未授权任意文件上传漏洞":suntown_upfile_fileupload_BaseVerify(url),
"Dswjcms p2p网贷系统前台4处sql注入":dswjcms_p2p_multi_sqli_BaseVerify(url),
"skytech政务系统越权漏洞":skytech_bypass_priv_BaseVerify(url),
"wordpress AzonPop插件SQL注入":wordpress_plugin_azonpop_sqli_BaseVerify(url),
"wordpress 插件shortcode0.2.3 本地文件包含":wordpress_plugin_ShortCode_lfi_BaseVerify(url),
"wordpress插件跳转":wordpress_url_redirect_BaseVerify(url),
"wordpress 插件WooCommerce PHP代码注入":wordpress_woocommerce_code_exec_BaseVerify(url),
"wordpress 插件mailpress远程代码执行":wordpress_plugin_mailpress_rce_BaseVerify(url),
"wordpress admin-ajax.php任意文件下载":wordpress_admin_ajax_filedownload_BaseVerify(url),
"wordpress rest api权限失效导致内容注入":wordpress_restapi_sqli_BaseVerify(url),
"wordpress display-widgets插件后门漏洞":wordpress_display_widgets_backdoor_BaseVerify(url),
"Mallbuilder商城系统SQL注入":mallbuilder_change_status_sqli_BaseVerify(url),
"efuture商业链系统任意文件下载":efuture_downloadAct_filedownload_BaseVerify(url),
"kj65n煤矿远程监控系统SQL注入":kj65n_monitor_sqli_BaseVerify(url),
"票友机票预订系统6处SQL注入":piaoyou_multi_sqli_BaseVerify(url),
"票友机票预订系统10处SQL注入":piaoyou_ten_sqli_BaseVerify(url),
"票友机票预订系统6处SQL注入(绕过)":piaoyou_six_sqli_BaseVerify(url),
"票友机票预订系统6处SQL注入2(绕过)":piaoyou_six2_sqli_BaseVerify(url),
"票友票务系统int_order.aspx SQL注入":piaoyou_int_order_sqli_BaseVerify(url),
"票友票务系统通用sql注入":piaoyou_newsview_list_BaseVerify(url),
"中农信达监察平台任意文件下载":sinda_downloadfile_download_BaseVerify(url),
"连邦行政审批系统越权漏洞":lianbang_multi_bypass_priv_BaseVerify(url),
"北斗星政务PostSuggestion.aspx SQL注入":star_PostSuggestion_sqli_BaseVerify(url),
"TCExam重新安装可getshell漏洞":tcexam_reinstall_getshell_BaseVerify(url),
"合众商道php系统通用注入":hezhong_list_id_sqli_BaseVerify(url),
"最土团购SQL注入":zuitu_coupon_id_sqli_BaseVerify(url),
"时光动态网站平台(Cicro 3e WS) 任意文件下载":cicro_DownLoad_filedownload_BaseVerify(url),
"华飞科技cms绕过JS GETSHELL":huaficms_bypass_js_BaseVerify(url),
"IWMS系统后台绕过&整站删除":iwms_bypass_js_delete_BaseVerify(url),
"农友政务系统多处SQL注入":nongyou_multi_sqli_BaseVerify(url),
"农友政务系统Item2.aspx SQL注入":nongyou_Item2_sqli_BaseVerify(url),
"农友政务ShowLand.aspx SQL注入":nongyou_ShowLand_sqli_BaseVerify(url),
"农友多处时间盲注":nongyou_sleep_sqli_BaseVerify(url),
"某政府采购系统任意用户密码获取漏洞":zfcgxt_UserSecurityController_getpass_BaseVerify(url),
"铭万事业通用建站系统SQL注入":mainone_b2b_Default_sqli_BaseVerify(url),
"铭万B2B SupplyList SQL注入漏洞":mainone_SupplyList_sqli_BaseVerify(url),
"铭万门户建站系统ProductList SQL注入":mainone_ProductList_sqli_BaseVerify(url),
"xplus npmaker 2003系统GETSHELL":xplus_2003_getshell_BaseVerify(url),
"xplus通用注入":xplus_mysql_mssql_sqli_BaseVerify(url),
"workyi人才系统多处注入漏洞":workyi_multi_sqli_BaseVerify(url),
"菲斯特诺期刊系统多处SQL注入":newedos_multi_sqli_BaseVerify(url),
"东软UniPortal1.2未授权访问&SQL注入":uniportal_bypass_priv_sqli_BaseVerify(url),
"PageAdmin可“伪造”VIEWSTATE执行任意SQL查询&重置管理员密码":pageadmin_forge_viewstate_BaseVerify(url),
"SiteFactory CMS 5.5.9任意文件下载漏洞":xtcms_download_filedownload_BaseVerify(url),
"璐华企业版OA系统多处SQL注入":ruvar_oa_multi_sqli_BaseVerify(url),
"璐华OA系统多处SQL注入":ruvar_oa_multi_sqli2_BaseVerify(url),
"璐华OA系统多处SQL注入3":ruvar_oa_multi_sqli3_BaseVerify(url),
"GN SQL Injection":gn_consulting_sqli_BaseVerify(url),
"JumboECMS V1.6.1 注入漏洞":jumboecms_slide_id_sqli_BaseVerify(url),
"joomla组件com_docman本地文件包含":joomla_com_docman_lfi_BaseVerify(url),
"joomla 3.7.0 core SQL注入":joomla_index_list_sqli_BaseVerify(url),
"北京网达信联电子采购系统多处注入":caitong_multi_sqli_BaseVerify(url),
"Designed by Alkawebs SQL Injection":alkawebs_viewnews_sqli_BaseVerify(url),
"一采通电子采购系统多处时间盲注":caitong_multi_sleep_sqli_BaseVerify(url),
"启博淘店通标准版任意文件遍历漏洞":shop360_do_filedownload_BaseVerify(url),
"PSTAR-电子服务平台SQL注入漏洞":pstar_warehouse_msg_01_sqli_BaseVerify(url),
"PSTAR-电子服务平台isfLclInfo注入漏洞":pstar_isfLclInfo_sqli_BaseVerify(url),
"PSTAR-电子服务平台SQL注入漏洞":pstar_qcustoms_sqli_BaseVerify(url),
"TRS(拓尔思) wcm pre.as 文件包含":trs_wcm_pre_as_lfi_BaseVerify(url),
"TRS(拓尔思) 网络信息雷达4.6系统敏感信息泄漏到进后台":trs_inforadar_disclosure_BaseVerify(url),
"TRS(拓尔思) 学位论文系统papercon处SQL注入":trs_lunwen_papercon_sqli_BaseVerify(url),
"TRS(拓尔思) infogate插件 blind XML实体注入":trs_infogate_xxe_BaseVerify(url),
"TRS(拓尔思) infogate插件 任意注册漏洞":trs_infogate_register_BaseVerify(url),
"TRS(拓尔思) was5配置文件泄露":trs_was5_config_disclosure_BaseVerify(url),
"TRS(拓尔思) was5 download_templet.jsp任意文件下载":trs_was5_download_templet_BaseVerify(url),
"TRS(拓尔思) wcm系统默认账户漏洞":trs_wcm_default_user_BaseVerify(url),
"TRS(拓尔思) wcm 6.x版本infoview信息泄露":trs_wcm_infoview_disclosure_BaseVerify(url),
"TRS(拓尔思) was40 passwd.htm页面泄露":trs_was40_passwd_disclosure_BaseVerify(url),
"TRS(拓尔思) was40 tree导航树泄露":trs_was40_tree_disclosure_BaseVerify(url),
"TRS(拓尔思) ids身份认证信息泄露":trs_ids_auth_disclosure_BaseVerify(url),
"TRS(拓尔思) wcm webservice文件写入漏洞":trs_wcm_service_writefile_BaseVerify(url),
"易创思ECScms MoreIndex SQL注入":ecscms_MoreIndex_sqli_BaseVerify(url),
"金窗教务系统存在多处SQL注射漏洞":gowinsoft_jw_multi_sqli_BaseVerify(url),
"siteserver3.6.4 background_taskLog.aspx注入":siteserver_background_taskLog_sqli_BaseVerify(url),
"siteserver3.6.4 background_log.aspx注入":siteserver_background_log_sqli_BaseVerify(url),
"siteserver3.6.4 user.aspx注入":siteserver_UserNameCollection_sqli_BaseVerify(url),
"siteserver3.6.4 background_keywordsFilting.aspx注入":siteserver_background_keywordsFilting_sqli_BaseVerify(url),
"siteserver3.6.4 background_administrator.aspx注入":siteserver_background_administrator_sqli_BaseVerify(url),
"NITC营销系统suggestwordList.php SQL注入":nitc_suggestwordList_sqli_BaseVerify(url),
"NITC营销系统index.php SQL注入":nitc_index_language_id_sqli_BaseVerify(url),
"南大之星信息发布系统DBA SQL注入":ndstar_six_sqli_BaseVerify(url),
"蓝凌EIS智慧协同平台menu_left_edit.aspx SQL注入":eis_menu_left_edit_sqli_BaseVerify(url),
"天柏在线培训系统Type_List.aspx SQL注入":tianbo_Type_List_sqli_BaseVerify(url),
"天柏在线培训系统TCH_list.aspx SQL注入":tianbo_TCH_list_sqli_BaseVerify(url),
"天柏在线培训系统Class_Info.aspx SQL注入":tianbo_Class_Info_sqli_BaseVerify(url),
"天柏在线培训系统St_Info.aspx SQL注入":tianbo_St_Info_sqli_BaseVerify(url),
"安财软件GetXMLList任意文件读取":acsoft_GetXMLList_fileread_BaseVerify(url),
"安财软件GetFile任意文件读取":acsoft_GetFile_fileread_BaseVerify(url),
"安财软件GetFileContent任意文件读取":acsoft_GetFileContent_fileread_BaseVerify(url),
"天津神州助平台通用型任意下载":gxwssb_fileDownloadmodel_download_BaseVerify(url),
"ETMV9数字化校园平台任意下载":etmdcp_Load_filedownload_BaseVerify(url),
"安脉grghjl.aspx 参数stuNo注入":anmai_grghjl_stuNo_sqli_BaseVerify(url),
"农友多处时间盲注":nongyou_ShowLand_sqli_BaseVerify(url),
"某政府通用任意文件下载":zf_cms_FileDownload_BaseVerify(url),
"师友list.aspx keywords SQL注入":shiyou_list_keyWords_sqli_BaseVerify(url),
"speedcms list文件参数cid SQL注入":speedcms_list_cid_sqli_BaseVerify(url),
"卓繁cms任意文件下载漏洞":zhuofan_downLoadFile_download_BaseVerify(url),
"金宇恒内容管理系统通用型任意文件下载漏洞":gevercms_downLoadFile_filedownload_BaseVerify(url),
"任我行crm任意文件下载":weway_PictureView1_filedownload_BaseVerify(url),
"易创思教育建站系统未授权访问可查看所有注册用户":esccms_selectunitmember_unauth_BaseVerify(url),
"wecenter SQL注入":wecenter_topic_id_sqli_BaseVerify(url),
"shopnum1 ShoppingCart1 SQL注入":shopnum_ShoppingCart1_sqli_BaseVerify(url),
"shopnum1 ProductListCategory SQL注入":shopnum_ProductListCategory_sqli_BaseVerify(url),
"shopnum1 ProductDetail.aspx SQL注入":shopnum_ProductDetail_sqli_BaseVerify(url),
"shopnum1 GuidBuyList.aspx SQL注入":shopnum_GuidBuyList_sqli_BaseVerify(url),
"好视通视频会议系统(fastmeeting)任意文件遍历":fastmeeting_download_filedownload_BaseVerify(url),
"远古流媒体系统两处SQL注入":viewgood_two_sqli_BaseVerify(url),
"远古 pic_proxy.aspx SQL注入":viewgood_pic_proxy_sqli_BaseVerify(url),
"远古流媒体系统 GetCaption.ashx注入":viewgood_GetCaption_sqli_BaseVerify(url),
"shop7z order_checknoprint.asp SQL注入":shop7z_order_checknoprint_sqli_BaseVerify(url),
"dreamgallery album.php SQL注入":dreamgallery_album_id_sqli_BaseVerify(url),
"IPS Community Suite <= 4.1.12.3 PHP远程代码执行":ips_community_suite_code_exec_BaseVerify(url),
"科信邮件系统login.server.php 时间盲注":kxmail_login_server_sqli_BaseVerify(url),
"shopNC B2B版 index.php SQL注入":shopnc_index_class_id_sqli_BaseVerify(url),
"南京擎天政务系统 geren_list_page.aspx SQL注入":skytech_geren_list_page_sqli_BaseVerify(url),
"学子科技诊断测评系统多处未授权访问":xuezi_ceping_unauth_BaseVerify(url),
"Shadows-IT selector.php 任意文件包含":shadowsit_selector_lfi_BaseVerify(url),
"皓翰数字化校园平台任意文件下载":haohan_FileDown_filedownload_BaseVerify(url),
"phpcms digg_add.php SQL注入":phpcms_digg_add_sqli_BaseVerify(url),
"phpcms authkey泄露漏洞":phpcms_authkey_disclosure_BaseVerify(url),
"phpcms2008 flash_upload.php SQL注入":phpcms_flash_upload_sqli_BaseVerify(url),
"phpcms2008 product.php 代码执行":phpcms_product_code_exec_BaseVerify(url),
"phpcms v9.6.0 SQL注入":phpcms_v96_sqli_BaseVerify(url),
"phpcms 9.6.1任意文件读取漏洞":phpcms_v961_fileread_BaseVerify(url),
"phpcms v9 flash xss漏洞":phpcms_v9_flash_xss_BaseVerify(url),
"seacms search.php 代码执行":seacms_search_code_exec_BaseVerify(url),
"seacms 6.45 search.php order参数前台代码执行":seacms_order_code_exec_BaseVerify(url),
"seacms search.php 参数jq代码执行":seacms_search_jq_code_exec_BaseVerify(url),
"安脉学生管理系统10处SQL注入":anmai_teachingtechnology_sqli_BaseVerify(url),
"cmseasy header.php 报错注入":cmseasy_header_detail_sqli_BaseVerify(url),
"PhpMyAdmin2.8.0.3无需登录任意文件包含导致代码执行":phpmyadmin_setup_lfi_BaseVerify(url),
"opensns index.php 参数arearank注入":opensns_index_arearank_BaseVerify(url),
"opensns index.php 前台getshell":opensns_index_getshell_BaseVerify(url),
"ecshop uc.php参数code SQL注入":ecshop_uc_code_sqli_BaseVerify(url),
"ecshop3.0 flow.php 参数order_id注入":ecshop_flow_orderid_sqli_BaseVerify(url),
"SiteEngine 6.0 & 7.1 SQL注入漏洞":siteengine_comments_module_sqli_BaseVerify(url),
"明腾cms cookie欺骗漏洞":mingteng_cookie_deception_BaseVerify(url),
"正方教务系统services.asmx SQL注入":zfsoft_service_stryhm_sqli_BaseVerify(url),
"正方教务系统数据库任意操纵":zfsoft_database_control_BaseVerify(url),
"正方教务系统default3.aspx爆破页面":zfsoft_default3_bruteforce_BaseVerify(url),
"V2视频会议系统某处SQL注射、XXE漏洞(可getshell)":v2Conference_sqli_xxe_BaseVerify(url),
"1039驾校通未授权访问漏洞":jxt1039_unauth_BaseVerify(url),
"thinksns category模块代码执行":thinksns_category_code_exec_BaseVerify(url),
"TPshop eval-stdin.php 代码执行漏洞":tpshop_eval_stdin_code_exec_BaseVerify(url),
}
self.industrialpocdict = {
"新力热电无线抄表监控系统绕过后台登录":wireless_monitor_priv_elevation_BaseVerify(url),
"火力发电能耗监测弱口令":rockontrol_weak_BaseVerify(url),
"sgc8000 大型旋转机监控系统报警短信模块泄露":sgc8000_sg8k_sms_disclosure_BaseVerify(url),
"sgc8000 监控系统数据连接信息泄露":sgc8000_deldata_config_disclosure_BaseVerify(url),
"sgc8000监控系统超管账号泄露漏洞":sgc8000_defaultuser_disclosure_BaseVerify(url),
"zte 无线控制器 SQL注入":zte_wireless_getChannelByCountryCode_sqli_BaseVerify(url),
"中兴无线控制器弱口令":zte_wireless_weak_pass_BaseVerify(url),
"东方电子SCADA通用系统信息泄露":dfe_scada_conf_disclosure_BaseVerify(url),
}
self.systempocdict = {
"GoAhead LD_PRELOAD远程代码执行(CVE-2017-17562)":goahead_LD_PRELOAD_rce_BaseVerify(url),
"天融信Topsec change_lan.php本地文件包含":topsec_change_lan_filedownload_BaseVerify(url),
"Tomcat代码执行漏洞(CVE-2017-12616)":tomcat_put_exec_BaseVerify(url),
"redis 未授权漏洞":redis_unauth_BaseVerify(url),
"KingGate防火墙默认配置不当可被远控":kinggate_zebra_conf_BaseVerify(url),
"nginx Multi-FastCGI Code Execution":multi_fastcgi_code_exec_BaseVerify(url),
"TurboMail设计缺陷以及默认配置漏洞":turbomail_conf_BaseVerify(url),
"TurboGate邮件网关XXE漏洞":turbogate_services_xxe_BaseVerify(url),
"weblogic SSRF漏洞(CVE-2014-4210)":weblogic_ssrf_BaseVerify(url),
"weblogic XMLdecoder反序列化漏洞(CVE-2017-10271)":weblogic_xmldecoder_exec_BaseVerify(url),
"weblogic 接口泄露":weblogic_interface_disclosure_BaseVerify(url),
"实易DNS管理系统文件包含至远程代码执行":forease_fileinclude_code_exec_BaseVerify(url),
"hudson源代码泄露漏洞":hudson_ws_disclosure_BaseVerify(url),
"N点虚拟主机管理系统V1.9.6版数据库下载漏洞":npoint_mdb_download_BaseVerify(url),
"宏杰Zkeys虚拟主机默认数据库漏洞":zkeys_database_conf_BaseVerify(url),
"江南科友堡垒机信息泄露":hac_gateway_info_disclosure_BaseVerify(url),
"Moxa OnCell 未授权访问":moxa_oncell_telnet_BaseVerify(url),
"glassfish 任意文件读取":glassfish_fileread_BaseVerify(url),
"zabbix jsrpc.php SQL注入":zabbix_jsrpc_profileIdx2_sqli_BaseVerify(url),
"php fastcgi任意文件读取漏洞":php_fastcgi_read_BaseVerify(url),
"hfs rejetto 远程代码执行":hfs_rejetto_search_rce_BaseVerify(url),
"shellshock漏洞":shellshock_BaseVerify(url),
"dorado默认口令漏洞":dorado_default_passwd_BaseVerify(url),
"ms15_034 http.sys远程代码执行(CVE-2015-1635)":iis_ms15034_httpsys_rce_BaseVerify(url),
"IIS 6.0 webdav远程代码执行漏洞(CVE-2017-7269)":iis_webdav_rce_BaseVerify(url),
"深澜软件srun3000计费系统任意文件下载漏洞":srun_index_file_filedownload_BaseVerify(url),
"深澜软件srun3000计费系统rad_online.php命令执行bypass":srun_rad_online_bypass_rce_BaseVerify(url),
"深澜软件srun3000计费系统rad_online.php参数username命令执行":srun_rad_online_username_rce_BaseVerify(url),
"深澜软件srun3000计费系统download.php任意文件下载":srun_download_file_filedownload_BaseVerify(url),
"深澜软件srun3000计费系统user_info.php命令执行":srun_user_info_uid_rce_BaseVerify(url),
"intel AMT web系统绕过登录(CVE-2017-5689)":intel_amt_crypt_bypass_BaseVerify(url),
"smtp starttls明文命令注入(CVE-2011-0411)":smtp_starttls_plaintext_inj_BaseVerify(url),
"resin viewfile 任意文件读取":resin_viewfile_fileread_BaseVerify(url),
"mongodb 未授权漏洞":mongodb_unauth_BaseVerify(url),
"深信服 AD4.5版本下命令执行漏洞":sangfor_ad_script_command_exec_BaseVerify(url),
}
self.hardwarepocdict = {
"Dlink 本地文件包含":router_dlink_webproc_fileread_BaseVerify(url),
"Dlink DIAGNOSTIC.PHP命令执行":router_dlink_command_exec_BaseVerify(url),
"锐捷VPN设备未授权访问漏洞":router_ruijie_unauth_BaseVerify(url),
"上海安达通某网关产品&某VPN产品struts命令执行":adtsec_gateway_struts_exec_BaseVerify(url),
"SJW74系列安全网关 和 PN-2G安全网关信息泄露":adtsec_Overall_app_js_bypass_BaseVerify(url),
"迈普vpn安全网关弱口令&&执行命令":mpsec_weakpass_exec_BaseVerify(url),
"迈普网关webui任意文件下载":mpsec_webui_filedownload_BaseVerify(url),
"浙江宇视(DVR/NCR)监控设备远程命令执行漏洞":camera_uniview_dvr_rce_BaseVerify(url),
"富士施乐打印机默认口令漏洞":printer_xerox_default_pwd_BaseVerify(url),
"惠普打印机telnet未授权访问":printer_hp_jetdirect_unauth_BaseVerify(url),
"东芝topaccess打印机未授权漏洞":printer_topaccess_unauth_BaseVerify(url),
"佳能打印机未授权漏洞":printer_canon_unauth_BaseVerify(url),
"juniper NetScreen防火墙后门(CVE-2015-7755)":juniper_netscreen_backdoor_BaseVerify(url),
"海康威视web弱口令":camera_hikvision_web_weak_BaseVerify(url),
}
| [
"[email protected]"
] | |
49cc7ab24fcf653315ed8b0a0c768e7420905965 | 4a2eac368e3e2216b0cd1dd70224da3ca4ee7c5e | /SecretManager/owlbot.py | 50c7c9bb3fb0bdd3f2138665e345cd50407ebd4c | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | googleapis/google-cloud-php | 856a940eee158eafa6f2443f8d61813779216429 | ad50f749431287e7074279e2b4fa32d6d6c2c952 | refs/heads/main | 2023-09-06T05:48:31.609502 | 2023-09-05T20:27:34 | 2023-09-05T20:27:34 | 43,642,389 | 642 | 330 | Apache-2.0 | 2023-09-13T22:39:27 | 2015-10-04T16:09:46 | PHP | UTF-8 | Python | false | false | 1,871 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import logging
from pathlib import Path
import subprocess
import synthtool as s
from synthtool.languages import php
from synthtool import _tracked_paths
logging.basicConfig(level=logging.DEBUG)
src = Path(f"../{php.STAGING_DIR}/SecretManager").resolve()
dest = Path().resolve()
# Added so that we can pass copy_excludes in the owlbot_main() call
_tracked_paths.add(src)
php.owlbot_main(src=src, dest=dest)
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
| [
"[email protected]"
] | |
1ac2383f4356c4123f3f0424f2d41eeb4d65eef7 | e00d41c9f4045b6c6f36c0494f92cad2bec771e2 | /multimedia/sound/celt/actions.py | ebbc7ace073cda6de994f48d6de40b6054304d7d | [] | no_license | pisilinux/main | c40093a5ec9275c771eb5fb47a323e308440efef | bfe45a2e84ea43608e77fb9ffad1bf9850048f02 | refs/heads/master | 2023-08-19T00:17:14.685830 | 2023-08-18T20:06:02 | 2023-08-18T20:06:02 | 37,426,721 | 94 | 295 | null | 2023-09-14T08:22:22 | 2015-06-14T19:38:36 | Python | UTF-8 | Python | false | false | 407 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.configure("--disable-static")
def build():
autotools.make()
def install():
autotools.install()
pisitools.dodoc("COPYING", "README")
| [
"[email protected]"
] | |
ef1dee427fd1886305f8a5488e392f4572706fde | bc9cb3f0f104778026ca6f3a07595dd5d6ce840f | /DIA_01/introducao_python/aula/linguagem_python/05_conversao_de_tipos/exemplo_03.py | ba7db84a4bd8944411f327a1af919b84d29843a2 | [] | no_license | JohnRhaenys/escola_de_ferias | ff7a5d7f399459725f3852ca6ee200486f29e7d4 | 193364a05a5c7ccb2e5252c150745d6743738728 | refs/heads/master | 2023-01-12T11:30:46.278703 | 2020-11-19T14:59:10 | 2020-11-19T14:59:10 | 314,278,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # Convertendo float para string
real = 1.23
print(real)
print(type(real))
print()
string = str(real)
print(string)
print(type(string)) | [
"[email protected]"
] | |
2bb80d3e76ad8eeb95f106c44017b8529377a982 | b715c79f52cf2c95927c19edf8f6d64f5322bf7d | /PJLink/start_kernel.py | 94e029381f90239bda03ffb6c2dba92292df92be | [
"MIT"
] | permissive | sunt05/PJLink | 4d6805b07070c0a2c7452464358ebcf075eeabb0 | cd623efebf4ddae8c5ea75b3ee08fe9819e78b40 | refs/heads/master | 2020-03-29T05:46:10.707576 | 2018-09-20T07:36:51 | 2018-09-20T07:36:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,842 | py | """start_kernel is a convenience script for starting a kernel thread in python
"""
import sys, os, argparse
sys.stdout.flush()
true_file = os.path.abspath(__file__)
sys.path.insert(0, os.path.dirname(os.path.dirname(true_file)))
from PJLink import *
### I should do a lot more argparsing... but I don't
parser = argparse.ArgumentParser(description='Start a PJLink kernel.')
parser.add_argument('--blocking', dest='block', type=bool, nargs='?', default=False,
help='whether the kernel should block or not')
parser.add_argument('--debug', dest='debug', type=int, nargs='?', default=0,
help='debug level for underlying PJLink lib')
parser.add_argument('-linkname', dest='name', type=str, nargs='?',
help='name for the link')
parser.add_argument('-linkmode', dest='mode', type=str, nargs='?',
help='mode for the link')
parser.add_argument('-linkprotocol', dest='protocol', type=str, nargs='?',
help='protocol for the link')
parser = parser.parse_args()
blocking = parser.block
debug = parser.debug
name = parser.name
mode = parser.mode
protocol = parser.protocol
opts = { 'linkname' : parser.name, 'linkmode' : parser.mode, 'linkprotocol' : parser.protocol }
opts = [ ('-'+k, v) for k, v in opts.items() if v is not None]
init = [ None ] * (2 * len(opts))
for i, t in enumerate(opts):
init[2*i] = t[0]
init[2*i+1] = t[1]
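# e.g. running with "-linkname foo -linkmode connect" produces
# init == ['-linkname', 'foo', '-linkmode', 'connect'], the flat argument list
# handed to create_reader_link below.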
reader = create_reader_link(init=init, debug_level=debug)
# print(reader.link.drain())
# stdout = open(os.path.expanduser("~/Desktop/stdout.txt"), "w+")
# stderr = open(os.path.expanduser("~/Desktop/stderr.txt"), "w+")
# sys.stdout = stdout
# sys.stderr = stderr
if blocking:
# reader.run()
# else:
import code
code.interact(banner = "", local={"Kernel":reader.link, "KernelReader":reader})
| [
"[email protected]"
] | |
86ca70ec064a1b497a8d74d0e3d0844b4fa7c668 | 3e1d9a25426e2a157a69f3c4c6c41b5216deb8de | /LeetCode/Python/Easy/Heaters.py | 749d9559b5c23d9ae2cfa22db847e1981c3ed067 | [] | no_license | jashansudan/Data-Structures-and-Algorithms | 4e420586bc773c5fc35b7ce7be369ca92bf51898 | e38cfb49b3f9077f47f1053207f4e44c7726fb90 | refs/heads/master | 2021-01-12T10:47:05.754340 | 2018-02-13T01:01:50 | 2018-02-13T01:01:50 | 72,697,093 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | class Solution(object):
def findRadius(self, houses, heaters):
houses.sort()
heaters.sort()
i, maxDistance = 0, 0
for house in houses:
while (i < len(heaters) - 1 and
abs(heaters[i] - house) >= abs(heaters[i + 1] - house)):
i += 1
maxDistance = max(maxDistance, abs(heaters[i] - house))
return maxDistance
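# Example: houses = [1, 2, 3, 4], heaters = [1, 4] -> 1 (houses 2 and 3 are each within radius 1).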
| [
"[email protected]"
] | |
d7844833faffeeb6ea09e3c6a4e91c845c8bcd78 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/new_short/feedback/job13.py | 70a3df461607c82369d2bf42af52327b64207e16 | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,222 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.resnet import ResNet50, ResNet101, ResNet152
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.01
args_model = 'resnet50'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_feedback/' + job_name + '*'
total_epochs = 11
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '50' in args_model:
base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
epoch_waste_dict[job_name] += epoch_waste_time
json_file3 = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file3)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_feedback/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
checkpoint_dict[job_name] = 1
json_file3 = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file3)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"[email protected]"
] | |
c250ce17de3d4b5cc6706f63c1977f4f2fcee481 | d6d20681f41102df3feb2b438ef80569bd73730f | /Uge4-numpy/.history/exercises_20200218170520.py | ca2f210387343db0503dbab4b0b5f8b7d2cf8f1b | [] | no_license | MukHansen/pythonAfleveringer | d0ad2629da5ba2b6011c9e92212949e385443789 | 4107c3c378f757733961812dd124efc99623ff2e | refs/heads/master | 2020-12-22T13:27:19.135138 | 2020-05-22T11:35:52 | 2020-05-22T11:35:52 | 236,796,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,046 | py | import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
filename = './befkbhalderstatkode.csv'
data = np.genfromtxt(filename, delimiter=',', dtype=np.uint, skip_header=1)
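# Assumed column layout of befkbhalderstatkode.csv, inferred from the indexing used
# below: 0 = year, 1 = neighbourhood code, 2 = age, 3 = country code, 4 = number of persons.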
neighb = {1: 'Indre By', 2: 'Østerbro', 3: 'Nørrebro', 4: 'Vesterbro/Kgs. Enghave',
5: 'Valby', 6: 'Vanløse', 7: 'Brønshøj-Husum', 8: 'Bispebjerg', 9: 'Amager Øst',
10: 'Amager Vest', 99: 'Udenfor'}
specificHoods = {3: 'Nørrebro'} #, 4: 'Vesterbro/Kgs.'
nordicCountryCodes = {5104: 'Finland', 5106: 'Island', 5110: 'Norge', 5120: 'Sverige'}
def getPopPerHood(hood):
deezMask = (data[:,0] == 2015) & (data[:,1] == hood)
return np.sum(data[deezMask][:,4])
def getPopPerSpecificHood(hood):
deezMask = (data[:,1] == hood)
print((data[deezMask][:,(0,4)]))
    return np.sum(data[deezMask][:,4])
def getOldPeople():
    deezMask = (data[:,0] == 2015) & (data[:,2] >= 65)
return np.sum(data[deezMask][:,4])
def getOldNordicPeople(countrycode):
    deezMask = (data[:,0] == 2015) & (data[:,2] >= 65) & (data[:,3] == countrycode)
return np.sum(data[deezMask][:,4])
def getSumOfOldNordicPeople():
lst = {}
for key, value in nordicCountryCodes.items():
# print(value, getOldNordicPeople(key))
lst.update({value: getOldNordicPeople(key)})
return lst
def getSumPerHood():
lst = {}
for key, value in neighb.items():
# print(value, getPopPerHood(key))
lst.update({value: getPopPerHood(key)})
return lst
def getSumPerSpecificHoods():
lst = {}
for key, value in specificHoods.items():
# print(value, getPopPerSpecificHood(key))
lst.update({value: getPopPerSpecificHood(key)})
return lst
def displayPlotOfHoodsPop():
lst = getSumPerHood()
hoodsSorted = OrderedDict(sorted(lst.items(), key=lambda x: x[1]))
cityAreas = []
sumOfPeople = []
for key, value in hoodsSorted.items():
cityAreas.append(key)
sumOfPeople.append(value)
plt.bar(cityAreas, sumOfPeople, width=0.5, linewidth=0, align='center')
title = 'Population in various areas in cph'
plt.title(title, fontsize=12)
plt.xticks(cityAreas, rotation=65)
plt.tick_params(axis='both', labelsize=8)
plt.show()
def displayPopulationOverTheYears():
population = []
years = []
for key, value in specificHoods.items():
years.append(key)
population.append(value)
# West = []
# East = []
# lst = {West, East}
# East = [lst.get(4)]
# print('West --------', West)
# print('East --------', East)
plt.figure()
plt.plot(years, population, linewidth=5)
plt.title("Population over the years", fontsize=24)
plt.xlabel("Year", fontsize=14)
plt.tick_params(axis='both', labelsize=14)
# print(getSumPerHood())
# displayPlotOfHoodsPop()
# print('Number of people above the age of 65 --',getOldPeople())
# print(getSumOfOldNordicPeople())
# displayPopulationOverTheYears()
print(getSumPerSpecificHoods())
# getSumPerSpecificHoods()
| [
"[email protected]"
] | |
4d7a7650d95d9418c7a99e03149894a0c5c686dc | bbe447a740929eaee1955bd9c1517cf760dd5cb9 | /keygrabber/adwords/adwords_api_python_14.2.1/adspygoogle/adwords/zsi/v200909/CampaignService_services.py | f1101c95aafb53ba4c75e90dcf48c195df9f883f | [
"Apache-2.0"
] | permissive | MujaahidSalie/aranciulla | f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893 | 34197dfbdb01479f288611a0cb700e925c4e56ce | refs/heads/master | 2020-09-07T02:16:25.261598 | 2011-11-01T21:20:46 | 2011-11-01T21:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,176 | py | ##################################################
# CampaignService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from CampaignService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class CampaignServiceLocator:
CampaignServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v200909/CampaignService"
def getCampaignServiceInterfaceAddress(self):
return CampaignServiceLocator.CampaignServiceInterface_address
def getCampaignServiceInterface(self, url=None, **kw):
return CampaignServiceSoapBindingSOAP(url or CampaignServiceLocator.CampaignServiceInterface_address, **kw)
# Methods
class CampaignServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# get: getCampaign
def getCampaign(self, request):
if isinstance(request, getCampaignRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getCampaignResponse.typecode)
return response
    # mutate: mutateCampaign
def mutateCampaign(self, request):
if isinstance(request, mutateCampaignRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(mutateCampaignResponse.typecode)
return response
getCampaignRequest = ns0.getCampaign_Dec().pyclass
getCampaignResponse = ns0.getCampaignResponse_Dec().pyclass
mutateCampaignRequest = ns0.mutateCampaign_Dec().pyclass
mutateCampaignResponse = ns0.mutateCampaignResponse_Dec().pyclass
| [
"[email protected]"
] | |
fdd25e91bdb09e58d4f219ef3803e81fd78e0545 | 6493bc4fdf2618b401c7c2acf6e04567a27a1b00 | /klearn/kernels/__init__.py | 1a14748333710a3cb0f1405360498e01722b3acd | [] | no_license | mpharrigan/klearn | 75dc5bfea65ed7018fd42d7eb502b32c4ff7a007 | 697e62993cf3a42444cc9115f8fea0425950fec2 | refs/heads/master | 2021-01-16T00:31:34.616280 | 2014-05-21T22:50:10 | 2014-05-21T22:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | from .baseclasses import AbstractKernel
from .dotproduct import DotProduct
from .polynomial import Polynomial
from .gaussian import Gaussian
| [
"[email protected]"
] | |
c63339a1a53f68331bf60b14c7938e6240219f5a | 826e10209af5462022e3aff1511f1e48842d32a4 | /promoterz/representation/oldschool.py | 99a894122cd5eb447a3eeae0a2347854961cdb4b | [
"MIT"
] | permissive | IanMadlenya/japonicus | 0309bbf8203525770e237b2b385844ef4a3610ae | 112aabdd3362ca6259ddbe57440cdd583a674022 | refs/heads/master | 2021-04-15T12:06:15.027249 | 2018-03-20T22:23:43 | 2018-03-20T22:23:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | #!/bin/python
import random
import json
import os
from copy import deepcopy
from . import Creator
from deap import base
from deap import tools
from deap import algorithms
import numpy as np
from .. import parameterOperations
def constructPhenotype(stratSettings, individue):
    # THIS FUNCTION IS UGLILY WRITTEN; USE WITH CAUTION;
# (still works :})
Strategy = individue.Strategy
R = lambda V, lim: ((lim[1] - lim[0]) / 100) * V + lim[0]
AttributeNames = sorted(list(stratSettings.keys()))
Phenotype = {}
for K in range(len(AttributeNames)):
Value = R(individue[K], stratSettings[AttributeNames[K]])
Phenotype[AttributeNames[K]] = Value
Phenotype = parameterOperations.expandNestedParameters(Phenotype)
return Phenotype
def createRandomVarList(IndSize):
VAR_LIST = [random.randrange(0, 100) for x in range(IndSize)]
return VAR_LIST
def initInd(Criterion, Attributes):
w = Criterion()
IndSize = len(list(Attributes.keys()))
w[:] = createRandomVarList(IndSize)
return w
def getToolbox(Strategy, genconf, Attributes):
toolbox = base.Toolbox()
    creator = Creator.init(base.Fitness, {'Strategy': Strategy})
toolbox.register("newind", initInd, creator.Individual, Attributes)
toolbox.register("population", tools.initRepeat, list, toolbox.newind)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=10, up=10, indpb=0.2)
toolbox.register("constructPhenotype", constructPhenotype, Attributes)
return toolbox
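# Illustrative sketch (not part of the original module) of how a toolbox built by
# getToolbox is typically driven with DEAP; Attributes maps parameter names to
# (low, high) bounds and every gene is an integer in the 0-100 range.
# MyStrategy and genconf are placeholders for the caller's strategy and GA settings.
#
#   toolbox = getToolbox(MyStrategy, genconf, {"shortEMA": (2, 20), "longEMA": (20, 100)})
#   population = toolbox.population(n=30)
#   child = toolbox.clone(population[0])
#   toolbox.mutate(child)
#   settings = toolbox.constructPhenotype(child)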
| [
"[email protected]"
] | |
56799809ff84f9be3ec51a12f546d1c89424b975 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /tests/ut/python/dataset/test_random_crop_and_resize_with_bbox.py | 2727325e007ba40b6bb6c02558a5846a078903a1 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 9,546 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RandomCropAndResizeWithBBox op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.vision.c_transforms as c_vision
from mindspore import log as logger
from util import visualize_with_bounding_boxes, InvalidBBoxType, check_bad_bbox, \
config_get_set_seed, config_get_set_num_parallel_workers, save_and_check_md5
GENERATE_GOLDEN = False
# Updated VOC dataset with correct annotations - DATA_DIR
DATA_DIR_VOC = "../data/dataset/testVOC2012_2"
# COCO dataset - DATA_DIR, ANNOTATION_DIR
DATA_DIR_COCO = ["../data/dataset/testCOCO/train/", "../data/dataset/testCOCO/annotations/train.json"]
def test_random_resized_crop_with_bbox_op_c(plot_vis=False):
"""
Prints images and bboxes side by side with and without RandomResizedCropWithBBox Op applied,
tests with MD5 check, expected to pass
"""
logger.info("test_random_resized_crop_with_bbox_op_c")
original_seed = config_get_set_seed(23415)
original_num_parallel_workers = config_get_set_num_parallel_workers(1)
# Load dataset
dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
# map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
filename = "random_resized_crop_with_bbox_01_c_result.npz"
save_and_check_md5(dataVoc2, filename, generate_golden=GENERATE_GOLDEN)
unaugSamp, augSamp = [], []
for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug)
augSamp.append(Aug)
if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp)
# Restore config setting
ds.config.set_seed(original_seed)
ds.config.set_num_parallel_workers(original_num_parallel_workers)
def test_random_resized_crop_with_bbox_op_coco_c(plot_vis=False):
"""
Prints images and bboxes side by side with and without RandomResizedCropWithBBox Op applied,
Testing with Coco dataset
"""
logger.info("test_random_resized_crop_with_bbox_op_coco_c")
# load dataset
dataCoco1 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
decode=True, shuffle=False)
dataCoco2 = ds.CocoDataset(DATA_DIR_COCO[0], annotation_file=DATA_DIR_COCO[1], task="Detection",
decode=True, shuffle=False)
test_op = c_vision.RandomResizedCropWithBBox((512, 512), (0.5, 1), (0.5, 1))
dataCoco2 = dataCoco2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
unaugSamp, augSamp = [], []
for unAug, Aug in zip(dataCoco1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataCoco2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug)
augSamp.append(Aug)
if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp, "bbox")
def test_random_resized_crop_with_bbox_op_edge_c(plot_vis=False):
"""
Prints images and bboxes side by side with and without RandomResizedCropWithBBox Op applied,
tests on dynamically generated edge case, expected to pass
"""
logger.info("test_random_resized_crop_with_bbox_op_edge_c")
# Load dataset
dataVoc1 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
# maps to convert data into valid edge case data
dataVoc1 = dataVoc1.map(
operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype))],
input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
# Test Op added to list of Operations here
dataVoc2 = dataVoc2.map(
operations=[lambda img, bboxes: (img, np.array([[0, 0, img.shape[1], img.shape[0]]]).astype(bboxes.dtype)),
test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
unaugSamp, augSamp = [], []
for unAug, Aug in zip(dataVoc1.create_dict_iterator(num_epochs=1, output_numpy=True),
dataVoc2.create_dict_iterator(num_epochs=1, output_numpy=True)):
unaugSamp.append(unAug)
augSamp.append(Aug)
if plot_vis:
visualize_with_bounding_boxes(unaugSamp, augSamp)
def test_random_resized_crop_with_bbox_op_invalid_c():
"""
Tests RandomResizedCropWithBBox on invalid constructor parameters, expected to raise ValueError
"""
logger.info("test_random_resized_crop_with_bbox_op_invalid_c")
# Load dataset, only Augmented Dataset as test will raise ValueError
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
try:
# If input range of scale is not in the order of (min, max), ValueError will be raised.
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 0.5), (0.5, 0.5))
# map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
for _ in dataVoc2.create_dict_iterator(num_epochs=1):
break
except ValueError as err:
logger.info("Got an exception in DE: {}".format(str(err)))
assert "Input is not within the required interval of (0 to 16777216)." in str(err)
def test_random_resized_crop_with_bbox_op_invalid2_c():
"""
Tests RandomResizedCropWithBBox Op on invalid constructor parameters, expected to raise ValueError
"""
logger.info("test_random_resized_crop_with_bbox_op_invalid2_c")
# Load dataset # only loading the to AugDataset as test will fail on this
dataVoc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
try:
# If input range of ratio is not in the order of (min, max), ValueError will be raised.
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (1, 1), (1, 0.5))
# map to apply ops
dataVoc2 = dataVoc2.map(operations=[test_op], input_columns=["image", "bbox"],
output_columns=["image", "bbox"],
column_order=["image", "bbox"])
for _ in dataVoc2.create_dict_iterator(num_epochs=1):
break
except ValueError as err:
logger.info("Got an exception in DE: {}".format(str(err)))
assert "Input is not within the required interval of (0 to 16777216)." in str(err)
def test_random_resized_crop_with_bbox_op_bad_c():
"""
Test RandomCropWithBBox op with invalid bounding boxes, expected to catch multiple errors.
"""
logger.info("test_random_resized_crop_with_bbox_op_bad_c")
test_op = c_vision.RandomResizedCropWithBBox((256, 512), (0.5, 0.5), (0.5, 0.5))
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WidthOverflow, "bounding boxes is out of bounds of the image")
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.HeightOverflow, "bounding boxes is out of bounds of the image")
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.NegativeXY, "min_x")
data_voc2 = ds.VOCDataset(DATA_DIR_VOC, task="Detection", usage="train", shuffle=False, decode=True)
check_bad_bbox(data_voc2, test_op, InvalidBBoxType.WrongShape, "4 features")
if __name__ == "__main__":
test_random_resized_crop_with_bbox_op_c(plot_vis=False)
test_random_resized_crop_with_bbox_op_coco_c(plot_vis=False)
test_random_resized_crop_with_bbox_op_edge_c(plot_vis=False)
test_random_resized_crop_with_bbox_op_invalid_c()
test_random_resized_crop_with_bbox_op_invalid2_c()
test_random_resized_crop_with_bbox_op_bad_c()
| [
"[email protected]"
] | |
a582cff63bfa1208999424ac532f639d57e4946c | ce6fc44470dcb5fca78cdd3349a7be70d75f2e3a | /AtCoder/Grand 039/A.py | 2ec2212c13eebe7ef3a938d8513f35a3b69c6e01 | [] | no_license | cormackikkert/competitive-programming | f3fa287fcb74248ba218ecd763f8f6df31d57424 | 3a1200b8ff9b6941c422371961a127d7be8f2e00 | refs/heads/master | 2022-12-17T02:02:40.892608 | 2020-09-20T11:47:15 | 2020-09-20T11:47:15 | 266,775,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | S = input()
K = int(input())
import random
import string
def count(string):
total = 0
i = 1
while i < len(string):
if string[i-1] == string[i]:
total += 1
i += 1
i += 1
return total
if S == len(S) * S[0]:
res = (K * len(S)) // 2
elif S[0] == S[-1]:
new = S.strip(S[0])
start = len(S) - len(S.lstrip(S[0]))
end = len(S) - len(S.rstrip(S[0]))
res = start // 2 + end // 2 + K * count(new) + (K - 1) * ((start + end) // 2)
else:
res = K * count(S)
print(res)
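# Worked trace (illustrative): for S = "issii", K = 2 the first and last characters
# match, so new = "ss", start = 1, end = 2 and the program prints
# res = 1//2 + 2//2 + 2*count("ss") + 1*((1 + 2)//2) = 0 + 1 + 2 + 1 = 4.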
| [
"[email protected]"
] | |
2bea257ba29d7d3568169cd2499598f98eebd812 | 8255dcf7689c20283b5e75a452139e553b34ddf3 | /app/views/dashboard/items/index.py | b38bac919cd8357f3585cc1c835d0bce2c8762e2 | [
"MIT"
] | permissive | Wern-rm/raton.by | 09871eb4da628ff7b0d0b4415a150cf6c12c3e5a | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | refs/heads/main | 2023-05-06T02:26:58.980779 | 2021-05-25T14:09:47 | 2021-05-25T14:09:47 | 317,119,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,654 | py | from flask import render_template, redirect, url_for, request
from flask_login import login_required
from flask_paginate import get_page_args
from app import db, logger
from app.controllers.dashboard_controller import dashboard_controller
from app.controllers.pagination import get_pagination
from app.forms.dashboard_items import ItemsCategoryForm, ItemsForm
from app.models.items import Items
from app.models.items_category import ItemsCategory
from app.views.dashboard import bp
@bp.route('/items', methods=['GET', 'POST'])
@login_required
@dashboard_controller
def items(**kwargs):
category = db.session.query(ItemsCategory).order_by(ItemsCategory.id).all()
count = db.session.query(Items).count()
page, per_page, offset = get_page_args(page_parameter='page', per_page_parameter='per_page')
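    # The page hosts four forms; the hidden 'form-id' field of the POST data tells them
    # apart: '1' = create category, '2' = edit category, '3' = create item, '4' = edit item.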
form_create_category = ItemsCategoryForm()
if form_create_category.validate_on_submit() and request.form['form-id'] == '1':
try:
db.session.add(ItemsCategory(title=form_create_category.title.data))
db.session.commit()
return redirect(url_for('dashboard.items', action='success', id=21))
except Exception as e:
db.session.rollback()
logger.error(e)
return redirect(url_for('dashboard.items', action='error', id=7))
form_edit_category = ItemsCategoryForm()
if form_edit_category.validate_on_submit() and request.form['form-id'] == '2':
try:
category_id = int(request.form['category-id'])
found = db.session.query(ItemsCategory).filter(ItemsCategory.title == form_edit_category.title.data, ItemsCategory.id != category_id).first()
if not found:
db.session.query(ItemsCategory).filter(ItemsCategory.id == category_id).update({'title': form_edit_category.title.data})
db.session.commit()
return redirect(url_for('dashboard.items', action='success', id=22))
else:
return redirect(url_for('dashboard.items', action='error', id=8))
except Exception as e:
db.session.rollback()
logger.error(e)
return redirect(url_for('dashboard.items', action='error', id=9))
form_create_item = ItemsForm()
form_create_item.category_id.choices = [(i.id, i.title) for i in category]
if form_create_item.validate_on_submit() and request.form['form-id'] == '3':
try:
db.session.add(Items(title=form_create_item.title.data,
text=form_create_item.text.data,
category_id=form_create_item.category_id.data))
db.session.commit()
return redirect(url_for('dashboard.items', action='success', id=23))
except Exception as e:
db.session.rollback()
logger.error(e)
return redirect(url_for('dashboard.items', action='error', id=10))
form_edit_item = ItemsForm()
form_edit_item.category_id.choices = [(i.id, i.title) for i in category]
if form_edit_item.validate_on_submit() and request.form['form-id'] == '4':
try:
item_id = int(request.form['item-id'])
found = db.session.query(Items).filter(Items.title == form_edit_item.title.data, Items.id != item_id).first()
if not found:
db.session.query(Items).filter(Items.id == item_id).update({
'title': form_edit_item.title.data,
'category_id': form_edit_item.category_id.data,
'text': form_edit_item.text.data
})
db.session.commit()
return redirect(url_for('dashboard.items', action='success', id=7))
else:
return redirect(url_for('dashboard.items', action='error', id=12))
except Exception as e:
db.session.rollback()
logger.error(e)
return redirect(url_for('dashboard.items', action='error', id=11))
kwargs['title'] = 'Управление каталогом продукции'
kwargs['form_create_category'] = form_create_category
kwargs['form_edit_category'] = form_edit_category
kwargs['form_create_item'] = form_create_item
kwargs['form_edit_item'] = form_edit_item
kwargs['category'] = category
kwargs['items'] = db.session.query(Items).order_by(Items.id.desc()).limit(per_page).offset(offset).all()
kwargs['pagination'] = get_pagination(page=page, per_page=per_page, total=count, record_name='items', format_total=True, format_number=True)
return render_template("dashboard/items/index.html", **kwargs) | [
"[email protected]"
] | |
09e278b839107b839c504a7ee39854da665cd9f9 | 394072f7fd3e2a226aeed78bf0a4f587f4c4e383 | /lambdaExpr/pick_lambda.py | eb8c2306a372907e577c942ccd5c5b4e7827dcb3 | [] | no_license | LeonCrashCode/DRSparsing | ec5cca079a2c73eb512444e1ac86215722e6503a | c7e92beb8878ff2386bc6789e6c17f0d35bf1277 | refs/heads/master | 2020-03-16T09:52:11.217219 | 2019-01-17T14:20:16 | 2019-01-17T14:20:16 | 124,549,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | import sys
import types
import json
def ascii_encode_dict(data):
ascii_encode = lambda x: x.encode('ascii') if isinstance(x, unicode) else x
return dict(map(ascii_encode, pair) for pair in data.items())
def tostring(expre):
assert type(expre) == types.DictType
re = []
re.append(expre["type"]+"(")
if len(expre["indexs"]) != 0:
re.append("["+" ".join(expre["indexs"])+"]")
if expre["text"] != "":
re.append(expre["text"])
if len(expre["attrib"]) != 0:
for key in expre["attrib"].keys():
re.append(expre["attrib"][key])
re.append(")")
return " ".join(re)
L = []
for line in open(sys.argv[1]):
line = line.strip()
if line == "":
if L[1].split()[1] == sys.argv[2]:
print "\n".join(L[0:4])
#print tostring(json.loads(L[3], object_hook=ascii_encode_dict))
print "\n".join(L[4:6])
#print tostring(json.loads(L[5], object_hook=ascii_encode_dict))
print
L = []
else:
L.append(line)
| [
"[email protected]"
] | |
67b4fdb18932e728d619611ee41d81e11eb82f6e | 02c6b39399c1cfb434ad718c90bed3d8e6310ed0 | /symbolic/symbolic_interval/__init__.py | a05b1f606a6c61911dc1a5c02ffb66d08e5ade09 | [] | no_license | phate09/SafeDRL | 09b8924fa91aa43cf543ea5727ebe4cc8e13c0a5 | 3d4278eaaabb046a90fc1cebd1b5862d63dc5894 | refs/heads/master | 2022-09-17T05:12:28.529329 | 2022-08-29T08:21:32 | 2022-08-29T08:21:32 | 204,663,981 | 8 | 3 | null | 2021-12-02T14:13:46 | 2019-08-27T09:07:04 | Python | UTF-8 | Python | false | false | 97 | py | from .interval import Interval, Symbolic_interval
from .symbolic_network import Interval_network
| [
"[email protected]"
] | |
8ac0480670678ce2f641aae18ee7719838e5f722 | d30c6d691a34fc9181fb71e9712b9505384422ec | /数字,日期和时间/分数的计算_P96.py | be37c7b9b724074146f45662cb34e480751597bf | [] | no_license | shishengjia/PythonDemos | cef474eb01ee9541ba0c70fc0750ee48a025f42f | c0a857b1cacdbb2b6b727a84f95f93b6e86d60c2 | refs/heads/master | 2021-01-01T16:15:19.593635 | 2017-10-26T07:18:46 | 2017-10-26T07:18:46 | 97,797,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | """
The fractions module
"""
from fractions import Fraction
a = Fraction(1, 2)
b = Fraction(3, 4)
print(a + b) # 5/4
c = a + b
print(c.numerator) # 5 (the numerator)
print(c.denominator) # 4 (the denominator)
print(float(c)) # 1.25 (converted to a float)
print(c.limit_denominator(8))
x = 0.625
print(Fraction(*x.as_integer_ratio())) # 5/8 (a decimal converted to a fraction)
| [
"[email protected]"
] | |
0ce4d0fdfbcbdcb08fd020f84fdb01abca1796f9 | 42685605f569e9d0afadc358ace6ce212e86bf1c | /1_Zadania/Dzien_3/5_Virtualenv_biblioteki/ddd.py | 63411dd73ff18565b3727122a9f9e1681374153c | [] | no_license | Danutelka/Coderslab-Podstawy-Python | e6bdbbd9dc2031cf9ec5d9d3eeba717d22e9ecd7 | eed6a957f081e488ae3c94298718f7b93e17a93c | refs/heads/master | 2020-08-04T15:21:28.058433 | 2019-04-07T19:24:54 | 2019-04-07T19:24:54 | 212,181,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | import requests
r = requests.get("http://onet.pl")
print(r)
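# Printing the Response object only shows something like "<Response [200]>";
# r.status_code and r.text expose the numeric code and the body if they are needed.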
| [
"[email protected]"
] | |
8dff565e4a3145736f3847daf00e0a91801c7e21 | a4275e529b564c3ec5c084fb360c2f4207068477 | /src/montague/validation.py | 9a20e422a8c17d2122978e27db86e67d8d0db92f | [
"MIT"
] | permissive | rmoorman/montague | aacc11837016400e37b69e18b2461a3246c2052c | 423a2a5a773e975fa27f7b61627cc706fb084984 | refs/heads/master | 2020-12-29T01:41:17.262828 | 2015-06-18T00:46:45 | 2015-06-18T00:46:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,702 | py | from __future__ import absolute_import
import collections
import types
def validate_montague_standard_format(config):
for key in ('globals', 'application', 'composite', 'filter', 'server', 'logging'):
assert key in config
assert isinstance(config[key], collections.Mapping)
def validate_config_loader_methods(config_loader):
assert hasattr(config_loader, 'config')
assert isinstance(config_loader.config, types.MethodType)
specific_methods_required = False
try:
result = config_loader.config()
validate_montague_standard_format(result)
except NotImplementedError:
# config loaders can fail to implement config() as long as they implement the other methods
specific_methods_required = True
for method in ('app_config', 'filter_config', 'server_config', 'logging_config'):
if specific_methods_required:
# If you don't implement .config(), you have to implement these
assert hasattr(config_loader, method)
assert isinstance(getattr(config_loader, method), types.MethodType)
# We don't know the names of actual apps/filters/etc to load, but we do know
# the loader shouldn't raise NotImplementedError if it has actually implemented them,
# so we can try that.
try:
getattr(config_loader, method)('__bogus__')
except NotImplementedError:
if specific_methods_required:
raise
except Exception:
# any other exception is fine here, because we don't know exactly what happens
# with a bogus name. usually KeyError, but maybe something else would be raised
pass
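# Minimal sketch (illustrative, not part of this module) of a config loader whose
# instances pass validate_config_loader_methods by implementing config(); the class
# name is hypothetical, and the per-section methods may raise NotImplementedError
# because config() is implemented.
class _ExampleConfigLoader(object):
    def config(self):
        return {key: {} for key in
                ('globals', 'application', 'composite', 'filter', 'server', 'logging')}

    def app_config(self, name):
        raise NotImplementedError

    def filter_config(self, name):
        raise NotImplementedError

    def server_config(self, name):
        raise NotImplementedError

    def logging_config(self, name):
        raise NotImplementedError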
| [
"[email protected]"
] | |
a211852f23f82de502629246d40e8e38a13b64de | 96fe253e9a740b51dcd7f83d6ab01bb248c2bf4b | /patrones_arquitectura/DDD/value_object/prueba_cuenta_bancaria.py | 013e69c83e8ab92979a1390c08af8ed518910598 | [] | no_license | vvalotto/Patrones_Disenio_Python | 7574470752a5f14214434a927c2c5e0faaa592ba | 7ab6a74e9b008c3434af0a56d4c2b6b7de3617bf | refs/heads/master | 2021-04-28T19:16:21.535998 | 2018-10-21T14:05:36 | 2018-10-21T14:05:36 | 121,891,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from DDD.value_object.cuenta_bancaria import *
mi_dinero = Dinero(100, Moneda.Pesos)
print(mi_dinero.moneda)
print(mi_dinero.monto)
mi_cuenta = CuentaBancaria(1, mi_dinero)
print(mi_cuenta.balance.monto)
print(mi_cuenta.balance.moneda) | [
"[email protected]"
] | |
a1b6fb392741e41deadbff3e6f9ad7e1f8c4f790 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/63/usersdata/158/31530/submittedfiles/swamee.py | 2c5e1ac2906cab6729e807c9e54f3c38de172e65 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | # -*- coding: utf-8 -*-
import math
f=float(input('digite f:'))
L=float(input('digite L:'))
Q=float(input('digite Q:'))
deltaH=float(input('digite deltaH:'))
v=float(input('digite v:'))
g=9.81
E=0.000002
D=((8*f*L*(Q)**2)/((math.pi)**2*g*deltaH))**(1.0/5.0)
print('D é:%.4f'%D)
Rey=(4*Q)/(math.pi*D*v)
print('Rey é:%.4f'%Rey)
K=0.25/(math.log10((E/(3.7*D))+(5.74)/(Rey**0.9)))**2
print('K é:%.4f'%K) | [
"[email protected]"
] | |
185d7e4291d29d014020b6b40ebfb2d8398b5f8c | cf444d07d8056416dfba34e73bba128567b7c692 | /readandloadperfpadbenchasof.py | fd9df58a4e0df4d295b906d84dace758c8374d5d | [] | no_license | rtstock/scriptsvapp01 | cf9e993e5253e9a60dc191cca5e34532fa559ee1 | 7c2db888f0dcd92de62c031f9867e1c5cb4cbc0e | refs/heads/master | 2021-01-23T00:29:11.267852 | 2017-03-21T19:24:34 | 2017-03-21T19:24:34 | 85,737,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,756 | py | import ftplib
class perform:
#def __init__(self):
# print 'class initialized...'
# #self.DataFilePathName = []
# #self.BuildFilepaths()
def __init__(self,p_datafilepathname):
print 'initialized readandloadperfpadbenchasof.py'
self.DataFilePathName = p_datafilepathname
self.ReadAndLoad()
print 'exiting readandloadperfpadbenchasof.py'
def set_DataFilePathName(self,DataFilePathName):
self._DataFilePathName = DataFilePathName
def get_DataFilePathName(self):
return self._DataFilePathName
DataFilePathName = property(get_DataFilePathName, set_DataFilePathName)
def set_Results(self,Results):
self._Results = Results
def get_Results(self):
return self._Results
Results = property(get_Results, set_Results)
def xstr(self,s):
try:
return '' if s is None else str(s)
except:
return ''
def ReadAndLoad(self):
procresults = {}
try:
my_datafilepathname = self.DataFilePathName
# get and format the modified date
import os.path, time
print 'got here !', my_datafilepathname
filedatetime = os.path.getmtime(my_datafilepathname)
from datetime import datetime
filedatetime_forsql = datetime.fromtimestamp(filedatetime).strftime('%Y-%m-%d %H:%M:%S')
import bs4, sys
with open(my_datafilepathname, 'r') as f:
webpage = f.read().decode('utf-8')
soup = bs4.BeautifulSoup(webpage, "lxml")
market_index_ext = ''
fieldnames = {}
is_dataline = 0
total_deleted = 0
total_inserted = 0
for node in soup.find_all('th', attrs={}): #'style':'display: table-header-group; mso-number-format:\@;'
if node.attrs['class'][0] in ['HeaderCellNumeric','HeaderCellString']:
fieldnames[len(fieldnames)] = node.string
#print node.string
for nodeA in soup.find_all('tr', attrs={}):
print '-----------------------'
is_dataline = 0
fieldvalues = {}
for nodeB in nodeA.find_all('td', attrs={}):
#print 'got here!!'
#print nodeB.attrs['class'][0]
if nodeB.attrs['class'][0] in ['DataCellNumeric','DataCellString']:
#print 'got here!!!'
if fieldnames[len(fieldvalues)] == 'market_index_ext':
is_dataline = 1
market_index_ext = nodeB.string
#print market_index_ext, fieldnames[len(fieldvalues)],'=', nodeB.string
#print fieldnames[len(fieldvalues)]
#print ' ',nodeB.string
fieldvalues[fieldnames[len(fieldvalues)]] = nodeB.string
print 'got here xxxxxx'
if is_dataline == 1:
#print 'got here !@'
fieldnames_string = ''
fieldvalues_string = ''
for k,v in fieldvalues.items():
#print 'fieldvalues:',k, v
if v == None:
goodvaluestring = ''
else:
goodvaluestring = v
print 'fieldvalues:',k, goodvaluestring
fieldnames_string = fieldnames_string + k + ','
fieldvalues_string = fieldvalues_string + "'" + goodvaluestring + "',"
fieldnames_string = fieldnames_string[:-1]
fieldvalues_string = fieldvalues_string[:-1]
print 'fieldnames_string....................'
print fieldnames_string
print 'fieldvalues_string.............................'
print fieldvalues_string
print market_index_ext
#print fieldvalues[fieldnames[0]],fieldvalues[fieldnames[1]],fieldvalues[fieldnames[2]]
import pyodbc
cnxn = pyodbc.connect(r'DRIVER={SQL Server};SERVER=ipc-vsql01;DATABASE=DataAgg;Trusted_Connection=True;')
cursor = cnxn.cursor()
#print 'got here !@'
#sql_delete = "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext']
#print sql_delete
cursor.execute( "delete from dbo.xanalysisofbenchmarks_padbenchasof_imported where market_node_last_invested_date = ? and market_index_ext = ?", fieldvalues['market_node_last_invested_date'],fieldvalues['market_index_ext'] )
total_deleted = total_deleted + cursor.rowcount
print ' ',cursor.rowcount, 'records deleted'
cnxn.commit()
insert_sql = "insert into xanalysisofbenchmarks_padbenchasof_imported("+fieldnames_string+") values ("+fieldvalues_string+")"
#print insert_sql
cursor.execute(insert_sql)
procresults['records inserted'] = cursor.rowcount
total_inserted = total_inserted + cursor.rowcount
print ' ',cursor.rowcount, 'records inserted'
cnxn.commit()
procresults['resultvalue1'] = 'success'
procresults['total_deleted'] = total_deleted
procresults['total_inserted'] = total_inserted
except Exception,e:
print type(e)
print 'there was an error on ' + self.DataFilePathName
self.Results = procresults
if __name__=='__main__':
print 'running ___name___'
myDataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPBAsOf_2016-11-30 132015270.xls"
o = perform(myDataFilePathName)
#o.DataFilePathName = r"//Ipc-vsql01/data/Batches/prod/WatchFolder/incoming/PagesOutput_GetPadPortBenchAsOf_20161124_ADAKAT.xls"
#o.ReadAndLoad()
print o.Results
| [
"[email protected]"
] | |
912732738030d84355aa57768facc7293bf43a88 | 187a6558f3c7cb6234164677a2bda2e73c26eaaf | /jdcloud_sdk/services/clouddnsservice/models/HostRRlb.py | a70c1c23618e54df92e3c0396719c63ee09311d7 | [
"Apache-2.0"
] | permissive | jdcloud-api/jdcloud-sdk-python | 4d2db584acc2620b7a866af82d21658cdd7cc227 | 3d1c50ed9117304d3b77a21babe899f939ae91cd | refs/heads/master | 2023-09-04T02:51:08.335168 | 2023-08-30T12:00:25 | 2023-08-30T12:00:25 | 126,276,169 | 18 | 36 | Apache-2.0 | 2023-09-07T06:54:49 | 2018-03-22T03:47:02 | Python | UTF-8 | Python | false | false | 1,141 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class HostRRlb(object):
def __init__(self, hostValue=None, id=None, weight=None, rate=None):
"""
        :param hostValue: (Optional) Value of the resolution record
        :param id: (Optional) ID of the resolution record
        :param weight: (Optional) Weight of the resolution record
        :param rate: (Optional) 100 times this record's share of the total load balancing
"""
self.hostValue = hostValue
self.id = id
self.weight = weight
self.rate = rate
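# Example (illustrative, values are made up): a weighted round-robin record entry.
#   rr = HostRRlb(hostValue='192.0.2.10', id=1, weight=10, rate=50)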
| [
"[email protected]"
] | |
987b92e6df6ed00e5ca4fb6ce748e467405a8347 | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2019_03_22/aio/operations/_certificates_operations.py | 4fdc763e3e5033ecae73c8337024f88ac2e80684 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 23,167 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
"""CertificatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list_by_iot_hub(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "models.CertificateListDescription":
"""Get the certificate list.
Returns the list of certificates.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateListDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.CertificateListDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CertificateListDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
accept = "application/json"
# Construct URL
url = self.list_by_iot_hub.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CertificateListDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_iot_hub.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
certificate_name: str,
**kwargs
) -> "models.CertificateDescription":
"""Get the certificate.
Returns the certificate.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.CertificateDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CertificateDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CertificateDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'} # type: ignore
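    # Illustrative usage sketch (not part of the generated code): these async operations
    # are normally reached through the service client; the client construction below is
    # an assumption and the resource names are placeholders.
    #
    #   credential = DefaultAzureCredential()
    #   client = IotHubClient(credential, subscription_id)
    #   cert = await client.certificates.get("my-rg", "my-hub", "my-cert")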
async def create_or_update(
self,
resource_group_name: str,
resource_name: str,
certificate_name: str,
certificate_description: "models.CertificateBodyDescription",
if_match: Optional[str] = None,
**kwargs
) -> "models.CertificateDescription":
"""Upload the certificate to the IoT hub.
Adds new or replaces existing certificate.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param certificate_description: The certificate body.
:type certificate_description: ~azure.mgmt.iothub.models.CertificateBodyDescription
:param if_match: ETag of the Certificate. Do not specify for creating a brand new certificate.
Required to update an existing certificate.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.CertificateDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CertificateDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_description, 'CertificateBodyDescription')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('CertificateDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('CertificateDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'} # type: ignore
async def delete(
self,
resource_group_name: str,
resource_name: str,
certificate_name: str,
if_match: str,
**kwargs
) -> None:
"""Delete an X509 certificate.
Deletes an existing X509 certificate or does nothing if it does not exist.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}'} # type: ignore
async def generate_verification_code(
self,
resource_group_name: str,
resource_name: str,
certificate_name: str,
if_match: str,
**kwargs
) -> "models.CertificateWithNonceDescription":
"""Generate verification code for proof of possession flow.
Generates verification code for proof of possession flow. The verification code will be used to
generate a leaf certificate.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateWithNonceDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.CertificateWithNonceDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CertificateWithNonceDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
accept = "application/json"
# Construct URL
url = self.generate_verification_code.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CertificateWithNonceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
generate_verification_code.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/generateVerificationCode'} # type: ignore
async def verify(
self,
resource_group_name: str,
resource_name: str,
certificate_name: str,
if_match: str,
certificate_verification_body: "models.CertificateVerificationDescription",
**kwargs
) -> "models.CertificateDescription":
"""Verify certificate's private key possession.
Verifies the certificate's private key possession by providing the leaf cert issued by the
verifying pre uploaded certificate.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param if_match: ETag of the Certificate.
:type if_match: str
        :param certificate_verification_body: The leaf certificate, issued using the verification code, that proves possession of the uploaded certificate's private key.
:type certificate_verification_body: ~azure.mgmt.iothub.models.CertificateVerificationDescription
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.models.CertificateDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CertificateDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-03-22"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.verify.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'certificateName': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[A-Za-z0-9-._]{1,64}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_verification_body, 'CertificateVerificationDescription')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CertificateDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
verify.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/certificates/{certificateName}/verify'} # type: ignore
| [
"[email protected]"
] | |
a1f08199e9d65120982277c5b73430048437c363 | e751c59ca3c98c8f6a98b7c6fc7167fe615aa1b0 | /streamz/orderedweakset.py | 82136ecdea1138314b8cd2277154f13f468712af | [
"BSD-3-Clause"
] | permissive | yutiansut/streamz | a10e0d2beefd450b5d19cb7d78b4c8a333ebcd48 | e51f0397d27957f8b3bfc78ecdb946cbfbac21b6 | refs/heads/master | 2020-07-10T15:23:35.567092 | 2019-12-24T07:07:43 | 2019-12-24T07:07:43 | 204,297,562 | 1 | 0 | NOASSERTION | 2019-12-24T07:07:44 | 2019-08-25T13:24:35 | Python | UTF-8 | Python | false | false | 977 | py | # -*- coding: utf8 -*-
# This is a copy from Stack Overflow
# https://stackoverflow.com/questions/7828444/indexable-weak-ordered-set-in-python
# Asked by Neil G https://stackoverflow.com/users/99989/neil-g
# Answered/edited by https://stackoverflow.com/users/1001643/raymond-hettinger
import collections
import collections.abc  # MutableSet lives here; the plain collections alias was removed in Python 3.10+
import weakref
class OrderedSet(collections.abc.MutableSet):
def __init__(self, values=()):
self._od = collections.OrderedDict().fromkeys(values)
def __len__(self):
return len(self._od)
def __iter__(self):
return iter(self._od)
def __contains__(self, value):
return value in self._od
def add(self, value):
self._od[value] = None
def discard(self, value):
self._od.pop(value, None)
class OrderedWeakrefSet(weakref.WeakSet):
def __init__(self, values=()):
super(OrderedWeakrefSet, self).__init__()
self.data = OrderedSet()
for elem in values:
self.add(elem)
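# Illustrative usage sketch (not part of the upstream streamz module; added for
# clarity). It assumes CPython reference counting, so dropping the last strong
# reference removes the entry from the set immediately.
if __name__ == "__main__":
    class Member(object):
        pass
    a, b = Member(), Member()
    s = OrderedWeakrefSet([a, b])
    assert list(s) == [a, b]   # insertion order is preserved, unlike a plain WeakSet
    del b                      # only weak references remain, so the entry disappears
    assert list(s) == [a]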
| [
"[email protected]"
] | |
afe7b68eebc859166be1c5e13503095b75df042c | 3527ff6346f98a5b7c51ce3c58428227f4bc8617 | /acwing/800.py | 3e10fbf147a1197999a55e116c697baa1c94510e | [] | no_license | ShawnDong98/Algorithm-Book | 48e2c1158d6e54d4652b0791749ba05a4b85f96d | f350b3d6e59fd5771e11ec0b466f9ba5eeb8e927 | refs/heads/master | 2022-07-17T04:09:39.559310 | 2022-07-13T15:46:37 | 2022-07-13T15:46:37 | 242,317,482 | 0 | 0 | null | 2020-10-11T14:50:48 | 2020-02-22T09:53:41 | C++ | UTF-8 | Python | false | false | 277 | py | n, m, x = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
i = 0
j = m - 1
while i < n:
while j >= 0 and A[i] + B[j] > x:
j -= 1
if j >= 0 and A[i] + B[j] == x:
print(f'{i} {j}')
break
i += 1
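# Illustrative self-check of the same two-pointer idea (the arrays and target
# below are made up, not part of the judged input): because both arrays are
# sorted ascending, i only moves right and j only moves left, so the scan is
# O(n + m). The helper is a local sketch, not part of the original submission.
def _find_pair(a, b, target):
    j = len(b) - 1
    for i in range(len(a)):
        while j >= 0 and a[i] + b[j] > target:
            j -= 1
        if j >= 0 and a[i] + b[j] == target:
            return i, j
    return None
assert _find_pair([1, 3, 4, 5], [2, 4, 6, 7], 6) == (2, 0)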
| [
"[email protected]"
] | |
5d9d6c025131f2a3f97852b760a240950735157f | 46eb22dbe3514fc586ca57dd5e9a2ef1e486ed7e | /src/lanyon/parser.py | 5e43a9d5db751938ac34fd688b085ebb43fc5d44 | [
"BSD-3-Clause"
] | permissive | arthurk/lanyon | 06d37d2e96358a190f8926a65f9f8df00db09393 | d4c319cc5659c50aa3e356cbf74e976ba82acc4b | refs/heads/master | 2021-01-17T07:41:28.518803 | 2011-04-16T11:37:06 | 2011-04-16T11:37:06 | 804,624 | 5 | 2 | null | 2016-06-06T13:33:41 | 2010-07-29T06:45:49 | Python | UTF-8 | Python | false | false | 6,056 | py | import datetime
import re
from os.path import splitext
class ParserException(Exception):
"""Exception raised for errors during the parsing."""
pass
class Parser(object):
output_ext = None
def __init__(self, settings, source):
self.settings = settings
self.source = source
self.headers = {}
self.text = ''
def _parse_headers(self):
"""
Parses and removes the headers from the source.
"""
META_RE = re.compile(
r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
lines = self.source.splitlines()
for num, line in enumerate(lines):
match = META_RE.match(line)
if match:
key = match.group('key').strip().lower()
value = match.group('value').strip()
if value:
# custom header transformation
header_method = getattr(self, '_parse_%s_header' % key,
None)
if header_method:
value = header_method(value)
self.headers[key] = value
num_last_match = num
else:
break
# remove header lines from input source
try:
del lines[:num_last_match + 1]
except UnboundLocalError:
pass
# check if a blank line followed the header lines and remove it
try:
if not lines[0]:
del lines[0]
except IndexError:
pass
self.text = '\n'.join(lines)
def _parse_tags_header(self, value):
"""
Parses the value from the 'tags' header into a list.
"""
return [t.strip() for t in value.split(',')]
def _parse_date_header(self, value):
"""
Parses the date header into a python datetime.datetime object.
The value must be in the format as specified by the 'date_format'
setting; otherwise a ParserException will be thrown.
"""
format = self.settings['date_format']
try:
return datetime.datetime.strptime(value, format)
except ValueError as error:
raise ParserException(error)
def _parse_updated_header(self, value):
return self._parse_date_header(value)
def _parse_status_header(self, value):
"""
Checks that the value of the 'status' header is 'live', 'hidden' or
'draft'. If not 'live' is returned.
"""
if value in ('live', 'hidden', 'draft'):
return value
return 'live'
def _parse_text(self):
"""
Returns the raw input text. Override this method to process
text in another markup language such as Markdown.
"""
return self.text
def parse(self):
self._parse_headers()
self._parse_text()
return (self.headers, self.text)
class RstParser(Parser):
"""ReStructuredText Parser"""
output_ext = 'html'
def pygments_directive(self, name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""
Parse sourcecode using Pygments
From http://bitbucket.org/birkenfeld/pygments-main/src/tip/external/rst-directive-old.py
"""
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name, TextLexer
from docutils import nodes
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
        # emit class-based highlighting so the site's Pygments CSS can style the output
formatter = HtmlFormatter(noclasses=False)
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
def _parse_text(self):
try:
from docutils.core import publish_parts
from docutils.parsers.rst import directives
except ImportError:
raise Exception("The Python docutils library isn't installed. " +
"Install with `pip install docutils`")
else:
# if pygments is installed, register the "sourcecode" directive
try:
import pygments
except ImportError:
pass
else:
directives.register_directive('sourcecode',
self.pygments_directive)
self.text = publish_parts(source=self.text,
settings_overrides={
"doctitle_xform": False,
"initial_header_level": 2
},
writer_name='html4css1')['fragment']
class MarkdownParser(Parser):
"""Markdown Parser"""
output_ext = 'html'
def _parse_text(self):
try:
import markdown
except ImportError:
raise Exception("The Python markdown library isn't installed. " +
"Install with `pip install markdown`")
else:
self.text = markdown.markdown(self.text,
['codehilite(css_class=highlight)'])
# a mapping of file extensions to the corresponding parser class
parser_map = (
(('html', 'htm', 'xml', 'txt'), Parser),
(('rst',), RstParser),
(('md', 'markdown'), MarkdownParser),
)
def get_parser_for_filename(filename):
"""
Factory function returning a parser class based on the file extension.
"""
ext = splitext(filename)[1][1:]
try:
return [pair[1] for pair in parser_map if ext in pair[0]][0]
except IndexError:
return
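# Illustrative usage sketch (hypothetical settings and source text; real values
# come from the site generator's configuration and content files).
if __name__ == '__main__':
    source = "title: Hello\ndate: 2011-04-16\nstatus: live\n\nraw body text"
    parser_cls = get_parser_for_filename('hello.txt')  # plain-text Parser, no extra deps
    headers, body = parser_cls({'date_format': '%Y-%m-%d'}, source).parse()
    print(headers)  # {'title': 'Hello', 'date': datetime.datetime(2011, 4, 16, 0, 0), 'status': 'live'}
    print(body)     # raw body text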
| [
"[email protected]"
] | |
ae5543276e6ec4f6dc0823885d5ba0a303c5e818 | cfc3fa658f826d02308453e557d82758895399c2 | /datasets/covid_qa_deepset/covid_qa_deepset.py | d43c1e5924c54b5a73b245220e1e6d2c37d225e1 | [
"Apache-2.0"
] | permissive | meehawk/datasets | cac530ec0e17514c01cdff30302521d6303ed93b | b70141e3c5149430951773aaa0155555c5fb3e76 | refs/heads/master | 2023-03-29T12:51:54.700891 | 2021-04-08T17:22:53 | 2021-04-08T17:22:53 | 355,996,122 | 9 | 0 | Apache-2.0 | 2021-04-08T17:31:03 | 2021-04-08T17:31:02 | null | UTF-8 | Python | false | false | 4,607 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""COVID-QA: A Question Answering Dataset for COVID-19."""
from __future__ import absolute_import, division, print_function
import json
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{moller2020covid,
title={COVID-QA: A Question Answering Dataset for COVID-19},
author={M{\"o}ller, Timo and Reina, Anthony and Jayakumar, Raghavan and Pietsch, Malte},
booktitle={Proceedings of the 1st Workshop on NLP for COVID-19 at ACL 2020},
year={2020}
}
"""
# You can copy an official description
_DESCRIPTION = """\
COVID-QA is a Question Answering dataset consisting of 2,019 question/answer pairs annotated by volunteer biomedical \
experts on scientific articles related to COVID-19.
"""
_HOMEPAGE = "https://github.com/deepset-ai/COVID-QA"
_LICENSE = "Apache License 2.0"
_URL = "https://raw.githubusercontent.com/deepset-ai/COVID-QA/master/data/question-answering/"
_URLs = {"covid_qa_deepset": _URL + "COVID-QA.json"}
class CovidQADeepset(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="covid_qa_deepset", version=VERSION, description="COVID-QA deepset"),
]
def _info(self):
features = datasets.Features(
{
"document_id": datasets.Value("int32"),
"context": datasets.Value("string"),
"question": datasets.Value("string"),
"is_impossible": datasets.Value("bool"),
"id": datasets.Value("int32"),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string"),
"answer_start": datasets.Value("int32"),
}
),
}
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
url = _URLs[self.config.name]
downloaded_filepath = dl_manager.download_and_extract(url)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"filepath": downloaded_filepath},
),
]
def _generate_examples(self, filepath):
"""This function returns the examples in the raw (text) form."""
logger.info("generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
covid_qa = json.load(f)
for article in covid_qa["data"]:
for paragraph in article["paragraphs"]:
context = paragraph["context"].strip()
document_id = paragraph["document_id"]
for qa in paragraph["qas"]:
question = qa["question"].strip()
is_impossible = qa["is_impossible"]
id_ = qa["id"]
answer_starts = [answer["answer_start"] for answer in qa["answers"]]
answers = [answer["text"].strip() for answer in qa["answers"]]
# Features currently used are "context", "question", and "answers".
# Others are extracted here for the ease of future expansions.
yield id_, {
"document_id": document_id,
"context": context,
"question": question,
"is_impossible": is_impossible,
"id": id_,
"answers": {
"answer_start": answer_starts,
"text": answers,
},
}
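if __name__ == "__main__":
    # Illustrative smoke test (assumes the `datasets` library is installed and
    # that _URL above is reachable; a local loading script is typically passed
    # to load_dataset by its file path like this).
    from datasets import load_dataset
    covid_qa = load_dataset(__file__, split="train")
    print(covid_qa[0]["question"])
    print(covid_qa[0]["answers"]["text"])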
| [
"[email protected]"
] | |
64a3215ec3906affa3702053fa372ce9684ba680 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Windows/Media/Animation_parts/Timeline.py | fc7d6ac0df9229690da3777f143d98381abde625 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 14,492 | py | class Timeline(Animatable,ISealable,IAnimatable,IResource):
""" Defines a segment of time. """
def AllocateClock(self,*args):
"""
AllocateClock(self: Timeline) -> Clock
Creates a System.Windows.Media.Animation.Clock for this System.Windows.Media.Animation.Timeline.
Returns: A clock for this System.Windows.Media.Animation.Timeline.
"""
pass
def Clone(self):
"""
Clone(self: Timeline) -> Timeline
Creates a modifiable clone of this System.Windows.Media.Animation.Timeline,making deep copies
of this object's values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property is false even if the source's System.Windows.Freezable.IsFrozen property is true.
"""
pass
def CloneCore(self,*args):
"""
CloneCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a clone (deep copy) of the specified System.Windows.Freezable using base
(non-animated) property values.
sourceFreezable: The object to clone.
"""
pass
def CloneCurrentValue(self):
"""
CloneCurrentValue(self: Timeline) -> Timeline
Creates a modifiable clone of this System.Windows.Media.Animation.Timeline object,making deep
copies of this object's current values.
Returns: A modifiable clone of the current object. The cloned object's System.Windows.Freezable.IsFrozen
property is false even if the source's System.Windows.Freezable.IsFrozen property is true.
"""
pass
def CloneCurrentValueCore(self,*args):
"""
CloneCurrentValueCore(self: Freezable,sourceFreezable: Freezable)
Makes the instance a modifiable clone (deep copy) of the specified System.Windows.Freezable
using current property values.
sourceFreezable: The System.Windows.Freezable to be cloned.
"""
pass
def CreateClock(self,hasControllableRoot=None):
"""
CreateClock(self: Timeline,hasControllableRoot: bool) -> Clock
Creates a new System.Windows.Media.Animation.Clock from this
System.Windows.Media.Animation.Timeline and specifies whether the new
System.Windows.Media.Animation.Clock is controllable. If this
System.Windows.Media.Animation.Timeline has children,a tree of clocks is created with this
System.Windows.Media.Animation.Timeline as the root.
hasControllableRoot: true if the root System.Windows.Media.Animation.Clock returned should return a
System.Windows.Media.Animation.ClockController from its
System.Windows.Media.Animation.Clock.Controller property so that the
System.Windows.Media.Animation.Clock tree can be interactively controlled; otherwise,false.
Returns: A new System.Windows.Media.Animation.Clock constructed from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline is a
System.Windows.Media.Animation.TimelineGroup that contains child timelines,a tree of
System.Windows.Media.Animation.Clock objects is created with a controllable
System.Windows.Media.Animation.Clock created from this System.Windows.Media.Animation.Timeline
as the root.
CreateClock(self: Timeline) -> Clock
Creates a new,controllable System.Windows.Media.Animation.Clock from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline has
children,a tree of clocks is created with this System.Windows.Media.Animation.Timeline as the
root.
Returns: A new,controllable System.Windows.Media.Animation.Clock constructed from this
System.Windows.Media.Animation.Timeline. If this System.Windows.Media.Animation.Timeline is a
System.Windows.Media.Animation.TimelineGroup that contains child timelines,a tree of
System.Windows.Media.Animation.Clock objects is created with a controllable
System.Windows.Media.Animation.Clock created from this System.Windows.Media.Animation.Timeline
as the root.
"""
pass
def CreateInstance(self,*args):
"""
CreateInstance(self: Freezable) -> Freezable
Initializes a new instance of the System.Windows.Freezable class.
Returns: The new instance.
"""
pass
def CreateInstanceCore(self,*args):
"""
CreateInstanceCore(self: Freezable) -> Freezable
When implemented in a derived class,creates a new instance of the System.Windows.Freezable
derived class.
Returns: The new instance.
"""
pass
def FreezeCore(self,*args):
"""
FreezeCore(self: Timeline,isChecking: bool) -> bool
Makes this System.Windows.Media.Animation.Timeline unmodifiable or determines whether it can be
made unmodifiable.
isChecking: true to check if this instance can be frozen; false to freeze this instance.
Returns: If isChecking is true,this method returns true if this instance can be made read-only,or false
if it cannot be made read-only. If isChecking is false,this method returns true if this
instance is now read-only,or false if it cannot be made read-only,with the side effect of
having begun to change the frozen status of this object.
"""
pass
def GetAsFrozenCore(self,*args):
"""
GetAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a clone of the specified System.Windows.Media.Animation.Timeline object.
sourceFreezable: The System.Windows.Media.Animation.Timeline instance to clone.
"""
pass
def GetCurrentValueAsFrozenCore(self,*args):
"""
GetCurrentValueAsFrozenCore(self: Timeline,sourceFreezable: Freezable)
Makes this instance a frozen clone of the specified System.Windows.Media.Animation.Timeline.
Resource references,data bindings,and animations are not copied,but their current values are.
sourceFreezable: The System.Windows.Media.Animation.Timeline to copy and freeze.
"""
pass
@staticmethod
def GetDesiredFrameRate(timeline):
"""
GetDesiredFrameRate(timeline: Timeline) -> Nullable[int]
Gets the desired frame rate of the specified System.Windows.Media.Animation.Timeline.
timeline: The timeline from which to retrieve the desired frame rate.
Returns: The desired frame rate of this timeline. The default value is null.
"""
pass
def GetNaturalDuration(self,*args):
"""
GetNaturalDuration(self: Timeline,clock: Clock) -> Duration
Returns the length of a single iteration of this System.Windows.Media.Animation.Timeline.
clock: The System.Windows.Media.Animation.Clock that was created for this
System.Windows.Media.Animation.Timeline.
Returns: The length of a single iteration of this System.Windows.Media.Animation.Timeline,or
System.Windows.Duration.Automatic if the natural duration is unknown.
"""
pass
def GetNaturalDurationCore(self,*args):
"""
GetNaturalDurationCore(self: Timeline,clock: Clock) -> Duration
Returns the length of a single iteration of this System.Windows.Media.Animation.Timeline. This
method provides the implementation for
System.Windows.Media.Animation.Timeline.GetNaturalDuration(System.Windows.Media.Animation.Clock).
clock: The System.Windows.Media.Animation.Clock that was created for this
System.Windows.Media.Animation.Timeline.
Returns: The length of a single iteration of this System.Windows.Media.Animation.Timeline,or
System.Windows.Duration.Automatic if the natural duration is unknown.
"""
pass
def OnChanged(self,*args):
"""
OnChanged(self: Freezable)
Called when the current System.Windows.Freezable object is modified.
"""
pass
def OnFreezablePropertyChanged(self,*args):
"""
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
intended to be used directly from your code.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
property: The property that changed.
OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
Ensures that appropriate context pointers are established for a
System.Windows.DependencyObjectType data member that has just been set.
oldValue: The previous value of the data member.
newValue: The current value of the data member.
"""
pass
def OnPropertyChanged(self,*args):
"""
OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
Overrides the System.Windows.DependencyObject implementation of
System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
dependency property of type System.Windows.Freezable.
e: Event data that contains information about which property changed,and its old and new values.
"""
pass
def ReadPreamble(self,*args):
"""
ReadPreamble(self: Freezable)
Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
System.Windows.Freezable must call this method at the beginning of any API that reads data
members that are not dependency properties.
"""
pass
@staticmethod
def SetDesiredFrameRate(timeline,desiredFrameRate):
""" SetDesiredFrameRate(timeline: Timeline,desiredFrameRate: Nullable[int]) """
pass
def ShouldSerializeProperty(self,*args):
"""
ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
Returns a value that indicates whether serialization processes should serialize the value for
the provided dependency property.
dp: The identifier for the dependency property that should be serialized.
Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
"""
pass
def WritePostscript(self,*args):
"""
WritePostscript(self: Freezable)
Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
should call this method at the end of any API that modifies class members that are not stored as
dependency properties.
"""
pass
def WritePreamble(self,*args):
"""
WritePreamble(self: Freezable)
Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
valid threading context. System.Windows.Freezable inheritors should call this method at the
beginning of any API that writes to data members that are not dependency properties.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(cls: type)
__new__(cls: type,beginTime: Nullable[TimeSpan])
__new__(cls: type,beginTime: Nullable[TimeSpan],duration: Duration)
__new__(cls: type,beginTime: Nullable[TimeSpan],duration: Duration,repeatBehavior: RepeatBehavior)
"""
pass
AccelerationRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value specifying the percentage of the timeline's System.Windows.Media.Animation.Timeline.Duration spent accelerating the passage of time from zero to its maximum rate.
Get: AccelerationRatio(self: Timeline) -> float
Set: AccelerationRatio(self: Timeline)=value
"""
AutoReverse=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that indicates whether the timeline plays in reverse after it completes a forward iteration.
Get: AutoReverse(self: Timeline) -> bool
Set: AutoReverse(self: Timeline)=value
"""
BeginTime=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the time at which this System.Windows.Media.Animation.Timeline should begin.
Get: BeginTime(self: Timeline) -> Nullable[TimeSpan]
Set: BeginTime(self: Timeline)=value
"""
DecelerationRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value specifying the percentage of the timeline's System.Windows.Media.Animation.Timeline.Duration spent decelerating the passage of time from its maximum rate to zero.
Get: DecelerationRatio(self: Timeline) -> float
Set: DecelerationRatio(self: Timeline)=value
"""
Duration=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the length of time for which this timeline plays,not counting repetitions.
Get: Duration(self: Timeline) -> Duration
Set: Duration(self: Timeline)=value
"""
FillBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets a value that specifies how the System.Windows.Media.Animation.Timeline behaves after it reaches the end of its active period.
Get: FillBehavior(self: Timeline) -> FillBehavior
Set: FillBehavior(self: Timeline)=value
"""
Name=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the name of this System.Windows.Media.Animation.Timeline.
Get: Name(self: Timeline) -> str
Set: Name(self: Timeline)=value
"""
RepeatBehavior=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the repeating behavior of this timeline.
Get: RepeatBehavior(self: Timeline) -> RepeatBehavior
Set: RepeatBehavior(self: Timeline)=value
"""
SpeedRatio=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets or sets the rate,relative to its parent,at which time progresses for this System.Windows.Media.Animation.Timeline.
Get: SpeedRatio(self: Timeline) -> float
Set: SpeedRatio(self: Timeline)=value
"""
AccelerationRatioProperty=None
AutoReverseProperty=None
BeginTimeProperty=None
Completed=None
CurrentGlobalSpeedInvalidated=None
CurrentStateInvalidated=None
CurrentTimeInvalidated=None
DecelerationRatioProperty=None
DesiredFrameRateProperty=None
DurationProperty=None
FillBehaviorProperty=None
NameProperty=None
RemoveRequested=None
RepeatBehaviorProperty=None
SpeedRatioProperty=None
| [
"[email protected]"
] | |
a92459e9e8ae533390d8cd2ec5542ac8dbe5714e | 7e9977bbf988fe2136529640afb08460cce833bc | /HeroRegistrationProj/manage.py | c117eea3936972ec247c0372ef508f7c1c854c19 | [
"Apache-2.0"
] | permissive | cs-fullstack-2019-spring/django-fields-widgets-cw-itayanna | b8c761376f89fd0c8b4d2fead46b5dc75a7194df | 7702fe9b541dfaf5ac0458729bcdacd538b6c232 | refs/heads/master | 2020-04-27T13:24:13.427170 | 2019-03-12T14:37:11 | 2019-03-12T14:37:11 | 174,368,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HeroRegistrationProj.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
10f5ab79003ff1e2cbfd7c31754b890b1ab31a6d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03283/s440786665.py | fe6e5bf6654473cf6b9ff410adac87f56d2c24dd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | class BIT:
def __init__(self, n):
self.n = n
self.bit = [0]*(self.n+1) # 1-indexed
def init(self, init_val):
for i, v in enumerate(init_val):
self.add(i, v)
def add(self, i, x):
# i: 0-indexed
i += 1 # to 1-indexed
while i <= self.n:
self.bit[i] += x
i += (i & -i)
def sum(self, i, j):
# return sum of [i, j)
# i, j: 0-indexed
return self._sum(j) - self._sum(i)
def _sum(self, i):
# return sum of [0, i)
# i: 0-indexed
res = 0
while i > 0:
res += self.bit[i]
i -= i & (-i)
return res
import sys
import io, os
input = sys.stdin.buffer.readline
#input = io.BytesIO(os.read(0,os.fstat(0).st_size)).readline
n, m, Q = map(int, input().split())
X = []
for i in range(m):
l, r = map(int, input().split())
l, r = l-1, r-1
X.append((l, r, 0, i))
for i in range(Q):
p, q = map(int, input().split())
p, q = p-1, q-1
X.append((p, q, 1, i))
X.sort(key=lambda x: (x[1], x[2]))
bit = BIT(550)
ans = [-1]*Q
for l, r, t, i in X:
if t == 0:
bit.add(l, 1)
else:
ans[i] = bit.sum(l, r+1)
print(*ans, sep='\n')
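# Illustrative self-check of the Fenwick (binary indexed) tree above; it is
# only defined, never called, so it does not affect the judged input/output.
def _bit_demo():
    data = [5, 3, 7, 9, 6]
    b = BIT(len(data))
    b.init(data)                     # point updates build the tree in O(n log n)
    assert b.sum(0, 5) == sum(data)  # sum over the half-open range [0, 5)
    assert b.sum(1, 4) == 3 + 7 + 9
    b.add(2, 10)                     # data[2] += 10
    assert b.sum(2, 3) == 17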
| [
"[email protected]"
] |