| Column | Type | Length / range / values |
| --- | --- | --- |
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (may be null) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (may be null) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (may be null) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
179469489a69ca59e2930a07ad28fb243302e0f3 | 12c1b33e27d841bb25899d6601f1de75b522d88d | /python/udacity/draw_turtles.py | 6d504799dbe85482a068035ecfff4a600108ee55 | [] | no_license | conflabermits/Scripts | ec27456b5b26ad7b1edaf30686addff2cacc6619 | c91ef0594dda1228a523fcaccb4af3313d370718 | refs/heads/main | 2023-07-06T21:41:12.033118 | 2023-06-25T19:24:54 | 2023-06-25T19:24:54 | 66,151,253 | 4 | 0 | null | 2023-09-10T19:56:17 | 2016-08-20T14:35:34 | HTML | UTF-8 | Python | false | false | 893 | py | import turtle
def draw_square(a_turtle):
for i in range(0, 4):
a_turtle.forward(100)
a_turtle.right(90)
def draw_circle(a_turtle):
a_turtle.circle(100)
def draw_triangle(a_turtle):
for i in range(0, 3):
a_turtle.forward(100)
a_turtle.left(120)
def draw_turtles():
window = turtle.Screen()
window.bgcolor("red")
brad = turtle.Turtle()
brad.shape("turtle")
brad.color("black", "green")
brad.speed(10)
# angie = turtle.Turtle()
# angie.shape("circle")
# angie.color("blue")
# charlie = turtle.Turtle()
# charlie.shape("arrow")
# charlie.color("yellow")
# charlie.speed(4)
# charlie.left(180)
for i in range(0, 72):
draw_square(brad)
brad.right(95)
# draw_square(brad)
# draw_circle(angie)
# draw_triangle(charlie)
window.exitonclick()
draw_turtles()
| [
"[email protected]"
] | |
42835590fa2d772e8fd35ff631574e8c3dda8650 | 2f30cf20d58e2cde4037441e67213223c69a6998 | /lesson19_接口2/d02request.py | 34aa5e3f987860394f8ccb9da1afab99314bd07e | [] | no_license | zengcong1314/python1205 | b11db7de7d0ad1f8401b8b0c9b20024b4405ae6c | da800ed3374d1d43eb75485588ddb8c3a159bb41 | refs/heads/master | 2023-05-25T07:17:25.065004 | 2021-06-08T08:27:54 | 2021-06-08T08:27:54 | 318,685,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | """
Third-party library.
pip install requests
Used from Python to send HTTP requests.
"""
import requests
# Send a GET request
url = "http://www.keyou.site:8000/projects/"
p = {"a":"b"}
resp = requests.get(url,params=p)
print(resp)
print(resp.status_code)
# response body as a string
print(resp.text)
# response body as a dict
print(resp.json())
# POST request
url = "http://www.keyou.site:8000/user/login/"
# request parameter: body in JSON format
data = {
"username":"lemon1",
"password":"123456"
}
header = {
"Authorization":"JWT fow"
}
resp2 = requests.post(url,json=data,headers=header)
print(resp2.json())
# query string: params={}
# json body:    json={}
# headers:      headers={}
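# A combined sketch of the three kinds of request data (illustrative only;
# the token below is a placeholder, not a real credential):
# requests.get(
#     "http://www.keyou.site:8000/projects/",
#     params={"a": "b"},                        # query string
#     headers={"Authorization": "JWT <token>"}  # request header
# )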
| [
"[email protected]"
] | |
ef12a61a3f9668366b02a4f68d57fc5cb87247f6 | f9d4eee81dda90e41ee755f333e0d787dab749db | /eth2/beacon/scripts/quickstart_state/generate_beacon_genesis.py | 2ce8f76b744662551c3a38e4b0081d708144e70a | [
"MIT"
] | permissive | Palem1988/trinity | f10f21119a7ea98a7fc9458e5ff05b1e4cf6a021 | 79c21f8ae90bc765a78cb8052af0e4271e4c25e1 | refs/heads/master | 2022-02-20T05:21:18.576796 | 2019-09-24T22:09:21 | 2019-09-24T22:40:24 | 210,869,348 | 0 | 1 | MIT | 2019-09-25T14:45:12 | 2019-09-25T14:45:04 | null | UTF-8 | Python | false | false | 1,982 | py | from pathlib import Path
import time
from eth2._utils.hash import hash_eth2
from eth2.beacon.genesis import initialize_beacon_state_from_eth1
from eth2.beacon.tools.builder.initializer import create_mock_deposits_and_root
from eth2.beacon.tools.fixtures.config_types import Minimal
from eth2.beacon.tools.fixtures.loading import load_config_at_path, load_yaml_at
from eth2.beacon.tools.misc.ssz_vector import override_lengths
ROOT_DIR = Path("eth2/beacon/scripts")
KEY_SET_FILE = Path("keygen_16_validators.yaml")
def _load_config(config):
config_file_name = ROOT_DIR / Path(f"config_{config.name}.yaml")
return load_config_at_path(config_file_name)
def _main():
config_type = Minimal
config = _load_config(config_type)
override_lengths(config)
key_set = load_yaml_at(ROOT_DIR / KEY_SET_FILE)
pubkeys = ()
privkeys = ()
withdrawal_credentials = ()
keymap = {}
for key_pair in key_set:
pubkey = key_pair["pubkey"].to_bytes(48, byteorder="big")
privkey = key_pair["privkey"]
withdrawal_credential = (
config.BLS_WITHDRAWAL_PREFIX.to_bytes(1, byteorder="big")
+ hash_eth2(pubkey)[1:]
)
pubkeys += (pubkey,)
privkeys += (privkey,)
withdrawal_credentials += (withdrawal_credential,)
keymap[pubkey] = privkey
deposits, _ = create_mock_deposits_and_root(
pubkeys, keymap, config, withdrawal_credentials
)
eth1_block_hash = b"\x42" * 32
# NOTE: this timestamp is a placeholder
eth1_timestamp = 10000
state = initialize_beacon_state_from_eth1(
eth1_block_hash=eth1_block_hash,
eth1_timestamp=eth1_timestamp,
deposits=deposits,
config=config,
)
genesis_time = int(time.time())
print(f"creating genesis at time {genesis_time}")
genesis_state = state.copy(genesis_time=genesis_time)
print(genesis_state.hash_tree_root.hex())
if __name__ == "__main__":
_main()
| [
"[email protected]"
] | |
3ea5bee3bd4871ba78ed230af082be4efae65c9f | d76224386c2b359d6d3228567cbb274fea8fcaab | /final_back/asgi.py | 3b0602bc765f04f8b1a90f1b18b5d63842de6062 | [] | no_license | SSAFY-5th-seungwoon/Moya_backend | 2a270525dc3d0d53ee4b42274696d19f84edce9d | ac8d7004dafef9a4f9030dbe3a5762661f3f06ac | refs/heads/master | 2023-05-22T20:45:07.230178 | 2021-06-16T07:44:05 | 2021-06-16T07:44:05 | 369,787,652 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
ASGI config for final_back project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'final_back.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
0f3aaea69808c239b235c44f472f9e05b0f6e1ab | 63cf686bf970d28c045719de2f0e7e9dae5bed15 | /Contains Duplicate .py | f7d07154cc28b4a5d52c30ce29ed8bc9695a4146 | [] | no_license | menard-noe/LeetCode | 6461bda4a076849cf69f2cd87999275f141cc483 | 4e9c50d256c84d1b830a7642b265619a0b69d542 | refs/heads/master | 2022-12-13T09:41:41.682555 | 2020-09-14T12:46:53 | 2020-09-14T12:46:53 | 282,481,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import math
from typing import List
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
dico = dict()
for num in nums:
if num in dico:
return True
else:
dico[num] = 0
return False
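        # An equivalent idiomatic alternative with the same O(n) time and space:
        #     return len(set(nums)) < len(nums)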
if __name__ == "__main__":
# execute only if run as a script
input = [1,2,3,3]
solution = Solution()
print(solution.containsDuplicate(input)) | [
"[email protected]"
] | |
7ff1948228505514fa2fc18802fadd69dee1abbb | 81cac5d646fc14e52b3941279d59fdd957b10f7e | /tests/components/homekit_controller/specific_devices/test_ecobee_occupancy.py | 293ecd07dd2394ceabecc9061354e93c5bf4a172 | [
"Apache-2.0"
] | permissive | arsaboo/home-assistant | 6b6617f296408a42874a67a71ad9bc6074acd000 | 554e51017e7b1b6949783d9684c4a0e8ca21e466 | refs/heads/dev | 2023-07-27T20:56:52.656891 | 2022-01-19T19:30:57 | 2022-01-19T19:30:57 | 207,046,472 | 2 | 0 | Apache-2.0 | 2019-09-08T01:35:16 | 2019-09-08T01:35:16 | null | UTF-8 | Python | false | false | 1,226 | py | """
Regression tests for Ecobee occupancy.
https://github.com/home-assistant/core/issues/31827
"""
from tests.components.homekit_controller.common import (
DeviceTestInfo,
EntityTestInfo,
assert_devices_and_entities_created,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_ecobee_occupancy_setup(hass):
"""Test that an Ecbobee occupancy sensor be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "ecobee_occupancy.json")
await setup_test_accessories(hass, accessories)
await assert_devices_and_entities_created(
hass,
DeviceTestInfo(
unique_id="00:00:00:00:00:00",
name="Master Fan",
model="ecobee Switch+",
manufacturer="ecobee Inc.",
sw_version="4.5.130201",
hw_version="",
serial_number="111111111111",
devices=[],
entities=[
EntityTestInfo(
entity_id="binary_sensor.master_fan",
friendly_name="Master Fan",
unique_id="homekit-111111111111-56",
state="off",
),
],
),
)
| [
"[email protected]"
] | |
74b87ca9cb07bcf0b829fb9f8d1acca0b0fd7381 | 182dd5305aedeaa197f302c0d830ab85413cdd53 | /plugins/Filters/Convert2Gray/Convert2Gray.py | 68772b2605be2aa6796c95576bfe72f1a8208b5f | [
"MIT"
] | permissive | UmSenhorQualquer/workflow-editor | 016dbf47759b2572a811b80fc8bc79c88404c4ab | 6f836f99e155c2f503cf59adf4e8b8b574184e6d | refs/heads/master | 2021-01-24T18:58:13.224476 | 2017-07-20T10:00:10 | 2017-07-20T10:00:10 | 86,163,117 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import core.utils.tools as tools, cv2
from core.modules.OTModulePlugin import OTModulePlugin
from core.modules.ModuleConnection import ModuleConnection
from datatypes.TypeComponentsVideoPipe import TypeComponentsVideoPipe
from datatypes.TypeColorVideoPipe import TypeColorVideoPipe
from datatypes.TypeColorVideo import TypeColorVideo
from pyforms.Controls import ControlPlayer
from pyforms.Controls import ControlCombo
from pyforms.Controls import ControlButton
class Convert2Gray(OTModulePlugin,TypeColorVideoPipe):
def __init__(self, name):
icon_path = tools.getFileInSameDirectory(__file__, 'iconsubbg.jpg')
OTModulePlugin.__init__(self, name, iconFile=icon_path)
TypeColorVideoPipe.__init__(self)
self._video = ModuleConnection("Video", connecting=TypeColorVideo)
self._player = ControlPlayer("Video player")
self._video.changed = self.newVideoInputChoosen
self._player.processFrame = self.processFrame
self._formset = [
'_video',
"_player",
]
def newVideoInputChoosen(self):
ModuleConnection.changed_event(self._video)
value = self._video.value
if value:
self.open(value)
self._player.value = value
def processFrame(self, frame):
return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
| [
"[email protected]"
] | |
f084f2434510565b6756fe2e22ff2eee7bd9ae65 | 52272ef3de9036a1b23b35047ceb90a2027df1f3 | /selenium_doc/TEST/test1.py | 23254c71b70f3e7247cd31f2a4df400aa90ba122 | [] | no_license | luokeke/selenium_python | 9f2883cc158e473902e0c4bbf9fca20ecb61bfda | 3cc05034afd0bc0930921812393bd572db868fb3 | refs/heads/master | 2020-08-25T02:51:16.064938 | 2020-04-20T10:03:52 | 2020-04-20T10:03:52 | 216,950,778 | 3 | 1 | null | 2019-10-23T03:39:52 | 2019-10-23T02:29:06 | null | UTF-8 | Python | false | false | 849 | py | #!/usr/bin/env python
#-*- coding:utf8 -*-
#@author: 刘慧玲 2018/5/22 19:15
from selenium import webdriver
from time import sleep
from login01 import *
'''
Purpose of the script: server SSP (storage space) features
'''
# Batch-delete storage spaces
driver = webdriver.Firefox()
driver.delete_all_cookies()
driver.maximize_window()
# Visit the Jingan-site storage space link directly and log in. The username and password can be changed.
Login().Ky_login(driver,"luokeke", "1")
sleep(3)
driver.get("https://mc.kuaiyun.cn/host/hostList")
sleep(3)
driver.find_element_by_link_text(u"管理").click()
sleep(5)
# Opening "Manage" jumps to a new page, which requires multi-window handling.
all_window_handle = driver.window_handles  # get the handles of all open windows
driver.switch_to.window(all_window_handle[-1])  # switch to the topmost (newest) window handle
# "Reinstall system" tab
driver.find_element_by_id("tab3_7").click()
sleep(5)
| [
"[email protected]"
] | |
6991166ac2811bf5b5871d798c5766c22ed204be | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/HotelRoomImgUploadRequest.py | ff545c17c529a9cad6b04e9031dcf6168e228996 | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class HotelRoomImgUploadRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.gid = None
self.pic = None
self.position = None
def getapiname(self):
return 'taobao.hotel.room.img.upload'
def getMultipartParas(self):
return ['pic']
| [
"[email protected]"
] | |
99f94f0fc3ee9a38ec3c34db968e6e99a9ea7e86 | f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2 | /2016/AST2/Bili/letnji/ponovo/konacno1.py | 7482940ccb3f15b8866efb9be0b7760bf88d483d | [] | no_license | ispastlibrary/Titan | a4a7e4bb56544d28b884a336db488488e81402e0 | f60e5c6dc43876415b36ad76ab0322a1f709b14d | refs/heads/master | 2021-01-17T19:23:32.839966 | 2016-06-03T13:47:44 | 2016-06-03T13:47:44 | 60,350,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,475 | py | from scipy.special import wofz
import numpy as np
import matplotlib.pyplot as plt
import math
from scipy import interpolate
e=1.60217657e-19 # elementary charge [C]
eV=1.60217657e-19 # eV in J
AU=149597871000 # astronomical unit [m]
Na=6.02*1e+23 # Avogadro's number
M=23*1e-3 # molar mass of Na [kg/mol]
me=9.1e-31 # electron mass [kg]
Rk=400000 # radius of the coma [m] ?????????????????????????????
k=1.38e-23 # Boltzmann constant [J/K]
dE=3.37*1e-19 # energy difference between the 3p and 3s states, related to the wavelength [J]
R=Na*k # universal gas constant [J/(mol K)]
L0=589e-9 # central wavelength of the D2 line [m]
h=6.63*1e-34 # Planck constant [J s]
c=299792458 # speed of light [m/s]
A=6.14e+7 # Einstein coefficient for spontaneous emission [s^-1], 3p-3s
g0=1 # statistical weight of the 3s level (s)
g1=3 # statistical weight of the 3p level (px, py, pz)
V0=c/L0 # central frequency [Hz]
#Tef=5777 # effective temperature of the Sun [K]
d=0 # distance of the line of sight from the center of the nucleus
# array of coefficients for NaI
# array of coefficients for NaII
# coefficients of the approximating polynomial for computing the partition function
a=[-2.60507178e+3,1.71419244e+3,-4.50632658e+2,5.91751503e+1,-3.88164070e+0,1.01752936e-1]
b=[-3.68868193e-6,2.28941495e-6,-5.66767833e-7,6.99552282e-8,-4.30495956e-9,1.05668164e-10]
Ro=1000 # density of the nucleus [kg/m^3], Andrija
Rn=2.5e4 # radius of the nucleus [m]
S=1/2 # source function
AMU=1.66e-27 # atomic mass unit in kg
sigma=5.670373e-8 # Stefan-Boltzmann constant [W m^-2 K^-4]
mna=22.9877*AMU # atomic mass of Na in kg
mnaoh=39.998*AMU # mass of NaOH in kg
mh20=18.015*AMU # mass of H2O in kg
def voigt(x,y): # Voigt function, only the real part of the Faddeeva function
z = x + 1j*y
w = wofz(z).real
return w
def part_funk(a,T): # computes the partition function
K=0
for i in range(6):
K += a[i]*pow(np.log(T),i)
Zp = np.exp(K)
return Zp
def rastojanje(T): # heliocentric distance [AU]
    #Lt=-582*(T-273)+2.62e6 # latent heat of sublimation [J/kg], T in deg C, not in K
    Lt=2.62e6
    Sc=1361 # solar constant at 1 AU [W/m^2]
    Pr=1.2*(10**12)*np.exp(-6000/T) # saturation vapor pressure at the comet surface
    Zm=Pr*np.sqrt(mh20/(2*np.pi*k*T)) # H2O sublimation flux [kg s^-1 m^-2] (mass production rate)
    rasto=np.sqrt(Sc/(sigma*T**4+Lt*Zm))
    return rasto
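# The line above follows from the surface energy balance of the nucleus
# (absorptivity taken as 1, as the code assumes): Sc/r^2 = sigma*T^4 + Lt*Zm,
# so the heliocentric distance is r = sqrt(Sc/(sigma*T^4 + Lt*Zm)) in AU.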
"""
def Q_H20(T):
mh20=18.015*AMU
Pr=1.2*(10e+12)*np.exp((-6000)/T)
prod=Pr*np.sqrt(1/(2*np.pi*k*T*mh20))
prod=prod*4*np.pi*Rn*Rn
return prod"""
def Q_Na(T): # total Na production rate [s^-1]
    Pr=1.2*(10e+12)*np.exp((-6000)/T) # saturation vapor pressure
    produ=Pr*np.sqrt(1/(2*np.pi*k*T*mna)) # Na production rate [m^-2 s^-1]
    produ=produ*4*np.pi*Rn*Rn # total rate over the nucleus surface
    return produ
# 207.6 K -> 1 AU, ~1e+32 total Na production rate
def brz_izb(T,masa): # outflow velocity = thermal velocity [m/s]
    #brz=20*pow(rastojanje(T),-0.41)*(10**3)
    brz = np.sqrt((2*k*T)/masa)
    return brz
tau_p0=10**3 # lifetime of the parent molecule before it is destroyed [s]
tau_d0=1.67*(10**5) # lifetime of the daughter species before it is destroyed [s]
def lp(T): # scale length of the parent [m]
    roditelj = brz_izb(T,mnaoh)*tau_p0*(rastojanje(T))**2
    return roditelj
def ld(T): # scale length of the daughter [m]
    cerka = brz_izb(T,mna)*tau_d0*(rastojanje(T))**2
    return cerka
#plt.gca().set_yscale('log')
def kon_Na_koma(T,r): # Na number density in the coma, Haser model
    Dr=r-Rn # reduced current distance (r minus the nucleus radius)
    konc=(Q_Na(T)*ld(T)*(np.exp(-Dr/lp(T))-np.exp(-Dr/ld(T))))/(4*np.pi*brz_izb(T,mna)*r*r*(lp(T)-ld(T)))
    #print(ld(T),lp(T))
    return konc
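# For reference, the expression above is the Haser-model daughter-species density
#   n(r) = Q*ld/(4*pi*v*r^2*(lp-ld)) * (exp(-(r-Rn)/lp) - exp(-(r-Rn)/ld)),
# with Q the production rate, v the outflow speed, and lp, ld the parent and
# daughter scale lengths.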
def MaxBol(T,r): # Na density in the ground 3s state (n0) at distance r in the coma
    NaI = kon_Na_koma(T,r) # total Na number density from the Haser model
    Zp = part_funk(a,T) # partition function
    Er=5.1390767*eV # energy of the 3s level of Na [J]
    #dE=h*c/L0
    #Er=dE
    #Er=0
    n0 =(NaI*g0*np.exp(-Er/(k*T)))/Zp # Na density in the ground state
    return n0
def Bol(T,r):
NaI = kon_Na_koma(T,r)
dE = h*c/L0
odnos = g1*(np.exp(-dE/(k*T)))/g0
n0 = NaI/(1+odnos)
return n0
def ajnstajnB(A): # from A (the emission coefficient, 3p-3s) returns B (the Einstein absorption coefficient for the 3s-3p transition [m^3 s^-2 J^-1])
    B = (c**2*A*g1)/(8*np.pi*h*V0**3*g0)
    return B
def Dopl_sirina(T): # Doppler width as a function of temperature [Hz]
    Dopler = np.sqrt(2*R*T/M)*(V0/c)
    return Dopler
def koef_aps(V,T,r,av): # absorption coefficient
    B = ajnstajnB(A)
    konc_aps = n0 = Bol(T,r) # ground-state Na density (two-level Boltzmann population)
    Dopler = Dopl_sirina(T)
    apsor = ((B*n0*h*V)/(4*np.pi*Dopler))*Fojtov_profil(V,av)
    return apsor
br_ljuspica = 2500
dr = Rk/br_ljuspica # the coma is divided into thin spherical shells
def opt_dub(d,V,T,av): # optical depth for an inhomogeneous coma
r1 = Rk
r2 = r1-dr
suma_opt = 0
broj = br_ljuspica - 1 - math.floor(d/dr)
"""for i in range(broj):
r2 = r1 - dr
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1 = r2"""
while (r1>(Rn+d)) and (r2>(Rn+d)):
ds = np.sqrt(r1*r1 - d*d) - np.sqrt(r2*r2 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
r1=r2
r2=r1-dr
ds = np.sqrt(r1*r1 - d*d)
suma_opt += koef_aps(V,T,r1,av)*ds
suma_opt *= 2
return suma_opt
"""def poc_intez(V,T): #pocetni intezitet preko plankove fje
plank=(2*h*V*V*V)/(c*c*(np.exp((h*V)/(k*T))-1))
return plank"""
N_tacaka = 150
V1 = c/(589.02e-9)
V2 = c/(588.98e-9)
dV = (V2 - V1)/N_tacaka
def izlazni(V,T,av): # emergent intensity, normalized so that I0=1 and the source function S=1/2
    tau = opt_dub(d,V,T,av) # optical depth
    #2*Rk*koef_aps(V,T,Rk)
    q = S*(1-np.exp(-tau))
    return q
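# Note that q = S*(1 - exp(-tau)) rises from 0 toward the source-function value
# S = 1/2 as the optical depth grows, which is why the computed line profiles
# saturate at 0.5 in the core.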
def Fojtov_profil(V,av):
F=voigt((V-V0)/Dopl_sirina(T),av)/(np.pi*Dopl_sirina(T))
return F
def E_W(x,y): # equivalent width, computed with a cubic spline
EkW=0
tck=interpolate.splrep(x,y)
i=0
while(i<(N_tacaka-1)):
EkW+=interpolate.splint(x[i],x[i+1],tck)
i+=1
return EkW
"""T=207
av=A/Dopl_sirina(T)
x=np.linspace(V1,V2,N_tacaka)
y0=izlazni(x,av,0)
#y1=izlazni(x,av,1)
#plt.suptitle('Uporedjivanje dveju metoda za nalazenja broja Na u osnovnom 3s stanju u odnosu na ukupan broj Na')
plt.plot(x,y0,lw=5,label='Maksvel - Bolcmanova statistika')
#plt.plot(x,y1,label='Bolcmanova raspodela')
plt.legend(loc='best')
plt.show()"""
d=0
x=np.linspace(V1,V2,N_tacaka)
T=300
print(d)
av=A/Dopl_sirina(T)
y0=izlazni(x,T,av)
dd=Rk/6
d=d+dd
print(d)
y1=izlazni(x,T,av)
d=d+dd
print(d)
y2=izlazni(x,T,av)
d=d+dd
print(d)
y3=izlazni(x,T,av)
d=d+dd
print(d)
y4=izlazni(x,T,av)
plt.suptitle('Profil linije Na za različite preseke na T=220K')
plt.xlabel('Frekvencija[Hz]')
plt.ylabel('Relativni intenzitet')
plt.plot(x,y0,label='d=0 m')
plt.plot(x,y1,label='d=66666 m')
plt.plot(x,y2,label='d=133333 m')
plt.plot(x,y3,label='d=200000 m')
plt.plot(x,y4,label='d=266666 m')
#plt.plot(x,y5,label='0.06 AU, 260K')
plt.legend(loc='best')
plt.show()
"""xt=np.linspace(100,210,100) yt=Q_Na(xt) plt.plot(xt,yt)
plt.xlabel('Temperatura[K]') plt.ylabel('Stopa produkcije Na[s^-1]')
plt.title('Grafik zavisnosti stope produkcije od temperature')"""
#plt.yscale('log')
| [
"[email protected]"
] | |
728b4497309cb53507d8324b36ea8bd2d0693130 | aaa07613c41fed96fb6d7fe5dc292975e17fb107 | /isovar/genetic_code.py | c703b7f00116baad44d36508c1c4167141a87eb6 | [
"Apache-2.0"
] | permissive | openvax/isovar | 2fa89f88525e72d94b974d5a20f038e3bdc15bf4 | e43e2574dc783a5dfc65b055f977bd0f11df015b | refs/heads/master | 2023-08-18T20:14:39.338144 | 2023-08-01T17:56:23 | 2023-08-01T17:56:23 | 51,102,454 | 17 | 10 | Apache-2.0 | 2020-08-19T18:48:54 | 2016-02-04T20:14:48 | Python | UTF-8 | Python | false | false | 8,454 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
"""
GeneticCode objects contain the rules for translating cDNA into a protein
sequence: the set of valid start and stop codons, as well as which
amino acid each DNA triplet is translated into.
"""
class GeneticCode(object):
"""
Represents distinct translation tables to go from cDNA triplets to amino
acids.
"""
def __init__(self, name, start_codons, stop_codons, codon_table):
self.name = name
self.start_codons = set(start_codons)
self.stop_codons = set(stop_codons)
self.codon_table = dict(codon_table)
self._check_codons()
def _check_codons(self):
"""
If codon table is missing stop codons, then add them.
"""
for stop_codon in self.stop_codons:
if stop_codon in self.codon_table:
if self.codon_table[stop_codon] != "*":
raise ValueError(
("Codon '%s' not found in stop_codons, but codon table "
"indicates that it should be") % (stop_codon,))
else:
self.codon_table[stop_codon] = "*"
for start_codon in self.start_codons:
if start_codon not in self.codon_table:
raise ValueError(
"Start codon '%s' missing from codon table" % (
start_codon,))
for codon, amino_acid in self.codon_table.items():
if amino_acid == "*" and codon not in self.stop_codons:
raise ValueError(
"Non-stop codon '%s' can't translate to '*'" % (
codon,))
if len(self.codon_table) != 64:
raise ValueError(
"Expected 64 codons but found %d in codon table" % (
len(self.codon_table,)))
def translate(self, cdna_sequence, first_codon_is_start=False):
"""
Given a cDNA sequence which is aligned to a reading frame, returns
the translated protein sequence and a boolean flag indicating whether
the translated sequence ended on a stop codon (or just ran out of codons).
Parameters
----------
cdna_sequence : str
cDNA sequence which is expected to start and end on complete codons.
first_codon_is_start : bool
Is the first codon of the sequence a start codon?
"""
if not isinstance(cdna_sequence, str):
cdna_sequence = str(cdna_sequence)
n = len(cdna_sequence)
# trim to multiple of 3 length, if there are 1 or 2 nucleotides
# dangling at the end of an mRNA they will not affect translation
# since ribosome will fall off at that point
end_idx = 3 * (n // 3)
codon_table = self.codon_table
if first_codon_is_start and cdna_sequence[:3] in self.start_codons:
amino_acid_list = ['M']
start_index = 3
else:
start_index = 0
amino_acid_list = []
ends_with_stop_codon = False
for i in range(start_index, end_idx, 3):
codon = cdna_sequence[i:i + 3]
aa = codon_table[codon]
if aa == "*":
ends_with_stop_codon = True
break
amino_acid_list.append(aa)
amino_acids = "".join(amino_acid_list)
return amino_acids, ends_with_stop_codon
def copy(
self,
name,
start_codons=None,
stop_codons=None,
codon_table=None,
codon_table_changes=None):
"""
Make copy of this GeneticCode object with optional replacement
values for all fields.
"""
new_start_codons = (
self.start_codons.copy()
if start_codons is None
else start_codons)
new_stop_codons = (
self.stop_codons.copy()
if stop_codons is None
else stop_codons)
new_codon_table = (
self.codon_table.copy()
if codon_table is None
else codon_table)
if codon_table_changes is not None:
new_codon_table.update(codon_table_changes)
return GeneticCode(
name=name,
start_codons=new_start_codons,
stop_codons=new_stop_codons,
codon_table=new_codon_table)
standard_genetic_code = GeneticCode(
name="standard",
start_codons={'ATG', 'CTG', 'TTG'},
stop_codons={'TAA', 'TAG', 'TGA'},
codon_table={
'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L',
'TCT': 'S', 'TCC': 'S', 'TCA': 'S', 'TCG': 'S',
'TAT': 'Y', 'TAC': 'Y', 'TAA': '*', 'TAG': '*',
'TGT': 'C', 'TGC': 'C', 'TGA': '*', 'TGG': 'W',
'CTT': 'L', 'CTC': 'L', 'CTA': 'L', 'CTG': 'L',
'CCT': 'P', 'CCC': 'P', 'CCA': 'P', 'CCG': 'P',
'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R',
'ATT': 'I', 'ATC': 'I', 'ATA': 'I', 'ATG': 'M',
'ACT': 'T', 'ACC': 'T', 'ACA': 'T', 'ACG': 'T',
'AAT': 'N', 'AAC': 'N', 'AAA': 'K', 'AAG': 'K',
'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V',
'GCT': 'A', 'GCC': 'A', 'GCA': 'A', 'GCG': 'A',
'GAT': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E',
'GGT': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G'
}
)
# Non-canonical start sites based on figure 2 of
# "Global mapping of translation initiation sites in mammalian
# cells at single-nucleotide resolution"
standard_genetic_code_with_extra_start_codons = standard_genetic_code.copy(
name="standard-with-extra-start-codons",
start_codons=standard_genetic_code.start_codons.union({
'GTG',
'AGG',
'ACG',
'AAG',
'ATC',
'ATA',
'ATT'}))
vertebrate_mitochondrial_genetic_code = standard_genetic_code.copy(
name="verterbrate-mitochondrial",
# "For thirty years AGA and AGG were considered terminators instead
# of coding for arginine. However, Temperley (2010) has recently shown
# that human mitochondria use only UAA and UAG stop codons."
# (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
stop_codons={'TAA', 'TAG'},
# "AUU codes for isoleucine during elongation but can code for
# methionine for initiation (ND2) See Fearnley & Walker (1987) and
# Peabody (1989)."
# (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
start_codons=['ATT', 'ATC', 'ATA', 'ATG', 'GTG'],
# "UGA codes for tryptophan instead of termination and AUA codes for
# methionine instead of isoleucine."
# (http://mitomap.org/bin/view.pl/MITOMAP/HumanMitoCode)
codon_table_changes={'TGA': 'W', 'ATA': 'M'},
)
def translate_cdna(
cdna_sequence,
first_codon_is_start=False,
mitochondrial=False):
"""
Given a cDNA sequence which is aligned to a reading frame, returns
the translated protein sequence and a boolean flag indicating whether
the translated sequence ended on a stop codon (or just ran out of codons).
Parameters
----------
cdna_sequence : str
cDNA sequence which is expected to start and end on complete codons.
first_codon_is_start : bool
mitochondrial : bool
Use the mitochondrial codon table instead of standard
codon to amino acid table.
"""
# once we drop some of the prefix nucleotides, we should be in a reading frame
# which allows us to translate this protein
if mitochondrial:
genetic_code = vertebrate_mitochondrial_genetic_code
else:
genetic_code = standard_genetic_code_with_extra_start_codons
return genetic_code.translate(
cdna_sequence=cdna_sequence,
first_codon_is_start=first_codon_is_start)
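# A minimal usage sketch (illustrative, not part of the original module); the
# cDNA string below is arbitrary and simply exercises the standard table above.
if __name__ == "__main__":
    # ATG GCC ATT GTA ATG GGC CGC TGA translates to "MAIVMGR", stopping on TGA
    protein, ended_on_stop = translate_cdna(
        "ATGGCCATTGTAATGGGCCGCTGA",
        first_codon_is_start=True)
    print(protein, ended_on_stop)  # expected: MAIVMGR True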
| [
"[email protected]"
] | |
90e8d38ca104a9333d1b862e16bbc7ebd1820480 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02747/s534963942.py | e59afea127c18cba4bb7f22ef8a7070d00c3b7dd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | S = str(input())
S_size = len(S)
hi_size = S.count('hi')
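# "hi" cannot overlap itself, so count('hi') gives the maximum number of disjoint
# occurrences; they cover the whole string exactly when hi_size * 2 == len(S).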
if S_size == hi_size * 2:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
d65dd43c9764aa5c4b8e093dd79520e1a748eb71 | 7b0f9a984dca4ad3fa536cf6ecd8f6654db02420 | /tencentcloud/live/v20180801/models.py | 15eaac07bf2987d54808c969fbf9decd6cab2b38 | [
"Apache-2.0"
] | permissive | SpencerHoGD/tencentcloud-sdk-python | 8cb6756722ec571f140a2dd8d2ade897f8bbd0c5 | c90e7719a253ea7928d4a510987df3ea6f3c23ac | refs/heads/master | 2020-06-27T13:24:31.736521 | 2019-07-26T10:31:52 | 2019-07-26T10:31:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212,493 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AddDelayLiveStreamRequest(AbstractModel):
"""AddDelayLiveStream请求参数结构体
"""
def __init__(self):
"""
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
:param DelayTime: 延播时间,单位:秒,上限:600秒。
:type DelayTime: int
:param ExpireTime: 延播设置的过期时间。UTC 格式,例如:2018-11-29T19:00:00Z。
注意:默认7天后过期,且最长支持7天内生效。
:type ExpireTime: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
self.DelayTime = None
self.ExpireTime = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
self.DelayTime = params.get("DelayTime")
self.ExpireTime = params.get("ExpireTime")
class AddDelayLiveStreamResponse(AbstractModel):
"""AddDelayLiveStream返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AddLiveDomainRequest(AbstractModel):
"""AddLiveDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 域名名称。
:type DomainName: str
:param DomainType: 域名类型,
0:推流域名,
1:播放域名。
:type DomainType: int
:param PlayType: 拉流域名类型:
1:国内,
2:全球,
3:境外。
:type PlayType: int
:param IsDelayLive: 默认 0 :普通直播,
1:慢直播。
:type IsDelayLive: int
"""
self.DomainName = None
self.DomainType = None
self.PlayType = None
self.IsDelayLive = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.DomainType = params.get("DomainType")
self.PlayType = params.get("PlayType")
self.IsDelayLive = params.get("IsDelayLive")
class AddLiveDomainResponse(AbstractModel):
"""AddLiveDomain返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class AddLiveWatermarkRequest(AbstractModel):
"""AddLiveWatermark请求参数结构体
"""
def __init__(self):
"""
:param PictureUrl: 水印图片url。
:type PictureUrl: str
:param WatermarkName: 水印名称。
:type WatermarkName: str
:param XPosition: 显示位置,X轴偏移。
:type XPosition: int
:param YPosition: 显示位置,Y轴偏移。
:type YPosition: int
:param Width: 水印宽度,占直播原始画面宽度百分比,建议高宽只设置一项,另外一项会自适应缩放,避免变形。
:type Width: int
:param Height: 水印高度,占直播原始画面宽度百分比,建议高宽只设置一项,另外一项会自适应缩放,避免变形。
:type Height: int
"""
self.PictureUrl = None
self.WatermarkName = None
self.XPosition = None
self.YPosition = None
self.Width = None
self.Height = None
def _deserialize(self, params):
self.PictureUrl = params.get("PictureUrl")
self.WatermarkName = params.get("WatermarkName")
self.XPosition = params.get("XPosition")
self.YPosition = params.get("YPosition")
self.Width = params.get("Width")
self.Height = params.get("Height")
class AddLiveWatermarkResponse(AbstractModel):
"""AddLiveWatermark返回参数结构体
"""
def __init__(self):
"""
:param WatermarkId: 水印ID。
:type WatermarkId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.WatermarkId = None
self.RequestId = None
def _deserialize(self, params):
self.WatermarkId = params.get("WatermarkId")
self.RequestId = params.get("RequestId")
class BillDataInfo(AbstractModel):
"""带宽和流量信息
"""
def __init__(self):
"""
:param Time: 时间点,格式为yyyy-mm-dd HH:MM:SS。
:type Time: str
:param Bandwidth: 带宽,单位是Mbps。
:type Bandwidth: float
:param Flux: 流量,单位是MB。
:type Flux: float
"""
self.Time = None
self.Bandwidth = None
self.Flux = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.Bandwidth = params.get("Bandwidth")
self.Flux = params.get("Flux")
class BindLiveDomainCertRequest(AbstractModel):
"""BindLiveDomainCert请求参数结构体
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: int
:param DomainName: 播放域名。
:type DomainName: str
:param Status: 状态,0: 关闭 1:打开。
:type Status: int
"""
self.CertId = None
self.DomainName = None
self.Status = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
self.DomainName = params.get("DomainName")
self.Status = params.get("Status")
class BindLiveDomainCertResponse(AbstractModel):
"""BindLiveDomainCert返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CallBackRuleInfo(AbstractModel):
"""规则信息
"""
def __init__(self):
"""
:param CreateTime: 规则创建时间。
:type CreateTime: str
:param UpdateTime: 规则更新时间。
:type UpdateTime: str
:param TemplateId: 模板Id。
:type TemplateId: int
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径。
:type AppName: str
"""
self.CreateTime = None
self.UpdateTime = None
self.TemplateId = None
self.DomainName = None
self.AppName = None
def _deserialize(self, params):
self.CreateTime = params.get("CreateTime")
self.UpdateTime = params.get("UpdateTime")
self.TemplateId = params.get("TemplateId")
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
class CallBackTemplateInfo(AbstractModel):
"""回调模板信息
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param StreamBeginNotifyUrl: 开播回调URL。
:type StreamBeginNotifyUrl: str
:param StreamEndNotifyUrl: 断流回调URL。
:type StreamEndNotifyUrl: str
:param StreamMixNotifyUrl: 混流回调URL。
:type StreamMixNotifyUrl: str
:param RecordNotifyUrl: 录制回调URL。
:type RecordNotifyUrl: str
:param SnapshotNotifyUrl: 截图回调URL。
:type SnapshotNotifyUrl: str
:param PornCensorshipNotifyUrl: 鉴黄回调URL。
:type PornCensorshipNotifyUrl: str
:param CallbackKey: 回调的鉴权key
:type CallbackKey: str
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.StreamBeginNotifyUrl = None
self.StreamEndNotifyUrl = None
self.StreamMixNotifyUrl = None
self.RecordNotifyUrl = None
self.SnapshotNotifyUrl = None
self.PornCensorshipNotifyUrl = None
self.CallbackKey = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
self.StreamBeginNotifyUrl = params.get("StreamBeginNotifyUrl")
self.StreamEndNotifyUrl = params.get("StreamEndNotifyUrl")
self.StreamMixNotifyUrl = params.get("StreamMixNotifyUrl")
self.RecordNotifyUrl = params.get("RecordNotifyUrl")
self.SnapshotNotifyUrl = params.get("SnapshotNotifyUrl")
self.PornCensorshipNotifyUrl = params.get("PornCensorshipNotifyUrl")
self.CallbackKey = params.get("CallbackKey")
class CdnPlayStatData(AbstractModel):
"""下行播放统计指标
"""
def __init__(self):
"""
:param Time: 时间点,格式为yyyy-mm-dd HH:MM:SS。
:type Time: str
:param Bandwidth: 带宽,(单位Mbps)。
:type Bandwidth: float
:param Flux: 流量,(单位MB)。
:type Flux: float
:param Request: 新增请求数。
:type Request: int
:param Online: 并发连接数。
:type Online: int
"""
self.Time = None
self.Bandwidth = None
self.Flux = None
self.Request = None
self.Online = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.Bandwidth = params.get("Bandwidth")
self.Flux = params.get("Flux")
self.Request = params.get("Request")
self.Online = params.get("Online")
class CertInfo(AbstractModel):
"""证书信息
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: int
:param CertName: 证书名称。
:type CertName: str
:param Description: 描述信息。
:type Description: str
:param CreateTime: 创建时间,UTC格式。
:type CreateTime: str
:param HttpsCrt: 证书内容。
:type HttpsCrt: str
:param CertType: 证书类型。
0:腾讯云托管证书
1:用户添加证书。
:type CertType: int
:param CertExpireTime: 证书过期时间,UTC格式。
:type CertExpireTime: str
:param DomainList: 使用此证书的域名列表。
:type DomainList: list of str
"""
self.CertId = None
self.CertName = None
self.Description = None
self.CreateTime = None
self.HttpsCrt = None
self.CertType = None
self.CertExpireTime = None
self.DomainList = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
self.CertName = params.get("CertName")
self.Description = params.get("Description")
self.CreateTime = params.get("CreateTime")
self.HttpsCrt = params.get("HttpsCrt")
self.CertType = params.get("CertType")
self.CertExpireTime = params.get("CertExpireTime")
self.DomainList = params.get("DomainList")
class ClientIpPlaySumInfo(AbstractModel):
"""客户端ip播放汇总信息
"""
def __init__(self):
"""
:param ClientIp: 客户端ip,点分型。
:type ClientIp: str
:param Province: 客户端所在省份。
:type Province: str
:param TotalFlux: 总流量。
:type TotalFlux: float
:param TotalRequest: 总请求数。
:type TotalRequest: int
:param TotalFailedRequest: 总失败请求数。
:type TotalFailedRequest: int
"""
self.ClientIp = None
self.Province = None
self.TotalFlux = None
self.TotalRequest = None
self.TotalFailedRequest = None
def _deserialize(self, params):
self.ClientIp = params.get("ClientIp")
self.Province = params.get("Province")
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
self.TotalFailedRequest = params.get("TotalFailedRequest")
class CreateLiveCallbackRuleRequest(AbstractModel):
"""CreateLiveCallbackRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
:param TemplateId: 模板ID。
:type TemplateId: int
"""
self.DomainName = None
self.AppName = None
self.TemplateId = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.TemplateId = params.get("TemplateId")
class CreateLiveCallbackRuleResponse(AbstractModel):
"""CreateLiveCallbackRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateLiveCallbackTemplateRequest(AbstractModel):
"""CreateLiveCallbackTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateName: 模板名称。非空的字符串
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param StreamBeginNotifyUrl: 开播回调URL,
相关协议文档:[事件消息通知](/document/product/267/32744)。
:type StreamBeginNotifyUrl: str
:param StreamEndNotifyUrl: 断流回调URL,
相关协议文档:[事件消息通知](/document/product/267/32744)。
:type StreamEndNotifyUrl: str
:param RecordNotifyUrl: 录制回调URL,
相关协议文档:[事件消息通知](/document/product/267/32744)。
:type RecordNotifyUrl: str
:param SnapshotNotifyUrl: 截图回调URL,
相关协议文档:[事件消息通知](/document/product/267/32744)。
:type SnapshotNotifyUrl: str
:param PornCensorshipNotifyUrl: 鉴黄回调URL,
相关协议文档:[事件消息通知](/document/product/267/32741)。
:type PornCensorshipNotifyUrl: str
:param CallbackKey: 回调key,回调URL公用,鉴权回调说明详见回调格式文档
:type CallbackKey: str
"""
self.TemplateName = None
self.Description = None
self.StreamBeginNotifyUrl = None
self.StreamEndNotifyUrl = None
self.RecordNotifyUrl = None
self.SnapshotNotifyUrl = None
self.PornCensorshipNotifyUrl = None
self.CallbackKey = None
def _deserialize(self, params):
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
self.StreamBeginNotifyUrl = params.get("StreamBeginNotifyUrl")
self.StreamEndNotifyUrl = params.get("StreamEndNotifyUrl")
self.RecordNotifyUrl = params.get("RecordNotifyUrl")
self.SnapshotNotifyUrl = params.get("SnapshotNotifyUrl")
self.PornCensorshipNotifyUrl = params.get("PornCensorshipNotifyUrl")
self.CallbackKey = params.get("CallbackKey")
class CreateLiveCallbackTemplateResponse(AbstractModel):
"""CreateLiveCallbackTemplate返回参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板ID。
:type TemplateId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TemplateId = None
self.RequestId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.RequestId = params.get("RequestId")
class CreateLiveCertRequest(AbstractModel):
"""CreateLiveCert请求参数结构体
"""
def __init__(self):
"""
:param CertType: 证书类型。0-用户添加证书;1-腾讯云托管证书。
:type CertType: int
:param HttpsCrt: 证书内容,即公钥。
:type HttpsCrt: str
:param HttpsKey: 私钥。
:type HttpsKey: str
:param CertName: 证书名称。
:type CertName: str
:param Description: 描述。
:type Description: str
"""
self.CertType = None
self.HttpsCrt = None
self.HttpsKey = None
self.CertName = None
self.Description = None
def _deserialize(self, params):
self.CertType = params.get("CertType")
self.HttpsCrt = params.get("HttpsCrt")
self.HttpsKey = params.get("HttpsKey")
self.CertName = params.get("CertName")
self.Description = params.get("Description")
class CreateLiveCertResponse(AbstractModel):
"""CreateLiveCert返回参数结构体
"""
def __init__(self):
"""
:param CertId: 证书ID
:type CertId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.CertId = None
self.RequestId = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
self.RequestId = params.get("RequestId")
class CreateLiveRecordRequest(AbstractModel):
"""CreateLiveRecord请求参数结构体
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param DomainName: 推流域名。多域名推流必须设置。
:type DomainName: str
:param StartTime: 录制开始时间。中国标准时间,需要URLEncode(rfc3986)。如 2017-01-01 10:10:01,编码为:2017-01-01+10%3a10%3a01。
定时录制模式,必须设置该字段;实时视频录制模式,忽略该字段。
:type StartTime: str
:param EndTime: 录制结束时间。中国标准时间,需要URLEncode(rfc3986)。如 2017-01-01 10:30:01,编码为:2017-01-01+10%3a30%3a01。
定时录制模式,必须设置该字段;实时录制模式,为可选字段。如果通过Highlight参数,设置录制为实时视频录制模式,其设置的结束时间不应超过当前时间+30分钟,如果设置的结束时间超过当前时间+30分钟或者小于当前时间或者不设置该参数,则实际结束时间为当前时间+30分钟。
:type EndTime: str
:param RecordType: 录制类型。
“video” : 音视频录制【默认】。
“audio” : 纯音频录制。
在定时录制模式或实时视频录制模式下,该参数均有效,不区分大小写。
:type RecordType: str
:param FileFormat: 录制文件格式。其值为:
“flv”【默认】,“hls”,”mp4”,“aac”,”mp3”。
在定时录制模式或实时视频录制模式下,该参数均有效,不区分大小写。
:type FileFormat: str
:param Highlight: 开启实时视频录制模式标志。
0:不开启实时视频录制模式,即定时录制模式【默认】。见[示例一](#.E7.A4.BA.E4.BE.8B1-.E5.88.9B.E5.BB.BA.E5.AE.9A.E6.97.B6.E5.BD.95.E5.88.B6.E4.BB.BB.E5.8A.A1)。
1:开启实时视频录制模式。见[示例二](#.E7.A4.BA.E4.BE.8B2-.E5.88.9B.E5.BB.BA.E5.AE.9E.E6.97.B6.E5.BD.95.E5.88.B6.E4.BB.BB.E5.8A.A1)。
:type Highlight: int
:param MixStream: 开启A+B=C混流C流录制标志。
0:不开启A+B=C混流C流录制【默认】。
1:开启A+B=C混流C流录制。
在定时录制模式或实时视频录制模式下,该参数均有效。
:type MixStream: int
:param StreamParam: 录制流参数。当前支持以下参数:
record_interval - 录制分片时长,单位 秒,1800 - 7200
storage_time - 录制文件存储时长,单位 秒
eg. record_interval=3600&storage_time=2592000
注:参数需要url encode。
在定时录制模式或实时视频录制模式下,该参数均有效。
:type StreamParam: str
"""
self.StreamName = None
self.AppName = None
self.DomainName = None
self.StartTime = None
self.EndTime = None
self.RecordType = None
self.FileFormat = None
self.Highlight = None
self.MixStream = None
self.StreamParam = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.RecordType = params.get("RecordType")
self.FileFormat = params.get("FileFormat")
self.Highlight = params.get("Highlight")
self.MixStream = params.get("MixStream")
self.StreamParam = params.get("StreamParam")
class CreateLiveRecordResponse(AbstractModel):
"""CreateLiveRecord返回参数结构体
"""
def __init__(self):
"""
:param TaskId: 任务ID,全局唯一标识录制任务。
:type TaskId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TaskId = None
self.RequestId = None
def _deserialize(self, params):
self.TaskId = params.get("TaskId")
self.RequestId = params.get("RequestId")
class CreateLiveRecordRuleRequest(AbstractModel):
"""CreateLiveRecordRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param TemplateId: 模板Id。
:type TemplateId: int
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param StreamName: 流名称。
注:如果本参数设置为非空字符串,规则将只对此推流起作用。
:type StreamName: str
"""
self.DomainName = None
self.TemplateId = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.TemplateId = params.get("TemplateId")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class CreateLiveRecordRuleResponse(AbstractModel):
"""CreateLiveRecordRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateLiveRecordTemplateRequest(AbstractModel):
"""CreateLiveRecordTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateName: 模板名。非空的字符串
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param FlvParam: Flv录制参数,开启Flv录制时设置。
:type FlvParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsParam: Hls录制参数,开启hls录制时设置。
:type HlsParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param Mp4Param: Mp4录制参数,开启Mp4录制时设置。
:type Mp4Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param AacParam: Aac录制参数,开启Aac录制时设置。
:type AacParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param IsDelayLive: 0:普通直播,
1:慢直播。
:type IsDelayLive: int
:param HlsSpecialParam: HLS专属录制参数。
:type HlsSpecialParam: :class:`tencentcloud.live.v20180801.models.HlsSpecialParam`
:param Mp3Param: Mp3录制参数,开启Mp3录制时设置。
:type Mp3Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
"""
self.TemplateName = None
self.Description = None
self.FlvParam = None
self.HlsParam = None
self.Mp4Param = None
self.AacParam = None
self.IsDelayLive = None
self.HlsSpecialParam = None
self.Mp3Param = None
def _deserialize(self, params):
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
if params.get("FlvParam") is not None:
self.FlvParam = RecordParam()
self.FlvParam._deserialize(params.get("FlvParam"))
if params.get("HlsParam") is not None:
self.HlsParam = RecordParam()
self.HlsParam._deserialize(params.get("HlsParam"))
if params.get("Mp4Param") is not None:
self.Mp4Param = RecordParam()
self.Mp4Param._deserialize(params.get("Mp4Param"))
if params.get("AacParam") is not None:
self.AacParam = RecordParam()
self.AacParam._deserialize(params.get("AacParam"))
self.IsDelayLive = params.get("IsDelayLive")
if params.get("HlsSpecialParam") is not None:
self.HlsSpecialParam = HlsSpecialParam()
self.HlsSpecialParam._deserialize(params.get("HlsSpecialParam"))
if params.get("Mp3Param") is not None:
self.Mp3Param = RecordParam()
self.Mp3Param._deserialize(params.get("Mp3Param"))
class CreateLiveRecordTemplateResponse(AbstractModel):
"""CreateLiveRecordTemplate返回参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TemplateId = None
self.RequestId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.RequestId = params.get("RequestId")
class CreateLiveSnapshotRuleRequest(AbstractModel):
"""CreateLiveSnapshotRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param TemplateId: 模板Id。
:type TemplateId: int
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param StreamName: 流名称。
注:如果本参数设置为非空字符串,规则将只对此推流起作用。
:type StreamName: str
"""
self.DomainName = None
self.TemplateId = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.TemplateId = params.get("TemplateId")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class CreateLiveSnapshotRuleResponse(AbstractModel):
"""CreateLiveSnapshotRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateLiveSnapshotTemplateRequest(AbstractModel):
"""CreateLiveSnapshotTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateName: 模板名称。非空的字符串。
:type TemplateName: str
:param CosAppId: Cos AppId。
:type CosAppId: int
:param CosBucket: Cos Bucket名称。
:type CosBucket: str
:param CosRegion: Cos地区。
:type CosRegion: str
:param Description: 描述信息。
:type Description: str
:param SnapshotInterval: 截图间隔,单位s,默认10s。
范围: 5s ~ 600s。
:type SnapshotInterval: int
:param Width: 截图宽度。默认:0(原始宽)。
:type Width: int
:param Height: 截图高度。默认:0(原始高)。
:type Height: int
:param PornFlag: 是否开启鉴黄,0:不开启,1:开启。默认:0。
:type PornFlag: int
"""
self.TemplateName = None
self.CosAppId = None
self.CosBucket = None
self.CosRegion = None
self.Description = None
self.SnapshotInterval = None
self.Width = None
self.Height = None
self.PornFlag = None
def _deserialize(self, params):
self.TemplateName = params.get("TemplateName")
self.CosAppId = params.get("CosAppId")
self.CosBucket = params.get("CosBucket")
self.CosRegion = params.get("CosRegion")
self.Description = params.get("Description")
self.SnapshotInterval = params.get("SnapshotInterval")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.PornFlag = params.get("PornFlag")
class CreateLiveSnapshotTemplateResponse(AbstractModel):
"""CreateLiveSnapshotTemplate返回参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TemplateId = None
self.RequestId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.RequestId = params.get("RequestId")
class CreateLiveTranscodeRuleRequest(AbstractModel):
"""CreateLiveTranscodeRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 播放域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
:param TemplateId: 指定已有的模板Id。
:type TemplateId: int
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
self.TemplateId = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
self.TemplateId = params.get("TemplateId")
class CreateLiveTranscodeRuleResponse(AbstractModel):
"""CreateLiveTranscodeRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateLiveTranscodeTemplateRequest(AbstractModel):
"""CreateLiveTranscodeTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateName: 模板名称,例:900 900p 仅支持字母和数字的组合。
:type TemplateName: str
:param VideoBitrate: 视频码率。范围:100-8000。
:type VideoBitrate: int
:param Vcodec: 视频编码:h264/h265,默认h264。
注意:当前该参数未生效,待后续支持!
:type Vcodec: str
:param Acodec: 音频编码:aac,默认原始音频格式。
注意:当前该参数未生效,待后续支持!
:type Acodec: str
:param AudioBitrate: 音频码率:默认0。0-500。
:type AudioBitrate: int
:param Description: 模板描述。
:type Description: str
:param Width: 宽,默认0。
:type Width: int
:param NeedVideo: 是否保留视频,0:否,1:是。默认1。
:type NeedVideo: int
:param NeedAudio: 是否保留音频,0:否,1:是。默认1。
:type NeedAudio: int
:param Height: 高,默认0。
:type Height: int
:param Fps: 帧率,默认0。
:type Fps: int
:param Gop: 关键帧间隔,单位:秒。默认原始的间隔
:type Gop: int
:param Rotate: 是否旋转,0:否,1:是。默认0。
:type Rotate: int
:param Profile: 编码质量:
baseline/main/high。默认baseline
:type Profile: str
:param BitrateToOrig: 是否不超过原始码率,0:否,1:是。默认0。
:type BitrateToOrig: int
:param HeightToOrig: 是否不超过原始高,0:否,1:是。默认0。
:type HeightToOrig: int
:param FpsToOrig: 是否不超过原始帧率,0:否,1:是。默认0。
:type FpsToOrig: int
"""
self.TemplateName = None
self.VideoBitrate = None
self.Vcodec = None
self.Acodec = None
self.AudioBitrate = None
self.Description = None
self.Width = None
self.NeedVideo = None
self.NeedAudio = None
self.Height = None
self.Fps = None
self.Gop = None
self.Rotate = None
self.Profile = None
self.BitrateToOrig = None
self.HeightToOrig = None
self.FpsToOrig = None
def _deserialize(self, params):
self.TemplateName = params.get("TemplateName")
self.VideoBitrate = params.get("VideoBitrate")
self.Vcodec = params.get("Vcodec")
self.Acodec = params.get("Acodec")
self.AudioBitrate = params.get("AudioBitrate")
self.Description = params.get("Description")
self.Width = params.get("Width")
self.NeedVideo = params.get("NeedVideo")
self.NeedAudio = params.get("NeedAudio")
self.Height = params.get("Height")
self.Fps = params.get("Fps")
self.Gop = params.get("Gop")
self.Rotate = params.get("Rotate")
self.Profile = params.get("Profile")
self.BitrateToOrig = params.get("BitrateToOrig")
self.HeightToOrig = params.get("HeightToOrig")
self.FpsToOrig = params.get("FpsToOrig")
class CreateLiveTranscodeTemplateResponse(AbstractModel):
"""CreateLiveTranscodeTemplate返回参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TemplateId = None
self.RequestId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.RequestId = params.get("RequestId")
class CreateLiveWatermarkRuleRequest(AbstractModel):
"""CreateLiveWatermarkRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
:param TemplateId: 水印Id,即调用[AddLiveWatermark](/document/product/267/30154)接口返回的WatermarkId。
:type TemplateId: int
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
self.TemplateId = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
self.TemplateId = params.get("TemplateId")
class CreateLiveWatermarkRuleResponse(AbstractModel):
"""CreateLiveWatermarkRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreatePullStreamConfigRequest(AbstractModel):
"""CreatePullStreamConfig请求参数结构体
"""
def __init__(self):
"""
:param FromUrl: 源Url。目前可支持直播流及点播文件。
:type FromUrl: str
:param ToUrl: 目的Url,目前限制该目标地址为腾讯域名。
:type ToUrl: str
:param AreaId: 区域id,1-深圳,2-上海,3-天津,4-香港。
:type AreaId: int
:param IspId: 运营商id,1-电信,2-移动,3-联通,4-其他,AreaId为4的时候,IspId只能为其他。
:type IspId: int
:param StartTime: 开始时间。
使用UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type StartTime: str
:param EndTime: 结束时间,注意:
1. 结束时间必须大于开始时间;
2. 结束时间和开始时间必须大于当前时间;
3. 结束时间 和 开始时间 间隔必须小于七天。
使用UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type EndTime: str
"""
self.FromUrl = None
self.ToUrl = None
self.AreaId = None
self.IspId = None
self.StartTime = None
self.EndTime = None
def _deserialize(self, params):
self.FromUrl = params.get("FromUrl")
self.ToUrl = params.get("ToUrl")
self.AreaId = params.get("AreaId")
self.IspId = params.get("IspId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
class CreatePullStreamConfigResponse(AbstractModel):
"""CreatePullStreamConfig返回参数结构体
"""
def __init__(self):
"""
:param ConfigId: 配置成功后的id。
:type ConfigId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ConfigId = None
self.RequestId = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
self.RequestId = params.get("RequestId")
class DayStreamPlayInfo(AbstractModel):
"""流播放信息
"""
def __init__(self):
"""
:param Time: 数据时间点,格式:yyyy-mm-dd HH:MM:SS。
:type Time: str
:param Bandwidth: 带宽(单位Mbps)。
:type Bandwidth: float
:param Flux: 流量 (单位MB)。
:type Flux: float
:param Request: 请求数。
:type Request: int
:param Online: 在线人数。
:type Online: int
"""
self.Time = None
self.Bandwidth = None
self.Flux = None
self.Request = None
self.Online = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.Bandwidth = params.get("Bandwidth")
self.Flux = params.get("Flux")
self.Request = params.get("Request")
self.Online = params.get("Online")
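# Illustrative sketch only: _deserialize is how the SDK maps a raw JSON dict
# onto a model instance. User code normally receives these objects already
# populated from a response, but the contract is simply "read the documented keys".
def _example_day_stream_play_info_from_dict(raw_point):
    # raw_point is expected to look like:
    # {"Time": "2019-01-08 10:00:00", "Bandwidth": 1.2,
    #  "Flux": 9.0, "Request": 100, "Online": 50}
    info = DayStreamPlayInfo()
    info._deserialize(raw_point)
    return info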
class DelayInfo(AbstractModel):
"""延播信息
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
:param DelayInterval: 延播时间,单位:秒。
:type DelayInterval: int
:param CreateTime: 创建时间,UTC时间。
注意:UTC时间和北京时间相差8小时。
例如:2019-06-18T12:00:00Z(为北京时间 2019 年 6 月 18 日 20 点 0 分 0 秒)。
:type CreateTime: str
:param ExpireTime: 过期时间,UTC时间。
注意:UTC时间和北京时间相差8小时。
例如:2019-06-18T12:00:00Z(为北京时间 2019 年 6 月 18 日 20 点 0 分 0 秒)。
:type ExpireTime: str
:param Status: 当前状态,
-1:已过期,
1: 生效中。
:type Status: int
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
self.DelayInterval = None
self.CreateTime = None
self.ExpireTime = None
self.Status = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
self.DelayInterval = params.get("DelayInterval")
self.CreateTime = params.get("CreateTime")
self.ExpireTime = params.get("ExpireTime")
self.Status = params.get("Status")
class DeleteLiveCallbackRuleRequest(AbstractModel):
"""DeleteLiveCallbackRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
"""
self.DomainName = None
self.AppName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
class DeleteLiveCallbackRuleResponse(AbstractModel):
"""DeleteLiveCallbackRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveCallbackTemplateRequest(AbstractModel):
"""DeleteLiveCallbackTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DeleteLiveCallbackTemplateResponse(AbstractModel):
"""DeleteLiveCallbackTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveCertRequest(AbstractModel):
"""DeleteLiveCert请求参数结构体
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: int
"""
self.CertId = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
class DeleteLiveCertResponse(AbstractModel):
"""DeleteLiveCert返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveDomainRequest(AbstractModel):
"""DeleteLiveDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 要删除的域名
:type DomainName: str
:param DomainType: 类型。0-推流,1-播放
:type DomainType: int
"""
self.DomainName = None
self.DomainType = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.DomainType = params.get("DomainType")
class DeleteLiveDomainResponse(AbstractModel):
"""DeleteLiveDomain返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveRecordRequest(AbstractModel):
"""DeleteLiveRecord请求参数结构体
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param TaskId: 任务ID,全局唯一标识录制任务。
:type TaskId: int
"""
self.StreamName = None
self.TaskId = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.TaskId = params.get("TaskId")
class DeleteLiveRecordResponse(AbstractModel):
"""DeleteLiveRecord返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveRecordRuleRequest(AbstractModel):
"""DeleteLiveRecordRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
域名+AppName+StreamName唯一标识单个录制规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
域名+AppName+StreamName唯一标识单个录制规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type AppName: str
:param StreamName: 流名称。
域名+AppName+StreamName唯一标识单个录制规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type StreamName: str
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class DeleteLiveRecordRuleResponse(AbstractModel):
"""DeleteLiveRecordRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveRecordTemplateRequest(AbstractModel):
"""DeleteLiveRecordTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板ID。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DeleteLiveRecordTemplateResponse(AbstractModel):
"""DeleteLiveRecordTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveSnapshotRuleRequest(AbstractModel):
"""DeleteLiveSnapshotRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class DeleteLiveSnapshotRuleResponse(AbstractModel):
"""DeleteLiveSnapshotRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveSnapshotTemplateRequest(AbstractModel):
"""DeleteLiveSnapshotTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DeleteLiveSnapshotTemplateResponse(AbstractModel):
"""DeleteLiveSnapshotTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveTranscodeRuleRequest(AbstractModel):
"""DeleteLiveTranscodeRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
域名维度转码,域名+AppName+StreamName唯一标识单个转码规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
域名+AppName+StreamName+TemplateId唯一标识单个转码规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type AppName: str
:param StreamName: 流名称。
域名+AppName+StreamName+TemplateId唯一标识单个转码规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type StreamName: str
:param TemplateId: 模板ID。
域名+AppName+StreamName+TemplateId唯一标识单个转码规则,如需删除需要强匹配,比如AppName为空也需要传空字符串进行强匹配。
:type TemplateId: int
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
self.TemplateId = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
self.TemplateId = params.get("TemplateId")
class DeleteLiveTranscodeRuleResponse(AbstractModel):
"""DeleteLiveTranscodeRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
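# Illustrative sketch only: the delete-rule requests above require a strong match
# on DomainName/AppName/StreamName/TemplateId, so fields that were empty when the
# rule was created must be passed as empty strings rather than left as None.
def _example_build_delete_transcode_rule(domain_name, template_id):
    req = DeleteLiveTranscodeRuleRequest()
    req.DomainName = domain_name
    req.AppName = ""     # rule was bound at domain level, so pass "" explicitly
    req.StreamName = ""  # same: empty string, not None
    req.TemplateId = template_id
    return req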
class DeleteLiveTranscodeTemplateRequest(AbstractModel):
"""DeleteLiveTranscodeTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DeleteLiveTranscodeTemplateResponse(AbstractModel):
"""DeleteLiveTranscodeTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveWatermarkRequest(AbstractModel):
"""DeleteLiveWatermark请求参数结构体
"""
def __init__(self):
"""
:param WatermarkId: 水印ID。
:type WatermarkId: int
"""
self.WatermarkId = None
def _deserialize(self, params):
self.WatermarkId = params.get("WatermarkId")
class DeleteLiveWatermarkResponse(AbstractModel):
"""DeleteLiveWatermark返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteLiveWatermarkRuleRequest(AbstractModel):
"""DeleteLiveWatermarkRule请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.DomainName = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class DeleteLiveWatermarkRuleResponse(AbstractModel):
"""DeleteLiveWatermarkRule返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeletePullStreamConfigRequest(AbstractModel):
"""DeletePullStreamConfig请求参数结构体
"""
def __init__(self):
"""
:param ConfigId: 配置id。
:type ConfigId: str
"""
self.ConfigId = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
class DeletePullStreamConfigResponse(AbstractModel):
"""DeletePullStreamConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeBillBandwidthAndFluxListRequest(AbstractModel):
"""DescribeBillBandwidthAndFluxList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS,起始和结束时间跨度不支持超过31天。
:type EndTime: str
:param PlayDomains: 直播播放域名,若不填,表示总体数据。
:type PlayDomains: list of str
:param MainlandOrOversea: 国内还是国外,若不填,表示国内+国外。
:type MainlandOrOversea: str
:param Granularity: 数据粒度,支持如下粒度:
5:5分钟粒度,默认值(跨度不支持超过1天);
60:1小时粒度(跨度不支持超过一个月);
1440:天粒度(跨度不支持超过一个月)。
:type Granularity: int
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
self.MainlandOrOversea = None
self.Granularity = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
self.MainlandOrOversea = params.get("MainlandOrOversea")
self.Granularity = params.get("Granularity")
class DescribeBillBandwidthAndFluxListResponse(AbstractModel):
"""DescribeBillBandwidthAndFluxList返回参数结构体
"""
def __init__(self):
"""
:param PeakBandwidthTime: 峰值带宽所在时间点,格式为yyyy-mm-dd HH:MM:SS。
:type PeakBandwidthTime: str
:param PeakBandwidth: 峰值带宽,单位是Mbps。
:type PeakBandwidth: float
:param P95PeakBandwidthTime: 95峰值带宽所在时间点,格式为yyyy-mm-dd HH:MM:SS。
:type P95PeakBandwidthTime: str
:param P95PeakBandwidth: 95峰值带宽,单位是Mbps。
:type P95PeakBandwidth: float
:param SumFlux: 总流量,单位是MB。
:type SumFlux: float
:param DataInfoList: 明细数据信息。
:type DataInfoList: list of BillDataInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PeakBandwidthTime = None
self.PeakBandwidth = None
self.P95PeakBandwidthTime = None
self.P95PeakBandwidth = None
self.SumFlux = None
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.PeakBandwidthTime = params.get("PeakBandwidthTime")
self.PeakBandwidth = params.get("PeakBandwidth")
self.P95PeakBandwidthTime = params.get("P95PeakBandwidthTime")
self.P95PeakBandwidth = params.get("P95PeakBandwidth")
self.SumFlux = params.get("SumFlux")
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = BillDataInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
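# Illustrative sketch only: reading the per-point details out of a
# DescribeBillBandwidthAndFluxListResponse. The response is assumed to have been
# returned by the companion LiveClient, and each BillDataInfo point is assumed
# to expose a Bandwidth field in Mbps (matching the documented granularity).
def _example_summarize_bandwidth(resp):
    points = resp.DataInfoList or []
    peak = 0.0
    for p in points:
        if p.Bandwidth is not None and p.Bandwidth > peak:
            peak = p.Bandwidth
    return {"peak_mbps": peak, "sum_flux_mb": resp.SumFlux, "points": len(points)}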
class DescribeGroupProIspPlayInfoListRequest(AbstractModel):
"""DescribeGroupProIspPlayInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS
时间跨度在(0,3小时],支持最近1个月数据查询。
:type EndTime: str
:param PlayDomains: 播放域名,默认为不填,表示求总体数据。
:type PlayDomains: list of str
:param ProvinceNames: 省份列表,默认不填,则返回各省份的数据。
:type ProvinceNames: list of str
:param IspNames: 运营商列表,默认不填,则返回各运营商的数据。
:type IspNames: list of str
:param MainlandOrOversea: 国内还是国外,如果为空,查询所有地区数据;如果为“Mainland”,查询国内数据;如果为“Oversea”,则查询国外数据。
:type MainlandOrOversea: str
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
self.ProvinceNames = None
self.IspNames = None
self.MainlandOrOversea = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
self.ProvinceNames = params.get("ProvinceNames")
self.IspNames = params.get("IspNames")
self.MainlandOrOversea = params.get("MainlandOrOversea")
class DescribeGroupProIspPlayInfoListResponse(AbstractModel):
"""DescribeGroupProIspPlayInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 数据内容。
:type DataInfoList: list of GroupProIspDataInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = GroupProIspDataInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeHttpStatusInfoListRequest(AbstractModel):
"""DescribeHttpStatusInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
StartTime不能为3个月前。
:type StartTime: str
:param EndTime: 结束时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
注:EndTime 和 StartTime 只支持最近1天的数据查询。
:type EndTime: str
:param PlayDomains: 播放域名列表。
:type PlayDomains: list of str
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
class DescribeHttpStatusInfoListResponse(AbstractModel):
"""DescribeHttpStatusInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 播放状态码列表。
:type DataInfoList: list of HttpStatusData
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = HttpStatusData()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCallbackRulesRequest(AbstractModel):
"""DescribeLiveCallbackRules请求参数结构体
"""
class DescribeLiveCallbackRulesResponse(AbstractModel):
"""DescribeLiveCallbackRules返回参数结构体
"""
def __init__(self):
"""
:param Rules: 规则信息列表。
:type Rules: list of CallBackRuleInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = CallBackRuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCallbackTemplateRequest(AbstractModel):
"""DescribeLiveCallbackTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DescribeLiveCallbackTemplateResponse(AbstractModel):
"""DescribeLiveCallbackTemplate返回参数结构体
"""
def __init__(self):
"""
:param Template: 回调模板信息。
:type Template: :class:`tencentcloud.live.v20180801.models.CallBackTemplateInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = CallBackTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveCallbackTemplatesRequest(AbstractModel):
"""DescribeLiveCallbackTemplates请求参数结构体
"""
class DescribeLiveCallbackTemplatesResponse(AbstractModel):
"""DescribeLiveCallbackTemplates返回参数结构体
"""
def __init__(self):
"""
:param Templates: 模板信息列表。
:type Templates: list of CallBackTemplateInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = CallBackTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveCertRequest(AbstractModel):
"""DescribeLiveCert请求参数结构体
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: int
"""
self.CertId = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
class DescribeLiveCertResponse(AbstractModel):
"""DescribeLiveCert返回参数结构体
"""
def __init__(self):
"""
:param CertInfo: 证书信息。
:type CertInfo: :class:`tencentcloud.live.v20180801.models.CertInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.CertInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CertInfo") is not None:
self.CertInfo = CertInfo()
self.CertInfo._deserialize(params.get("CertInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveCertsRequest(AbstractModel):
"""DescribeLiveCerts请求参数结构体
"""
class DescribeLiveCertsResponse(AbstractModel):
"""DescribeLiveCerts返回参数结构体
"""
def __init__(self):
"""
:param CertInfoSet: 证书信息列表。
:type CertInfoSet: list of CertInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.CertInfoSet = None
self.RequestId = None
def _deserialize(self, params):
if params.get("CertInfoSet") is not None:
self.CertInfoSet = []
for item in params.get("CertInfoSet"):
obj = CertInfo()
obj._deserialize(item)
self.CertInfoSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDelayInfoListRequest(AbstractModel):
"""DescribeLiveDelayInfoList请求参数结构体
"""
class DescribeLiveDelayInfoListResponse(AbstractModel):
"""DescribeLiveDelayInfoList返回参数结构体
"""
def __init__(self):
"""
:param DelayInfoList: 延播信息列表。
:type DelayInfoList: list of DelayInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DelayInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DelayInfoList") is not None:
self.DelayInfoList = []
for item in params.get("DelayInfoList"):
obj = DelayInfo()
obj._deserialize(item)
self.DelayInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDomainCertRequest(AbstractModel):
"""DescribeLiveDomainCert请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 播放域名。
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class DescribeLiveDomainCertResponse(AbstractModel):
"""DescribeLiveDomainCert返回参数结构体
"""
def __init__(self):
"""
:param DomainCertInfo: 证书信息。
:type DomainCertInfo: :class:`tencentcloud.live.v20180801.models.DomainCertInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DomainCertInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DomainCertInfo") is not None:
self.DomainCertInfo = DomainCertInfo()
self.DomainCertInfo._deserialize(params.get("DomainCertInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveDomainPlayInfoListRequest(AbstractModel):
"""DescribeLiveDomainPlayInfoList请求参数结构体
"""
def __init__(self):
"""
:param PlayDomains: 播放域名列表。
:type PlayDomains: list of str
"""
self.PlayDomains = None
def _deserialize(self, params):
self.PlayDomains = params.get("PlayDomains")
class DescribeLiveDomainPlayInfoListResponse(AbstractModel):
"""DescribeLiveDomainPlayInfoList返回参数结构体
"""
def __init__(self):
"""
:param Time: 数据时间,格式为yyyy-mm-dd HH:MM:SS。
:type Time: str
:param TotalBandwidth: 实时总带宽。
:type TotalBandwidth: float
:param TotalFlux: 实时总流量。
:type TotalFlux: float
:param TotalRequest: 实时总请求数。
:type TotalRequest: int
:param TotalOnline: 实时总连接数。
:type TotalOnline: int
:param DomainInfoList: 分域名的数据情况。
:type DomainInfoList: list of DomainInfoList
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Time = None
self.TotalBandwidth = None
self.TotalFlux = None
self.TotalRequest = None
self.TotalOnline = None
self.DomainInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.TotalBandwidth = params.get("TotalBandwidth")
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
self.TotalOnline = params.get("TotalOnline")
if params.get("DomainInfoList") is not None:
self.DomainInfoList = []
for item in params.get("DomainInfoList"):
obj = DomainInfoList()
obj._deserialize(item)
self.DomainInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveDomainRequest(AbstractModel):
"""DescribeLiveDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 域名。
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class DescribeLiveDomainResponse(AbstractModel):
"""DescribeLiveDomain返回参数结构体
"""
def __init__(self):
"""
:param DomainInfo: 域名信息。
:type DomainInfo: :class:`tencentcloud.live.v20180801.models.DomainInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DomainInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DomainInfo") is not None:
self.DomainInfo = DomainInfo()
self.DomainInfo._deserialize(params.get("DomainInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveDomainsRequest(AbstractModel):
"""DescribeLiveDomains请求参数结构体
"""
def __init__(self):
"""
:param DomainStatus: 域名状态过滤。0-停用,1-启用
:type DomainStatus: int
:param DomainType: 域名类型过滤。0-推流,1-播放
:type DomainType: int
:param PageSize: 分页大小,范围:10~100。默认10
:type PageSize: int
:param PageNum: 取第几页,范围:1~100000。默认1
:type PageNum: int
:param IsDelayLive: 0:普通直播,1:慢直播。默认0。
:type IsDelayLive: int
"""
self.DomainStatus = None
self.DomainType = None
self.PageSize = None
self.PageNum = None
self.IsDelayLive = None
def _deserialize(self, params):
self.DomainStatus = params.get("DomainStatus")
self.DomainType = params.get("DomainType")
self.PageSize = params.get("PageSize")
self.PageNum = params.get("PageNum")
self.IsDelayLive = params.get("IsDelayLive")
class DescribeLiveDomainsResponse(AbstractModel):
"""DescribeLiveDomains返回参数结构体
"""
def __init__(self):
"""
:param AllCount: 总记录数
:type AllCount: int
:param DomainList: 域名详细信息列表
:type DomainList: list of DomainInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AllCount = None
self.DomainList = None
self.RequestId = None
def _deserialize(self, params):
self.AllCount = params.get("AllCount")
if params.get("DomainList") is not None:
self.DomainList = []
for item in params.get("DomainList"):
obj = DomainInfo()
obj._deserialize(item)
self.DomainList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveForbidStreamListRequest(AbstractModel):
"""DescribeLiveForbidStreamList请求参数结构体
"""
def __init__(self):
"""
:param PageNum: 取得第几页,默认1。
:type PageNum: int
:param PageSize: 每页大小,最大100。
取值:1~100之间的任意整数。
默认值:10。
:type PageSize: int
"""
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribeLiveForbidStreamListResponse(AbstractModel):
"""DescribeLiveForbidStreamList返回参数结构体
"""
def __init__(self):
"""
:param TotalNum: 符合条件的总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param PageNum: 分页的页码。
:type PageNum: int
:param PageSize: 每页显示的条数。
:type PageSize: int
:param ForbidStreamList: 禁推流列表。
:type ForbidStreamList: list of ForbidStreamInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalNum = None
self.TotalPage = None
self.PageNum = None
self.PageSize = None
self.ForbidStreamList = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
if params.get("ForbidStreamList") is not None:
self.ForbidStreamList = []
for item in params.get("ForbidStreamList"):
obj = ForbidStreamInfo()
obj._deserialize(item)
self.ForbidStreamList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLivePlayAuthKeyRequest(AbstractModel):
"""DescribeLivePlayAuthKey请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 域名。
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class DescribeLivePlayAuthKeyResponse(AbstractModel):
"""DescribeLivePlayAuthKey返回参数结构体
"""
def __init__(self):
"""
:param PlayAuthKeyInfo: 播放鉴权key信息。
:type PlayAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PlayAuthKeyInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PlayAuthKeyInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PlayAuthKeyInfo") is not None:
self.PlayAuthKeyInfo = PlayAuthKeyInfo()
self.PlayAuthKeyInfo._deserialize(params.get("PlayAuthKeyInfo"))
self.RequestId = params.get("RequestId")
class DescribeLivePushAuthKeyRequest(AbstractModel):
"""DescribeLivePushAuthKey请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class DescribeLivePushAuthKeyResponse(AbstractModel):
"""DescribeLivePushAuthKey返回参数结构体
"""
def __init__(self):
"""
:param PushAuthKeyInfo: 推流鉴权key信息。
:type PushAuthKeyInfo: :class:`tencentcloud.live.v20180801.models.PushAuthKeyInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PushAuthKeyInfo = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PushAuthKeyInfo") is not None:
self.PushAuthKeyInfo = PushAuthKeyInfo()
self.PushAuthKeyInfo._deserialize(params.get("PushAuthKeyInfo"))
self.RequestId = params.get("RequestId")
class DescribeLiveRecordRulesRequest(AbstractModel):
"""DescribeLiveRecordRules请求参数结构体
"""
class DescribeLiveRecordRulesResponse(AbstractModel):
"""DescribeLiveRecordRules返回参数结构体
"""
def __init__(self):
"""
:param Rules: 规则列表。
:type Rules: list of RuleInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplateRequest(AbstractModel):
"""DescribeLiveRecordTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DescribeLiveRecordTemplateResponse(AbstractModel):
"""DescribeLiveRecordTemplate返回参数结构体
"""
def __init__(self):
"""
:param Template: 录制模板信息。
:type Template: :class:`tencentcloud.live.v20180801.models.RecordTemplateInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = RecordTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveRecordTemplatesRequest(AbstractModel):
"""DescribeLiveRecordTemplates请求参数结构体
"""
def __init__(self):
"""
:param IsDelayLive: 是否属于慢直播模板
:type IsDelayLive: int
"""
self.IsDelayLive = None
def _deserialize(self, params):
self.IsDelayLive = params.get("IsDelayLive")
class DescribeLiveRecordTemplatesResponse(AbstractModel):
"""DescribeLiveRecordTemplates返回参数结构体
"""
def __init__(self):
"""
:param Templates: 录制模板信息列表。
:type Templates: list of RecordTemplateInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = RecordTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotRulesRequest(AbstractModel):
"""DescribeLiveSnapshotRules请求参数结构体
"""
class DescribeLiveSnapshotRulesResponse(AbstractModel):
"""DescribeLiveSnapshotRules返回参数结构体
"""
def __init__(self):
"""
:param Rules: 规则列表。
:type Rules: list of RuleInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplateRequest(AbstractModel):
"""DescribeLiveSnapshotTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DescribeLiveSnapshotTemplateResponse(AbstractModel):
"""DescribeLiveSnapshotTemplate返回参数结构体
"""
def __init__(self):
"""
:param Template: 截图模板信息。
:type Template: :class:`tencentcloud.live.v20180801.models.SnapshotTemplateInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = SnapshotTemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveSnapshotTemplatesRequest(AbstractModel):
"""DescribeLiveSnapshotTemplates请求参数结构体
"""
class DescribeLiveSnapshotTemplatesResponse(AbstractModel):
"""DescribeLiveSnapshotTemplates返回参数结构体
"""
def __init__(self):
"""
:param Templates: 截图模板列表。
:type Templates: list of SnapshotTemplateInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = SnapshotTemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveStreamEventListRequest(AbstractModel):
"""DescribeLiveStreamEventList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间。
UTC 格式,例如:2018-12-29T19:00:00Z。
支持查询60天内的历史记录。
:type StartTime: str
:param EndTime: 结束时间。
UTC 格式,例如:2018-12-29T20:00:00Z。
不超过当前时间,且和起始时间相差不得超过30天。
:type EndTime: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
:param StreamName: 流名称,不支持通配符(*)查询,默认模糊匹配。
可使用IsStrict字段改为精确查询。
:type StreamName: str
:param PageNum: 取得第几页。
默认值:1。
注: 目前只支持10000条内的查询。
:type PageNum: int
:param PageSize: 分页大小。
最大值:100。
取值范围:1~100 之间的任意整数。
默认值:10。
注: 目前只支持10000条内的查询。
:type PageSize: int
:param IsFilter: 是否过滤,默认不过滤。
0:不进行任何过滤。
1:过滤掉开播失败的,只返回开播成功的。
:type IsFilter: int
:param IsStrict: 是否精确查询,默认模糊匹配。
0:模糊匹配。
1:精确查询。
注:使用StreamName时该参数生效。
:type IsStrict: int
:param IsAsc: 是否按结束时间正序显示,默认逆序。
0:逆序。
1:正序。
:type IsAsc: int
"""
self.StartTime = None
self.EndTime = None
self.AppName = None
self.DomainName = None
self.StreamName = None
self.PageNum = None
self.PageSize = None
self.IsFilter = None
self.IsStrict = None
self.IsAsc = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.IsFilter = params.get("IsFilter")
self.IsStrict = params.get("IsStrict")
self.IsAsc = params.get("IsAsc")
class DescribeLiveStreamEventListResponse(AbstractModel):
"""DescribeLiveStreamEventList返回参数结构体
"""
def __init__(self):
"""
:param EventList: 推断流事件列表。
:type EventList: list of StreamEventInfo
:param PageNum: 分页的页码。
:type PageNum: int
:param PageSize: 每页大小。
:type PageSize: int
:param TotalNum: 符合条件的总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.EventList = None
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.RequestId = None
def _deserialize(self, params):
if params.get("EventList") is not None:
self.EventList = []
for item in params.get("EventList"):
obj = StreamEventInfo()
obj._deserialize(item)
self.EventList.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.RequestId = params.get("RequestId")
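# Illustrative sketch only: querying push/stop events for one exact stream name
# by turning on IsStrict, within the documented 30-day window. Only the request
# wiring is shown; sending it through the companion LiveClient is an assumption.
def _example_build_event_list_request(domain_name, stream_name, start_utc, end_utc, page_num=1):
    req = DescribeLiveStreamEventListRequest()
    req.DomainName = domain_name
    req.AppName = "live"
    req.StreamName = stream_name
    req.IsStrict = 1           # exact match instead of the default fuzzy match
    req.StartTime = start_utc  # e.g. "2018-12-29T19:00:00Z"
    req.EndTime = end_utc      # must be within 30 days of StartTime
    req.PageNum = page_num
    req.PageSize = 100
    return req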
class DescribeLiveStreamOnlineInfoRequest(AbstractModel):
"""DescribeLiveStreamOnlineInfo请求参数结构体
"""
def __init__(self):
"""
:param PageNum: 取得第几页。
默认值:1。
:type PageNum: int
:param PageSize: 分页大小。
最大值:100。
取值范围:1~100 之间的任意整数。
默认值:10。
:type PageSize: int
:param Status: 0:未开始推流,1:正在推流。
:type Status: int
:param StreamName: 流名称。
:type StreamName: str
"""
self.PageNum = None
self.PageSize = None
self.Status = None
self.StreamName = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.Status = params.get("Status")
self.StreamName = params.get("StreamName")
class DescribeLiveStreamOnlineInfoResponse(AbstractModel):
"""DescribeLiveStreamOnlineInfo返回参数结构体
"""
def __init__(self):
"""
:param PageNum: 分页的页码。
:type PageNum: int
:param PageSize: 每页大小。
:type PageSize: int
:param TotalNum: 符合条件的总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param StreamInfoList: 流信息列表。
:type StreamInfoList: list of StreamInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.StreamInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
if params.get("StreamInfoList") is not None:
self.StreamInfoList = []
for item in params.get("StreamInfoList"):
obj = StreamInfo()
obj._deserialize(item)
self.StreamInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveStreamOnlineListRequest(AbstractModel):
"""DescribeLiveStreamOnlineList请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param PageNum: 取得第几页,默认1。
:type PageNum: int
:param PageSize: 每页大小,最大100。
取值:10~100之间的任意整数。
默认值:10。
:type PageSize: int
:param StreamName: 流名称,用于精确查询。
:type StreamName: str
"""
self.DomainName = None
self.AppName = None
self.PageNum = None
self.PageSize = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.StreamName = params.get("StreamName")
class DescribeLiveStreamOnlineListResponse(AbstractModel):
"""DescribeLiveStreamOnlineList返回参数结构体
"""
def __init__(self):
"""
:param TotalNum: 符合条件的总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param PageNum: 分页的页码。
:type PageNum: int
:param PageSize: 每页显示的条数。
:type PageSize: int
:param OnlineInfo: 正在推送流的信息列表。
:type OnlineInfo: list of StreamOnlineInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalNum = None
self.TotalPage = None
self.PageNum = None
self.PageSize = None
self.OnlineInfo = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
if params.get("OnlineInfo") is not None:
self.OnlineInfo = []
for item in params.get("OnlineInfo"):
obj = StreamOnlineInfo()
obj._deserialize(item)
self.OnlineInfo.append(obj)
self.RequestId = params.get("RequestId")
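# Illustrative sketch only: collecting every online stream by paging through
# DescribeLiveStreamOnlineList. The fetch_page callable stands in for the
# companion LiveClient method (an assumption, not part of this module), and each
# StreamOnlineInfo item is assumed to carry a StreamName field.
def _example_collect_online_streams(fetch_page, domain_name):
    names, page = [], 1
    while True:
        req = DescribeLiveStreamOnlineListRequest()
        req.DomainName = domain_name
        req.AppName = "live"
        req.PageNum = page
        req.PageSize = 100
        resp = fetch_page(req)  # returns a DescribeLiveStreamOnlineListResponse
        for info in resp.OnlineInfo or []:
            names.append(info.StreamName)
        if page >= (resp.TotalPage or 0):
            break
        page += 1
    return names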
class DescribeLiveStreamPublishedListRequest(AbstractModel):
"""DescribeLiveStreamPublishedList请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 您的推流域名。
:type DomainName: str
:param EndTime: 结束时间。
UTC 格式,例如:2016-06-30T19:00:00Z。
不超过当前时间。
注意:EndTime和StartTime相差不可超过30天。
:type EndTime: str
:param StartTime: 起始时间。
UTC 格式,例如:2016-06-29T19:00:00Z。
最长支持查询60天内数据。
:type StartTime: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。不支持模糊匹配。
:type AppName: str
:param PageNum: 取得第几页。
默认值:1。
:type PageNum: int
:param PageSize: 分页大小。
最大值:100。
取值范围:1~100 之间的任意整数。
默认值:10。
:type PageSize: int
:param StreamName: 流名称,支持模糊匹配。
:type StreamName: str
"""
self.DomainName = None
self.EndTime = None
self.StartTime = None
self.AppName = None
self.PageNum = None
self.PageSize = None
self.StreamName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.AppName = params.get("AppName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.StreamName = params.get("StreamName")
class DescribeLiveStreamPublishedListResponse(AbstractModel):
"""DescribeLiveStreamPublishedList返回参数结构体
"""
def __init__(self):
"""
:param PublishInfo: 推流记录信息。
:type PublishInfo: list of StreamName
:param PageNum: 分页的页码。
:type PageNum: int
:param PageSize: 每页大小
:type PageSize: int
:param TotalNum: 符合条件的总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PublishInfo = None
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PublishInfo") is not None:
self.PublishInfo = []
for item in params.get("PublishInfo"):
obj = StreamName()
obj._deserialize(item)
self.PublishInfo.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.RequestId = params.get("RequestId")
class DescribeLiveStreamPushInfoListRequest(AbstractModel):
"""DescribeLiveStreamPushInfoList请求参数结构体
"""
def __init__(self):
"""
:param PushDomain: 推流域名。
:type PushDomain: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
:param PageNum: 页数,
范围[1,10000],
默认值:1。
:type PageNum: int
:param PageSize: 每页个数,
范围:[1,1000],
默认值: 200。
:type PageSize: int
"""
self.PushDomain = None
self.AppName = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.PushDomain = params.get("PushDomain")
self.AppName = params.get("AppName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribeLiveStreamPushInfoListResponse(AbstractModel):
"""DescribeLiveStreamPushInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 直播流的统计信息列表。
:type DataInfoList: list of PushDataInfo
:param TotalNum: 所有在线流的总数量。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param PageNum: 当前数据所在页码。
:type PageNum: int
:param PageSize: 每页的在线流的个数。
:type PageSize: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.TotalNum = None
self.TotalPage = None
self.PageNum = None
self.PageSize = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = PushDataInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.RequestId = params.get("RequestId")
class DescribeLiveStreamStateRequest(AbstractModel):
"""DescribeLiveStreamState请求参数结构体
"""
def __init__(self):
"""
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param DomainName: 您的推流域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
class DescribeLiveStreamStateResponse(AbstractModel):
"""DescribeLiveStreamState返回参数结构体
"""
def __init__(self):
"""
:param StreamState: 流状态,
active:活跃,
inactive:非活跃,
forbid:禁播。
:type StreamState: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.StreamState = None
self.RequestId = None
def _deserialize(self, params):
self.StreamState = params.get("StreamState")
self.RequestId = params.get("RequestId")
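# Illustrative sketch only: checking whether a stream is live. StreamState in the
# response is one of "active", "inactive" or "forbid" as documented above; the
# query callable stands in for the companion LiveClient method (an assumption).
def _example_is_stream_active(query, domain_name, stream_name):
    req = DescribeLiveStreamStateRequest()
    req.DomainName = domain_name
    req.AppName = "live"
    req.StreamName = stream_name
    resp = query(req)  # returns a DescribeLiveStreamStateResponse
    return resp.StreamState == "active"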
class DescribeLiveTranscodeDetailInfoRequest(AbstractModel):
"""DescribeLiveTranscodeDetailInfo请求参数结构体
"""
def __init__(self):
"""
:param DayTime: 起始时间,北京时间,
格式:yyyymmdd。
注意:当前只支持查询近30天内某天的详细数据。
:type DayTime: str
:param PushDomain: 推流域名。
:type PushDomain: str
:param StreamName: 流名称。
:type StreamName: str
:param PageNum: 页数,默认1,
不超过100页。
:type PageNum: int
:param PageSize: 每页个数,默认20,
范围:[10,1000]。
:type PageSize: int
"""
self.DayTime = None
self.PushDomain = None
self.StreamName = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.DayTime = params.get("DayTime")
self.PushDomain = params.get("PushDomain")
self.StreamName = params.get("StreamName")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribeLiveTranscodeDetailInfoResponse(AbstractModel):
"""DescribeLiveTranscodeDetailInfo返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 统计数据列表。
:type DataInfoList: list of TranscodeDetailInfo
:param PageNum: 页码。
:type PageNum: int
:param PageSize: 每页个数。
:type PageSize: int
:param TotalNum: 总个数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.PageNum = None
self.PageSize = None
self.TotalNum = None
self.TotalPage = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = TranscodeDetailInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.RequestId = params.get("RequestId")
class DescribeLiveTranscodeRulesRequest(AbstractModel):
"""DescribeLiveTranscodeRules请求参数结构体
"""
class DescribeLiveTranscodeRulesResponse(AbstractModel):
"""DescribeLiveTranscodeRules返回参数结构体
"""
def __init__(self):
"""
:param Rules: 转码规则列表。
:type Rules: list of RuleInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveTranscodeTemplateRequest(AbstractModel):
"""DescribeLiveTranscodeTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
"""
self.TemplateId = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
class DescribeLiveTranscodeTemplateResponse(AbstractModel):
"""DescribeLiveTranscodeTemplate返回参数结构体
"""
def __init__(self):
"""
:param Template: 模板信息。
:type Template: :class:`tencentcloud.live.v20180801.models.TemplateInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Template = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Template") is not None:
self.Template = TemplateInfo()
self.Template._deserialize(params.get("Template"))
self.RequestId = params.get("RequestId")
class DescribeLiveTranscodeTemplatesRequest(AbstractModel):
"""DescribeLiveTranscodeTemplates请求参数结构体
"""
class DescribeLiveTranscodeTemplatesResponse(AbstractModel):
"""DescribeLiveTranscodeTemplates返回参数结构体
"""
def __init__(self):
"""
:param Templates: 转码模板列表。
:type Templates: list of TemplateInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Templates = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Templates") is not None:
self.Templates = []
for item in params.get("Templates"):
obj = TemplateInfo()
obj._deserialize(item)
self.Templates.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveWatermarkRequest(AbstractModel):
"""DescribeLiveWatermark请求参数结构体
"""
def __init__(self):
"""
:param WatermarkId: 水印ID。
:type WatermarkId: int
"""
self.WatermarkId = None
def _deserialize(self, params):
self.WatermarkId = params.get("WatermarkId")
class DescribeLiveWatermarkResponse(AbstractModel):
"""DescribeLiveWatermark返回参数结构体
"""
def __init__(self):
"""
:param Watermark: 水印信息。
:type Watermark: :class:`tencentcloud.live.v20180801.models.WatermarkInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Watermark = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Watermark") is not None:
self.Watermark = WatermarkInfo()
self.Watermark._deserialize(params.get("Watermark"))
self.RequestId = params.get("RequestId")
class DescribeLiveWatermarkRulesRequest(AbstractModel):
"""DescribeLiveWatermarkRules请求参数结构体
"""
class DescribeLiveWatermarkRulesResponse(AbstractModel):
"""DescribeLiveWatermarkRules返回参数结构体
"""
def __init__(self):
"""
:param Rules: 水印规则列表。
:type Rules: list of RuleInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Rules = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Rules") is not None:
self.Rules = []
for item in params.get("Rules"):
obj = RuleInfo()
obj._deserialize(item)
self.Rules.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLiveWatermarksRequest(AbstractModel):
"""DescribeLiveWatermarks请求参数结构体
"""
class DescribeLiveWatermarksResponse(AbstractModel):
"""DescribeLiveWatermarks返回参数结构体
"""
def __init__(self):
"""
:param TotalNum: 水印总个数。
:type TotalNum: int
:param WatermarkList: 水印信息列表。
:type WatermarkList: list of WatermarkInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalNum = None
self.WatermarkList = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("WatermarkList") is not None:
self.WatermarkList = []
for item in params.get("WatermarkList"):
obj = WatermarkInfo()
obj._deserialize(item)
self.WatermarkList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeLogDownloadListRequest(AbstractModel):
"""DescribeLogDownloadList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 开始时间,北京时间。
格式:yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间,北京时间。
格式:yyyy-mm-dd HH:MM:SS。
注意:结束时间 - 开始时间 <=7天。
:type EndTime: str
:param PlayDomains: 域名列表。
:type PlayDomains: list of str
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
class DescribeLogDownloadListResponse(AbstractModel):
"""DescribeLogDownloadList返回参数结构体
"""
def __init__(self):
"""
:param LogInfoList: 日志信息列表。
:type LogInfoList: list of LogInfo
:param TotalNum: 总条数。
:type TotalNum: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.LogInfoList = None
self.TotalNum = None
self.RequestId = None
def _deserialize(self, params):
if params.get("LogInfoList") is not None:
self.LogInfoList = []
for item in params.get("LogInfoList"):
obj = LogInfo()
obj._deserialize(item)
self.LogInfoList.append(obj)
self.TotalNum = params.get("TotalNum")
self.RequestId = params.get("RequestId")
class DescribePlayErrorCodeDetailInfoListRequest(AbstractModel):
"""DescribePlayErrorCodeDetailInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
注:EndTime 和 StartTime 只支持最近1天的数据查询。
:type EndTime: str
:param Granularity: 查询粒度:
1-1分钟粒度。
:type Granularity: int
:param StatType: 查询的错误码类型,可选值包括”4xx”,”5xx”,支持”4xx,5xx”等这种混合模式。
:type StatType: str
:param PlayDomains: 播放域名列表。
:type PlayDomains: list of str
"""
self.StartTime = None
self.EndTime = None
self.Granularity = None
self.StatType = None
self.PlayDomains = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Granularity = params.get("Granularity")
self.StatType = params.get("StatType")
self.PlayDomains = params.get("PlayDomains")
class DescribePlayErrorCodeDetailInfoListResponse(AbstractModel):
"""DescribePlayErrorCodeDetailInfoList返回参数结构体
"""
def __init__(self):
"""
:param HttpCodeList: 统计信息列表。
:type HttpCodeList: list of HttpCodeInfo
:param StatType: 统计类型。
:type StatType: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.HttpCodeList = None
self.StatType = None
self.RequestId = None
def _deserialize(self, params):
if params.get("HttpCodeList") is not None:
self.HttpCodeList = []
for item in params.get("HttpCodeList"):
obj = HttpCodeInfo()
obj._deserialize(item)
self.HttpCodeList.append(obj)
self.StatType = params.get("StatType")
self.RequestId = params.get("RequestId")
class DescribePlayErrorCodeSumInfoListRequest(AbstractModel):
"""DescribePlayErrorCodeSumInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,北京时间。
格式:yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,北京时间。
格式:yyyy-mm-dd HH:MM:SS。
注:EndTime 和 StartTime 只支持最近1天的数据查询。
:type EndTime: str
:param PlayDomains: 播放域名列表,不填表示总体数据。
:type PlayDomains: list of str
:param PageNum: 页数,
范围[1,1000],
默认值:1。
:type PageNum: int
:param PageSize: 每页个数,
范围:[1,1000],
默认值: 20。
:type PageSize: int
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribePlayErrorCodeSumInfoListResponse(AbstractModel):
"""DescribePlayErrorCodeSumInfoList返回参数结构体
"""
def __init__(self):
"""
:param ProIspInfoList: 分省份分运营商错误码为4或5开头的状态码数据信息。
:type ProIspInfoList: list of ProIspPlayCodeDataInfo
:param TotalCodeAll: 所有状态码的加和的次数。
:type TotalCodeAll: int
:param TotalCode4xx: 状态码为4开头的总次数。
:type TotalCode4xx: int
:param TotalCode5xx: 状态码为5开头的总次数。
:type TotalCode5xx: int
:param TotalCodeList: 各状态码的总次数,暂时支持400,403,404,500,502,503,504。
:type TotalCodeList: list of PlayCodeTotalInfo
:param PageNum: 页号。
:type PageNum: int
:param PageSize: 每页大小。
:type PageSize: int
:param TotalPage: 总页数。
:type TotalPage: int
:param TotalNum: 总记录数。
:type TotalNum: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProIspInfoList = None
self.TotalCodeAll = None
self.TotalCode4xx = None
self.TotalCode5xx = None
self.TotalCodeList = None
self.PageNum = None
self.PageSize = None
self.TotalPage = None
self.TotalNum = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProIspInfoList") is not None:
self.ProIspInfoList = []
for item in params.get("ProIspInfoList"):
obj = ProIspPlayCodeDataInfo()
obj._deserialize(item)
self.ProIspInfoList.append(obj)
self.TotalCodeAll = params.get("TotalCodeAll")
self.TotalCode4xx = params.get("TotalCode4xx")
self.TotalCode5xx = params.get("TotalCode5xx")
if params.get("TotalCodeList") is not None:
self.TotalCodeList = []
for item in params.get("TotalCodeList"):
obj = PlayCodeTotalInfo()
obj._deserialize(item)
self.TotalCodeList.append(obj)
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TotalPage = params.get("TotalPage")
self.TotalNum = params.get("TotalNum")
self.RequestId = params.get("RequestId")
class DescribeProIspPlaySumInfoListRequest(AbstractModel):
"""DescribeProIspPlaySumInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间,北京时间,
格式:yyyy-mm-dd HH:MM:SS。
注:EndTime 和 StartTime 只支持最近1天的数据查询。
:type EndTime: str
:param StatType: 统计的类型,可选值包括”Province”,”Isp”。
:type StatType: str
:param PlayDomains: 播放域名列表,不填则为总体数据。
:type PlayDomains: list of str
:param PageNum: 页号,
范围是[1,1000],
默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围是[1,1000],
默认值是20。
:type PageSize: int
"""
self.StartTime = None
self.EndTime = None
self.StatType = None
self.PlayDomains = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.StatType = params.get("StatType")
self.PlayDomains = params.get("PlayDomains")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribeProIspPlaySumInfoListResponse(AbstractModel):
"""DescribeProIspPlaySumInfoList返回参数结构体
"""
def __init__(self):
"""
:param TotalFlux: 总流量。
:type TotalFlux: float
:param TotalRequest: 总请求数。
:type TotalRequest: int
:param StatType: 统计的类型。
:type StatType: str
:param PageSize: 每页的记录数。
:type PageSize: int
:param PageNum: 页号。
:type PageNum: int
:param TotalNum: 总记录数。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param DataInfoList: 省份或运营商汇总数据列表。
:type DataInfoList: list of ProIspPlaySumInfo
:param AvgFluxPerSecond: 平均带宽。
:type AvgFluxPerSecond: float
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalFlux = None
self.TotalRequest = None
self.StatType = None
self.PageSize = None
self.PageNum = None
self.TotalNum = None
self.TotalPage = None
self.DataInfoList = None
self.AvgFluxPerSecond = None
self.RequestId = None
def _deserialize(self, params):
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
self.StatType = params.get("StatType")
self.PageSize = params.get("PageSize")
self.PageNum = params.get("PageNum")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = ProIspPlaySumInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.AvgFluxPerSecond = params.get("AvgFluxPerSecond")
self.RequestId = params.get("RequestId")
class DescribeProvinceIspPlayInfoListRequest(AbstractModel):
"""DescribeProvinceIspPlayInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,当前使用北京时间,
例:2019-02-21 10:00:00。
:type StartTime: str
:param EndTime: 结束时间点,当前使用北京时间,
例:2019-02-21 12:00:00。
注:EndTime 和 StartTime 只支持最近1天的数据查询。
:type EndTime: str
:param Granularity: 支持如下粒度:
1:1分钟粒度(跨度不支持超过1天)
:type Granularity: int
:param StatType: 统计指标类型:
“Bandwidth”:带宽
“FluxPerSecond”:平均流量
“Flux”:流量
“Request”:请求数
“Online”:并发连接数
:type StatType: str
:param PlayDomains: 播放域名列表。
:type PlayDomains: list of str
:param ProvinceNames: 非必传参数,要查询的省份(地区)英文名称列表,如 Beijing
:type ProvinceNames: list of str
:param IspNames: 非必传参数,要查询的运营商英文名称列表,如 China Mobile ,如果为空,查询所有运营商的数据
:type IspNames: list of str
"""
self.StartTime = None
self.EndTime = None
self.Granularity = None
self.StatType = None
self.PlayDomains = None
self.ProvinceNames = None
self.IspNames = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Granularity = params.get("Granularity")
self.StatType = params.get("StatType")
self.PlayDomains = params.get("PlayDomains")
self.ProvinceNames = params.get("ProvinceNames")
self.IspNames = params.get("IspNames")
class DescribeProvinceIspPlayInfoListResponse(AbstractModel):
"""DescribeProvinceIspPlayInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 播放信息列表。
:type DataInfoList: list of PlayStatInfo
:param StatType: 统计的类型,和输入参数保持一致。
:type StatType: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.StatType = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = PlayStatInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.StatType = params.get("StatType")
self.RequestId = params.get("RequestId")
class DescribePullStreamConfigsRequest(AbstractModel):
"""DescribePullStreamConfigs请求参数结构体
"""
def __init__(self):
"""
:param ConfigId: 配置id。
:type ConfigId: str
"""
self.ConfigId = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
class DescribePullStreamConfigsResponse(AbstractModel):
"""DescribePullStreamConfigs返回参数结构体
"""
def __init__(self):
"""
:param PullStreamConfigs: 拉流配置。
:type PullStreamConfigs: list of PullStreamConfig
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PullStreamConfigs = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PullStreamConfigs") is not None:
self.PullStreamConfigs = []
for item in params.get("PullStreamConfigs"):
obj = PullStreamConfig()
obj._deserialize(item)
self.PullStreamConfigs.append(obj)
self.RequestId = params.get("RequestId")
class DescribeStreamDayPlayInfoListRequest(AbstractModel):
"""DescribeStreamDayPlayInfoList请求参数结构体
"""
def __init__(self):
"""
:param DayTime: 日期,
格式:YYYY-mm-dd。
:type DayTime: str
:param PlayDomain: 播放域名。
:type PlayDomain: str
:param PageNum: 页号,范围[1,10],默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围[100,1000],默认值是1000。
:type PageSize: int
"""
self.DayTime = None
self.PlayDomain = None
self.PageNum = None
self.PageSize = None
def _deserialize(self, params):
self.DayTime = params.get("DayTime")
self.PlayDomain = params.get("PlayDomain")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
class DescribeStreamDayPlayInfoListResponse(AbstractModel):
"""DescribeStreamDayPlayInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 播放数据信息列表。
:type DataInfoList: list of PlayDataInfoByStream
:param TotalNum: 总数量。
:type TotalNum: int
:param TotalPage: 总页数。
:type TotalPage: int
:param PageNum: 当前数据所处页码。
:type PageNum: int
:param PageSize: 每页个数。
:type PageSize: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.TotalNum = None
self.TotalPage = None
self.PageNum = None
self.PageSize = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = PlayDataInfoByStream()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.RequestId = params.get("RequestId")
class DescribeStreamPlayInfoListRequest(AbstractModel):
"""DescribeStreamPlayInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 开始时间,北京时间,格式为yyyy-mm-dd HH:MM:SS,
当前时间 和 开始时间 间隔不超过30天。
:type StartTime: str
:param EndTime: 结束时间,北京时间,格式为yyyy-mm-dd HH:MM:SS,
结束时间 和 开始时间 必须在同一天内。
:type EndTime: str
:param PlayDomain: 播放域名,
若不填,则为查询所有播放域名的在线流数据。
:type PlayDomain: str
:param StreamName: 流名称,精确匹配。
若不填,则为查询总体播放数据。
:type StreamName: str
        :param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。精确匹配,不支持模糊匹配。
若不填,则为查询总体播放数据。
注意:按AppName查询,需要联系客服同学提单支持。
:type AppName: str
"""
self.StartTime = None
self.EndTime = None
self.PlayDomain = None
self.StreamName = None
self.AppName = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomain = params.get("PlayDomain")
self.StreamName = params.get("StreamName")
self.AppName = params.get("AppName")
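
# Illustrative sketch (hypothetical values): querying per-stream play data.
# Per the docstring above, StreamName is an exact match, StartTime/EndTime must
# fall on the same day, and filtering by AppName requires a support ticket.
def _example_stream_play_info_request():
    req = DescribeStreamPlayInfoListRequest()
    req.StartTime = "2019-03-01 10:00:00"
    req.EndTime = "2019-03-01 12:00:00"      # same day as StartTime
    req.PlayDomain = "play.example.com"      # omit to cover all play domains
    req.StreamName = "example-stream"        # exact match; omit for overall data
    return req
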
class DescribeStreamPlayInfoListResponse(AbstractModel):
"""DescribeStreamPlayInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 统计信息列表。
:type DataInfoList: list of DayStreamPlayInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = DayStreamPlayInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeStreamPushInfoListRequest(AbstractModel):
"""DescribeStreamPushInfoList请求参数结构体
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS,最大时间跨度支持6小时,支持最近6天数据查询。
:type EndTime: str
:param PushDomain: 推流域名。
:type PushDomain: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
"""
self.StreamName = None
self.StartTime = None
self.EndTime = None
self.PushDomain = None
self.AppName = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PushDomain = params.get("PushDomain")
self.AppName = params.get("AppName")
class DescribeStreamPushInfoListResponse(AbstractModel):
"""DescribeStreamPushInfoList返回参数结构体
"""
def __init__(self):
"""
:param DataInfoList: 返回的数据列表。
:type DataInfoList: list of PushQualityData
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = PushQualityData()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopClientIpSumInfoListRequest(AbstractModel):
"""DescribeTopClientIpSumInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS
时间跨度在(0,4小时],支持最近1天数据查询。
:type EndTime: str
:param PlayDomains: 播放域名,默认为不填,表示求总体数据。
:type PlayDomains: list of str
:param PageNum: 页号,
范围是[1,1000],
默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围是[1,1000],
默认值是20。
:type PageSize: int
:param OrderParam: 排序指标,可选值包括”TotalRequest”,”FailedRequest”,“TotalFlux”。
:type OrderParam: str
"""
self.StartTime = None
self.EndTime = None
self.PlayDomains = None
self.PageNum = None
self.PageSize = None
self.OrderParam = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.PlayDomains = params.get("PlayDomains")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.OrderParam = params.get("OrderParam")
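
# Illustrative sketch (hypothetical values): top client-IP summary ordered by
# total requests. OrderParam accepts "TotalRequest", "FailedRequest" or
# "TotalFlux" per the docstring above, and the time span must stay within (0, 4h].
def _example_top_client_ip_sum_request():
    req = DescribeTopClientIpSumInfoListRequest()
    req.StartTime = "2019-03-01 10:00:00"
    req.EndTime = "2019-03-01 12:00:00"
    req.PlayDomains = ["play.example.com"]
    req.OrderParam = "TotalRequest"
    return req
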
class DescribeTopClientIpSumInfoListResponse(AbstractModel):
"""DescribeTopClientIpSumInfoList返回参数结构体
"""
def __init__(self):
"""
:param PageNum: 页号,
范围是[1,1000],
默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围是[1,1000],
默认值是20。
:type PageSize: int
:param OrderParam: 排序指标,可选值包括”TotalRequest”,”FailedRequest”,“TotalFlux”。
:type OrderParam: str
:param TotalNum: 记录总数。
:type TotalNum: int
:param TotalPage: 记录总页数。
:type TotalPage: int
:param DataInfoList: 数据内容。
:type DataInfoList: list of ClientIpPlaySumInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PageNum = None
self.PageSize = None
self.OrderParam = None
self.TotalNum = None
self.TotalPage = None
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.OrderParam = params.get("OrderParam")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = ClientIpPlaySumInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeVisitTopSumInfoListRequest(AbstractModel):
"""DescribeVisitTopSumInfoList请求参数结构体
"""
def __init__(self):
"""
:param StartTime: 起始时间点,格式为yyyy-mm-dd HH:MM:SS。
:type StartTime: str
:param EndTime: 结束时间点,格式为yyyy-mm-dd HH:MM:SS
时间跨度在(0,4小时],支持最近1天数据查询。
:type EndTime: str
:param TopIndex: 峰值指标,可选值包括”Domain”,”StreamId”。
:type TopIndex: str
:param PlayDomains: 播放域名,默认为不填,表示求总体数据。
:type PlayDomains: list of str
:param PageNum: 页号,
范围是[1,1000],
默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围是[1,1000],
默认值是20。
:type PageSize: int
        :param OrderParam: 排序指标,可选值包括”AvgFluxPerSecond”,”TotalRequest”(默认),“TotalFlux”。
:type OrderParam: str
"""
self.StartTime = None
self.EndTime = None
self.TopIndex = None
self.PlayDomains = None
self.PageNum = None
self.PageSize = None
self.OrderParam = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.TopIndex = params.get("TopIndex")
self.PlayDomains = params.get("PlayDomains")
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.OrderParam = params.get("OrderParam")
class DescribeVisitTopSumInfoListResponse(AbstractModel):
"""DescribeVisitTopSumInfoList返回参数结构体
"""
def __init__(self):
"""
:param PageNum: 页号,
范围是[1,1000],
默认值是1。
:type PageNum: int
:param PageSize: 每页个数,范围是[1,1000],
默认值是20。
:type PageSize: int
:param TopIndex: 峰值指标,可选值包括”Domain”,”StreamId”。
:type TopIndex: str
        :param OrderParam: 排序指标,可选值包括”AvgFluxPerSecond”,”TotalRequest”(默认),“TotalFlux”。
:type OrderParam: str
:param TotalNum: 记录总数。
:type TotalNum: int
:param TotalPage: 记录总页数。
:type TotalPage: int
:param DataInfoList: 数据内容。
:type DataInfoList: list of PlaySumStatInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PageNum = None
self.PageSize = None
self.TopIndex = None
self.OrderParam = None
self.TotalNum = None
self.TotalPage = None
self.DataInfoList = None
self.RequestId = None
def _deserialize(self, params):
self.PageNum = params.get("PageNum")
self.PageSize = params.get("PageSize")
self.TopIndex = params.get("TopIndex")
self.OrderParam = params.get("OrderParam")
self.TotalNum = params.get("TotalNum")
self.TotalPage = params.get("TotalPage")
if params.get("DataInfoList") is not None:
self.DataInfoList = []
for item in params.get("DataInfoList"):
obj = PlaySumStatInfo()
obj._deserialize(item)
self.DataInfoList.append(obj)
self.RequestId = params.get("RequestId")
class DomainCertInfo(AbstractModel):
"""域名证书信息
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: int
:param CertName: 证书名称。
:type CertName: str
:param Description: 描述信息。
:type Description: str
:param CreateTime: 创建时间,UTC格式。
:type CreateTime: str
:param HttpsCrt: 证书内容。
:type HttpsCrt: str
:param CertType: 证书类型。
0:腾讯云托管证书
1:用户添加证书。
:type CertType: int
:param CertExpireTime: 证书过期时间,UTC格式。
:type CertExpireTime: str
:param DomainName: 使用此证书的域名名称。
:type DomainName: str
:param Status: 证书状态
:type Status: int
"""
self.CertId = None
self.CertName = None
self.Description = None
self.CreateTime = None
self.HttpsCrt = None
self.CertType = None
self.CertExpireTime = None
self.DomainName = None
self.Status = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
self.CertName = params.get("CertName")
self.Description = params.get("Description")
self.CreateTime = params.get("CreateTime")
self.HttpsCrt = params.get("HttpsCrt")
self.CertType = params.get("CertType")
self.CertExpireTime = params.get("CertExpireTime")
self.DomainName = params.get("DomainName")
self.Status = params.get("Status")
class DomainDetailInfo(AbstractModel):
"""每个域名的统计信息
"""
def __init__(self):
"""
:param MainlandOrOversea: 国内还是国外,可选值包括Mainland和Oversea,如果为“Mainland”,表示国内数据;如果为“Oversea”,表示国外数据。
:type MainlandOrOversea: str
:param Bandwidth: 带宽,单位是Mbps。
:type Bandwidth: float
:param Flux: 流量,单位是MB。
:type Flux: float
:param Online: 人数。
:type Online: int
:param Request: 请求数。
:type Request: int
"""
self.MainlandOrOversea = None
self.Bandwidth = None
self.Flux = None
self.Online = None
self.Request = None
def _deserialize(self, params):
self.MainlandOrOversea = params.get("MainlandOrOversea")
self.Bandwidth = params.get("Bandwidth")
self.Flux = params.get("Flux")
self.Online = params.get("Online")
self.Request = params.get("Request")
class DomainInfo(AbstractModel):
"""直播域名信息
"""
def __init__(self):
"""
:param Name: 直播域名
:type Name: str
:param Type: 域名类型。0-推流,1-播放
:type Type: int
:param Status: 域名状态。0-停用,1-启用
:type Status: int
:param CreateTime: 添加时间
:type CreateTime: str
:param BCName: 是否有CName到固定规则域名。0-否,1-是
:type BCName: int
:param TargetDomain: cname对应的域名
:type TargetDomain: str
:param PlayType: 播放区域,只在Type=1时该参数有意义。
1-国内,2-全球,3-海外。
:type PlayType: int
:param IsDelayLive: 0:普通直播,
1:慢直播。
:type IsDelayLive: int
"""
self.Name = None
self.Type = None
self.Status = None
self.CreateTime = None
self.BCName = None
self.TargetDomain = None
self.PlayType = None
self.IsDelayLive = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Type = params.get("Type")
self.Status = params.get("Status")
self.CreateTime = params.get("CreateTime")
self.BCName = params.get("BCName")
self.TargetDomain = params.get("TargetDomain")
self.PlayType = params.get("PlayType")
self.IsDelayLive = params.get("IsDelayLive")
class DomainInfoList(AbstractModel):
"""多个域名信息列表
"""
def __init__(self):
"""
:param Domain: 域名。
:type Domain: str
:param DetailInfoList: 明细信息。
:type DetailInfoList: list of DomainDetailInfo
"""
self.Domain = None
self.DetailInfoList = None
def _deserialize(self, params):
self.Domain = params.get("Domain")
if params.get("DetailInfoList") is not None:
self.DetailInfoList = []
for item in params.get("DetailInfoList"):
obj = DomainDetailInfo()
obj._deserialize(item)
self.DetailInfoList.append(obj)
class DropLiveStreamRequest(AbstractModel):
"""DropLiveStream请求参数结构体
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param DomainName: 您的加速域名。
:type DomainName: str
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
"""
self.StreamName = None
self.DomainName = None
self.AppName = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
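
# Illustrative sketch (hypothetical values): cutting off a live stream.
# DomainName is the accelerated domain and AppName defaults to "live"
# per the docstring above.
def _example_drop_live_stream_request():
    req = DropLiveStreamRequest()
    req.StreamName = "example-stream"
    req.DomainName = "push.example.com"
    req.AppName = "live"
    return req
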
class DropLiveStreamResponse(AbstractModel):
"""DropLiveStream返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class EnableLiveDomainRequest(AbstractModel):
"""EnableLiveDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 待启用的直播域名
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class EnableLiveDomainResponse(AbstractModel):
"""EnableLiveDomain返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ForbidLiveDomainRequest(AbstractModel):
"""ForbidLiveDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 停用的直播域名
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class ForbidLiveDomainResponse(AbstractModel):
"""ForbidLiveDomain返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ForbidLiveStreamRequest(AbstractModel):
"""ForbidLiveStream请求参数结构体
"""
def __init__(self):
"""
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param DomainName: 您的加速域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
:param ResumeTime: 恢复流的时间。UTC 格式,例如:2018-11-29T19:00:00Z。
注意:默认禁播90天,且最长支持禁播90天。
:type ResumeTime: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
self.ResumeTime = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
self.ResumeTime = params.get("ResumeTime")
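
# Illustrative sketch (hypothetical values): forbidding a stream until a given
# UTC resume time. Per the docstring above the ban defaults to, and is capped at,
# 90 days, and ResumeTime uses the UTC form 2018-11-29T19:00:00Z.
def _example_forbid_live_stream_request():
    req = ForbidLiveStreamRequest()
    req.AppName = "live"
    req.DomainName = "push.example.com"
    req.StreamName = "example-stream"
    req.ResumeTime = "2019-03-10T19:00:00Z"  # UTC, within 90 days
    return req
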
class ForbidLiveStreamResponse(AbstractModel):
"""ForbidLiveStream返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ForbidStreamInfo(AbstractModel):
"""禁推流列表
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param CreateTime: 创建时间。
:type CreateTime: str
:param ExpireTime: 禁推过期时间。
:type ExpireTime: str
"""
self.StreamName = None
self.CreateTime = None
self.ExpireTime = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.CreateTime = params.get("CreateTime")
self.ExpireTime = params.get("ExpireTime")
class GroupProIspDataInfo(AbstractModel):
"""某省份某运营商在某段时间内的带宽,流量,请求数和并发数
"""
def __init__(self):
"""
:param ProvinceName: 省份。
:type ProvinceName: str
:param IspName: 运营商。
:type IspName: str
:param DetailInfoList: 分钟维度的明细数据。
:type DetailInfoList: list of CdnPlayStatData
"""
self.ProvinceName = None
self.IspName = None
self.DetailInfoList = None
def _deserialize(self, params):
self.ProvinceName = params.get("ProvinceName")
self.IspName = params.get("IspName")
if params.get("DetailInfoList") is not None:
self.DetailInfoList = []
for item in params.get("DetailInfoList"):
obj = CdnPlayStatData()
obj._deserialize(item)
self.DetailInfoList.append(obj)
class HlsSpecialParam(AbstractModel):
"""HLS专属录制参数
"""
def __init__(self):
"""
:param FlowContinueDuration: HLS续流超时时间。
:type FlowContinueDuration: int
"""
self.FlowContinueDuration = None
def _deserialize(self, params):
self.FlowContinueDuration = params.get("FlowContinueDuration")
class HttpCodeInfo(AbstractModel):
"""http返回码和统计数据
"""
def __init__(self):
"""
:param HttpCode: http协议返回码。
例:"2xx", "3xx", "4xx", "5xx"。
:type HttpCode: str
:param ValueList: 统计信息,对于无数据的时间点,会补0。
:type ValueList: list of HttpCodeValue
"""
self.HttpCode = None
self.ValueList = None
def _deserialize(self, params):
self.HttpCode = params.get("HttpCode")
if params.get("ValueList") is not None:
self.ValueList = []
for item in params.get("ValueList"):
obj = HttpCodeValue()
obj._deserialize(item)
self.ValueList.append(obj)
class HttpCodeValue(AbstractModel):
"""http返回码数据信息
"""
def __init__(self):
"""
:param Time: 时间,格式:yyyy-mm-dd HH:MM:SS。
:type Time: str
:param Numbers: 次数。
:type Numbers: int
:param Percentage: 占比。
:type Percentage: float
"""
self.Time = None
self.Numbers = None
self.Percentage = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.Numbers = params.get("Numbers")
self.Percentage = params.get("Percentage")
class HttpStatusData(AbstractModel):
"""播放错误码信息
"""
def __init__(self):
"""
:param Time: 数据时间点,
格式:yyyy-mm-dd HH:MM:SS。
:type Time: str
:param HttpStatusInfoList: 播放状态码详细信息。
:type HttpStatusInfoList: list of HttpStatusInfo
"""
self.Time = None
self.HttpStatusInfoList = None
def _deserialize(self, params):
self.Time = params.get("Time")
if params.get("HttpStatusInfoList") is not None:
self.HttpStatusInfoList = []
for item in params.get("HttpStatusInfoList"):
obj = HttpStatusInfo()
obj._deserialize(item)
self.HttpStatusInfoList.append(obj)
class HttpStatusInfo(AbstractModel):
"""播放错误码信息
"""
def __init__(self):
"""
:param HttpStatus: 播放http状态码。
:type HttpStatus: str
:param Num: 个数。
:type Num: int
"""
self.HttpStatus = None
self.Num = None
def _deserialize(self, params):
self.HttpStatus = params.get("HttpStatus")
self.Num = params.get("Num")
class LogInfo(AbstractModel):
"""日志url信息
"""
def __init__(self):
"""
:param LogName: 日志名称。
:type LogName: str
:param LogUrl: 日志Url。
:type LogUrl: str
:param LogTime: 日志生成时间
:type LogTime: str
"""
self.LogName = None
self.LogUrl = None
self.LogTime = None
def _deserialize(self, params):
self.LogName = params.get("LogName")
self.LogUrl = params.get("LogUrl")
self.LogTime = params.get("LogTime")
class ModifyLiveCallbackTemplateRequest(AbstractModel):
"""ModifyLiveCallbackTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param StreamBeginNotifyUrl: 开播回调URL。
:type StreamBeginNotifyUrl: str
:param StreamEndNotifyUrl: 断流回调URL。
:type StreamEndNotifyUrl: str
:param RecordNotifyUrl: 录制回调URL。
:type RecordNotifyUrl: str
:param SnapshotNotifyUrl: 截图回调URL。
:type SnapshotNotifyUrl: str
:param PornCensorshipNotifyUrl: 鉴黄回调URL。
:type PornCensorshipNotifyUrl: str
:param CallbackKey: 回调key,回调URL公用,鉴权回调说明详见回调格式文档
:type CallbackKey: str
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.StreamBeginNotifyUrl = None
self.StreamEndNotifyUrl = None
self.RecordNotifyUrl = None
self.SnapshotNotifyUrl = None
self.PornCensorshipNotifyUrl = None
self.CallbackKey = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
self.StreamBeginNotifyUrl = params.get("StreamBeginNotifyUrl")
self.StreamEndNotifyUrl = params.get("StreamEndNotifyUrl")
self.RecordNotifyUrl = params.get("RecordNotifyUrl")
self.SnapshotNotifyUrl = params.get("SnapshotNotifyUrl")
self.PornCensorshipNotifyUrl = params.get("PornCensorshipNotifyUrl")
self.CallbackKey = params.get("CallbackKey")
class ModifyLiveCallbackTemplateResponse(AbstractModel):
"""ModifyLiveCallbackTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveCertRequest(AbstractModel):
"""ModifyLiveCert请求参数结构体
"""
def __init__(self):
"""
:param CertId: 证书Id。
:type CertId: str
:param CertType: 证书类型。0-用户添加证书;1-腾讯云托管证书。
:type CertType: int
:param CertName: 证书名称。
:type CertName: str
:param HttpsCrt: 证书内容,即公钥。
:type HttpsCrt: str
:param HttpsKey: 私钥。
:type HttpsKey: str
:param Description: 描述信息。
:type Description: str
"""
self.CertId = None
self.CertType = None
self.CertName = None
self.HttpsCrt = None
self.HttpsKey = None
self.Description = None
def _deserialize(self, params):
self.CertId = params.get("CertId")
self.CertType = params.get("CertType")
self.CertName = params.get("CertName")
self.HttpsCrt = params.get("HttpsCrt")
self.HttpsKey = params.get("HttpsKey")
self.Description = params.get("Description")
class ModifyLiveCertResponse(AbstractModel):
"""ModifyLiveCert返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveDomainCertRequest(AbstractModel):
"""ModifyLiveDomainCert请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 播放域名。
:type DomainName: str
:param CertId: 证书Id。
:type CertId: int
:param Status: 状态,0:关闭 1:打开。
:type Status: int
"""
self.DomainName = None
self.CertId = None
self.Status = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.CertId = params.get("CertId")
self.Status = params.get("Status")
class ModifyLiveDomainCertResponse(AbstractModel):
"""ModifyLiveDomainCert返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePlayAuthKeyRequest(AbstractModel):
"""ModifyLivePlayAuthKey请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 域名。
:type DomainName: str
:param Enable: 是否启用,0:关闭,1:启用。
:type Enable: int
:param AuthKey: 鉴权key。
:type AuthKey: str
:param AuthDelta: 有效时间,单位:秒。
:type AuthDelta: int
:param AuthBackKey: 鉴权backkey。
:type AuthBackKey: str
"""
self.DomainName = None
self.Enable = None
self.AuthKey = None
self.AuthDelta = None
self.AuthBackKey = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.AuthKey = params.get("AuthKey")
self.AuthDelta = params.get("AuthDelta")
self.AuthBackKey = params.get("AuthBackKey")
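
# Illustrative sketch (hypothetical values): enabling play authentication on a
# domain. AuthDelta is the validity window, in seconds, per the docstring above.
def _example_modify_play_auth_key_request():
    req = ModifyLivePlayAuthKeyRequest()
    req.DomainName = "play.example.com"
    req.Enable = 1                  # 0: off, 1: on
    req.AuthKey = "primary-key"     # hypothetical primary auth key
    req.AuthBackKey = "backup-key"  # hypothetical backup auth key
    req.AuthDelta = 3600            # seconds
    return req
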
class ModifyLivePlayAuthKeyResponse(AbstractModel):
"""ModifyLivePlayAuthKey返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePlayDomainRequest(AbstractModel):
"""ModifyLivePlayDomain请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 播放域名。
:type DomainName: str
:param PlayType: 拉流域名类型。1-国内;2-全球;3-境外
:type PlayType: int
"""
self.DomainName = None
self.PlayType = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.PlayType = params.get("PlayType")
class ModifyLivePlayDomainResponse(AbstractModel):
"""ModifyLivePlayDomain返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLivePushAuthKeyRequest(AbstractModel):
"""ModifyLivePushAuthKey请求参数结构体
"""
def __init__(self):
"""
:param DomainName: 推流域名。
:type DomainName: str
:param Enable: 是否启用,0:关闭,1:启用。
:type Enable: int
:param MasterAuthKey: 主鉴权key。
:type MasterAuthKey: str
:param BackupAuthKey: 备鉴权key。
:type BackupAuthKey: str
:param AuthDelta: 有效时间,单位:秒。
:type AuthDelta: int
"""
self.DomainName = None
self.Enable = None
self.MasterAuthKey = None
self.BackupAuthKey = None
self.AuthDelta = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.MasterAuthKey = params.get("MasterAuthKey")
self.BackupAuthKey = params.get("BackupAuthKey")
self.AuthDelta = params.get("AuthDelta")
class ModifyLivePushAuthKeyResponse(AbstractModel):
"""ModifyLivePushAuthKey返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveRecordTemplateRequest(AbstractModel):
"""ModifyLiveRecordTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param FlvParam: Flv录制参数,开启Flv录制时设置。
:type FlvParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsParam: Hls录制参数,开启hls录制时设置。
:type HlsParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param Mp4Param: Mp4录制参数,开启Mp4录制时设置。
:type Mp4Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param AacParam: Aac录制参数,开启Aac录制时设置。
:type AacParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsSpecialParam: HLS录制定制参数
:type HlsSpecialParam: :class:`tencentcloud.live.v20180801.models.HlsSpecialParam`
:param Mp3Param: Mp3录制参数,开启Mp3录制时设置。
:type Mp3Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.FlvParam = None
self.HlsParam = None
self.Mp4Param = None
self.AacParam = None
self.HlsSpecialParam = None
self.Mp3Param = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
if params.get("FlvParam") is not None:
self.FlvParam = RecordParam()
self.FlvParam._deserialize(params.get("FlvParam"))
if params.get("HlsParam") is not None:
self.HlsParam = RecordParam()
self.HlsParam._deserialize(params.get("HlsParam"))
if params.get("Mp4Param") is not None:
self.Mp4Param = RecordParam()
self.Mp4Param._deserialize(params.get("Mp4Param"))
if params.get("AacParam") is not None:
self.AacParam = RecordParam()
self.AacParam._deserialize(params.get("AacParam"))
if params.get("HlsSpecialParam") is not None:
self.HlsSpecialParam = HlsSpecialParam()
self.HlsSpecialParam._deserialize(params.get("HlsSpecialParam"))
if params.get("Mp3Param") is not None:
self.Mp3Param = RecordParam()
self.Mp3Param._deserialize(params.get("Mp3Param"))
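
# Illustrative sketch (hypothetical values): the nested RecordParam /
# HlsSpecialParam members can be filled by passing a plain dict to _deserialize,
# as below, or by assigning RecordParam instances directly.
def _example_modify_record_template_request():
    req = ModifyLiveRecordTemplateRequest()
    req._deserialize({
        "TemplateId": 1000,                        # hypothetical template id
        "TemplateName": "example-record",
        "HlsParam": {"Enable": 1, "StorageTime": 86400},
        "HlsSpecialParam": {"FlowContinueDuration": 60},  # unit assumed to be seconds
    })
    return req
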
class ModifyLiveRecordTemplateResponse(AbstractModel):
"""ModifyLiveRecordTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveSnapshotTemplateRequest(AbstractModel):
"""ModifyLiveSnapshotTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
        :param SnapshotInterval: 截图时间间隔,单位:秒。
:type SnapshotInterval: int
:param Width: 截图宽度。
:type Width: int
:param Height: 截图高度。
:type Height: int
:param PornFlag: 是否开启鉴黄,0:不开启,1:开启。
:type PornFlag: int
:param CosAppId: Cos AppId。
:type CosAppId: int
:param CosBucket: Cos Bucket名称。
:type CosBucket: str
:param CosRegion: Cos 地域。
:type CosRegion: str
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.SnapshotInterval = None
self.Width = None
self.Height = None
self.PornFlag = None
self.CosAppId = None
self.CosBucket = None
self.CosRegion = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
self.SnapshotInterval = params.get("SnapshotInterval")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.PornFlag = params.get("PornFlag")
self.CosAppId = params.get("CosAppId")
self.CosBucket = params.get("CosBucket")
self.CosRegion = params.get("CosRegion")
class ModifyLiveSnapshotTemplateResponse(AbstractModel):
"""ModifyLiveSnapshotTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyLiveTranscodeTemplateRequest(AbstractModel):
"""ModifyLiveTranscodeTemplate请求参数结构体
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param Vcodec: 视频编码:
h264/h265。
:type Vcodec: str
:param Acodec: 音频编码:
aac/mp3。
:type Acodec: str
:param AudioBitrate: 音频码率,默认0。0-500
:type AudioBitrate: int
:param Description: 模板描述。
:type Description: str
:param VideoBitrate: 视频码率。100-8000
:type VideoBitrate: int
:param Width: 宽。0-3000
:type Width: int
:param NeedVideo: 是否保留视频,0:否,1:是。默认1。
:type NeedVideo: int
:param NeedAudio: 是否保留音频,0:否,1:是。默认1。
:type NeedAudio: int
:param Height: 高。0-3000
:type Height: int
:param Fps: 帧率。0-200
:type Fps: int
:param Gop: 关键帧间隔,单位:秒。0-50
:type Gop: int
:param Rotate: 旋转角度。0 90 180 270
:type Rotate: int
:param Profile: 编码质量:
baseline/main/high。
:type Profile: str
:param BitrateToOrig: 是否不超过原始码率。0:否,1:是。默认0。
:type BitrateToOrig: int
:param HeightToOrig: 是否不超过原始高。0:否,1:是。默认0。
:type HeightToOrig: int
:param FpsToOrig: 是否不超过原始帧率。0:否,1:是。默认0。
:type FpsToOrig: int
"""
self.TemplateId = None
self.Vcodec = None
self.Acodec = None
self.AudioBitrate = None
self.Description = None
self.VideoBitrate = None
self.Width = None
self.NeedVideo = None
self.NeedAudio = None
self.Height = None
self.Fps = None
self.Gop = None
self.Rotate = None
self.Profile = None
self.BitrateToOrig = None
self.HeightToOrig = None
self.FpsToOrig = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.Vcodec = params.get("Vcodec")
self.Acodec = params.get("Acodec")
self.AudioBitrate = params.get("AudioBitrate")
self.Description = params.get("Description")
self.VideoBitrate = params.get("VideoBitrate")
self.Width = params.get("Width")
self.NeedVideo = params.get("NeedVideo")
self.NeedAudio = params.get("NeedAudio")
self.Height = params.get("Height")
self.Fps = params.get("Fps")
self.Gop = params.get("Gop")
self.Rotate = params.get("Rotate")
self.Profile = params.get("Profile")
self.BitrateToOrig = params.get("BitrateToOrig")
self.HeightToOrig = params.get("HeightToOrig")
self.FpsToOrig = params.get("FpsToOrig")
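
# Illustrative sketch (hypothetical values): adjusting an existing transcode
# template. The ranges in the docstring above apply (VideoBitrate 100-8000,
# Fps 0-200, Gop 0-50, etc.).
def _example_modify_transcode_template_request():
    req = ModifyLiveTranscodeTemplateRequest()
    req.TemplateId = 2000        # hypothetical template id
    req.Vcodec = "h264"
    req.Acodec = "aac"
    req.VideoBitrate = 1800      # within the documented 100-8000 range
    req.Width = 1280
    req.Height = 720
    req.Fps = 25
    req.FpsToOrig = 1            # do not exceed the original frame rate
    return req
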
class ModifyLiveTranscodeTemplateResponse(AbstractModel):
"""ModifyLiveTranscodeTemplate返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyPullStreamConfigRequest(AbstractModel):
"""ModifyPullStreamConfig请求参数结构体
"""
def __init__(self):
"""
:param ConfigId: 配置id。
:type ConfigId: str
:param FromUrl: 源Url。
:type FromUrl: str
:param ToUrl: 目的Url。
:type ToUrl: str
:param AreaId: 区域id,1-深圳,2-上海,3-天津,4-香港。如有改动,需同时传入IspId。
:type AreaId: int
:param IspId: 运营商id,1-电信,2-移动,3-联通,4-其他,AreaId为4的时候,IspId只能为其他。如有改动,需同时传入AreaId。
:type IspId: int
:param StartTime: 开始时间。
使用UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type StartTime: str
:param EndTime: 结束时间,注意:
1. 结束时间必须大于开始时间;
2. 结束时间和开始时间必须大于当前时间;
3. 结束时间 和 开始时间 间隔必须小于七天。
使用UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type EndTime: str
"""
self.ConfigId = None
self.FromUrl = None
self.ToUrl = None
self.AreaId = None
self.IspId = None
self.StartTime = None
self.EndTime = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
self.FromUrl = params.get("FromUrl")
self.ToUrl = params.get("ToUrl")
self.AreaId = params.get("AreaId")
self.IspId = params.get("IspId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
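
# Illustrative sketch (hypothetical values): rescheduling a pull-stream task.
# Per the docstring above, StartTime/EndTime are UTC, both must be in the future,
# EndTime must be later than StartTime, and the window must be under seven days.
def _example_modify_pull_stream_config_request():
    req = ModifyPullStreamConfigRequest()
    req.ConfigId = "example-config-id"               # hypothetical config id
    req.FromUrl = "rtmp://source.example.com/live/src"
    req.ToUrl = "rtmp://push.example.com/live/dst"
    req.AreaId = 1                                   # 1-Shenzhen; change IspId together
    req.IspId = 1                                    # 1-China Telecom
    req.StartTime = "2019-03-08T10:00:00Z"
    req.EndTime = "2019-03-10T10:00:00Z"             # < 7 days after StartTime
    return req
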
class ModifyPullStreamConfigResponse(AbstractModel):
"""ModifyPullStreamConfig返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ModifyPullStreamStatusRequest(AbstractModel):
"""ModifyPullStreamStatus请求参数结构体
"""
def __init__(self):
"""
:param ConfigIds: 配置id列表。
:type ConfigIds: list of str
:param Status: 目标状态。0无效,2正在运行,4暂停。
:type Status: str
"""
self.ConfigIds = None
self.Status = None
def _deserialize(self, params):
self.ConfigIds = params.get("ConfigIds")
self.Status = params.get("Status")
class ModifyPullStreamStatusResponse(AbstractModel):
"""ModifyPullStreamStatus返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class PlayAuthKeyInfo(AbstractModel):
"""播放鉴权key信息
"""
def __init__(self):
"""
:param DomainName: 域名。
:type DomainName: str
:param Enable: 是否启用,0:关闭,1:启用。
:type Enable: int
:param AuthKey: 鉴权key。
:type AuthKey: str
:param AuthDelta: 有效时间,单位:秒。
:type AuthDelta: int
:param AuthBackKey: 鉴权BackKey。
:type AuthBackKey: str
"""
self.DomainName = None
self.Enable = None
self.AuthKey = None
self.AuthDelta = None
self.AuthBackKey = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.AuthKey = params.get("AuthKey")
self.AuthDelta = params.get("AuthDelta")
self.AuthBackKey = params.get("AuthBackKey")
class PlayCodeTotalInfo(AbstractModel):
"""各状态码的总次数,暂时支持400,403,404,500,502,503,504
"""
def __init__(self):
"""
:param Code: http code,可选值包括400,403,404,500,502,503,504
:type Code: str
:param Num: 总次数
:type Num: int
"""
self.Code = None
self.Num = None
def _deserialize(self, params):
self.Code = params.get("Code")
self.Num = params.get("Num")
class PlayDataInfoByStream(AbstractModel):
"""流维度的播放信息
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param TotalFlux: 总流量(单位MB)。
:type TotalFlux: float
"""
self.StreamName = None
self.TotalFlux = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.TotalFlux = params.get("TotalFlux")
class PlayStatInfo(AbstractModel):
"""按省份运营商查询的播放信息
"""
def __init__(self):
"""
:param Time: 数据时间点。
:type Time: str
:param Value: 带宽/流量/请求数/并发连接数/下载速度的值,若没数据返回时该值为0
注意:此字段可能返回 null,表示取不到有效值。
:type Value: float
"""
self.Time = None
self.Value = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.Value = params.get("Value")
class PlaySumStatInfo(AbstractModel):
"""播放汇总统计信息
"""
def __init__(self):
"""
:param Name: 域名或流id。
:type Name: str
:param AvgFluxPerSecond: 平均下载速度,单位是MB/s,计算公式是每分钟的下载速度求平均值。
:type AvgFluxPerSecond: float
:param TotalFlux: 总流量,单位是MB。
:type TotalFlux: float
:param TotalRequest: 总请求数。
:type TotalRequest: int
"""
self.Name = None
self.AvgFluxPerSecond = None
self.TotalFlux = None
self.TotalRequest = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.AvgFluxPerSecond = params.get("AvgFluxPerSecond")
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
class ProIspPlayCodeDataInfo(AbstractModel):
"""播放错误码信息
"""
def __init__(self):
"""
:param ProvinceName: 省份。
:type ProvinceName: str
:param IspName: 运营商。
:type IspName: str
:param Code4xx: 错误码为4开头的次数。
:type Code4xx: int
:param Code5xx: 错误码为5开头的次数。
:type Code5xx: int
"""
self.ProvinceName = None
self.IspName = None
self.Code4xx = None
self.Code5xx = None
def _deserialize(self, params):
self.ProvinceName = params.get("ProvinceName")
self.IspName = params.get("IspName")
self.Code4xx = params.get("Code4xx")
self.Code5xx = params.get("Code5xx")
class ProIspPlaySumInfo(AbstractModel):
"""获取省份/运营商的播放信息
"""
def __init__(self):
"""
:param Name: 省份/运营商。
:type Name: str
:param TotalFlux: 总流量,单位:MB。
:type TotalFlux: float
:param TotalRequest: 总请求数。
:type TotalRequest: int
:param AvgFluxPerSecond: 平均下载流量,单位:MB/s
:type AvgFluxPerSecond: float
"""
self.Name = None
self.TotalFlux = None
self.TotalRequest = None
self.AvgFluxPerSecond = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.TotalFlux = params.get("TotalFlux")
self.TotalRequest = params.get("TotalRequest")
self.AvgFluxPerSecond = params.get("AvgFluxPerSecond")
class PublishTime(AbstractModel):
"""推流时间
"""
def __init__(self):
"""
:param PublishTime: 推流时间
UTC 格式,例如:2018-06-29T19:00:00Z。
:type PublishTime: str
"""
self.PublishTime = None
def _deserialize(self, params):
self.PublishTime = params.get("PublishTime")
class PullStreamConfig(AbstractModel):
"""拉流配置
"""
def __init__(self):
"""
:param ConfigId: 拉流配置Id。
:type ConfigId: str
:param FromUrl: 源Url。
:type FromUrl: str
:param ToUrl: 目的Url。
:type ToUrl: str
:param AreaName: 区域名。
:type AreaName: str
:param IspName: 运营商名。
:type IspName: str
:param StartTime: 开始时间。
UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type StartTime: str
:param EndTime: 结束时间。
UTC格式时间,
例如:2019-01-08T10:00:00Z。
:type EndTime: str
:param Status: 0无效,1初始状态,2正在运行,3拉起失败,4暂停。
:type Status: str
"""
self.ConfigId = None
self.FromUrl = None
self.ToUrl = None
self.AreaName = None
self.IspName = None
self.StartTime = None
self.EndTime = None
self.Status = None
def _deserialize(self, params):
self.ConfigId = params.get("ConfigId")
self.FromUrl = params.get("FromUrl")
self.ToUrl = params.get("ToUrl")
self.AreaName = params.get("AreaName")
self.IspName = params.get("IspName")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Status = params.get("Status")
class PushAuthKeyInfo(AbstractModel):
"""推流鉴权key信息
"""
def __init__(self):
"""
:param DomainName: 域名。
:type DomainName: str
:param Enable: 是否启用,0:关闭,1:启用。
:type Enable: int
:param MasterAuthKey: 主鉴权key。
:type MasterAuthKey: str
:param BackupAuthKey: 备鉴权key。
:type BackupAuthKey: str
:param AuthDelta: 有效时间,单位:秒。
:type AuthDelta: int
"""
self.DomainName = None
self.Enable = None
self.MasterAuthKey = None
self.BackupAuthKey = None
self.AuthDelta = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
self.Enable = params.get("Enable")
self.MasterAuthKey = params.get("MasterAuthKey")
self.BackupAuthKey = params.get("BackupAuthKey")
self.AuthDelta = params.get("AuthDelta")
class PushDataInfo(AbstractModel):
"""推流数据信息
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param AppName: 推流路径。
:type AppName: str
:param ClientIp: 推流客户端ip。
:type ClientIp: str
:param ServerIp: 接流服务器ip。
:type ServerIp: str
:param VideoFps: 推流视频帧率,单位是Hz。
:type VideoFps: int
:param VideoSpeed: 推流视频码率,单位是bps。
:type VideoSpeed: int
:param AudioFps: 推流音频帧率,单位是Hz。
:type AudioFps: int
:param AudioSpeed: 推流音频码率,单位是bps。
:type AudioSpeed: int
:param PushDomain: 推流域名。
:type PushDomain: str
:param BeginPushTime: 推流开始时间。
:type BeginPushTime: str
:param Acodec: 音频编码格式,
例:"AAC"。
:type Acodec: str
:param Vcodec: 视频编码格式,
例:"H264"。
:type Vcodec: str
:param Resolution: 分辨率。
:type Resolution: str
"""
self.StreamName = None
self.AppName = None
self.ClientIp = None
self.ServerIp = None
self.VideoFps = None
self.VideoSpeed = None
self.AudioFps = None
self.AudioSpeed = None
self.PushDomain = None
self.BeginPushTime = None
self.Acodec = None
self.Vcodec = None
self.Resolution = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.AppName = params.get("AppName")
self.ClientIp = params.get("ClientIp")
self.ServerIp = params.get("ServerIp")
self.VideoFps = params.get("VideoFps")
self.VideoSpeed = params.get("VideoSpeed")
self.AudioFps = params.get("AudioFps")
self.AudioSpeed = params.get("AudioSpeed")
self.PushDomain = params.get("PushDomain")
self.BeginPushTime = params.get("BeginPushTime")
self.Acodec = params.get("Acodec")
self.Vcodec = params.get("Vcodec")
self.Resolution = params.get("Resolution")
class PushQualityData(AbstractModel):
"""某条流的推流质量详情数据。
"""
def __init__(self):
"""
:param Time: 数据时间,格式是%Y-%m-%d %H:%M:%S.%ms,精确到毫秒级。
:type Time: str
:param PushDomain: 推流域名。
:type PushDomain: str
:param AppName: 推流路径。
:type AppName: str
:param ClientIp: 推流客户端ip。
:type ClientIp: str
:param BeginPushTime: 开始推流时间,格式是%Y-%m-%d %H:%M:%S.%ms,精确到毫秒级。
:type BeginPushTime: str
:param Resolution: 分辨率信息。
:type Resolution: str
:param VCodec: 视频编码格式。
:type VCodec: str
:param ACodec: 音频编码格式。
:type ACodec: str
        :param Sequence: 推流序列号,用来唯一标识一次推流。
:type Sequence: str
:param VideoFps: 视频帧率。
:type VideoFps: int
:param VideoRate: 视频码率,单位是bps。
:type VideoRate: int
:param AudioFps: 音频帧率。
:type AudioFps: int
:param AudioRate: 音频码率,单位是bps。
:type AudioRate: int
:param LocalTs: 本地流逝时间,单位是ms,音视频流逝时间与本地流逝时间的差距越大表示推流质量越差,上行卡顿越严重。
:type LocalTs: int
:param VideoTs: 视频流逝时间,单位是ms。
:type VideoTs: int
:param AudioTs: 音频流逝时间,单位是ms。
:type AudioTs: int
"""
self.Time = None
self.PushDomain = None
self.AppName = None
self.ClientIp = None
self.BeginPushTime = None
self.Resolution = None
self.VCodec = None
self.ACodec = None
self.Sequence = None
self.VideoFps = None
self.VideoRate = None
self.AudioFps = None
self.AudioRate = None
self.LocalTs = None
self.VideoTs = None
self.AudioTs = None
def _deserialize(self, params):
self.Time = params.get("Time")
self.PushDomain = params.get("PushDomain")
self.AppName = params.get("AppName")
self.ClientIp = params.get("ClientIp")
self.BeginPushTime = params.get("BeginPushTime")
self.Resolution = params.get("Resolution")
self.VCodec = params.get("VCodec")
self.ACodec = params.get("ACodec")
self.Sequence = params.get("Sequence")
self.VideoFps = params.get("VideoFps")
self.VideoRate = params.get("VideoRate")
self.AudioFps = params.get("AudioFps")
self.AudioRate = params.get("AudioRate")
self.LocalTs = params.get("LocalTs")
self.VideoTs = params.get("VideoTs")
self.AudioTs = params.get("AudioTs")
class RecordParam(AbstractModel):
"""录制模板参数
"""
def __init__(self):
"""
:param RecordInterval: 录制间隔。
单位秒,默认值1800。
取值范围:300-7200。
此参数对 HLS 无效,当录制 HLS 时从推流到断流生成一个文件。
:type RecordInterval: int
:param StorageTime: 录制存储时长。
单位秒,取值范围: 0-93312000。
0表示永久存储。
:type StorageTime: int
:param Enable: 是否开启当前格式录制,0 否 1是。默认值0。
:type Enable: int
"""
self.RecordInterval = None
self.StorageTime = None
self.Enable = None
def _deserialize(self, params):
self.RecordInterval = params.get("RecordInterval")
self.StorageTime = params.get("StorageTime")
self.Enable = params.get("Enable")
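
# Illustrative sketch (hypothetical values): a RecordParam enabling recording in
# 30-minute segments kept for 30 days. Per the docstring above, RecordInterval is
# ignored for HLS, where one file spans from push start to stream end.
def _example_record_param():
    param = RecordParam()
    param.Enable = 1             # 0: off, 1: on
    param.RecordInterval = 1800  # seconds, range 300-7200
    param.StorageTime = 2592000  # seconds, range 0-93312000; 0 means permanent storage
    return param
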
class RecordTemplateInfo(AbstractModel):
"""录制模板信息
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
:param Description: 描述信息。
:type Description: str
:param FlvParam: Flv录制参数。
:type FlvParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param HlsParam: Hls录制参数。
:type HlsParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param Mp4Param: Mp4录制参数。
:type Mp4Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param AacParam: Aac录制参数。
:type AacParam: :class:`tencentcloud.live.v20180801.models.RecordParam`
:param IsDelayLive: 0:普通直播,
1:慢直播。
:type IsDelayLive: int
:param HlsSpecialParam: HLS录制定制参数
:type HlsSpecialParam: :class:`tencentcloud.live.v20180801.models.HlsSpecialParam`
:param Mp3Param: Mp3录制参数。
:type Mp3Param: :class:`tencentcloud.live.v20180801.models.RecordParam`
"""
self.TemplateId = None
self.TemplateName = None
self.Description = None
self.FlvParam = None
self.HlsParam = None
self.Mp4Param = None
self.AacParam = None
self.IsDelayLive = None
self.HlsSpecialParam = None
self.Mp3Param = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
if params.get("FlvParam") is not None:
self.FlvParam = RecordParam()
self.FlvParam._deserialize(params.get("FlvParam"))
if params.get("HlsParam") is not None:
self.HlsParam = RecordParam()
self.HlsParam._deserialize(params.get("HlsParam"))
if params.get("Mp4Param") is not None:
self.Mp4Param = RecordParam()
self.Mp4Param._deserialize(params.get("Mp4Param"))
if params.get("AacParam") is not None:
self.AacParam = RecordParam()
self.AacParam._deserialize(params.get("AacParam"))
self.IsDelayLive = params.get("IsDelayLive")
if params.get("HlsSpecialParam") is not None:
self.HlsSpecialParam = HlsSpecialParam()
self.HlsSpecialParam._deserialize(params.get("HlsSpecialParam"))
if params.get("Mp3Param") is not None:
self.Mp3Param = RecordParam()
self.Mp3Param._deserialize(params.get("Mp3Param"))
class ResumeDelayLiveStreamRequest(AbstractModel):
"""ResumeDelayLiveStream请求参数结构体
"""
def __init__(self):
"""
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为live。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
class ResumeDelayLiveStreamResponse(AbstractModel):
"""ResumeDelayLiveStream返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class ResumeLiveStreamRequest(AbstractModel):
"""ResumeLiveStream请求参数结构体
"""
def __init__(self):
"""
:param AppName: 推流路径,与推流和播放地址中的AppName保持一致,默认为 live。
:type AppName: str
:param DomainName: 您的加速域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
class ResumeLiveStreamResponse(AbstractModel):
"""ResumeLiveStream返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class RuleInfo(AbstractModel):
"""规则信息
"""
def __init__(self):
"""
:param CreateTime: 规则创建时间。
:type CreateTime: str
:param UpdateTime: 规则更新时间。
:type UpdateTime: str
:param TemplateId: 模板Id。
:type TemplateId: int
:param DomainName: 推流域名。
:type DomainName: str
:param AppName: 推流路径。
:type AppName: str
:param StreamName: 流名称。
:type StreamName: str
"""
self.CreateTime = None
self.UpdateTime = None
self.TemplateId = None
self.DomainName = None
self.AppName = None
self.StreamName = None
def _deserialize(self, params):
self.CreateTime = params.get("CreateTime")
self.UpdateTime = params.get("UpdateTime")
self.TemplateId = params.get("TemplateId")
self.DomainName = params.get("DomainName")
self.AppName = params.get("AppName")
self.StreamName = params.get("StreamName")
class SnapshotTemplateInfo(AbstractModel):
"""截图模板信息
"""
def __init__(self):
"""
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称。
:type TemplateName: str
        :param SnapshotInterval: 截图时间间隔,单位:秒,取值范围:5-300。
        :type SnapshotInterval: int
        :param Width: 截图宽度,取值范围:0-3000;0表示原始宽度并适配原始比例。
        :type Width: int
        :param Height: 截图高度,取值范围:0-2000;0表示原始高度并适配原始比例。
        :type Height: int
:param PornFlag: 是否开启鉴黄,0:不开启,1:开启。
:type PornFlag: int
:param CosAppId: Cos AppId。
:type CosAppId: int
:param CosBucket: Cos Bucket名称。
:type CosBucket: str
:param CosRegion: Cos 地域。
:type CosRegion: str
:param Description: 模板描述
:type Description: str
"""
self.TemplateId = None
self.TemplateName = None
self.SnapshotInterval = None
self.Width = None
self.Height = None
self.PornFlag = None
self.CosAppId = None
self.CosBucket = None
self.CosRegion = None
self.Description = None
def _deserialize(self, params):
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.SnapshotInterval = params.get("SnapshotInterval")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.PornFlag = params.get("PornFlag")
self.CosAppId = params.get("CosAppId")
self.CosBucket = params.get("CosBucket")
self.CosRegion = params.get("CosRegion")
self.Description = params.get("Description")
class StopLiveRecordRequest(AbstractModel):
"""StopLiveRecord请求参数结构体
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param TaskId: 任务ID,全局唯一标识录制任务。
:type TaskId: int
"""
self.StreamName = None
self.TaskId = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.TaskId = params.get("TaskId")
class StopLiveRecordResponse(AbstractModel):
"""StopLiveRecord返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class StreamEventInfo(AbstractModel):
"""推断流事件信息。
"""
def __init__(self):
"""
:param AppName: 应用名称。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
:param StreamName: 流名称。
:type StreamName: str
:param StreamStartTime: 推流开始时间。
UTC格式时间,
例如:2019-01-07T12:00:00Z。
:type StreamStartTime: str
:param StreamEndTime: 推流结束时间。
UTC格式时间,
例如:2019-01-07T15:00:00Z。
:type StreamEndTime: str
:param StopReason: 停止原因。
:type StopReason: str
:param Duration: 推流持续时长,单位:秒。
:type Duration: int
:param ClientIp: 主播IP。
:type ClientIp: str
:param Resolution: 分辨率。
:type Resolution: str
"""
self.AppName = None
self.DomainName = None
self.StreamName = None
self.StreamStartTime = None
self.StreamEndTime = None
self.StopReason = None
self.Duration = None
self.ClientIp = None
self.Resolution = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamName = params.get("StreamName")
self.StreamStartTime = params.get("StreamStartTime")
self.StreamEndTime = params.get("StreamEndTime")
self.StopReason = params.get("StopReason")
self.Duration = params.get("Duration")
self.ClientIp = params.get("ClientIp")
self.Resolution = params.get("Resolution")
class StreamInfo(AbstractModel):
"""推流信息
"""
def __init__(self):
"""
:param AppName: 直播流所属应用名称
:type AppName: str
:param CreateMode: 创建模式
:type CreateMode: str
:param CreateTime: 创建时间,如: 2018-07-13 14:48:23
:type CreateTime: str
:param Status: 流状态
:type Status: int
:param StreamId: 流id
:type StreamId: str
:param StreamName: 流名称
:type StreamName: str
:param WaterMarkId: 水印id
:type WaterMarkId: str
"""
self.AppName = None
self.CreateMode = None
self.CreateTime = None
self.Status = None
self.StreamId = None
self.StreamName = None
self.WaterMarkId = None
def _deserialize(self, params):
self.AppName = params.get("AppName")
self.CreateMode = params.get("CreateMode")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.StreamId = params.get("StreamId")
self.StreamName = params.get("StreamName")
self.WaterMarkId = params.get("WaterMarkId")
class StreamName(AbstractModel):
"""流名称列表
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param AppName: 应用名称。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
:param StreamStartTime: 推流开始时间。
UTC格式时间,
例如:2019-01-07T12:00:00Z。
:type StreamStartTime: str
:param StreamEndTime: 推流结束时间。
UTC格式时间,
例如:2019-01-07T15:00:00Z。
:type StreamEndTime: str
:param StopReason: 停止原因。
:type StopReason: str
:param Duration: 推流持续时长,单位:秒。
:type Duration: int
:param ClientIp: 主播IP。
:type ClientIp: str
:param Resolution: 分辨率。
:type Resolution: str
"""
self.StreamName = None
self.AppName = None
self.DomainName = None
self.StreamStartTime = None
self.StreamEndTime = None
self.StopReason = None
self.Duration = None
self.ClientIp = None
self.Resolution = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
self.StreamStartTime = params.get("StreamStartTime")
self.StreamEndTime = params.get("StreamEndTime")
self.StopReason = params.get("StopReason")
self.Duration = params.get("Duration")
self.ClientIp = params.get("ClientIp")
self.Resolution = params.get("Resolution")
class StreamOnlineInfo(AbstractModel):
"""查询当前正在推流的信息
"""
def __init__(self):
"""
:param StreamName: 流名称。
:type StreamName: str
:param PublishTimeList: 推流时间列表
:type PublishTimeList: list of PublishTime
:param AppName: 应用名称。
:type AppName: str
:param DomainName: 推流域名。
:type DomainName: str
"""
self.StreamName = None
self.PublishTimeList = None
self.AppName = None
self.DomainName = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
if params.get("PublishTimeList") is not None:
self.PublishTimeList = []
for item in params.get("PublishTimeList"):
obj = PublishTime()
obj._deserialize(item)
self.PublishTimeList.append(obj)
self.AppName = params.get("AppName")
self.DomainName = params.get("DomainName")
class TemplateInfo(AbstractModel):
"""转码模板信息
"""
def __init__(self):
"""
:param Vcodec: 视频编码:
h264/h265。
:type Vcodec: str
:param VideoBitrate: 视频码率。100-8000kbps
:type VideoBitrate: int
        :param Acodec: 音频编码:aac/mp3。
:type Acodec: str
:param AudioBitrate: 音频码率。0-500
:type AudioBitrate: int
:param Width: 宽。0-3000
:type Width: int
:param Height: 高。0-3000
:type Height: int
:param Fps: 帧率。0-200
:type Fps: int
:param Gop: 关键帧间隔,单位:秒。1-50
:type Gop: int
:param Rotate: 旋转角度。0 90 180 270
:type Rotate: int
:param Profile: 编码质量:
baseline/main/high。
:type Profile: str
:param BitrateToOrig: 是否不超过原始码率。0:否,1:是。
:type BitrateToOrig: int
:param HeightToOrig: 是否不超过原始高度。0:否,1:是。
:type HeightToOrig: int
:param FpsToOrig: 是否不超过原始帧率。0:否,1:是。
:type FpsToOrig: int
:param NeedVideo: 是否保留视频。0:否,1:是。
:type NeedVideo: int
:param NeedAudio: 是否保留音频。0:否,1:是。
:type NeedAudio: int
:param TemplateId: 模板Id。
:type TemplateId: int
:param TemplateName: 模板名称
:type TemplateName: str
:param Description: 模板描述
:type Description: str
"""
self.Vcodec = None
self.VideoBitrate = None
self.Acodec = None
self.AudioBitrate = None
self.Width = None
self.Height = None
self.Fps = None
self.Gop = None
self.Rotate = None
self.Profile = None
self.BitrateToOrig = None
self.HeightToOrig = None
self.FpsToOrig = None
self.NeedVideo = None
self.NeedAudio = None
self.TemplateId = None
self.TemplateName = None
self.Description = None
def _deserialize(self, params):
self.Vcodec = params.get("Vcodec")
self.VideoBitrate = params.get("VideoBitrate")
self.Acodec = params.get("Acodec")
self.AudioBitrate = params.get("AudioBitrate")
self.Width = params.get("Width")
self.Height = params.get("Height")
self.Fps = params.get("Fps")
self.Gop = params.get("Gop")
self.Rotate = params.get("Rotate")
self.Profile = params.get("Profile")
self.BitrateToOrig = params.get("BitrateToOrig")
self.HeightToOrig = params.get("HeightToOrig")
self.FpsToOrig = params.get("FpsToOrig")
self.NeedVideo = params.get("NeedVideo")
self.NeedAudio = params.get("NeedAudio")
self.TemplateId = params.get("TemplateId")
self.TemplateName = params.get("TemplateName")
self.Description = params.get("Description")
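# A minimal usage sketch for these generated models (illustrative; the field
# values below are made up rather than taken from a real API response):
#
#     tpl = TemplateInfo()
#     tpl._deserialize({"Vcodec": "h264", "VideoBitrate": 2500, "Fps": 30})
#     print(tpl.Vcodec, tpl.VideoBitrate, tpl.Fps)   # -> h264 2500 30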
class TranscodeDetailInfo(AbstractModel):
    """Transcoding detail information.
"""
def __init__(self):
"""
        :param StreamName: Stream name.
        :type StreamName: str
        :param StartTime: Start time (Beijing time), in the format yyyy-mm-dd HH:MM.
        :type StartTime: str
        :param EndTime: End time (Beijing time), in the format yyyy-mm-dd HH:MM.
        :type EndTime: str
        :param Duration: Transcoding duration, in minutes.
        Note: because a push may be interrupted and restarted, this is the
        accumulated actual transcoding time, not the interval between EndTime and StartTime.
        :type Duration: int
        :param ModuleCodec: Encoding method, prefixed with the module, for example:
        liveprocessor_H264 => live transcoding - H264,
        liveprocessor_H265 => live transcoding - H265,
        topspeed_H264 => Top Speed Codec - H264,
        topspeed_H265 => Top Speed Codec - H265.
        :type ModuleCodec: str
        :param Bitrate: Bitrate.
        :type Bitrate: int
        :param Type: Type; one of: transcoding (Transcode), stream mixing (MixStream), watermarking (WaterMark).
        :type Type: str
        :param PushDomain: Push domain name.
        :type PushDomain: str
"""
self.StreamName = None
self.StartTime = None
self.EndTime = None
self.Duration = None
self.ModuleCodec = None
self.Bitrate = None
self.Type = None
self.PushDomain = None
def _deserialize(self, params):
self.StreamName = params.get("StreamName")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Duration = params.get("Duration")
self.ModuleCodec = params.get("ModuleCodec")
self.Bitrate = params.get("Bitrate")
self.Type = params.get("Type")
self.PushDomain = params.get("PushDomain")
class UnBindLiveDomainCertRequest(AbstractModel):
    """UnBindLiveDomainCert request parameter structure.
"""
def __init__(self):
"""
        :param DomainName: Playback domain name.
:type DomainName: str
"""
self.DomainName = None
def _deserialize(self, params):
self.DomainName = params.get("DomainName")
class UnBindLiveDomainCertResponse(AbstractModel):
    """UnBindLiveDomainCert response parameter structure.
"""
def __init__(self):
"""
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class UpdateLiveWatermarkRequest(AbstractModel):
    """UpdateLiveWatermark request parameter structure.
"""
def __init__(self):
"""
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        :param PictureUrl: URL of the watermark image.
        :type PictureUrl: str
        :param XPosition: Display position, offset along the X axis.
        :type XPosition: int
        :param YPosition: Display position, offset along the Y axis.
        :type YPosition: int
        :param WatermarkName: Watermark name.
        :type WatermarkName: str
        :param Width: Watermark width, as a percentage of the original live image width. It is recommended to set only one of width/height; the other scales proportionally to avoid distortion.
        :type Width: int
        :param Height: Watermark height, as a percentage of the original live image width. It is recommended to set only one of width/height; the other scales proportionally to avoid distortion.
        :type Height: int
"""
self.WatermarkId = None
self.PictureUrl = None
self.XPosition = None
self.YPosition = None
self.WatermarkName = None
self.Width = None
self.Height = None
def _deserialize(self, params):
self.WatermarkId = params.get("WatermarkId")
self.PictureUrl = params.get("PictureUrl")
self.XPosition = params.get("XPosition")
self.YPosition = params.get("YPosition")
self.WatermarkName = params.get("WatermarkName")
self.Width = params.get("Width")
self.Height = params.get("Height")
class UpdateLiveWatermarkResponse(AbstractModel):
    """UpdateLiveWatermark response parameter structure.
"""
def __init__(self):
"""
        :param RequestId: Unique request ID, returned with every request. Provide this RequestId when troubleshooting an issue.
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class WatermarkInfo(AbstractModel):
    """Watermark information.
"""
def __init__(self):
"""
        :param WatermarkId: Watermark ID.
        :type WatermarkId: int
        :param PictureUrl: URL of the watermark image.
        :type PictureUrl: str
        :param XPosition: Display position, offset along the X axis.
        :type XPosition: int
        :param YPosition: Display position, offset along the Y axis.
        :type YPosition: int
        :param WatermarkName: Watermark name.
        :type WatermarkName: str
        :param Status: Current status. 0: not in use, 1: in use.
        :type Status: int
        :param CreateTime: Time the watermark was added.
        :type CreateTime: str
        :param Width: Watermark width.
        :type Width: int
        :param Height: Watermark height.
        :type Height: int
"""
self.WatermarkId = None
self.PictureUrl = None
self.XPosition = None
self.YPosition = None
self.WatermarkName = None
self.Status = None
self.CreateTime = None
self.Width = None
self.Height = None
def _deserialize(self, params):
self.WatermarkId = params.get("WatermarkId")
self.PictureUrl = params.get("PictureUrl")
self.XPosition = params.get("XPosition")
self.YPosition = params.get("YPosition")
self.WatermarkName = params.get("WatermarkName")
self.Status = params.get("Status")
self.CreateTime = params.get("CreateTime")
self.Width = params.get("Width")
self.Height = params.get("Height") | [
"[email protected]"
] | |
09e4f93dfe0a7dbf721add15e86b819a1a93c6b9 | d475a6cf49c0b2d40895ff6d48ca9b0298643a87 | /pyleecan/Classes/SlotW24.py | 984f0065ae9048217d93f3adee011ed7f89dd645 | [
"Apache-2.0"
] | permissive | lyhehehe/pyleecan | 6c4a52b17a083fe29fdc8dcd989a3d20feb844d9 | 421e9a843bf30d796415c77dc934546adffd1cd7 | refs/heads/master | 2021-07-05T17:42:02.813128 | 2020-09-03T14:27:03 | 2020-09-03T14:27:03 | 176,678,325 | 2 | 0 | null | 2019-03-20T07:28:06 | 2019-03-20T07:28:06 | null | UTF-8 | Python | false | false | 9,973 | py | # -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Slot/SlotW24.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Slot/SlotW24
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from .SlotWind import SlotWind
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Slot.SlotW24._comp_point_coordinate import _comp_point_coordinate
except ImportError as error:
_comp_point_coordinate = error
try:
from ..Methods.Slot.SlotW24.build_geometry import build_geometry
except ImportError as error:
build_geometry = error
try:
from ..Methods.Slot.SlotW24.build_geometry_wind import build_geometry_wind
except ImportError as error:
build_geometry_wind = error
try:
from ..Methods.Slot.SlotW24.check import check
except ImportError as error:
check = error
try:
from ..Methods.Slot.SlotW24.comp_alphas import comp_alphas
except ImportError as error:
comp_alphas = error
try:
from ..Methods.Slot.SlotW24.comp_angle_opening import comp_angle_opening
except ImportError as error:
comp_angle_opening = error
try:
from ..Methods.Slot.SlotW24.comp_height import comp_height
except ImportError as error:
comp_height = error
try:
from ..Methods.Slot.SlotW24.comp_height_wind import comp_height_wind
except ImportError as error:
comp_height_wind = error
try:
from ..Methods.Slot.SlotW24.comp_surface import comp_surface
except ImportError as error:
comp_surface = error
try:
from ..Methods.Slot.SlotW24.comp_surface_wind import comp_surface_wind
except ImportError as error:
comp_surface_wind = error
from ._check import InitUnKnowClassError
class SlotW24(SlotWind):
VERSION = 1
IS_SYMMETRICAL = 1
# Check ImportError to remove unnecessary dependencies in unused method
# cf Methods.Slot.SlotW24._comp_point_coordinate
if isinstance(_comp_point_coordinate, ImportError):
_comp_point_coordinate = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method _comp_point_coordinate: "
+ str(_comp_point_coordinate)
)
)
)
else:
_comp_point_coordinate = _comp_point_coordinate
# cf Methods.Slot.SlotW24.build_geometry
if isinstance(build_geometry, ImportError):
build_geometry = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method build_geometry: " + str(build_geometry)
)
)
)
else:
build_geometry = build_geometry
# cf Methods.Slot.SlotW24.build_geometry_wind
if isinstance(build_geometry_wind, ImportError):
build_geometry_wind = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method build_geometry_wind: "
+ str(build_geometry_wind)
)
)
)
else:
build_geometry_wind = build_geometry_wind
# cf Methods.Slot.SlotW24.check
if isinstance(check, ImportError):
check = property(
fget=lambda x: raise_(
ImportError("Can't use SlotW24 method check: " + str(check))
)
)
else:
check = check
# cf Methods.Slot.SlotW24.comp_alphas
if isinstance(comp_alphas, ImportError):
comp_alphas = property(
fget=lambda x: raise_(
ImportError("Can't use SlotW24 method comp_alphas: " + str(comp_alphas))
)
)
else:
comp_alphas = comp_alphas
# cf Methods.Slot.SlotW24.comp_angle_opening
if isinstance(comp_angle_opening, ImportError):
comp_angle_opening = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method comp_angle_opening: "
+ str(comp_angle_opening)
)
)
)
else:
comp_angle_opening = comp_angle_opening
# cf Methods.Slot.SlotW24.comp_height
if isinstance(comp_height, ImportError):
comp_height = property(
fget=lambda x: raise_(
ImportError("Can't use SlotW24 method comp_height: " + str(comp_height))
)
)
else:
comp_height = comp_height
# cf Methods.Slot.SlotW24.comp_height_wind
if isinstance(comp_height_wind, ImportError):
comp_height_wind = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method comp_height_wind: "
+ str(comp_height_wind)
)
)
)
else:
comp_height_wind = comp_height_wind
# cf Methods.Slot.SlotW24.comp_surface
if isinstance(comp_surface, ImportError):
comp_surface = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method comp_surface: " + str(comp_surface)
)
)
)
else:
comp_surface = comp_surface
# cf Methods.Slot.SlotW24.comp_surface_wind
if isinstance(comp_surface_wind, ImportError):
comp_surface_wind = property(
fget=lambda x: raise_(
ImportError(
"Can't use SlotW24 method comp_surface_wind: "
+ str(comp_surface_wind)
)
)
)
else:
comp_surface_wind = comp_surface_wind
# save method is available in all object
save = save
# generic copy method
def copy(self):
"""Return a copy of the class
"""
return type(self)(init_dict=self.as_dict())
# get_logger method is available in all object
get_logger = get_logger
    def __init__(self, W3=0.003, H2=0.003, Zs=36, init_dict=None, init_str=None):
        """Constructor of the class. Can be used in three ways :
        - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with every property as a key
- __init__ (init_str = s) s must be a string
s is the file path to load
ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object"""
if init_str is not None: # Initialisation by str
from ..Functions.load import load
assert type(init_str) is str
# load the object from a file
obj = load(init_str)
assert type(obj) is type(self)
W3 = obj.W3
H2 = obj.H2
Zs = obj.Zs
if init_dict is not None: # Initialisation by dict
assert type(init_dict) is dict
# Overwrite default value with init_dict content
if "W3" in list(init_dict.keys()):
W3 = init_dict["W3"]
if "H2" in list(init_dict.keys()):
H2 = init_dict["H2"]
if "Zs" in list(init_dict.keys()):
Zs = init_dict["Zs"]
# Initialisation by argument
self.W3 = W3
self.H2 = H2
# Call SlotWind init
super(SlotW24, self).__init__(Zs=Zs)
# The class is frozen (in SlotWind init), for now it's impossible to
# add new properties
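    # A minimal usage sketch of the three construction modes described above
    # (illustrative only; "slot.json" is a hypothetical saved-object path):
    #
    #     slot = SlotW24(W3=0.003, H2=0.003, Zs=36)      # keyword arguments
    #     slot2 = SlotW24(init_dict=slot.as_dict())      # from a dict
    #     slot3 = SlotW24(init_str="slot.json")          # from a saved file path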
    def __str__(self):
        """Convert this object into a readable string (for print)"""
SlotW24_str = ""
# Get the properties inherited from SlotWind
SlotW24_str += super(SlotW24, self).__str__()
SlotW24_str += "W3 = " + str(self.W3) + linesep
SlotW24_str += "H2 = " + str(self.H2) + linesep
return SlotW24_str
def __eq__(self, other):
"""Compare two objects (skip parent)"""
if type(other) != type(self):
return False
# Check the properties inherited from SlotWind
if not super(SlotW24, self).__eq__(other):
return False
if other.W3 != self.W3:
return False
if other.H2 != self.H2:
return False
return True
    def as_dict(self):
        """Convert this object into a JSON serializable dict (can be used in __init__)
"""
# Get the properties inherited from SlotWind
SlotW24_dict = super(SlotW24, self).as_dict()
SlotW24_dict["W3"] = self.W3
SlotW24_dict["H2"] = self.H2
        # The class name is added to the dict for deserialisation purposes
# Overwrite the mother class name
SlotW24_dict["__class__"] = "SlotW24"
return SlotW24_dict
def _set_None(self):
"""Set all the properties to None (except pyleecan object)"""
self.W3 = None
self.H2 = None
# Set to None the properties inherited from SlotWind
super(SlotW24, self)._set_None()
def _get_W3(self):
"""getter of W3"""
return self._W3
def _set_W3(self, value):
"""setter of W3"""
check_var("W3", value, "float", Vmin=0)
self._W3 = value
W3 = property(
fget=_get_W3,
fset=_set_W3,
doc=u"""Teeth width
:Type: float
:min: 0
""",
)
def _get_H2(self):
"""getter of H2"""
return self._H2
def _set_H2(self, value):
"""setter of H2"""
check_var("H2", value, "float", Vmin=0)
self._H2 = value
H2 = property(
fget=_get_H2,
fset=_set_H2,
doc=u"""Slot height
:Type: float
:min: 0
""",
)
| [
"[email protected]"
] | |
0ec055a25cc8a0344ce78bd9d4773178113d80f6 | 77ec9edf40b34b48477a627d149b6c2054b98a93 | /abc_179_d.py | 7b6ac7b449986dfe9275cb4f4fd8e0cb8b57219c | [] | no_license | junkhp/atcorder | fa4eeb204e3a4ac713001ab89c205039703abc88 | 028ddf7a39534d5907232c4576a03af79feb6073 | refs/heads/main | 2023-04-11T02:15:10.088883 | 2021-04-22T07:06:06 | 2021-04-22T07:06:06 | 313,284,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | def main():
n, k = map(int, input().split())
move_set = set([])
for i in range(k):
a, b = map(int, input().split())
for j in range(a, b + 1):
move_set.add(j)
sorted_set = sorted(move_set)
# print(sorted_set)
dp = [0] * (n + 1)
dp[1] = 1
    for i in range(2, n + 1):
        for num in sorted_set:
            if num + 1 > i:
                # moves are sorted ascending, so no later move fits either
                break
            dp[i] += dp[i - num]
        dp[i] %= 998244353  # reduce as we go so the integers stay small
    print(dp[-1] % 998244353)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
281ad853afa50f156cc560eb9efde70e9130b00e | 40e09fc848fac3bc523802e353c4e8bef9e3cf5e | /pyvore/pyvore/managers/sessions.py | 9f5d60dbae366a036b85d684f0aee266d2320f5c | [] | no_license | sontek/pycon2012 | 8ff24ce51770e0fb6a40ec9a510e958b9b9f309b | 79d417d185030c0af247506b49903744088abe65 | refs/heads/master | 2016-09-05T19:56:18.702274 | 2012-03-17T05:53:46 | 2012-03-17T05:53:46 | 3,627,137 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | from pyvore.managers import BaseManager
from pyvore.models.sessions import Session
from pyvore.models.sessions import Chat
class SessionManager(BaseManager):
def get_sessions(self):
return self.session.query(Session).all()
def get_chatlog(self, pk):
return self.session.query(Chat).filter(Chat.session_pk == pk)
| [
"[email protected]"
] | |
44e898de8b26e5a201cf475e7ab019e44ead146d | 67379c2ae929266f303edc783c8c62edb521174b | /exception/TransactionException.py | 255a542bbd984278db4669c881c1ac6ca58f723b | [] | no_license | bbb11808/seata-python | d20be83093d6d084ad36d9292a8ee18ad3bfc8c6 | c53b605be423c781d38e599e5bade8df8c81c2d9 | refs/heads/master | 2023-02-11T01:22:18.488881 | 2021-01-05T10:10:08 | 2021-01-05T10:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @author jsbxyyx
# @since 1.0
class TransactionException(Exception):
    def __init__(self, code, message=None, cause=None):
        super(TransactionException, self).__init__(message)
self.code = code
self.message = message
self.cause = cause
| [
"[email protected]"
] | |
82517b45e33e99c6728eea5ef933042d18891240 | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/plugin/core/instructionsearch/ui/SearchDirectionWidget.pyi | 68398a7321fe0e264877ec23688efac4696bbacd | [
"MIT"
] | permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 24,511 | pyi | from typing import List
import ghidra.app.plugin.core.instructionsearch.ui
import java.awt
import java.awt.dnd
import java.awt.event
import java.awt.im
import java.awt.image
import java.beans
import java.io
import java.lang
import java.util
import javax.accessibility
import javax.swing
import javax.swing.border
import javax.swing.event
import javax.swing.plaf
class SearchDirectionWidget(ghidra.app.plugin.core.instructionsearch.ui.ControlPanelWidget):
class Direction(java.lang.Enum):
BACKWARD: ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction = BACKWARD
FORWARD: ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction = FORWARD
@overload
def compareTo(self, __a0: java.lang.Enum) -> int: ...
@overload
def compareTo(self, __a0: object) -> int: ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getDeclaringClass(self) -> java.lang.Class: ...
def hashCode(self) -> int: ...
def name(self) -> unicode: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def ordinal(self) -> int: ...
def toString(self) -> unicode: ...
@overload
@staticmethod
def valueOf(__a0: unicode) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
@overload
@staticmethod
def valueOf(__a0: java.lang.Class, __a1: unicode) -> java.lang.Enum: ...
@staticmethod
def values() -> List[ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction]: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
def __init__(self, __a0: unicode, __a1: ghidra.app.plugin.core.instructionsearch.ui.InstructionSearchDialog): ...
def action(self, __a0: java.awt.Event, __a1: object) -> bool: ...
@overload
def add(self, __a0: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.PopupMenu) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: int) -> java.awt.Component: ...
@overload
def add(self, __a0: unicode, __a1: java.awt.Component) -> java.awt.Component: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object) -> None: ...
@overload
def add(self, __a0: java.awt.Component, __a1: object, __a2: int) -> None: ...
def addAncestorListener(self, __a0: javax.swing.event.AncestorListener) -> None: ...
def addComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def addContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def addFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def addHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def addHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def addInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def addKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def addMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def addMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def addMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def addNotify(self) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def addPropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def addVetoableChangeListener(self, __a0: java.beans.VetoableChangeListener) -> None: ...
def applyComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
def areFocusTraversalKeysSet(self, __a0: int) -> bool: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> int: ...
@overload
def checkImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> int: ...
def computeVisibleRect(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def contains(self, __a0: java.awt.Point) -> bool: ...
@overload
def contains(self, __a0: int, __a1: int) -> bool: ...
def countComponents(self) -> int: ...
@overload
def createImage(self, __a0: java.awt.image.ImageProducer) -> java.awt.Image: ...
@overload
def createImage(self, __a0: int, __a1: int) -> java.awt.Image: ...
def createToolTip(self) -> javax.swing.JToolTip: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int) -> java.awt.image.VolatileImage: ...
@overload
def createVolatileImage(self, __a0: int, __a1: int, __a2: java.awt.ImageCapabilities) -> java.awt.image.VolatileImage: ...
def deliverEvent(self, __a0: java.awt.Event) -> None: ...
def disable(self) -> None: ...
def dispatchEvent(self, __a0: java.awt.AWTEvent) -> None: ...
def doLayout(self) -> None: ...
@overload
def enable(self) -> None: ...
@overload
def enable(self, __a0: bool) -> None: ...
def enableInputMethods(self, __a0: bool) -> None: ...
def equals(self, __a0: object) -> bool: ...
@overload
def findComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def findComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: long, __a2: long) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: int, __a2: int) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: float, __a2: float) -> None: ...
@overload
def firePropertyChange(self, __a0: unicode, __a1: bool, __a2: bool) -> None: ...
def getAccessibleContext(self) -> javax.accessibility.AccessibleContext: ...
def getActionForKeyStroke(self, __a0: javax.swing.KeyStroke) -> java.awt.event.ActionListener: ...
def getActionMap(self) -> javax.swing.ActionMap: ...
def getAlignmentX(self) -> float: ...
def getAlignmentY(self) -> float: ...
def getAncestorListeners(self) -> List[javax.swing.event.AncestorListener]: ...
def getAutoscrolls(self) -> bool: ...
def getBackground(self) -> java.awt.Color: ...
def getBaseline(self, __a0: int, __a1: int) -> int: ...
def getBaselineResizeBehavior(self) -> java.awt.Component.BaselineResizeBehavior: ...
def getBorder(self) -> javax.swing.border.Border: ...
@overload
def getBounds(self) -> java.awt.Rectangle: ...
@overload
def getBounds(self, __a0: java.awt.Rectangle) -> java.awt.Rectangle: ...
def getClass(self) -> java.lang.Class: ...
def getClientProperty(self, __a0: object) -> object: ...
def getColorModel(self) -> java.awt.image.ColorModel: ...
def getComponent(self, __a0: int) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: java.awt.Point) -> java.awt.Component: ...
@overload
def getComponentAt(self, __a0: int, __a1: int) -> java.awt.Component: ...
def getComponentCount(self) -> int: ...
def getComponentListeners(self) -> List[java.awt.event.ComponentListener]: ...
def getComponentOrientation(self) -> java.awt.ComponentOrientation: ...
def getComponentPopupMenu(self) -> javax.swing.JPopupMenu: ...
def getComponentZOrder(self, __a0: java.awt.Component) -> int: ...
def getComponents(self) -> List[java.awt.Component]: ...
def getConditionForKeyStroke(self, __a0: javax.swing.KeyStroke) -> int: ...
def getContainerListeners(self) -> List[java.awt.event.ContainerListener]: ...
def getCursor(self) -> java.awt.Cursor: ...
def getDebugGraphicsOptions(self) -> int: ...
@staticmethod
def getDefaultLocale() -> java.util.Locale: ...
def getDropTarget(self) -> java.awt.dnd.DropTarget: ...
def getFocusCycleRootAncestor(self) -> java.awt.Container: ...
def getFocusListeners(self) -> List[java.awt.event.FocusListener]: ...
def getFocusTraversalKeys(self, __a0: int) -> java.util.Set: ...
def getFocusTraversalKeysEnabled(self) -> bool: ...
def getFocusTraversalPolicy(self) -> java.awt.FocusTraversalPolicy: ...
def getFont(self) -> java.awt.Font: ...
def getFontMetrics(self, __a0: java.awt.Font) -> java.awt.FontMetrics: ...
def getForeground(self) -> java.awt.Color: ...
def getGraphics(self) -> java.awt.Graphics: ...
def getGraphicsConfiguration(self) -> java.awt.GraphicsConfiguration: ...
def getHeight(self) -> int: ...
def getHierarchyBoundsListeners(self) -> List[java.awt.event.HierarchyBoundsListener]: ...
def getHierarchyListeners(self) -> List[java.awt.event.HierarchyListener]: ...
def getIgnoreRepaint(self) -> bool: ...
def getInheritsPopupMenu(self) -> bool: ...
def getInputContext(self) -> java.awt.im.InputContext: ...
@overload
def getInputMap(self) -> javax.swing.InputMap: ...
@overload
def getInputMap(self, __a0: int) -> javax.swing.InputMap: ...
def getInputMethodListeners(self) -> List[java.awt.event.InputMethodListener]: ...
def getInputMethodRequests(self) -> java.awt.im.InputMethodRequests: ...
def getInputVerifier(self) -> javax.swing.InputVerifier: ...
@overload
def getInsets(self) -> java.awt.Insets: ...
@overload
def getInsets(self, __a0: java.awt.Insets) -> java.awt.Insets: ...
def getKeyListeners(self) -> List[java.awt.event.KeyListener]: ...
def getLayout(self) -> java.awt.LayoutManager: ...
def getListeners(self, __a0: java.lang.Class) -> List[java.util.EventListener]: ...
def getLocale(self) -> java.util.Locale: ...
@overload
def getLocation(self) -> java.awt.Point: ...
@overload
def getLocation(self, __a0: java.awt.Point) -> java.awt.Point: ...
def getLocationOnScreen(self) -> java.awt.Point: ...
def getMaximumSize(self) -> java.awt.Dimension: ...
def getMinimumSize(self) -> java.awt.Dimension: ...
def getMouseListeners(self) -> List[java.awt.event.MouseListener]: ...
def getMouseMotionListeners(self) -> List[java.awt.event.MouseMotionListener]: ...
@overload
def getMousePosition(self) -> java.awt.Point: ...
@overload
def getMousePosition(self, __a0: bool) -> java.awt.Point: ...
def getMouseWheelListeners(self) -> List[java.awt.event.MouseWheelListener]: ...
def getName(self) -> unicode: ...
def getNextFocusableComponent(self) -> java.awt.Component: ...
def getParent(self) -> java.awt.Container: ...
def getPopupLocation(self, __a0: java.awt.event.MouseEvent) -> java.awt.Point: ...
def getPreferredSize(self) -> java.awt.Dimension: ...
@overload
def getPropertyChangeListeners(self) -> List[java.beans.PropertyChangeListener]: ...
@overload
def getPropertyChangeListeners(self, __a0: unicode) -> List[java.beans.PropertyChangeListener]: ...
def getRegisteredKeyStrokes(self) -> List[javax.swing.KeyStroke]: ...
def getRootPane(self) -> javax.swing.JRootPane: ...
def getSearchDirection(self) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
@overload
def getSize(self) -> java.awt.Dimension: ...
@overload
def getSize(self, __a0: java.awt.Dimension) -> java.awt.Dimension: ...
def getToolTipLocation(self, __a0: java.awt.event.MouseEvent) -> java.awt.Point: ...
@overload
def getToolTipText(self) -> unicode: ...
@overload
def getToolTipText(self, __a0: java.awt.event.MouseEvent) -> unicode: ...
def getToolkit(self) -> java.awt.Toolkit: ...
def getTopLevelAncestor(self) -> java.awt.Container: ...
def getTransferHandler(self) -> javax.swing.TransferHandler: ...
def getTreeLock(self) -> object: ...
def getUI(self) -> javax.swing.plaf.ComponentUI: ...
def getUIClassID(self) -> unicode: ...
def getVerifyInputWhenFocusTarget(self) -> bool: ...
def getVetoableChangeListeners(self) -> List[java.beans.VetoableChangeListener]: ...
def getVisibleRect(self) -> java.awt.Rectangle: ...
def getWidth(self) -> int: ...
def getX(self) -> int: ...
def getY(self) -> int: ...
def gotFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def grabFocus(self) -> None: ...
def handleEvent(self, __a0: java.awt.Event) -> bool: ...
def hasFocus(self) -> bool: ...
def hashCode(self) -> int: ...
def hide(self) -> None: ...
def imageUpdate(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: int, __a4: int, __a5: int) -> bool: ...
def inside(self, __a0: int, __a1: int) -> bool: ...
def invalidate(self) -> None: ...
def isAncestorOf(self, __a0: java.awt.Component) -> bool: ...
def isBackgroundSet(self) -> bool: ...
def isCursorSet(self) -> bool: ...
def isDisplayable(self) -> bool: ...
def isDoubleBuffered(self) -> bool: ...
def isEnabled(self) -> bool: ...
@overload
def isFocusCycleRoot(self) -> bool: ...
@overload
def isFocusCycleRoot(self, __a0: java.awt.Container) -> bool: ...
def isFocusOwner(self) -> bool: ...
def isFocusTraversable(self) -> bool: ...
def isFocusTraversalPolicyProvider(self) -> bool: ...
def isFocusTraversalPolicySet(self) -> bool: ...
def isFocusable(self) -> bool: ...
def isFontSet(self) -> bool: ...
def isForegroundSet(self) -> bool: ...
def isLightweight(self) -> bool: ...
@staticmethod
def isLightweightComponent(__a0: java.awt.Component) -> bool: ...
def isManagingFocus(self) -> bool: ...
def isMaximumSizeSet(self) -> bool: ...
def isMinimumSizeSet(self) -> bool: ...
def isOpaque(self) -> bool: ...
def isOptimizedDrawingEnabled(self) -> bool: ...
def isPaintingForPrint(self) -> bool: ...
def isPaintingTile(self) -> bool: ...
def isPreferredSizeSet(self) -> bool: ...
def isRequestFocusEnabled(self) -> bool: ...
def isShowing(self) -> bool: ...
def isValid(self) -> bool: ...
def isValidateRoot(self) -> bool: ...
def isVisible(self) -> bool: ...
def keyDown(self, __a0: java.awt.Event, __a1: int) -> bool: ...
def keyUp(self, __a0: java.awt.Event, __a1: int) -> bool: ...
@overload
def list(self) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter) -> None: ...
@overload
def list(self, __a0: java.io.PrintStream, __a1: int) -> None: ...
@overload
def list(self, __a0: java.io.PrintWriter, __a1: int) -> None: ...
def locate(self, __a0: int, __a1: int) -> java.awt.Component: ...
def location(self) -> java.awt.Point: ...
def lostFocus(self, __a0: java.awt.Event, __a1: object) -> bool: ...
def mouseDown(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseDrag(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseEnter(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseExit(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseMove(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def mouseUp(self, __a0: java.awt.Event, __a1: int, __a2: int) -> bool: ...
def move(self, __a0: int, __a1: int) -> None: ...
def nextFocus(self) -> None: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def paint(self, __a0: java.awt.Graphics) -> None: ...
def paintAll(self, __a0: java.awt.Graphics) -> None: ...
def paintComponents(self, __a0: java.awt.Graphics) -> None: ...
@overload
def paintImmediately(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def paintImmediately(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
def postEvent(self, __a0: java.awt.Event) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: java.awt.image.ImageObserver) -> bool: ...
@overload
def prepareImage(self, __a0: java.awt.Image, __a1: int, __a2: int, __a3: java.awt.image.ImageObserver) -> bool: ...
def print(self, __a0: java.awt.Graphics) -> None: ...
def printAll(self, __a0: java.awt.Graphics) -> None: ...
def printComponents(self, __a0: java.awt.Graphics) -> None: ...
def putClientProperty(self, __a0: object, __a1: object) -> None: ...
@overload
def registerKeyboardAction(self, __a0: java.awt.event.ActionListener, __a1: javax.swing.KeyStroke, __a2: int) -> None: ...
@overload
def registerKeyboardAction(self, __a0: java.awt.event.ActionListener, __a1: unicode, __a2: javax.swing.KeyStroke, __a3: int) -> None: ...
@overload
def remove(self, __a0: int) -> None: ...
@overload
def remove(self, __a0: java.awt.Component) -> None: ...
@overload
def remove(self, __a0: java.awt.MenuComponent) -> None: ...
def removeAll(self) -> None: ...
def removeAncestorListener(self, __a0: javax.swing.event.AncestorListener) -> None: ...
def removeComponentListener(self, __a0: java.awt.event.ComponentListener) -> None: ...
def removeContainerListener(self, __a0: java.awt.event.ContainerListener) -> None: ...
def removeFocusListener(self, __a0: java.awt.event.FocusListener) -> None: ...
def removeHierarchyBoundsListener(self, __a0: java.awt.event.HierarchyBoundsListener) -> None: ...
def removeHierarchyListener(self, __a0: java.awt.event.HierarchyListener) -> None: ...
def removeInputMethodListener(self, __a0: java.awt.event.InputMethodListener) -> None: ...
def removeKeyListener(self, __a0: java.awt.event.KeyListener) -> None: ...
def removeMouseListener(self, __a0: java.awt.event.MouseListener) -> None: ...
def removeMouseMotionListener(self, __a0: java.awt.event.MouseMotionListener) -> None: ...
def removeMouseWheelListener(self, __a0: java.awt.event.MouseWheelListener) -> None: ...
def removeNotify(self) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: java.beans.PropertyChangeListener) -> None: ...
@overload
def removePropertyChangeListener(self, __a0: unicode, __a1: java.beans.PropertyChangeListener) -> None: ...
def removeVetoableChangeListener(self, __a0: java.beans.VetoableChangeListener) -> None: ...
@overload
def repaint(self) -> None: ...
@overload
def repaint(self, __a0: long) -> None: ...
@overload
def repaint(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def repaint(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def repaint(self, __a0: long, __a1: int, __a2: int, __a3: int, __a4: int) -> None: ...
def requestDefaultFocus(self) -> bool: ...
@overload
def requestFocus(self) -> None: ...
@overload
def requestFocus(self, __a0: bool) -> bool: ...
@overload
def requestFocus(self, __a0: java.awt.event.FocusEvent.Cause) -> None: ...
@overload
def requestFocusInWindow(self) -> bool: ...
@overload
def requestFocusInWindow(self, __a0: java.awt.event.FocusEvent.Cause) -> bool: ...
def resetKeyboardActions(self) -> None: ...
def reshape(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
@overload
def resize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def resize(self, __a0: int, __a1: int) -> None: ...
def revalidate(self) -> None: ...
def scrollRectToVisible(self, __a0: java.awt.Rectangle) -> None: ...
def setActionMap(self, __a0: javax.swing.ActionMap) -> None: ...
def setAlignmentX(self, __a0: float) -> None: ...
def setAlignmentY(self, __a0: float) -> None: ...
def setAutoscrolls(self, __a0: bool) -> None: ...
def setBackground(self, __a0: java.awt.Color) -> None: ...
def setBorder(self, __a0: javax.swing.border.Border) -> None: ...
@overload
def setBounds(self, __a0: java.awt.Rectangle) -> None: ...
@overload
def setBounds(self, __a0: int, __a1: int, __a2: int, __a3: int) -> None: ...
def setComponentOrientation(self, __a0: java.awt.ComponentOrientation) -> None: ...
def setComponentPopupMenu(self, __a0: javax.swing.JPopupMenu) -> None: ...
def setComponentZOrder(self, __a0: java.awt.Component, __a1: int) -> None: ...
def setCursor(self, __a0: java.awt.Cursor) -> None: ...
def setDebugGraphicsOptions(self, __a0: int) -> None: ...
@staticmethod
def setDefaultLocale(__a0: java.util.Locale) -> None: ...
def setDoubleBuffered(self, __a0: bool) -> None: ...
def setDropTarget(self, __a0: java.awt.dnd.DropTarget) -> None: ...
def setEnabled(self, __a0: bool) -> None: ...
def setFocusCycleRoot(self, __a0: bool) -> None: ...
def setFocusTraversalKeys(self, __a0: int, __a1: java.util.Set) -> None: ...
def setFocusTraversalKeysEnabled(self, __a0: bool) -> None: ...
def setFocusTraversalPolicy(self, __a0: java.awt.FocusTraversalPolicy) -> None: ...
def setFocusTraversalPolicyProvider(self, __a0: bool) -> None: ...
def setFocusable(self, __a0: bool) -> None: ...
def setFont(self, __a0: java.awt.Font) -> None: ...
def setForeground(self, __a0: java.awt.Color) -> None: ...
def setIgnoreRepaint(self, __a0: bool) -> None: ...
def setInheritsPopupMenu(self, __a0: bool) -> None: ...
def setInputMap(self, __a0: int, __a1: javax.swing.InputMap) -> None: ...
def setInputVerifier(self, __a0: javax.swing.InputVerifier) -> None: ...
def setLayout(self, __a0: java.awt.LayoutManager) -> None: ...
def setLocale(self, __a0: java.util.Locale) -> None: ...
@overload
def setLocation(self, __a0: java.awt.Point) -> None: ...
@overload
def setLocation(self, __a0: int, __a1: int) -> None: ...
def setMaximumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMinimumSize(self, __a0: java.awt.Dimension) -> None: ...
def setMixingCutoutShape(self, __a0: java.awt.Shape) -> None: ...
def setName(self, __a0: unicode) -> None: ...
def setNextFocusableComponent(self, __a0: java.awt.Component) -> None: ...
def setOpaque(self, __a0: bool) -> None: ...
def setPreferredSize(self, __a0: java.awt.Dimension) -> None: ...
def setRequestFocusEnabled(self, __a0: bool) -> None: ...
@overload
def setSize(self, __a0: java.awt.Dimension) -> None: ...
@overload
def setSize(self, __a0: int, __a1: int) -> None: ...
def setToolTipText(self, __a0: unicode) -> None: ...
def setTransferHandler(self, __a0: javax.swing.TransferHandler) -> None: ...
def setUI(self, __a0: javax.swing.plaf.PanelUI) -> None: ...
def setVerifyInputWhenFocusTarget(self, __a0: bool) -> None: ...
def setVisible(self, __a0: bool) -> None: ...
@overload
def show(self) -> None: ...
@overload
def show(self, __a0: bool) -> None: ...
def toString(self) -> unicode: ...
def transferFocus(self) -> None: ...
def transferFocusBackward(self) -> None: ...
def transferFocusDownCycle(self) -> None: ...
def transferFocusUpCycle(self) -> None: ...
def unregisterKeyboardAction(self, __a0: javax.swing.KeyStroke) -> None: ...
def update(self, __a0: java.awt.Graphics) -> None: ...
def updateUI(self) -> None: ...
def validate(self) -> None: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def searchDirection(self) -> ghidra.app.plugin.core.instructionsearch.ui.SearchDirectionWidget.Direction: ...
| [
"[email protected]"
] | |
ef9a7c367bd1087b092f78ee9feb34f8fb220822 | 0e667a493715932d3dd45f6a59bd31c391c05b6a | /bin/pygmentize | 9e0974abc30dcc7b01ffff006b0e612e8a1e5f35 | [] | no_license | Anubhav722/QR-Code-Scanner | 84908069d6dc4082e94ce01c62085ce1ac380a62 | 455d28d5654bed3c9d3161897f7cead21d4c7f8e | refs/heads/master | 2021-04-30T16:13:46.769315 | 2017-01-26T17:28:45 | 2017-01-26T17:28:45 | 79,985,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | #!/home/paras/Desktop/QR-Code-Scanner/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pygments.cmdline import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
85398c9153e63b9b53d9985b044400b6227c505f | 6806bd3e24d2ec3382cce6964e817e279052f121 | /sentry/plugins/sentry_sites/models.py | 4e294b463ec6ef4e47deb100f73b3e68c629019e | [
"BSD-2-Clause"
] | permissive | magicaltrevor/sentry | af70427a6930f555715362e8899e4269f844e57f | 8c11b2db7f09844aa860bfe7f1c3ff23c0d30f94 | refs/heads/master | 2021-01-18T11:53:55.770327 | 2012-07-29T22:00:35 | 2012-07-29T22:00:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | """
sentry.plugins.sentry_sites.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sentry
from django.utils.translation import ugettext_lazy as _
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class SitesPlugin(TagPlugin):
"""
Adds additional support for showing information about sites including:
* A panel which shows all sites a message was seen on.
* A sidebar module which shows the sites most actively seen on.
"""
slug = 'sites'
title = _('Sites')
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/dcramer/sentry"
tag = 'site'
tag_label = _('Site')
def get_tag_values(self, event):
if not event.site:
return []
return [event.site]
register(SitesPlugin)
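# A minimal sketch of the tagging behaviour (illustrative; any object exposing a
# `site` attribute works here, since the plugin only reads `event.site`):
#
#     class _FakeEvent(object):
#         site = "www.example.com"
#     SitesPlugin().get_tag_values(_FakeEvent())   # -> ["www.example.com"]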
| [
"[email protected]"
] | |
456c590d5d9b7436ae5408d60532e45bf5df7d77 | b761c9c8775d5a08b3b9be6d8300131a4f6a249f | /spring1819_assignment1/assignment1/cs231n/classifiers/neural_net.py | ea28f209bbe508af15877b227b81933dd0badea5 | [] | no_license | DizzyYunxuan/CS231n | 2599c68ccfcae1ba7dc244440eb30abc9f9340df | 3684b83639c49a1060437901da242d8cccadef34 | refs/heads/master | 2020-09-13T16:40:30.364281 | 2019-11-20T03:49:50 | 2019-11-20T03:49:50 | 222,844,025 | 1 | 0 | null | 2019-11-20T03:45:07 | 2019-11-20T03:36:22 | Jupyter Notebook | UTF-8 | Python | false | false | 9,832 | py | from __future__ import print_function
from builtins import range
from builtins import object
import numpy as np
import matplotlib.pyplot as plt
# from past.builtins import xrange
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension of
N, a hidden layer dimension of H, and performs classification over C classes.
We train the network with a softmax loss function and L2 regularization on the
weight matrices. The network uses a ReLU nonlinearity after the first fully
connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values and
biases are initialized to zero. Weights and biases are stored in the
variable self.params, which is a dictionary with the following keys:
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
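    # A minimal usage sketch (illustrative; toy sizes and random data):
    #
    #     net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3)
    #     X = np.random.randn(5, 4)
    #     y = np.array([0, 1, 2, 2, 1])
    #     loss, grads = net.loss(X, y, reg=0.05)
    #     stats = net.train(X, y, X, y, num_iters=100, batch_size=5)
    #     y_pred = net.predict(X)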
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each y[i] is
an integer in the range 0 <= y[i] < C. This parameter is optional; if it
is not passed then we only return scores, and if it is passed then we
instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c] is
the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of training
samples.
- grads: Dictionary mapping parameter names to gradients of those parameters
with respect to the loss function; has the same keys as self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#############################################################################
# TODO: Perform the forward pass, computing the class scores for the input. #
# Store the result in the scores variable, which should be an array of #
# shape (N, C). #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
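        # A minimal sketch of one possible forward pass (illustrative, not the
        # graded solution): affine -> ReLU -> affine, as in the class docstring.
        hidden = np.maximum(0, X.dot(W1) + b1)   # (N, H) ReLU activations
        scores = hidden.dot(W2) + b2             # (N, C) class scores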
# If the targets are not given then jump out, we're done
if y is None:
return scores
# Compute the loss
loss = None
#############################################################################
# TODO: Finish the forward pass, and compute the loss. This should include #
# both the data loss and L2 regularization for W1 and W2. Store the result #
# in the variable loss, which should be a scalar. Use the Softmax #
# classifier loss. #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
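        # A minimal sketch of a numerically stable softmax loss with L2
        # regularization (illustrative; reuses `scores` from the sketch above).
        shifted = scores - np.max(scores, axis=1, keepdims=True)
        exp_scores = np.exp(shifted)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)  # (N, C)
        data_loss = -np.mean(np.log(probs[np.arange(N), y]))
        reg_loss = reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
        loss = data_loss + reg_loss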
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
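        # A minimal sketch of the matching backward pass (illustrative; reuses
        # `probs` and `hidden` from the sketches above).
        dscores = probs.copy()
        dscores[np.arange(N), y] -= 1
        dscores /= N                                      # (N, C)
        grads['W2'] = hidden.T.dot(dscores) + 2 * reg * W2
        grads['b2'] = np.sum(dscores, axis=0)
        dhidden = dscores.dot(W2.T)
        dhidden[hidden <= 0] = 0                          # backprop through ReLU
        grads['W1'] = X.T.dot(dhidden) + 2 * reg * W1
        grads['b1'] = np.sum(dhidden, axis=0)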
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array f shape (N,) giving training labels; y[i] = c means that
X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning rate
after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: Create a random minibatch of training data and labels, storing #
# them in X_batch and y_batch respectively. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
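            # A minimal sketch (illustrative): sample a minibatch with replacement.
            batch_idx = np.random.choice(num_train, batch_size)
            X_batch = X[batch_idx]
            y_batch = y[batch_idx]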
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
#########################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params) #
# using stochastic gradient descent. You'll need to use the gradients #
# stored in the grads dictionary defined above. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
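            # A minimal sketch of a vanilla SGD update (illustrative).
            for param_name in ('W1', 'b1', 'W2', 'b2'):
                self.params[param_name] -= learning_rate * grads[param_name]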
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate.
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points to
classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each of
the elements of X. For all i, y_pred[i] = c means that X[i] is predicted
to have class c, where 0 <= c < C.
"""
y_pred = None
###########################################################################
# TODO: Implement this function; it should be VERY simple! #
###########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
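        # A minimal sketch (illustrative): run the forward pass and take the
        # argmax over class scores.
        hidden = np.maximum(0, X.dot(self.params['W1']) + self.params['b1'])
        scores = hidden.dot(self.params['W2']) + self.params['b2']
        y_pred = np.argmax(scores, axis=1)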
return y_pred
| [
"[email protected]"
] | |
dcbb7d2c6c118a3060c64cfee2dae6fd5aa40e9d | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2453486_1/Python/dvolgyes/solution.py | 3336b278088a91573eff663adc3c60e306e631a1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,667 | py | #!/usr/bin/python
#
# Google codejam solution
# David Volgyes
#
#
import sys, math, os
#import mpmath as mp # see https://code.google.com/p/mpmath/
import numpy as np # see http://www.numpy.org/
#import sympy as sp # see https://code.google.com/p/sympy/
import networkx as nx # see http://networkx.github.com/
import re
import random
T=int(sys.stdin.readline())
fieldX=np.zeros( (4,4), dtype=np.uint8 )
fieldO=np.zeros( (4,4), dtype=np.uint8 )
def solve(x):
    # x is a 4x4 0/1 matrix: return True when any row, column or main
    # diagonal is completely filled (a 'T' square counts for both players).
for i in range(0,4):
subsolution1=True
subsolution2=True
for j in range(0,4):
if x[i,j]==0: subsolution1=False
if x[j,i]==0: subsolution2=False
if subsolution1 or subsolution2: return True
if x[0,0]+x[1,1]+x[2,2]+x[3,3]==4: return True
if x[0,3]+x[1,2]+x[2,1]+x[3,0]==4: return True
return False
for i in range(0,T):
fieldX.fill(0)
fieldO.fill(0)
counter=0
empty=False
while counter<4:
sline=sys.stdin.readline().strip()
if len(sline)<4:continue
for j in range(0,4):
if sline[j]=='X' or sline[j]=='T':
fieldX[counter,j]=1
if sline[j]=='O' or sline[j]=='T':
fieldO[counter,j]=1
continue
if sline[j]=='.':
empty=True
counter+=1
if solve(fieldX):
print "Case #%i: X won" % (i+1,)
continue
if solve(fieldO):
print "Case #%i: O won" % (i+1,)
continue
if empty:
print "Case #%i: Game has not completed" % (i+1,)
continue
print "Case #%i: Draw" % (i+1,)
| [
"[email protected]"
] | |
68a41b87ce93babc8cc9ff31ee191ed3942d9e11 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/fv/afabricextconnp.py | 6c8a4c7ee71ed4b11370d170b02722427f256c7d | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,912 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AFabricExtConnP(Mo):
meta = ClassMeta("cobra.model.fv.AFabricExtConnP")
meta.isAbstract = True
meta.moClassName = "fvAFabricExtConnP"
meta.moClassName = "fvAFabricExtConnP"
meta.rnFormat = ""
meta.category = MoCategory.REGULAR
meta.label = "Abstract Intrasite/Intersite Profile"
meta.writeAccessMask = 0x0
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnP")
meta.concreteSubClasses.add("cobra.model.fv.FabricExtConnPDef")
meta.rnPrefixes = [
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "id", "id", 21395, PropCategory.REGULAR)
prop.label = "Fabric ID"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("id", prop)
prop = PropMeta("str", "name", "name", 4991, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "rt", "rt", 21396, PropCategory.REGULAR)
prop.label = "Global EVPN Route Target"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("rt", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
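# Illustrative query sketch (not part of the generated model): with the Cisco ACI
# cobra SDK, managed objects of the concrete subclass registered above could be
# fetched roughly like this (the APIC URL and credentials below are placeholders):
#   from cobra.mit.session import LoginSession
#   from cobra.mit.access import MoDirectory
#   md = MoDirectory(LoginSession('https://apic-host', 'admin', 'password'))
#   md.login()
#   profiles = md.lookupByClass('fvFabricExtConnP')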
# End of package file
# ##################################################
| [
"[email protected]"
] | |
e01c3b9e135e6767e80e69e93678a8c30583d54b | a797793842f433251d2ab0bafb0ebe800b89a076 | /z7.3.py | 04495ae9dfa0056a6b01a8be77740c3a7360d223 | [] | no_license | irhadSaric/Instrukcije | b2f576bceb7e75f5fa65bfef99c9cde53d597b32 | 9ac8979b824babdeef3712ab9d23c764536d57b0 | refs/heads/master | 2020-09-28T09:00:08.389651 | 2020-02-01T20:33:59 | 2020-02-01T20:33:59 | 226,740,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | lista = []
for i in range(5):
broj = int(input())
lista.append(broj)
lista = sorted(lista)
print(lista[2]) | [
"[email protected]"
] | |
80a886b3cc887cdf1aefb3525eaa35f1f6528e29 | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/volume/volume_attributes.py | 94f52a5e553ca733b3138d1b081bb226e35c66cc | [
"MIT"
] | permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,794 | py | from netapp.volume.volume_hybrid_cache_attributes import VolumeHybridCacheAttributes
from netapp.volume.volume_mirror_attributes import VolumeMirrorAttributes
from netapp.volume.volume_space_attributes import VolumeSpaceAttributes
from netapp.volume.volume_directory_attributes import VolumeDirectoryAttributes
from netapp.volume.volume_state_attributes import VolumeStateAttributes
from netapp.volume.volume_autosize_attributes import VolumeAutosizeAttributes
from netapp.volume.volume_flexcache_attributes import VolumeFlexcacheAttributes
from netapp.volume.volume_id_attributes import VolumeIdAttributes
from netapp.volume.volume_antivirus_attributes import VolumeAntivirusAttributes
from netapp.volume.volume_qos_attributes import VolumeQosAttributes
from netapp.volume.volume_transition_attributes import VolumeTransitionAttributes
from netapp.volume.volume_snapshot_attributes import VolumeSnapshotAttributes
from netapp.volume.volume_language_attributes import VolumeLanguageAttributes
from netapp.volume.volume_security_attributes import VolumeSecurityAttributes
from netapp.volume.volume_sis_attributes import VolumeSisAttributes
from netapp.volume.volume_performance_attributes import VolumePerformanceAttributes
from netapp.volume.volume_inode_attributes import VolumeInodeAttributes
from netapp.volume.volume_snapshot_autodelete_attributes import VolumeSnapshotAutodeleteAttributes
from netapp.volume.volume_vm_align_attributes import VolumeVmAlignAttributes
from netapp.volume.volume_64bit_upgrade_attributes import Volume64BitUpgradeAttributes
from netapp.volume.volume_clone_attributes import VolumeCloneAttributes
from netapp.volume.volume_infinitevol_attributes import VolumeInfinitevolAttributes
from netapp.volume.volume_export_attributes import VolumeExportAttributes
from netapp.netapp_object import NetAppObject
class VolumeAttributes(NetAppObject):
"""
Attributes of a volume.
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_volume_hybrid_cache_attributes = None
@property
def volume_hybrid_cache_attributes(self):
"""
This field contains information on Flash Pool caching
attributes on a volume
"""
return self._volume_hybrid_cache_attributes
@volume_hybrid_cache_attributes.setter
def volume_hybrid_cache_attributes(self, val):
if val != None:
self.validate('volume_hybrid_cache_attributes', val)
self._volume_hybrid_cache_attributes = val
_volume_mirror_attributes = None
@property
def volume_mirror_attributes(self):
"""
This field contains information applying exclusive to
volume mirror.
"""
return self._volume_mirror_attributes
@volume_mirror_attributes.setter
def volume_mirror_attributes(self, val):
if val != None:
self.validate('volume_mirror_attributes', val)
self._volume_mirror_attributes = val
_volume_space_attributes = None
@property
def volume_space_attributes(self):
"""
This field contains information related to volume disk
space management including on-disk layout.
"""
return self._volume_space_attributes
@volume_space_attributes.setter
def volume_space_attributes(self, val):
if val != None:
self.validate('volume_space_attributes', val)
self._volume_space_attributes = val
_volume_directory_attributes = None
@property
def volume_directory_attributes(self):
"""
This field contains information related to directories in
a volume.
"""
return self._volume_directory_attributes
@volume_directory_attributes.setter
def volume_directory_attributes(self, val):
if val != None:
self.validate('volume_directory_attributes', val)
self._volume_directory_attributes = val
_volume_state_attributes = None
@property
def volume_state_attributes(self):
"""
This field contains information about the state or status
of a volume or its features.
"""
return self._volume_state_attributes
@volume_state_attributes.setter
def volume_state_attributes(self, val):
if val != None:
self.validate('volume_state_attributes', val)
self._volume_state_attributes = val
_volume_autosize_attributes = None
@property
def volume_autosize_attributes(self):
"""
This field contains information about the autosize
settings of the volume.
"""
return self._volume_autosize_attributes
@volume_autosize_attributes.setter
def volume_autosize_attributes(self, val):
if val != None:
self.validate('volume_autosize_attributes', val)
self._volume_autosize_attributes = val
_volume_flexcache_attributes = None
@property
def volume_flexcache_attributes(self):
"""
This field contains information applying exclusively to
flexcache volumes.
"""
return self._volume_flexcache_attributes
@volume_flexcache_attributes.setter
def volume_flexcache_attributes(self, val):
if val != None:
self.validate('volume_flexcache_attributes', val)
self._volume_flexcache_attributes = val
_volume_id_attributes = None
@property
def volume_id_attributes(self):
"""
This field contains identification information about the
volume.
"""
return self._volume_id_attributes
@volume_id_attributes.setter
def volume_id_attributes(self, val):
if val != None:
self.validate('volume_id_attributes', val)
self._volume_id_attributes = val
_volume_antivirus_attributes = None
@property
def volume_antivirus_attributes(self):
"""
This field contains information about Antivirus On-Access
settings for the volume.
"""
return self._volume_antivirus_attributes
@volume_antivirus_attributes.setter
def volume_antivirus_attributes(self, val):
if val != None:
self.validate('volume_antivirus_attributes', val)
self._volume_antivirus_attributes = val
_volume_qos_attributes = None
@property
def volume_qos_attributes(self):
"""
This field contains the information that relates to QoS.
"""
return self._volume_qos_attributes
@volume_qos_attributes.setter
def volume_qos_attributes(self, val):
if val != None:
self.validate('volume_qos_attributes', val)
self._volume_qos_attributes = val
_volume_transition_attributes = None
@property
def volume_transition_attributes(self):
"""
This field contains information applying exclusively to
transitioned or transitioning volumes.
"""
return self._volume_transition_attributes
@volume_transition_attributes.setter
def volume_transition_attributes(self, val):
if val != None:
self.validate('volume_transition_attributes', val)
self._volume_transition_attributes = val
_volume_snapshot_attributes = None
@property
def volume_snapshot_attributes(self):
"""
This field contains information applying exclusively to
all the snapshots in the volume. Volume disk
space-related settings are excluded.
"""
return self._volume_snapshot_attributes
@volume_snapshot_attributes.setter
def volume_snapshot_attributes(self, val):
if val != None:
self.validate('volume_snapshot_attributes', val)
self._volume_snapshot_attributes = val
_volume_language_attributes = None
@property
def volume_language_attributes(self):
"""
This field contains information about volume
language-related settings.
"""
return self._volume_language_attributes
@volume_language_attributes.setter
def volume_language_attributes(self, val):
if val != None:
self.validate('volume_language_attributes', val)
self._volume_language_attributes = val
_volume_security_attributes = None
@property
def volume_security_attributes(self):
"""
This field contains information about volume security
settings.
"""
return self._volume_security_attributes
@volume_security_attributes.setter
def volume_security_attributes(self, val):
if val != None:
self.validate('volume_security_attributes', val)
self._volume_security_attributes = val
_volume_sis_attributes = None
@property
def volume_sis_attributes(self):
"""
This field contains information about Deduplication, file
clone, compression, etc.
"""
return self._volume_sis_attributes
@volume_sis_attributes.setter
def volume_sis_attributes(self, val):
if val != None:
self.validate('volume_sis_attributes', val)
self._volume_sis_attributes = val
_volume_performance_attributes = None
@property
def volume_performance_attributes(self):
"""
This field contains information that relates to the
performance of the volume.
"""
return self._volume_performance_attributes
@volume_performance_attributes.setter
def volume_performance_attributes(self, val):
if val != None:
self.validate('volume_performance_attributes', val)
self._volume_performance_attributes = val
_volume_inode_attributes = None
@property
def volume_inode_attributes(self):
"""
This field contains information about inodes in a
volume.
"""
return self._volume_inode_attributes
@volume_inode_attributes.setter
def volume_inode_attributes(self, val):
if val != None:
self.validate('volume_inode_attributes', val)
self._volume_inode_attributes = val
_volume_snapshot_autodelete_attributes = None
@property
def volume_snapshot_autodelete_attributes(self):
"""
This field contains information about snapshot autodelete
policy settings.
"""
return self._volume_snapshot_autodelete_attributes
@volume_snapshot_autodelete_attributes.setter
def volume_snapshot_autodelete_attributes(self, val):
if val != None:
self.validate('volume_snapshot_autodelete_attributes', val)
self._volume_snapshot_autodelete_attributes = val
_volume_vm_align_attributes = None
@property
def volume_vm_align_attributes(self):
"""
This field contains information related to the Virtual
Machine alignment settings on a volume
"""
return self._volume_vm_align_attributes
@volume_vm_align_attributes.setter
def volume_vm_align_attributes(self, val):
if val != None:
self.validate('volume_vm_align_attributes', val)
self._volume_vm_align_attributes = val
_volume_64bit_upgrade_attributes = None
@property
def volume_64bit_upgrade_attributes(self):
"""
Information related to 64-bit upgrade. After 64-bit
upgrade completes, this information is no longer
available.
"""
return self._volume_64bit_upgrade_attributes
@volume_64bit_upgrade_attributes.setter
def volume_64bit_upgrade_attributes(self, val):
if val != None:
self.validate('volume_64bit_upgrade_attributes', val)
self._volume_64bit_upgrade_attributes = val
_volume_clone_attributes = None
@property
def volume_clone_attributes(self):
"""
This field contains information applying exclusively to
clone volumes.
"""
return self._volume_clone_attributes
@volume_clone_attributes.setter
def volume_clone_attributes(self, val):
if val != None:
self.validate('volume_clone_attributes', val)
self._volume_clone_attributes = val
_volume_infinitevol_attributes = None
@property
def volume_infinitevol_attributes(self):
"""
This field contains information about the state of an
Infinite Volume.
"""
return self._volume_infinitevol_attributes
@volume_infinitevol_attributes.setter
def volume_infinitevol_attributes(self, val):
if val != None:
self.validate('volume_infinitevol_attributes', val)
self._volume_infinitevol_attributes = val
_volume_export_attributes = None
@property
def volume_export_attributes(self):
"""
This field contains information about export settings of
the volume.
"""
return self._volume_export_attributes
@volume_export_attributes.setter
def volume_export_attributes(self, val):
if val != None:
self.validate('volume_export_attributes', val)
self._volume_export_attributes = val
@staticmethod
def get_api_name():
return "volume-attributes"
@staticmethod
def get_desired_attrs():
return [
'volume-hybrid-cache-attributes',
'volume-mirror-attributes',
'volume-space-attributes',
'volume-directory-attributes',
'volume-state-attributes',
'volume-autosize-attributes',
'volume-flexcache-attributes',
'volume-id-attributes',
'volume-antivirus-attributes',
'volume-qos-attributes',
'volume-transition-attributes',
'volume-snapshot-attributes',
'volume-language-attributes',
'volume-security-attributes',
'volume-sis-attributes',
'volume-performance-attributes',
'volume-inode-attributes',
'volume-snapshot-autodelete-attributes',
'volume-vm-align-attributes',
'volume-64bit-upgrade-attributes',
'volume-clone-attributes',
'volume-infinitevol-attributes',
'volume-export-attributes',
]
def describe_properties(self):
return {
'volume_hybrid_cache_attributes': { 'class': VolumeHybridCacheAttributes, 'is_list': False, 'required': 'optional' },
'volume_mirror_attributes': { 'class': VolumeMirrorAttributes, 'is_list': False, 'required': 'optional' },
'volume_space_attributes': { 'class': VolumeSpaceAttributes, 'is_list': False, 'required': 'optional' },
'volume_directory_attributes': { 'class': VolumeDirectoryAttributes, 'is_list': False, 'required': 'optional' },
'volume_state_attributes': { 'class': VolumeStateAttributes, 'is_list': False, 'required': 'optional' },
'volume_autosize_attributes': { 'class': VolumeAutosizeAttributes, 'is_list': False, 'required': 'optional' },
'volume_flexcache_attributes': { 'class': VolumeFlexcacheAttributes, 'is_list': False, 'required': 'optional' },
'volume_id_attributes': { 'class': VolumeIdAttributes, 'is_list': False, 'required': 'optional' },
'volume_antivirus_attributes': { 'class': VolumeAntivirusAttributes, 'is_list': False, 'required': 'optional' },
'volume_qos_attributes': { 'class': VolumeQosAttributes, 'is_list': False, 'required': 'optional' },
'volume_transition_attributes': { 'class': VolumeTransitionAttributes, 'is_list': False, 'required': 'optional' },
'volume_snapshot_attributes': { 'class': VolumeSnapshotAttributes, 'is_list': False, 'required': 'optional' },
'volume_language_attributes': { 'class': VolumeLanguageAttributes, 'is_list': False, 'required': 'optional' },
'volume_security_attributes': { 'class': VolumeSecurityAttributes, 'is_list': False, 'required': 'optional' },
'volume_sis_attributes': { 'class': VolumeSisAttributes, 'is_list': False, 'required': 'optional' },
'volume_performance_attributes': { 'class': VolumePerformanceAttributes, 'is_list': False, 'required': 'optional' },
'volume_inode_attributes': { 'class': VolumeInodeAttributes, 'is_list': False, 'required': 'optional' },
'volume_snapshot_autodelete_attributes': { 'class': VolumeSnapshotAutodeleteAttributes, 'is_list': False, 'required': 'optional' },
'volume_vm_align_attributes': { 'class': VolumeVmAlignAttributes, 'is_list': False, 'required': 'optional' },
'volume_64bit_upgrade_attributes': { 'class': Volume64BitUpgradeAttributes, 'is_list': False, 'required': 'optional' },
'volume_clone_attributes': { 'class': VolumeCloneAttributes, 'is_list': False, 'required': 'optional' },
'volume_infinitevol_attributes': { 'class': VolumeInfinitevolAttributes, 'is_list': False, 'required': 'optional' },
'volume_export_attributes': { 'class': VolumeExportAttributes, 'is_list': False, 'required': 'optional' },
}
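# --- Illustrative usage sketch (an assumption, not part of the generated bindings) ---
# The class docstring above describes the "desired attributes" convention: providing
# a sub-object (even an empty one) marks that attribute as desired in the output.
# A caller might build such a query object roughly like this:
def _example_desired_volume_attributes():
    desired = VolumeAttributes()
    desired.volume_id_attributes = VolumeIdAttributes()        # request identity info
    desired.volume_space_attributes = VolumeSpaceAttributes()  # request space usage info
    return desired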
| [
"[email protected]"
] | |
02d48bd2c223636e35624a38576f0a5412d9f2f8 | 2e06c0df26e3fbccc2af052301e8b486fd17d84c | /Line3D/line3d_rectangular_projection.py | 66b986387a063fbb644ba6817cebe039bc9a5c45 | [
"MIT"
] | permissive | d8ye/pyecharts-gallery | 54f44c0a78d88608ae83a678c105424113866f25 | 07995a7f2600983282eb37b1e94da9af2f1a25b5 | refs/heads/master | 2020-07-03T13:04:42.093830 | 2019-08-13T04:14:13 | 2019-08-13T04:14:13 | 201,913,794 | 0 | 0 | MIT | 2019-08-12T11:04:10 | 2019-08-12T11:04:09 | null | UTF-8 | Python | false | false | 1,458 | py | import math
import pyecharts.options as opts
from pyecharts.charts import Line3D
"""
Gallery uses pyecharts 1.1.0
Reference: https://echarts.baidu.com/examples/editor.html?c=line3d-orthographic&gl=1
Features not yet implemented:
1.
"""
week_en = "Saturday Friday Thursday Wednesday Tuesday Monday Sunday".split()
clock = (
"12a 1a 2a 3a 4a 5a 6a 7a 8a 9a 10a 11a 12p "
"1p 2p 3p 4p 5p 6p 7p 8p 9p 10p 11p".split()
)
data = []
for t in range(0, 25000):
_t = t / 1000
x = (1 + 0.25 * math.cos(75 * _t)) * math.cos(_t)
y = (1 + 0.25 * math.cos(75 * _t)) * math.sin(_t)
z = _t + 2.0 * math.sin(75 * _t)
data.append([x, y, z])
(
Line3D()
.add(
"",
data,
xaxis3d_opts=opts.Axis3DOpts(data=clock, type_="value"),
yaxis3d_opts=opts.Axis3DOpts(data=week_en, type_="value"),
grid3d_opts=opts.Grid3DOpts(width=100, height=100, depth=100),
)
.set_global_opts(
visualmap_opts=opts.VisualMapOpts(
dimension=2,
max_=30,
min_=0,
range_color=[
"#313695",
"#4575b4",
"#74add1",
"#abd9e9",
"#e0f3f8",
"#ffffbf",
"#fee090",
"#fdae61",
"#f46d43",
"#d73027",
"#a50026",
],
)
)
.render("line3d_rectangular_projection.html")
)
| [
"[email protected]"
] | |
26c28d596fb8b6712cc4ba60a88c42f88de634df | 959d6f7027a965f609a0be2885960b63c6dc97bc | /facebook/likers/steps.py | 96cfda296f3d581fbb757246dd37896ae0d2495a | [] | no_license | ameetbora/facebook-comments | 0bf57f8e5b4a8ef7804aa999fa86d9913b7ee99c | 7649c808164f978b147a4410795eadf374e3d3dc | refs/heads/master | 2020-04-12T14:39:23.733965 | 2018-10-30T06:17:42 | 2018-10-30T06:17:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | import time
def login(driver, user_email: str, user_password: str):
driver.get("https://www.facebook.com")
email = driver.find_element_by_id("email")
password = driver.find_element_by_id("pass")
submit = driver.find_element_by_id("loginbutton")
email.send_keys(user_email)
password.send_keys(user_password)
submit.click()
def keep_scrolling(driver, times: int = 99999999999):
while times > 0:
times -= 1
results_end_notifiers = driver.find_elements_by_xpath("//div[text()='End of results']")
if len(results_end_notifiers) > 0:
print("Looks like we found all the likers.")
return True
else:
driver.execute_script("window.scrollTo(0, document.body.scrollHeight + 1000);")
time.sleep(3)
def get_likers(driver):
likers = []
links = [link.get_attribute("href") for link in driver.find_elements_by_xpath("//table[@role='presentation']//tr//td[position()=2]//a[not(@class)]")]
names = [name.text for name in driver.find_elements_by_xpath("//table[@role='presentation']//tr//td[position()=2]//a[not(@class)]/div/div")]
if len(names) > 0 and len(names) == len(links):
for i in range(len(links)):
likers.append({
"name": names[i],
"link": links[i],
})
else:
print("The names And links didn't match, something is wrong with our xpathing.")
return likers
def get_next_likers(driver):
next_page_link = driver.find_elements_by_xpath("//div[@id='see_more_pager']/a")
if len(next_page_link) > 0:
next_page_link[0].click()
return True
return False
def get_facebook_warning(driver):
warning = driver.find_elements_by_xpath("//div[contains(text(), 'It looks like you’re using this feature in a way it wasn’t meant to be used.')]")
return len(warning) > 0 | [
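# --- Example wiring of the step functions above (sketch only; the credentials,
# --- browser choice and likers-page URL are placeholders, not from the original) ---
if __name__ == "__main__":
    from selenium import webdriver  # assumes selenium is installed

    driver = webdriver.Firefox()
    login(driver, "[email protected]", "password")
    driver.get("https://www.facebook.com/<likers-page-url>")  # hypothetical likers page
    collected = []
    while not get_facebook_warning(driver):
        keep_scrolling(driver, times=5)           # scroll a few screens
        collected.extend(get_likers(driver))      # parse the currently visible likers
        if not get_next_likers(driver):           # stop when there is no next page
            break
    print("Collected %d likers" % len(collected))
    driver.quit()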
"[email protected]"
] | |
3e92f309ef61231db2fa56989217b3ba6eb86326 | 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc | /swagger_client/models/conflict_error.py | a40924bde282c121008b4b6801a38516e4f056f1 | [] | no_license | cascadiarc/cyclos-python-client | 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | a2e22a30e22944587293d51be2b8268bce808d70 | refs/heads/main | 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,439 | py | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class ConflictError(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'code': 'ConflictErrorCode'
}
attribute_map = {
'code': 'code'
}
def __init__(self, code=None, _configuration=None): # noqa: E501
"""ConflictError - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._code = None
self.discriminator = None
if code is not None:
self.code = code
@property
def code(self):
"""Gets the code of this ConflictError. # noqa: E501
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:return: The code of this ConflictError. # noqa: E501
:rtype: ConflictErrorCode
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ConflictError.
Error codes for 409 Conflict entity HTTP status Possible values are: * constraintViolatedOnRemove: An attempt to remove some entity has failed, probably because that entity is in use, that is, is being referenced by some other entity. * staleEntity: Failure in the optimistic lock. It means some entity was fetched for editing by 2 clients. Then they both saved it. The first one is successful, but the second one will fail. If you get this error, make sure the `version` field is being sent with the correct value, as fetched from the server. # noqa: E501
:param code: The code of this ConflictError. # noqa: E501
:type: ConflictErrorCode
"""
self._code = code
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConflictError, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConflictError):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConflictError):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
6085a2cfbcde968d0ed001eb7a49d5bebfa6aa75 | 817a97680e85142634c3e7c66a3e0a0e5eceaffd | /sma_cross_vol.py | d1c0f856afe1d5f0f00c0bc6834541cf33e6a4d0 | [] | no_license | johndpope/algotrading | 4cca78db99af8fef0d1fc57aac3104bd0e8a895c | f2f527f85aad6cce928f1c2e9794f9217efcce93 | refs/heads/master | 2021-06-24T15:24:53.136691 | 2017-08-27T16:13:55 | 2017-08-27T16:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | from datetime import datetime, timedelta
import backtrader as bt
class SMACrossVolumeStrategy(bt.SignalStrategy):
params = dict(
diff=0.01,
limit=0.005,
limdays=10,
limdays2=1000,
maperiod_small=30,
maperiod_big=30,
)
def __init__(self):
self.order = None
self.dataclose = self.datas[0].close
self.datavol = self.datas[0].volume
self.sma_small = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_small
)
self.sma_big = bt.indicators.SimpleMovingAverage(
self.datas[0],
period=self.params.maperiod_big
)
def log(self, txt, dt=None, doprint=False):
'''Logging function fot this strategy'''
if doprint:
dt = dt or self.datas[0].datetime.date(0)
print('%s, %s' % (dt.isoformat(), txt))
def stop(self):
self.log('(MA Period Small: %2d | MA Period Big: %2d) Ending Value %.2f' %
(self.p.maperiod_small, self.p.maperiod_big, self.broker.getvalue()), doprint=True)
def next(self):
if self.order:
return
if not self.position:
if self.sma_small[0] > self.sma_big[0] and self.sma_small[-1] < self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.buy()
else:
if self.sma_small[0] < self.sma_big[0] and self.sma_small[-1] > self.sma_big[-1] and self.datavol[0] > 2000000:
self.order = self.sell()
cerebro = bt.Cerebro()
strats = cerebro.optstrategy(
SMACrossVolumeStrategy,
maperiod_small=range(2, 10),
maperiod_big=range(10, 20),
)
data = bt.feeds.GenericCSVData(
dataname='eur_usd_1d.csv',
separator=',',
dtformat=('%Y%m%d'),
tmformat=('%H%M00'),
datetime=0,
time=1,
open=2,
high=3,
low=4,
close=5,
volume=6,
openinterest=-1
)
# data = bt.feeds.YahooFinanceData(dataname='YHOO', fromdate=datetime(2011, 1, 1),
# todate=datetime(2012, 12, 31))
cerebro.adddata(data)
cerebro.addsizer(bt.sizers.FixedSize, stake=50)
# cerebro.addstrategy(SimpleSMAStrategy)
# Print out the starting conditions
print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())
# Run over everything
cerebro.run()
# Print out the final result
print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())
# cerebro.run()
# cerebro.plot()
| [
"[email protected]"
] | |
40d4849bbc2eaf4a84128ba8c1fdc12a9548dde1 | 16450d59c820298f8803fd40a1ffa2dd5887e103 | /baekjoon/5622.py | d81981f661aa57dc341a4a724cc55527ebc3158a | [] | no_license | egyeasy/TIL_public | f78c11f81d159eedb420f5fa177c05d310c4a039 | e2f40eda09cb0a65cc064d9ba9b0e2fa7cbbcb38 | refs/heads/master | 2021-06-21T01:22:16.516777 | 2021-02-02T13:16:21 | 2021-02-02T13:16:21 | 167,803,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,529 | py | """
Sang-geun's grandmother uses an old rotary dial telephone like the one in the picture below.
To dial a digit, you press it and then turn the dial clockwise until it reaches the metal pin. Once a digit is dialed, the dial returns to its starting position, and the next digit must be dialed from the start again.
Dialing the digit 1 takes 2 seconds in total. Larger digits take longer: each position further along the dial adds 1 more second.
The grandmother memorizes phone numbers as the letters that correspond to each digit. That is, to dial a word you dial the digit for each letter. For example, UNUCIC is the same as 868242.
Given a word the grandmother has memorized, write a program that computes the time needed to dial it.
> Input
The first line contains a word of uppercase letters, 2 to 15 characters long.
UNUCIC
> Output
Print on the first line the time required to dial the word.
36
"""
num_list = [2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9]
word = input()
result = 0
for i in word:
idx = ord(i) - 65
result += num_list[idx] + 1
print(result)
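# Worked example (matches the expected output above): for UNUCIC the per-letter dial
# times are U->8 (9s), N->6 (7s), U->8 (9s), C->2 (3s), I->4 (5s), C->2 (3s),
# and 9 + 7 + 9 + 3 + 5 + 3 = 36.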
# Thoughts
# 1. It might be possible to write this with less manual listing by exploiting more of the digit-to-alphabet pattern. | [
"[email protected]"
] | |
a3ee575e7318f6ded972fa7288d9b79b53f4f0e7 | 302442c32bacca6cde69184d3f2d7529361e4f3c | /cidtrsend-all/stage2-model/pytz/zoneinfo/Navajo.py | 1b27ae20abe14d05ef0286e1b3a242389516aafd | [] | no_license | fucknoob/WebSemantic | 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | f2b4584a994e00e76caccce167eb04ea61afa3e0 | refs/heads/master | 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,131 | py | '''tzinfo timezone information for Navajo.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Navajo(DstTzInfo):
'''Navajo timezone definition. See datetime.tzinfo for details'''
zone = 'Navajo'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1918,3,31,9,0,0),
d(1918,10,27,8,0,0),
d(1919,3,30,9,0,0),
d(1919,10,26,8,0,0),
d(1920,3,28,9,0,0),
d(1920,10,31,8,0,0),
d(1921,3,27,9,0,0),
d(1921,5,22,8,0,0),
d(1942,2,9,9,0,0),
d(1945,8,14,23,0,0),
d(1945,9,30,8,0,0),
d(1965,4,25,9,0,0),
d(1965,10,31,8,0,0),
d(1966,4,24,9,0,0),
d(1966,10,30,8,0,0),
d(1967,4,30,9,0,0),
d(1967,10,29,8,0,0),
d(1968,4,28,9,0,0),
d(1968,10,27,8,0,0),
d(1969,4,27,9,0,0),
d(1969,10,26,8,0,0),
d(1970,4,26,9,0,0),
d(1970,10,25,8,0,0),
d(1971,4,25,9,0,0),
d(1971,10,31,8,0,0),
d(1972,4,30,9,0,0),
d(1972,10,29,8,0,0),
d(1973,4,29,9,0,0),
d(1973,10,28,8,0,0),
d(1974,1,6,9,0,0),
d(1974,10,27,8,0,0),
d(1975,2,23,9,0,0),
d(1975,10,26,8,0,0),
d(1976,4,25,9,0,0),
d(1976,10,31,8,0,0),
d(1977,4,24,9,0,0),
d(1977,10,30,8,0,0),
d(1978,4,30,9,0,0),
d(1978,10,29,8,0,0),
d(1979,4,29,9,0,0),
d(1979,10,28,8,0,0),
d(1980,4,27,9,0,0),
d(1980,10,26,8,0,0),
d(1981,4,26,9,0,0),
d(1981,10,25,8,0,0),
d(1982,4,25,9,0,0),
d(1982,10,31,8,0,0),
d(1983,4,24,9,0,0),
d(1983,10,30,8,0,0),
d(1984,4,29,9,0,0),
d(1984,10,28,8,0,0),
d(1985,4,28,9,0,0),
d(1985,10,27,8,0,0),
d(1986,4,27,9,0,0),
d(1986,10,26,8,0,0),
d(1987,4,5,9,0,0),
d(1987,10,25,8,0,0),
d(1988,4,3,9,0,0),
d(1988,10,30,8,0,0),
d(1989,4,2,9,0,0),
d(1989,10,29,8,0,0),
d(1990,4,1,9,0,0),
d(1990,10,28,8,0,0),
d(1991,4,7,9,0,0),
d(1991,10,27,8,0,0),
d(1992,4,5,9,0,0),
d(1992,10,25,8,0,0),
d(1993,4,4,9,0,0),
d(1993,10,31,8,0,0),
d(1994,4,3,9,0,0),
d(1994,10,30,8,0,0),
d(1995,4,2,9,0,0),
d(1995,10,29,8,0,0),
d(1996,4,7,9,0,0),
d(1996,10,27,8,0,0),
d(1997,4,6,9,0,0),
d(1997,10,26,8,0,0),
d(1998,4,5,9,0,0),
d(1998,10,25,8,0,0),
d(1999,4,4,9,0,0),
d(1999,10,31,8,0,0),
d(2000,4,2,9,0,0),
d(2000,10,29,8,0,0),
d(2001,4,1,9,0,0),
d(2001,10,28,8,0,0),
d(2002,4,7,9,0,0),
d(2002,10,27,8,0,0),
d(2003,4,6,9,0,0),
d(2003,10,26,8,0,0),
d(2004,4,4,9,0,0),
d(2004,10,31,8,0,0),
d(2005,4,3,9,0,0),
d(2005,10,30,8,0,0),
d(2006,4,2,9,0,0),
d(2006,10,29,8,0,0),
d(2007,3,11,9,0,0),
d(2007,11,4,8,0,0),
d(2008,3,9,9,0,0),
d(2008,11,2,8,0,0),
d(2009,3,8,9,0,0),
d(2009,11,1,8,0,0),
d(2010,3,14,9,0,0),
d(2010,11,7,8,0,0),
d(2011,3,13,9,0,0),
d(2011,11,6,8,0,0),
d(2012,3,11,9,0,0),
d(2012,11,4,8,0,0),
d(2013,3,10,9,0,0),
d(2013,11,3,8,0,0),
d(2014,3,9,9,0,0),
d(2014,11,2,8,0,0),
d(2015,3,8,9,0,0),
d(2015,11,1,8,0,0),
d(2016,3,13,9,0,0),
d(2016,11,6,8,0,0),
d(2017,3,12,9,0,0),
d(2017,11,5,8,0,0),
d(2018,3,11,9,0,0),
d(2018,11,4,8,0,0),
d(2019,3,10,9,0,0),
d(2019,11,3,8,0,0),
d(2020,3,8,9,0,0),
d(2020,11,1,8,0,0),
d(2021,3,14,9,0,0),
d(2021,11,7,8,0,0),
d(2022,3,13,9,0,0),
d(2022,11,6,8,0,0),
d(2023,3,12,9,0,0),
d(2023,11,5,8,0,0),
d(2024,3,10,9,0,0),
d(2024,11,3,8,0,0),
d(2025,3,9,9,0,0),
d(2025,11,2,8,0,0),
d(2026,3,8,9,0,0),
d(2026,11,1,8,0,0),
d(2027,3,14,9,0,0),
d(2027,11,7,8,0,0),
d(2028,3,12,9,0,0),
d(2028,11,5,8,0,0),
d(2029,3,11,9,0,0),
d(2029,11,4,8,0,0),
d(2030,3,10,9,0,0),
d(2030,11,3,8,0,0),
d(2031,3,9,9,0,0),
d(2031,11,2,8,0,0),
d(2032,3,14,9,0,0),
d(2032,11,7,8,0,0),
d(2033,3,13,9,0,0),
d(2033,11,6,8,0,0),
d(2034,3,12,9,0,0),
d(2034,11,5,8,0,0),
d(2035,3,11,9,0,0),
d(2035,11,4,8,0,0),
d(2036,3,9,9,0,0),
d(2036,11,2,8,0,0),
d(2037,3,8,9,0,0),
d(2037,11,1,8,0,0),
]
_transition_info = [
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MWT'),
i(-21600,3600,'MPT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
i(-21600,3600,'MDT'),
i(-25200,0,'MST'),
]
Navajo = Navajo()
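# Usage sketch (illustrative): the module-level `Navajo` instance behaves like any
# other pytz DstTzInfo zone, e.g.
#   from datetime import datetime
#   aware = Navajo.localize(datetime(2020, 6, 1, 12, 0))  # DST in effect -> MDT, UTC-6
#   aware.utcoffset()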
| [
"[email protected]"
] | |
0726b7390ed35387b3d0eef50a1cb7a3d9aa9f8a | 3044d26f03f23e8e8c5fcec57b78bfffe0fa0bd3 | /case/workflow_FinancialClass_samplepayment_FlowSamplePayment/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.py | dad86a28c42e2c435c7fe2f5a98f76c2af6bf9d8 | [] | no_license | tian848-tim/trunk | de50a153c8cab3c81c79c523256a6f1b4c2f049d | cd52afdd003f094056dc2ea877c823a38e6a26fd | refs/heads/master | 2022-11-20T06:43:35.540105 | 2020-07-20T07:48:26 | 2020-07-20T07:48:26 | 281,048,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,324 | py | '''
Test case title: sample payment test
Test scenario: sample payment business workflow test (returned by the purchasing team leader)
Author: Tim
Created: 2018-11-13
Last modified: 2018-11-13
Input data: accounts for each role in the approval workflow
Output data: none
'''
# -*- coding: utf-8 -*-
import sys, os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
# sys.path.append(rootPath)
import unittest
from cgitb import text
import selenium.webdriver.support.ui as ui
from selenium import webdriver
from time import sleep
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest, configparser
from selenium import webdriver
from selenium.common.exceptions import NoAlertPresentException
from selenium.common.exceptions import NoSuchElementException
import random
import json
'''
Load configuration options
'''
cfg = configparser.ConfigParser()
cfg.read(rootPath + '/core/config.ini')
class FlowSamplePayment(unittest.TestCase):
base_url = cfg.get("projects", "base_url")
project_path = cfg.get("projects", "project_path")
log_path = cfg.get("webdriver", "log") + '/' + cfg.get("webdriver", "logfile") + '-%s.log' % time.strftime(
"%Y-%m-%d %H_%M_%S")
def loadvendername(self):
global result
file = open(rootPath + '/data/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.json', encoding='utf-8')
data = json.load(file)
result = [(d['username'], d['password']) for d in data['login']]
return result
def loadvendernames(self):
global results
file = open(rootPath + '/data/workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return.json', encoding='utf-8')
data = json.load(file)
results = [(d['name']) for d in data['use_vendorname']]
return results
def setUp(self):
        # Script identifier - title
self.script_name = '样品付款申请——采购组长退回'
        # Script identifier - ID
self.script_id = 'workflow_FinancialClass_samplepayment_FlowSamplePayment_purchaseleader_return'
self.target_url = self.base_url + self.project_path
if (cfg.get("webdriver", "enabled") == "off"):
            # If using the latest Firefox, comment out the line below
self.driver = webdriver.Firefox()
else:
            # If using the latest Firefox, use the line below
self.driver = webdriver.Firefox(log_path=self.log_path)
self.verificationErrors = []
self.accept_next_alert = True
self.driver.implicitly_wait(15)
self.driver.maximize_window()
    # Define the login helper
def login(self, username, password):
        self.driver.get(self.target_url)  # Open the login page
self.driver.find_element_by_id('account-inputEl').send_keys(username)
self.driver.find_element_by_id('password-inputEl').send_keys(password)
self.driver.find_element_by_xpath("//*[@id='LoginWin']//span[contains(@class,'x-btn-icon-el')]").click()
def test_FlowSamplePayment(self):
su = self.loadvendername()
ad = self.loadvendernames()
for i in range(0, len(su)):
print(su[i][0])
print(su[i][1])
self.login(su[0][0],su[0][1])
#self.login('Vic_cn','123')
sleep(5)
        # Close the popup message window
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # Go to the application documents tab
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]").click()
sleep(2)
        # Locate the "Sample Application" menu item
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '样品申请')]").click()
sleep(2)
        # Locate the "new sample application" button
self.driver.find_element_by_xpath("//*[@id='FlowSampleView']//span[contains(@class,'fa-plus')]").click()
sleep(2)
        ## Select a vendor
self.driver.find_element_by_xpath( "//*[@id='FlowSampleViewFormPanelID-body']//input[@name='main.vendorName']").click()
sleep(2)
if ad[0] != '':
            ## Locate the keyword field
self.driver.find_element_by_xpath("//*[@id='VendorDialogWinSearchPanelID-innerCt']//input[@name='keywords']").send_keys(ad[0])
sleep(2)
            # Click search
self.driver.find_element_by_xpath("//*[@id='VendorDialogWinSearchPanelID-innerCt']//span[contains(@class,'fa-search')]").click()
sleep(2)
            # Locate the first vendor record
_elementFirst = self.driver.find_element_by_xpath("//*[@id='VendorDialogWinGridPanelID-body']//div[contains(text(), '1')]")
sleep(2)
            # Double-click this element
ActionChains(self.driver).double_click(_elementFirst).perform()
else:
_elementFiveth = (random.randint(1, 10))
            # Locate the first vendor record
_elementFirst = self.driver.find_element_by_xpath("//*[@id='VendorDialogWinGridPanelID-body']//div[text()='{}']".format(_elementFiveth))
sleep(2)
            # Double-click this element
ActionChains(self.driver).double_click(_elementFirst).perform()
sleep(2)
        # Locate the "add sample" button
_elementSecond = self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID_header-body']//img[contains(@class,'x-tool-plus')]")
sleep(2)
        # Double-click this element
ActionChains(self.driver).double_click(_elementSecond).perform()
sleep(2)
        # Locate the first sample record
_elementThird = self.driver.find_element_by_xpath("//*[@id='ProductDialogWinGridPanelID-body']//div[contains(text(), '1')]")
sleep(2)
        # Double-click this element
ActionChains(self.driver).double_click(_elementThird).perform()
sleep(2)
        # Click confirm
self.driver.find_element_by_xpath("//*[@id='ProductDialogWinID']//span[contains(@class,'fa-check')]").click()
sleep(2)
        # Click the AUD cell
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[5]").click()
sleep(2)
        # Clear the input field
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeAud']").clear()
sleep(2)
        ## Enter the AUD amount
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeAud']").send_keys('10')
sleep(2)
        # Click the sample quantity cell
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[8]").click()
sleep(2)
        ## Clear the sample quantity
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='qty']").clear()
sleep(2)
        ## Enter the sample quantity
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='qty']").send_keys('10')
sleep(2)
        # Click the refundable fee cell
self.driver.find_element_by_xpath( "//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[12]").click()
sleep(2)
        # Clear the input field
self.driver.find_element_by_xpath( "//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeRefund']").clear()
sleep(2)
        ## Enter the refundable fee
self.driver.find_element_by_xpath("//*[@id='FlowSampleFormGridPanelID-f-body']//input[@name='sampleFeeRefund']").send_keys('1')
sleep(2)
        # Click the refundable fee cell again
self.driver.find_element_by_xpath("//div[@id='FlowSampleFormGridPanelID-normal-body']/div/table/tbody/tr/td[12]").click()
sleep(2)
        # Click launch (start the workflow)
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class,'fa-play')]").click()
self.driver.implicitly_wait(30)
        # Get the popup prompt text:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(5)
try:
self.driver.find_element_by_xpath("//*[@id='FlowSampleViewGridPanelID-body']/div/table/tbody/tr[1]//span[contains(text(), '{}')]".format('调整申请')).is_displayed()
a = True
except:
a = False
if a == True:
print("元素存在")
elif a == False:
print("元素不存在")
sleep(2)
if a == True:
            # Select the first record in the grid
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleViewGridPanelID-body']//div[contains(text(), '1')]").click()
            # Hard wait
sleep(2)
            # Locate the edit button
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@id='FlowSampleViewMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(text(),'{}')]".format(su[1][0])).click()
sleep(2)
            # Locate the launch button
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class,'fa-check-square')]").click()
            # Get the popup prompt text:
self.driver.implicitly_wait(30)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
            # Hard wait
sleep(5)
else:
pass
        self.driver.find_element_by_link_text('注销').click()  # Click "Log out"
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # Leave the page
sleep(2)
self.login(su[1][0],su[1][1])
#self.login('Vic_cn', '123')
sleep(5)
        # Close the popup message window
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # Go to the work panel
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]").click()
sleep(2)
        # Locate the to-do list
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]").click()
sleep(2)
        # Locate the first to-do record
self.driver.find_element_by_xpath("//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # Click "handle now"
self.driver.find_element_by_xpath("//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]").click()
sleep(2)
        # Click approve
self.driver.find_element_by_xpath("//*[@id='FlowSampleForm']//span[contains(@class, 'fa-check-square')]").click()
self.driver.implicitly_wait(30)
        # Get the popup prompt text:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(3)
        self.driver.find_element_by_link_text('注销').click()  # Click "Log out"
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # Leave the page
sleep(2)
        '''Sample payment'''
self.login(su[0][0],su[0][1])
#self.login('Vic_cn', '123')
sleep(5)
        # Close the popup message window
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # Go to the application documents tab
self.driver.find_element_by_xpath( "//*[@id='appNavTabPanel']//span[contains(@class,'fa-code-fork')]").click()
sleep(2)
        # Locate the finance category
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '财务类')]").click()
sleep(3)
        # Locate "Sample Payment Application"
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '样品付款申请')]").click()
sleep(2)
        # Locate the first sample payment application record
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # Locate the sample payment application edit button
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
        # Read the total fee, to be used as the amount paid
_elementFiveth =self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.totalSampleFeeAud']").get_attribute("value")
sleep(2)
        # Clear the amount-paid field
self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.paymentTotalSampleFeeAud']").clear()
sleep(2)
#_elementFiveth = (random.randint(0, 1000))
#sleep(2)
        # Enter the amount paid
self.driver.find_element_by_xpath("//*[@class='x-form-trigger-input-cell']//input[@name='main.paymentTotalSampleFeeAud']").send_keys(_elementFiveth)
sleep(2)
        # Click launch
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class,'fa-play')]").click()
self.driver.implicitly_wait(30)
        # Get the popup prompt text:
self.driver.implicitly_wait(10)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
sleep(10)
try:
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentGridPanelID-body']/div/table/tbody/tr[1]//span[contains(text(), '{}')]".format('调整申请')).is_displayed()
a = True
except:
a = False
if a == True:
print("元素存在")
elif a == False:
print("元素不存在")
sleep(2)
if a == True:
            # Select the first record in the grid
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentGridPanelID-body']//div[contains(text(), '1')]").click()
            # Hard wait
sleep(2)
            # Locate the edit button
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentView']//span[contains(@class,'fa-pencil-square-o')]").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(text(),'{}')]".format(su[1][0])).click()
sleep(2)
            # Locate the launch button
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class,'fa-check-square')]").click()
            # Get the popup prompt text:
self.driver.implicitly_wait(30)
a = self.driver.find_element_by_css_selector('.x-box-mc').get_attribute('textContent')
print(a)
            # Hard wait
sleep(5)
else:
pass
        self.driver.find_element_by_link_text('注销').click()  # Click "Log out"
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # Leave the page
sleep(5)
        '''First approval node review'''
self.login(su[1][0],su[1][1])
#self.login('Vic_cn', '123')
sleep(5)
        # Close the popup message window
self.driver.find_element_by_xpath("//*[@id='msgwin-div']//div[contains(@class,'x-tool-close')]").click()
sleep(2)
        # Go to the work panel
self.driver.find_element_by_xpath("//*[@id='appNavTabPanel']//span[contains(@class,'fa-desktop')]").click()
sleep(2)
        # Locate the to-do list
self.driver.find_element_by_xpath("//*[@id='west-panel-targetEl']//span[contains(text(), '待办事项')]").click()
sleep(2)
        # Locate the first to-do record
self.driver.find_element_by_xpath("//*[@id='EventsGridPanelID-body']//div[contains(text(), '1')]").click()
sleep(2)
        # Click "handle now"
self.driver.find_element_by_xpath(
"//*[@id='EventsFormPanelID-body']//span[contains(@class, 'x-btn-icon-el')]").click()
sleep(2)
        # Assign the next handler
self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//input[@name='flowNextHandlerAccount']").click()
sleep(2)
        # Select the first item
self.driver.find_element_by_xpath(
"//*[@class='x-list-plain']//li[contains(@class, 'x-boundlist-item-over')]").click()
sleep(2)
        # Locate the editor iframe
self.driver.switch_to.frame(self.driver.find_element_by_xpath(
"//*[@id='FlowSamplePaymentMainTbsPanelID-win-body']//iframe[contains(@class,'cke_wysiwyg_frame cke_reset')]"))
sleep(2)
        # Enter the comment text
self.driver.find_element_by_class_name("cke_show_borders").send_keys('test')
sleep(2)
        # Switch back out of the iframe
self.driver.switch_to.default_content()
sleep(2)
        # Click return (send back)
self.driver.find_element_by_xpath("//*[@id='FlowSamplePaymentForm']//span[contains(@class, 'fa-reply')]").click()
sleep(5)
        self.driver.find_element_by_link_text('注销').click()  # Click "Log out"
self.driver.find_element_by_link_text('是').click()
alert = self.driver.switch_to_alert()
        alert.accept()  # Leave the page
sleep(5)
def isElementExist(self, link):
flag = True
try:
self.driver.find_element_by_xpath(link)
print('元素找到')
return flag
except:
flag = False
print('未找到')
return flag
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException as e:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
3729d9da023e6a5a84cc1c3bac5ff6e4ef5f87db | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/redis/redis/sentinel.pyi | ea13ae681287fa1353217d2e6d217fe0898b122b | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 2,820 | pyi | from collections.abc import Iterable, Iterator
from typing import Any, TypeVar, overload
from typing_extensions import Literal, TypeAlias
from redis.client import Redis
from redis.commands.sentinel import SentinelCommands
from redis.connection import Connection, ConnectionPool, SSLConnection
from redis.exceptions import ConnectionError
_RedisT = TypeVar("_RedisT", bound=Redis[Any])
_AddressAndPort: TypeAlias = tuple[str, int]
_SentinelState: TypeAlias = dict[str, Any] # TODO: this can be a TypedDict
class MasterNotFoundError(ConnectionError): ...
class SlaveNotFoundError(ConnectionError): ...
class SentinelManagedConnection(Connection):
connection_pool: SentinelConnectionPool
def __init__(self, **kwargs) -> None: ...
def connect_to(self, address: _AddressAndPort) -> None: ...
def connect(self) -> None: ...
# The result can be either `str | bytes` or `list[str | bytes]`
def read_response(self, disable_decoding: bool = ...) -> Any: ...
class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection): ...
class SentinelConnectionPool(ConnectionPool):
is_master: bool
check_connection: bool
service_name: str
sentinel_manager: Sentinel
def __init__(self, service_name: str, sentinel_manager: Sentinel, **kwargs) -> None: ...
def reset(self) -> None: ...
def owns_connection(self, connection: Connection) -> bool: ...
def get_master_address(self) -> _AddressAndPort: ...
def rotate_slaves(self) -> Iterator[_AddressAndPort]: ...
class Sentinel(SentinelCommands):
sentinel_kwargs: dict[str, Any]
sentinels: list[Redis[Any]]
min_other_sentinels: int
connection_kwargs: dict[str, Any]
def __init__(
self,
sentinels: Iterable[_AddressAndPort],
min_other_sentinels: int = ...,
sentinel_kwargs: dict[str, Any] | None = ...,
**connection_kwargs,
) -> None: ...
def check_master_state(self, state: _SentinelState, service_name: str) -> bool: ...
def discover_master(self, service_name: str) -> _AddressAndPort: ...
def filter_slaves(self, slaves: Iterable[_SentinelState]) -> list[_AddressAndPort]: ...
def discover_slaves(self, service_name: str) -> list[_AddressAndPort]: ...
@overload
def master_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def master_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
@overload
def slave_for(self, service_name: str, *, connection_pool_class=..., **kwargs) -> Redis[Any]: ...
@overload
def slave_for(self, service_name: str, redis_class: type[_RedisT], connection_pool_class=..., **kwargs) -> _RedisT: ...
def execute_command(self, *args, **kwargs) -> Literal[True]: ...
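# Typical runtime usage that these stubs describe (sketch; the host, port and service
# name below are placeholders):
#   from redis.sentinel import Sentinel
#   sentinel = Sentinel([("localhost", 26379)], socket_timeout=0.5)
#   master = sentinel.master_for("mymaster", socket_timeout=0.5)
#   replica = sentinel.slave_for("mymaster", socket_timeout=0.5)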
| [
"[email protected]"
] | |
d58cb7de2dbd4f821d0407b7ef618003f1f9fc9b | cb4db25a0b13f058f1a31b38d80d76a118d1e2dc | /venv/lib/python3.6/site-packages/google/api/usage_pb2.py | efe4f7945d109abc9613cb147c11bb4917bdf030 | [
"MIT"
] | permissive | Hackaton-Dragons/Never-Boils | 73df2b65f54a77d961ce53dea350b7d2a4261154 | 2d43e6e07fb18409d5a964f44f481d28d2352531 | refs/heads/master | 2020-03-09T20:27:54.554616 | 2018-10-08T05:52:33 | 2018-10-08T05:52:33 | 128,985,616 | 1 | 0 | MIT | 2018-04-15T13:32:45 | 2018-04-10T19:35:32 | Python | UTF-8 | Python | false | true | 4,244 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/usage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/api/usage.proto',
package='google.api',
syntax='proto3',
serialized_pb=_b('\n\x16google/api/usage.proto\x12\ngoogle.api\x1a\x1cgoogle/api/annotations.proto\"C\n\x05Usage\x12\x14\n\x0crequirements\x18\x01 \x03(\t\x12$\n\x05rules\x18\x06 \x03(\x0b\x32\x15.google.api.UsageRule\"?\n\tUsageRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12 \n\x18\x61llow_unregistered_calls\x18\x02 \x01(\x08\x42%\n\x0e\x63om.google.apiB\nUsageProtoP\x01\xa2\x02\x04GAPIb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_USAGE = _descriptor.Descriptor(
name='Usage',
full_name='google.api.Usage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requirements', full_name='google.api.Usage.requirements', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rules', full_name='google.api.Usage.rules', index=1,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=68,
serialized_end=135,
)
_USAGERULE = _descriptor.Descriptor(
name='UsageRule',
full_name='google.api.UsageRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='selector', full_name='google.api.UsageRule.selector', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_unregistered_calls', full_name='google.api.UsageRule.allow_unregistered_calls', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=137,
serialized_end=200,
)
_USAGE.fields_by_name['rules'].message_type = _USAGERULE
DESCRIPTOR.message_types_by_name['Usage'] = _USAGE
DESCRIPTOR.message_types_by_name['UsageRule'] = _USAGERULE
Usage = _reflection.GeneratedProtocolMessageType('Usage', (_message.Message,), dict(
DESCRIPTOR = _USAGE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.Usage)
))
_sym_db.RegisterMessage(Usage)
UsageRule = _reflection.GeneratedProtocolMessageType('UsageRule', (_message.Message,), dict(
DESCRIPTOR = _USAGERULE,
__module__ = 'google.api.usage_pb2'
# @@protoc_insertion_point(class_scope:google.api.UsageRule)
))
_sym_db.RegisterMessage(UsageRule)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.apiB\nUsageProtoP\001\242\002\004GAPI'))
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
2faed508287230ab969292eaf79e86a5f4c970fb | 5e9c890f5677aa561f7acdb1b1249dc06d5a166a | /tests/output/timesteptest.py | d613f2dd19d68fdf604ce930488681bdf566445a | [
"Apache-2.0"
] | permissive | fispact/pypact | 6690b6d4f8265990e2c238532e57c4c9cf8c88d4 | be7723189236de333c44131ca534fb4286b4e3f7 | refs/heads/master | 2023-01-24T09:36:47.363611 | 2022-10-23T17:58:40 | 2022-10-23T17:58:40 | 118,437,112 | 20 | 10 | Apache-2.0 | 2022-10-23T17:58:41 | 2018-01-22T09:42:58 | Python | UTF-8 | Python | false | false | 9,251 | py | from tests.output.baseoutputtest import BaseOutputUnitTest
from tests.output.doseratetest import DoseRateAssertor
from tests.output.nuclidestest import NuclidesAssertor
import pypact as pp
class TimeStepAssertor(BaseOutputUnitTest):
ds_assertor = DoseRateAssertor()
nuc_assertor = NuclidesAssertor()
def assert_defaults(self, timestep):
ts = pp.TimeStep()
ts.irradiation_time = 0.0
ts.cooling_time = 0.0
ts.flux = 0.0
ts.total_heat = 0.0
ts.alpha_heat = 0.0
ts.beta_heat = 0.0
ts.gamma_heat = 0.0
ts.ingestion_dose = 0.0
ts.inhalation_dose = 0.0
ts.initial_mass = 0.0
ts.total_mass = 0.0
ts.number_of_fissions = 0.0
ts.burnup = 0.0
ts.total_activity = 0.0
ts.total_activity_exclude_trit = 0.0
ts.alpha_activity = 0.0
ts.beta_activity = 0.0
ts.gamma_activity = 0.0
self.assert_inventory(timestep, ts)
self.ds_assertor.assert_defaults(timestep.dose_rate)
self.nuc_assertor.assert_defaults(timestep.nuclides)
def assert_inventory(self, inv, compareinv):
self.assertValueAndType(inv, pp.TimeStep, 'irradiation_time', float, compareinv.irradiation_time)
self.assertValueAndType(inv, pp.TimeStep, 'cooling_time', float, compareinv.cooling_time)
self.assertValueAndType(inv, pp.TimeStep, 'flux', float, compareinv.flux)
self.assertValueAndType(inv, pp.TimeStep, 'total_heat', float, compareinv.total_heat)
self.assertValueAndType(inv, pp.TimeStep, 'alpha_heat', float, compareinv.alpha_heat)
self.assertValueAndType(inv, pp.TimeStep, 'beta_heat', float, compareinv.beta_heat)
self.assertValueAndType(inv, pp.TimeStep, 'gamma_heat', float, compareinv.gamma_heat)
self.assertValueAndType(inv, pp.TimeStep, 'initial_mass', float, compareinv.initial_mass)
self.assertValueAndType(inv, pp.TimeStep, 'ingestion_dose', float, compareinv.ingestion_dose)
self.assertValueAndType(inv, pp.TimeStep, 'total_mass', float, compareinv.total_mass)
self.assertValueAndType(inv, pp.TimeStep, 'number_of_fissions', float, compareinv.number_of_fissions)
self.assertValueAndType(inv, pp.TimeStep, 'burnup', float, compareinv.burnup)
self.assertValueAndType(inv, pp.TimeStep, 'inhalation_dose', float, compareinv.inhalation_dose)
self.assertValueAndType(inv, pp.TimeStep, 'total_activity', float, compareinv.total_activity)
self.assertValueAndType(inv, pp.TimeStep, 'total_activity_exclude_trit', float, compareinv.total_activity_exclude_trit)
self.assertValueAndType(inv, pp.TimeStep, 'alpha_activity', float, compareinv.alpha_activity)
self.assertValueAndType(inv, pp.TimeStep, 'beta_activity', float, compareinv.beta_activity)
self.assertValueAndType(inv, pp.TimeStep, 'gamma_activity', float, compareinv.gamma_activity)
def assert_timestep(self, inv, timestep):
self.ds_assertor.assert_timestep(inv.dose_rate, timestep)
self.nuc_assertor.assert_timestep(inv.nuclides, timestep)
# Let's test some key timesteps
# much too time consuming to test all timesteps
if timestep == 1:
self.assert_inventory(inv, timestep_1_inv())
elif timestep == 2:
self.assert_inventory(inv, timestep_2_inv())
elif timestep == 14:
self.assert_inventory(inv, timestep_14_inv())
elif 16 > timestep > 2:
return
else:
self.assert_defaults(inv)
def timestep_1_inv():
inv = pp.TimeStep()
inv.irradiation_time = 0.0
inv.cooling_time = 0.0
inv.flux = 3.3400E+10
inv.alpha_heat = 1.00026E-08
inv.beta_heat = 3.98609E-11
inv.gamma_heat = 6.71486E-11
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 6.59242E-01
inv.inhalation_dose = 1.17557E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 1.45396E+07
inv.total_activity_exclude_trit = 1.45396E+07
inv.alpha_activity = 1.453958E+07
inv.beta_activity = 0.0
inv.gamma_activity = 0.0
return inv
def timestep_2_inv():
inv = pp.TimeStep()
inv.irradiation_time = 2.6298E+06
inv.cooling_time = 0.0
inv.flux = 3.3400E+10
inv.alpha_heat = 1.00026E-08
inv.beta_heat = 1.09700E-09
inv.gamma_heat = 1.12065E-10
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 6.84076E-01
inv.inhalation_dose = 1.17614E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 3.11345E+07
inv.total_activity_exclude_trit = 3.11345E+07
inv.alpha_activity = 1.453958E+07
inv.beta_activity = 1.658438E+07
inv.gamma_activity = 1.057793E+04
return inv
def timestep_14_inv():
inv = pp.TimeStep()
inv.irradiation_time = 2.6298E+06 + 5.2596E+06 + 7.8894E+06 + 1.5779E+07 \
+ 1.5779E+07 + 1.5779E+07
inv.cooling_time = 6.0000E+01 + 8.6400E+04 + 2.5434E+06 + 1.3149E+07 \
+ 1.5779E+07 + 6.3115E+07 + 6.3115E+07
inv.flux = 0.0000E+00
inv.alpha_heat = 1.00031E-08
inv.beta_heat = 1.80108E-09
inv.gamma_heat = 1.36712E-10
inv.total_heat = inv.alpha_heat + inv.beta_heat + inv.gamma_heat
inv.ingestion_dose = 7.01423E-01
inv.inhalation_dose = 1.17728E+02
inv.initial_mass = 1.00067E+00
inv.total_mass = 1.00067E+00
inv.number_of_fissions = 0.0E+00
inv.burnup = 0.0E+00
inv.total_activity = 4.11571E+07
inv.total_activity_exclude_trit = 4.11571E+07
inv.alpha_activity = 1.454025E+07
inv.beta_activity = 2.659877E+07
inv.gamma_activity = 1.808869E+04
return inv
class TimeStepUnitTest(BaseOutputUnitTest):
assertor = TimeStepAssertor()
def test_fission_example(self):
ts = pp.TimeStep()
ts.fispact_deserialize(self.filerecord_fission, 1)
self.assertEqual(ts.alpha_heat, 7.22533E-10, "Assert alpha heat")
self.assertEqual(ts.number_of_fissions, 0.0, "Assert number of fissions is zero")
self.assertEqual(ts.burnup, 0.0, "Assert burnup is zero")
ts.fispact_deserialize(self.filerecord_fission, 2)
self.assertEqual(ts.alpha_heat, 7.38131E-10, "Assert alpha heat")
self.assertEqual(ts.number_of_fissions, 6.73186E+09, "Assert number of fissions is non zero")
self.assertEqual(ts.burnup, 2.93608E-11, "Assert burnup is non zero")
def test_fispact_deserialize(self):
def func(ts, i):
ts.fispact_deserialize(self.filerecord91, i)
self.assertor.assert_timestep(ts, i)
self._wrapper(func)
def test_fispact_deserialize_isirradiation(self):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertEqual(True, ts.isirradiation, "Assert timestep 1 is an irradiation step")
ts.fispact_deserialize(self.filerecord91, 2)
self.assertEqual(True, ts.isirradiation, "Assert timestep 2 is an irradiation step")
ts.fispact_deserialize(self.filerecord91, 14)
self.assertEqual(False, ts.isirradiation, "Assert timestep 14 is a cooling step")
def test_fispact_deserialize_currenttime(self):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertEqual(0.0, ts.currenttime, "Assert the irradiation time for timestep 1")
ts.fispact_deserialize(self.filerecord91, 2)
self.assertEqual(2.6298E+06, ts.currenttime, "Assert the irradiation time for timestep 2")
ts.fispact_deserialize(self.filerecord91, 14)
self.assertEqual(ts.cooling_time, ts.currenttime, "Assert the cooling time for timestep 14")
def test_fispact_deserialize_nonuclides(self):
ts = pp.TimeStep(ignorenuclides=True)
self.assertor.assert_defaults(ts)
ts.fispact_deserialize(self.filerecord91, 1)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
ts.fispact_deserialize(self.filerecord91, 2)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
ts.fispact_deserialize(self.filerecord91, 14)
self.assertor.nuc_assertor.assert_defaults(ts.nuclides)
def test_fispact_readwriteread(self):
def func(ts, i):
# deserialize from standard output
ts.fispact_deserialize(self.filerecord91, i)
self.assertor.assert_timestep(ts, i)
# serialize to JSON
j = ts.json_serialize()
# reset object
newts = pp.TimeStep()
self.assertor.assert_defaults(newts)
# deserialize JSON and compare to original
newts.json_deserialize(j)
self.assertor.assert_timestep(newts, i)
self._wrapper(func)
def _wrapper(self, func):
ts = pp.TimeStep()
self.assertor.assert_defaults(ts)
for i in range(-100, 100):
func(ts, i)
| [
"[email protected]"
] | |
df11eb12d02f73346e7096e6039400e85381a2bb | ab5ef28065b0ad3f8d86fc894be569074a4569ea | /mirari/CRYE/migrations/0028_auto_20190406_1344.py | 99b9026478cf2c6aaf96926be89b77b7d4bbecdd | [
"MIT"
] | permissive | gcastellan0s/mirariapp | 1b30dce3ac2ee56945951f340691d39494b55e95 | 24a9db06d10f96c894d817ef7ccfeec2a25788b7 | refs/heads/master | 2023-01-22T22:21:30.558809 | 2020-09-25T22:37:24 | 2020-09-25T22:37:24 | 148,203,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # Generated by Django 2.0.5 on 2019-04-06 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CRYE', '0027_auto_20190406_1331'),
]
operations = [
migrations.AlterField(
model_name='walletcredit',
name='walletcredit_tipo',
field=models.CharField(choices=[('ARRENDAMIENTO', 'ARRENDAMIENTO'), ('CREDITO', 'CREDITO')], default='CREDITO', max_length=250, verbose_name='Tipo de cartera'),
),
]
| [
"[email protected]"
] | |
8df4144788164a6ec89107cc0ade23a41752bfe4 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_scheduled.py | 8a085638d8ee5bfb1423f7fbfc4217347f5939be | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.verbs._schedule import _SCHEDULE
#calss header
class _SCHEDULED(_SCHEDULE, ):
def __init__(self,):
_SCHEDULE.__init__(self)
self.name = "SCHEDULED"
self.specie = 'verbs'
self.basic = "schedule"
self.jsondata = {}
| [
"[email protected]"
] | |
e94f6a0ef46d77df9c8d3ece79519b0d26d16bf7 | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-OOP/Exam-Preparation/16-Aug-2020/project/software/light_software.py | 6182deaf8edbfaa898d0623ff12527b07c73dd0b | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | from .software import Software
class LightSoftware(Software):
CAPACITY_INCREASE = 0.5
MEMORY_DECREASE = 0.5
def __init__(self, name: str, capacity_consumption: int, memory_consumption: int):
super().__init__(name, type="Light", capacity_consumption=int(
capacity_consumption + (capacity_consumption * self.CAPACITY_INCREASE)),
memory_consumption=int(memory_consumption * self.MEMORY_DECREASE))
| [
"[email protected]"
] | |
236f2f6a95d6fae44b77149fadda0d33ae893743 | fd981b47482467291576ae4650d2925d6fa00564 | /robot_ws/build/hector_slam/hector_imu_attitude_to_tf/catkin_generated/pkg.develspace.context.pc.py | 76e540da7a8c070743b0ac0c7f119166f43c6959 | [] | no_license | Forrest-Z/rtcrobot | 7337271e726db794ce08953f333ad9a0f8e70027 | 229ce1d7e77af9348eac870e00a2c4049e4562f1 | refs/heads/master | 2022-12-16T00:14:17.525845 | 2020-05-18T09:28:09 | 2020-05-18T09:28:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "hector_imu_attitude_to_tf"
PROJECT_SPACE_DIR = "/home/gaara/robot_ws/devel"
PROJECT_VERSION = "0.3.5"
| [
"mtk@mtk"
] | mtk@mtk |
dc06bceff161ff58ede64f0c6360bacc5fdbeee6 | 6d7678e3d79c97ddea2e2d65f2c2ef03b17f88f6 | /venv/lib/python3.6/site-packages/pysnmp/proto/api/__init__.py | d742ecc76dec1386047d3cae28b450a5edff0f52 | [
"MIT"
] | permissive | PitCoder/NetworkMonitor | b47d481323f26f89be120c27f614f2a17dc9c483 | 36420ae48d2b04d2cc3f13d60d82f179ae7454f3 | refs/heads/master | 2020-04-25T11:48:08.718862 | 2019-03-19T06:19:40 | 2019-03-19T06:19:40 | 172,757,390 | 2 | 0 | MIT | 2019-03-15T06:07:27 | 2019-02-26T17:26:06 | Python | UTF-8 | Python | false | false | 368 | py | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp.proto.api import v1, v2c, verdec
# Protocol versions
protoVersion1 = 0
protoVersion2c = 1
protoModules = {protoVersion1: v1, protoVersion2c: v2c}
decodeMessageVersion = verdec.decodeMessageVersion
| [
"[email protected]"
] | |
46c5e2b2ed08ba91155f44d266097399816d6ca5 | dcce56815dca2b18039e392053376636505ce672 | /dumpscripts/atexit_simple.py | cadf7e713067bfdb1023e1a1054adc7c45bab915 | [] | no_license | robertopauletto/PyMOTW-it_3.0 | 28ff05d8aeccd61ade7d4107a971d9d2576fb579 | c725df4a2aa2e799a969e90c64898f08b7eaad7d | refs/heads/master | 2021-01-20T18:51:30.512327 | 2020-01-09T19:30:14 | 2020-01-09T19:30:14 | 63,536,756 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | # atexit_simple.py
import atexit
def all_done():
print('all_done()')
print('In registrazione')
atexit.register(all_done)
print('Registrato')
| [
"[email protected]"
] | |
aac4db2e2f613a796ff33628461587fd26159cfb | db4d56e63c63cd577c3871349ffa2a7c39c80edc | /3.WEB/cxr_project/cxr_project/wsgi.py | b576c8906ada1a87940826b1a379206b6c76b16d | [] | no_license | Lagom92/CXR_AI | 33014b7471775e776ed51bfeb88128fd7ca4ce6f | bb4bbaf3fc984938f153bf6b58ed99324f779070 | refs/heads/master | 2023-06-09T11:20:57.613207 | 2021-06-20T11:34:21 | 2021-06-20T11:34:21 | 293,966,064 | 0 | 0 | null | 2021-06-18T00:09:48 | 2020-09-09T01:08:43 | Jupyter Notebook | UTF-8 | Python | false | false | 399 | py | """
WSGI config for cxr_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cxr_project.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
4f0a6cf506689d8331fef6df1a76b147b1ff06ad | 82b495a208ebdeb71314961021fbfe767de57820 | /chapter-13/sample02.py | 5d7d05833306dc085a1573bee83e46cd05ba6b89 | [
"MIT"
] | permissive | krastin/pp-cs3.0 | 7c860794332e598aa74278972d5daa16853094f6 | 502be9aac2d84215db176864e443c219e5e26591 | refs/heads/master | 2020-05-28T02:23:58.131428 | 2019-11-13T13:06:08 | 2019-11-13T13:06:08 | 188,853,205 | 0 | 0 | MIT | 2019-11-13T13:06:09 | 2019-05-27T13:56:41 | Python | UTF-8 | Python | false | false | 1,410 | py | import time
from sample01 import linear_search_while
from sample01 import linear_search_for
from sample01 import linear_search_sentinel
from typing import Callable, Any
def time_it(search: Callable[[list, Any], Any], L: list, v: Any) -> float:
"""Time how long it takes to run function search to find
value v in list L.
"""
t1 = time.perf_counter()
search(L, v)
t2 = time.perf_counter()
return (t2 - t1) * 1000.0
def print_times(v: Any, L: list) -> None:
"""Print the number of milliseconds it takes for linear_search(v, L)
to run for list.index, the while loop linear search, the for loop
linear search, and sentinel search.
"""
# Get list.index's running time.
t1 = time.perf_counter()
L.index(v)
t2 = time.perf_counter()
index_time = (t2 - t1) * 1000.0
# Get the other three running times.
while_time = time_it(linear_search_while, L, v)
for_time = time_it(linear_search_for, L, v)
sentinel_time = time_it(linear_search_sentinel, L, v)
print("{0}\t\t{1:.2f}\t{2:.2f}\t{3:.2f}\t{4:.2f}".format(
v, while_time, for_time, sentinel_time, index_time))
L = list(range(10000001)) # A list with just over ten million values
print_times(10, L) # How fast is it to search near the beginning?
print_times(5000000, L) # How fast is it to search near the middle?
print_times(10000000, L) # How fast is it to search near the end? | [
"[email protected]"
] | |
f2515b3ea9d81b413d7f16c3fd76965b099723a9 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Fisher/trend_Lag1Trend/cycle_5/ar_12/test_artificial_1024_Fisher_Lag1Trend_5_12_0.py | cdbf8396fc2e08ebfbdd54ac8c3f8c8a7b230896 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 262 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 5, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"[email protected]"
] | |
5e50c90e36940a756c0066a4f1a0415e5c585153 | bc2a96e8b529b0c750f6bc1d0424300af9743904 | /acapy_client/models/v20_pres_ex_record_list.py | 637f5f3594379b6fb12a0376417eca62ccdfbc8b | [
"Apache-2.0"
] | permissive | TimoGlastra/acapy-client | d091fd67c97a57f2b3462353459780281de51281 | d92ef607ba2ff1152ec15429f2edb20976991424 | refs/heads/main | 2023-06-29T22:45:07.541728 | 2021-08-03T15:54:48 | 2021-08-03T15:54:48 | 396,015,854 | 1 | 0 | Apache-2.0 | 2021-08-14T13:22:28 | 2021-08-14T13:22:27 | null | UTF-8 | Python | false | false | 1,983 | py | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.v20_pres_ex_record import V20PresExRecord
from ..types import UNSET, Unset
T = TypeVar("T", bound="V20PresExRecordList")
@attr.s(auto_attribs=True)
class V20PresExRecordList:
""" """
results: Union[Unset, List[V20PresExRecord]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
results: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.results, Unset):
results = []
for results_item_data in self.results:
results_item = results_item_data.to_dict()
results.append(results_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if results is not UNSET:
field_dict["results"] = results
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
results = []
_results = d.pop("results", UNSET)
for results_item_data in _results or []:
results_item = V20PresExRecord.from_dict(results_item_data)
results.append(results_item)
v20_pres_ex_record_list = cls(
results=results,
)
v20_pres_ex_record_list.additional_properties = d
return v20_pres_ex_record_list
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| [
"[email protected]"
] | |
2c1fc8d25010246935865616a7f2d77dbf36a205 | ff739149fb1091fcd090b5e68ab4b98d9fec9262 | /tests/unit/test_sitemap.py | 7f58445883b0626a64a1c800b55009991b5a7c33 | [
"MIT"
] | permissive | zhuoranmusic/dash-docs | dcdab8a5543f6f3f10cb20d196148969bfe01943 | 3518869b195a7827fe661a90f9a2054c31680d44 | refs/heads/master | 2022-04-18T17:37:44.647847 | 2020-04-20T18:13:14 | 2020-04-20T18:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | import pytest
import sys
from generate_sitemap import create_sitemap
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
def test_sitemap_is_updated():
with open('dash_docs/assets/sitemap.xml', 'r') as f:
saved_sitemap = f.read()
assert create_sitemap() == saved_sitemap
| [
"[email protected]"
] | |
1a41bd25d395783d808bbe7baa3ab53534669a7e | f5a82f7b2695ed08c9f7432013889590ed9cd1d0 | /healthpoint/decorators.py | 17bb337812dcdbf86156385ff894f6a57f2c31fe | [
"MIT"
] | permissive | lordoftheflies/django-healthpoint | bb717f3a4f9a96b9d81f10fbb45e6982c020e93b | aaf8c77150b2ae5bf7d3f9050841b885e8cda17a | refs/heads/master | 2020-08-03T02:55:15.244656 | 2019-09-18T16:13:10 | 2019-09-18T16:13:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | from functools import wraps
from healthpoint.registry import register_health_check
def health_check(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
result = f(*args, **kwargs)
if isinstance(result, bool):
success, detail = result, 'OK' if result else 'ERROR'
elif isinstance(result, tuple) and len(result) == 2:
success, detail = result
else:
raise ValueError(
'Your @health_check must return'
' a `bool`, or a tuple of (`bool`, `detail`)')
except Exception as e:
success, detail = False, str(e)
return success, detail
register_health_check(wrapper)
return wrapper
| [
"[email protected]"
] | |
aa9facefd2669ed057397d86449409e25ed9d148 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/_farm_beats_client.py | e987fc646fb78c5fa674aa650dfafae923c7d7bb | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 9,228 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import FarmBeatsClientConfiguration
from .operations import ApplicationDataOperations
from .operations import AttachmentsOperations
from .operations import BoundariesOperations
from .operations import CropsOperations
from .operations import CropVarietiesOperations
from .operations import FarmersOperations
from .operations import FarmOperationsOperations
from .operations import FarmsOperations
from .operations import FieldsOperations
from .operations import HarvestDataOperations
from .operations import ImageProcessingOperations
from .operations import OAuthProvidersOperations
from .operations import OAuthTokensOperations
from .operations import PlantingDataOperations
from .operations import ScenesOperations
from .operations import SeasonalFieldsOperations
from .operations import SeasonsOperations
from .operations import TillageDataOperations
from .operations import WeatherOperations
from . import models
class FarmBeatsClient(object):
"""APIs documentation for Azure AgPlatform DataPlane Service.
:ivar application_data: ApplicationDataOperations operations
:vartype application_data: azure.agrifood.farming.operations.ApplicationDataOperations
:ivar attachments: AttachmentsOperations operations
:vartype attachments: azure.agrifood.farming.operations.AttachmentsOperations
:ivar boundaries: BoundariesOperations operations
:vartype boundaries: azure.agrifood.farming.operations.BoundariesOperations
:ivar crops: CropsOperations operations
:vartype crops: azure.agrifood.farming.operations.CropsOperations
:ivar crop_varieties: CropVarietiesOperations operations
:vartype crop_varieties: azure.agrifood.farming.operations.CropVarietiesOperations
:ivar farmers: FarmersOperations operations
:vartype farmers: azure.agrifood.farming.operations.FarmersOperations
:ivar farm_operations: FarmOperationsOperations operations
:vartype farm_operations: azure.agrifood.farming.operations.FarmOperationsOperations
:ivar farms: FarmsOperations operations
:vartype farms: azure.agrifood.farming.operations.FarmsOperations
:ivar fields: FieldsOperations operations
:vartype fields: azure.agrifood.farming.operations.FieldsOperations
:ivar harvest_data: HarvestDataOperations operations
:vartype harvest_data: azure.agrifood.farming.operations.HarvestDataOperations
:ivar image_processing: ImageProcessingOperations operations
:vartype image_processing: azure.agrifood.farming.operations.ImageProcessingOperations
:ivar oauth_providers: OAuthProvidersOperations operations
:vartype oauth_providers: azure.agrifood.farming.operations.OAuthProvidersOperations
:ivar oauth_tokens: OAuthTokensOperations operations
:vartype oauth_tokens: azure.agrifood.farming.operations.OAuthTokensOperations
:ivar planting_data: PlantingDataOperations operations
:vartype planting_data: azure.agrifood.farming.operations.PlantingDataOperations
:ivar scenes: ScenesOperations operations
:vartype scenes: azure.agrifood.farming.operations.ScenesOperations
:ivar seasonal_fields: SeasonalFieldsOperations operations
:vartype seasonal_fields: azure.agrifood.farming.operations.SeasonalFieldsOperations
:ivar seasons: SeasonsOperations operations
:vartype seasons: azure.agrifood.farming.operations.SeasonsOperations
:ivar tillage_data: TillageDataOperations operations
:vartype tillage_data: azure.agrifood.farming.operations.TillageDataOperations
:ivar weather: WeatherOperations operations
:vartype weather: azure.agrifood.farming.operations.WeatherOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param endpoint: The endpoint of your FarmBeats resource (protocol and hostname, for example: https://{resourceName}.farmbeats.azure.net).
:type endpoint: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
endpoint, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{Endpoint}'
self._config = FarmBeatsClientConfiguration(credential, endpoint, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_data = ApplicationDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.attachments = AttachmentsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.boundaries = BoundariesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crops = CropsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.crop_varieties = CropVarietiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farmers = FarmersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farm_operations = FarmOperationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.farms = FarmsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.fields = FieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.harvest_data = HarvestDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.image_processing = ImageProcessingOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_providers = OAuthProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.oauth_tokens = OAuthTokensOperations(
self._client, self._config, self._serialize, self._deserialize)
self.planting_data = PlantingDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.scenes = ScenesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasonal_fields = SeasonalFieldsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.seasons = SeasonsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tillage_data = TillageDataOperations(
self._client, self._config, self._serialize, self._deserialize)
self.weather = WeatherOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> FarmBeatsClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| [
"[email protected]"
] | |
f7f7eda4a188511ca65e1cb0a7660387d2ce5312 | d042b8895dc8347356fa4d5984d07bff41eecc73 | /obtainfo/views/views.py | 33c81e13d9f89ceb67d0dc937a7b184612156a68 | [
"Apache-2.0"
] | permissive | jzx1230/obtainfo | 257b075c32c3448096391f258f42dd7f0c081350 | 883c29ab0a462d11682b60b9b52b2fc93031b816 | refs/heads/master | 2021-05-08T04:19:33.810848 | 2015-10-13T10:10:10 | 2015-10-13T10:10:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,997 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.conf import settings
from django.shortcuts import render
from django.core import serializers
from django.forms.models import model_to_dict
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.cache import cache_page
from django.utils.encoding import force_unicode
from detail import used_time_tag
from obtainfo.models import SearchKey, MovieInfo, BigPoster, Series
from obtainfo.templatetags.obtainfo_tags import pic as render_pic
from pcnile.search import FullTextSearchClient
from pcnile.http import json, JsonResponse
from pcnile.paginator import get_page_part
from pcnile.helper import md5sum, group_list
from bson.objectid import ObjectId
import re
import os
import base64
import random
import datetime
import pymongo
import time
import logging
re_imdb = re.compile(r"tt\d+")
re_douban = re.compile(r"dd\d+")
logger = logging.getLogger(__name__)
verify_oid = lambda oid: True if re.match(r'^[0-9a-fA-F]{24}$', oid) else False
index_field = {'type': 1, 'bigpic': 1, 'title': 1, 'language': 1, 'year': 1, 'douban': 1, 'genre': 1, 'resource': 1,
'area': 1, 'director': 1, 'actor': 1, 'plot': 1, 'finish': 1}
def get_request_page(request_dict, default=1):
try:
return int(request_dict['page'])
except:
return default
def search(request):
if request.method == "POST":
raise Http404
try:
key = request.GET['search'].strip()
except:
return HttpResponseRedirect("/")
page = get_request_page(request.GET)
if key == '':
errors = u"请输入搜索的关键词哦!"
m = {'results': [], 'errors': errors, 'search': key}
elif len(key) > 25:
errors = u"不好意思!您的关键词太长了......"
m = {'results': [], 'errors': errors, 'search': key}
elif re_imdb.match(key):
key = re_imdb.match(key).group()
collection = settings.MONGOINFO
results = collection.find({'imdb': key}, index_field)
contacts = get_page_part(results, request.GET.get('page'))
m = {'results': contacts, 'pages': contacts, 'search': key}
elif re_douban.match(key):
key = re_douban.match(key).group()
collection = settings.MONGOINFO
results = collection.find({'douban.id': key[2:]}, index_field)
contacts = get_page_part(results, request.GET.get('page'))
m = {'results': contacts, 'pages': contacts, 'search': key}
else:
client = FullTextSearchClient(settings.MONGOINFO)
(contacts, results) = client.query_page(key, page)
if results.count() == 0:
try:
sk = SearchKey(key=force_unicode(key).encode('utf-8'))
except:
sk = SearchKey(key=key)
sk.save()
results = []
errors = u"对不起!我们目前还没有您要的资源......"
else:
errors = ''
m = {'results': results, 'pages': contacts, 'errors': errors, 'search': key}
return render(request, 'search.html', {"m": m}, )
@cache_page(60 * 60)
@used_time_tag
def index(request, query):
# generate query string
try:
k, v = query.split('_')[0].split('-')
if k == 'genre':
query_obj = {k: v}
else:
return HttpResponseRedirect("/")
except:
return HttpResponseRedirect("/")
collection = settings.MONGOINFO
results = collection.find(query_obj).sort("year", -1)
contacts = get_page_part(results, get_request_page(request.GET))
return render(request, 'search.html', {"m": {'results': contacts, 'pages': contacts, 'search': ''}}, )
@cache_page(60 * 60)
def sindex(request): # selection index
db = settings.MONGODB
results = db.selection.find().sort("addtime", -1)
contacts = get_page_part(results, get_request_page(request.GET))
results = list()
collection = settings.MONGOINFO
for c in contacts:
c['pic'] = collection.find_one({'_id': ObjectId(c['list'][0])}, {'stdpic': 1})['stdpic']
results.append(c)
m = {'results': results, 'pages': contacts, 'search': ''}
return render(request, 'sindex.html', {"m": m}, )
"""
build selection block
"""
@cache_page(60 * 60)
def selection(request, sid):
try:
db = settings.MONGODB
s = db.selection.find_one({'_id': ObjectId(sid)})
if s == None:
raise Http404
except pymongo.errors.InvalidId:
logger.error('get an unused selection id %s' % sid)
raise Http404
contacts = get_page_part(s['list'], get_request_page(request.GET))
collection = settings.MONGOINFO
results = collection.find({'_id': {'$in': [ObjectId(oid) for oid in contacts]}})
m = {'results': results, 'pages': contacts}
return render(request, 'selection.html', {"m": m, 'title': s['title']}, )
@csrf_exempt
def retrieve(request):
genre = {
'title': u'类型:',
'name': 'genre',
'k': [u'全部', u'剧情', u'喜剧', u'爱情', u'科幻', u'动作', u'惊悚', u'恐怖', u'冒险', u'奇幻', u'家庭', u'记录片', u'古装', u'战争', u'历史',
u'西部', u'悬疑', u'奇幻'],
'v': [u'', u'剧情', u'喜剧', u'爱情', u'科幻', u'动作', u'惊悚', u'恐怖', u'冒险', u'奇幻', u'家庭', u'记录片', u'古装', u'战争', u'历史',
u'西部', u'悬疑', u'奇幻']
}
area = {
'title': u'地区:',
'name': 'area',
'k': [u'全部', u'内地', u'美国', u'英国', u'韩国', u'日本', u'香港', u'台湾', u'印度', u'英国', u'法国', u'意大利', u'德国', u'泰国', u'西班牙',
u'瑞典', u'俄罗斯'],
'v': [u'', u'中国', u'美国', u'英国', u'韩国', u'日本', u'香港', u'台湾', u'印度', u'英国', u'法国', u'意大利', u'德国', u'泰国', u'西班牙',
u'瑞典', u'俄罗斯']
}
year = {
'title': u'年代:',
'name': 'year',
'k': [u'全部', u'2014', u'2013', u'2012', u'2011', u'2010', u'2009', u'2008', u'2007', u'2006', u'2005', u'2004',
u'2003', u'2002', u'2001', u'2000', u'1999', u'1998'],
'v': [u'', u'2014', u'2013', u'2012', u'2011', u'2010', u'2009', u'2008', u'2007', u'2006', u'2005', u'2004',
u'2003', u'2002', u'2001', u'2000', u'1999', u'1998']
}
resource = {
'title': u'资源:',
'name': 'resource',
'k': [u'不限', u'在线', u'网盘', u'3D高清', u'高清', u'普清', u'尝鲜'],
'v': [u'', {'resource.online': {'$gt': 0}}, {'resource.netdisk': {'$gt': 0}}, {'resource.stereo': {'$gt': 0}},
{'resource.hd': {'$gt': 0}}, {'resource.dvd': {'$gt': 0}}, {'resource.cam': {'$gt': 0}}]
}
sub_type = {
'title': u'主题:',
'name': 'type',
'k': [u'不限', u'电影', u'电视剧'],
'v': [u'', 'movie', 'tv']
}
order = {
'title': u'排序:',
'name': 'order',
'k': [u'默认', u'热门', u'经典', u'最新上映', u'添加时间'],
'v': [
[('year', pymongo.ASCENDING), ('addtime', pymongo.ASCENDING)],
{'year': -1, 'douban.ranking.count': -1, 'douban.ranking.score': -1},
[("douban.ranking.count", -1), ("douban.ranking.score", -1)],
[("showtime", pymongo.DESCENDING)],
[("addtime", pymongo.DESCENDING)]
]
}
table = {'genre': genre, 'area': area, 'year': year, 'resource': resource, 'type': sub_type}
if request.method == 'POST':
try:
js = json.loads(request.body)
except:
return JsonResponse({'status': 'fail'})
qs = list()
sort_key = order['v'][js['order']]
for k, v in table.items():
v = v['v'][js[k]]
if v:
if k == 'resource':
qs.append(v)
else:
qs.append({k: v})
collection = settings.MONGOINFO
if len(qs):
results = collection.find({'$and': qs}, {'title': 1, 'stdpic': 1, 'actor': 1}).limit(3500)
else:
results = collection.find({}, {'title': 1, 'stdpic': 1, 'actor': 1}).limit(3500)
contacts = get_page_part(results, js['page'], 20)
for c in contacts.object_list:
c['stdpic'] = render_pic(c['stdpic'])
try:
c['actor'] = c['actor'][0]
except IndexError:
pass
page = {
'has_previous': contacts.has_previous(), 'has_next': contacts.has_next(),
'current': str(contacts.number), 'range': contacts.paginator.page_range_ext,
}
if page['has_previous']:
page['previous_page_number'] = contacts.previous_page_number()
if page['has_next']:
page['next_page_number'] = contacts.next_page_number()
return JsonResponse({'status': 'success', 'results': contacts.object_list, 'page': page})
return render(request, 'retrieve.html', {'table': [genre, area, year, resource, sub_type, order]}, )
@cache_page(10 * 60)
def lazy(request):
sidebar = request.GET.get('s', 'recommend')
try:
number = abs(int(request.GET.get('n', 30)))
if number not in xrange(10, 60):
number = 30
except:
number = 30
oid = request.GET.get('o', '')
if verify_oid(oid):
try:
series = [{'id': s.id, 'no': s.sequence, 'title': s.title} for s in
Series.objects.get(id=oid).get_root().get_descendants(include_self=True)]
except:
series = []
else:
series = []
if sidebar == 'hot':
title = u"大家都在看"
recommands = MovieInfo.objects
first = recommands.filter(Q(image__isnull=False) & ~Q(image='')).order_by("-visitor")[0]
second = recommands.filter(Q(image__isnull=True) | Q(image='')).order_by("-visitor")[:number - 1]
else:
title = u"编辑墙裂推荐"
recommends = MovieInfo.objects.filter(recommend=True)
first = recommends.filter(Q(image__isnull=False) & ~Q(image='')).order_by("-timestamp")[0]
second = recommends.filter(Q(image__isnull=True) | Q(image='')).order_by("-timestamp")[:number - 1]
ranking = dict()
ranking['title'] = title
ranking['first'] = {'id': first.id, 'title': first.title, 'image': first.image.url}
ranking['second'] = [{'id': s.id, 'title': s.title, 'no': c + 2} for c, s in enumerate(second)]
return JsonResponse({'ranking': ranking, 'series': series, 'status': 'success'})
@cache_page(10 * 60)
@used_time_tag
def main(request):
page = get_request_page(request.GET)
if page == 1 and 'page' in request.GET:
return HttpResponseRedirect('/')
collection = settings.MONGOINFO
oids = [ObjectId(o.id) for o in MovieInfo.objects.filter(top=True)]
results = collection.find({"_id": {"$nin": oids}}, index_field).sort('updatetime', -1).limit(3500)
contacts = get_page_part(results, page)
m = {'results': contacts, 'pages': contacts, 'index': False}
if page == 1:
# fill big poster content
db = settings.MONGODB
m['ontop'] = collection.find({"_id": {"$in": oids}}, index_field)
m['big_poster'] = True
m['selection'] = group_list([d for d in db.selection.find().sort('addtime', -1).limit(21)], 7)
m['index'] = True
return render(request, 'index.html', {"m": m}, )
| [
"[email protected]"
] | |
28fb7ba8851425b473d645e4ded39fd630653ec6 | 1797576f7ebc6eea049fea3ff91831cb140afa35 | /Assignments/Assignment-2/string/format.py | af66f156a97503df9f587cacc99211eda27b24e3 | [] | no_license | Ayushd70/OCF-Python | 8dd44f9ec08509d9610a6d8310622354e88097c2 | bea41d997056235051db9f54f66790f66d7d8a2a | refs/heads/master | 2023-06-03T20:38:52.701002 | 2021-06-23T15:38:56 | 2021-06-23T15:39:46 | 367,636,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47 | py | S='hello {name}'
print(S.format(name="World"))
| [
"[email protected]"
] | |
c8ce36e7f047b623defb9b3a946c5a7cb799aa02 | be61a9f30274514857ea34297719157f1e5b8447 | /fhir/resources/DSTU2/age.py | 9975cdbeda716d349901880fad136791d72da6f6 | [
"BSD-3-Clause"
] | permissive | jwygoda/fhir.resources | ceff3a620100d2e875136b86d3e82816c0e60a33 | 5053565570d1ca992d9971d20db813c53fd350b9 | refs/heads/master | 2021-02-05T02:59:17.436485 | 2019-07-18T10:57:33 | 2019-07-18T10:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/Age) on 2019-05-14.
# 2019, SMART Health IT.
from . import quantity
class Age(quantity.Quantity):
""" A duration (length of time) with a UCUM code.
There SHALL be a code if there is a value and it SHALL be an expression of
time. If system is present, it SHALL be UCUM. If value is present, it
SHALL be positive.
"""
resource_name = "Age"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
super(Age, self).__init__(jsondict=jsondict, strict=strict)
| [
"[email protected]"
] | |
b3ae4dd8d3b6d3f3f5f2d0f12474ab0ea469bd94 | 7ad19e854135977ee5b789d7c9bdd39d67ec9ea4 | /members/amit/clf/audio_processing.py | 571343202183ddc05c2774531c7e5fd1d3a26acd | [
"MIT"
] | permissive | Leofltt/rg_sound_generation | 1b4d522507bf06247247f3ef929c8d0b93015e61 | 8e79b4d9dce028def43284f80521a2ec61d0066c | refs/heads/main | 2023-05-02T19:53:23.645982 | 2021-05-22T16:09:54 | 2021-05-22T16:09:54 | 369,842,561 | 0 | 0 | MIT | 2021-05-22T15:27:28 | 2021-05-22T15:27:27 | null | UTF-8 | Python | false | false | 1,248 | py | import librosa
import numpy as np
from typing import Dict
def get_mel_spectrogram(audio: np.ndarray, params: Dict) -> np.ndarray:
mel_spec = librosa.feature.melspectrogram(
audio,
sr=params.get("sample_rate"),
n_fft=params.get("n_fft"),
hop_length=params.get("hop_len"),
n_mels=params.get("n_mels")
)
return librosa.power_to_db(mel_spec)
def get_hpr(audio: np.ndarray, params: Dict) -> (np.ndarray, np.ndarray, np.ndarray):
D = librosa.stft(
audio,
n_fft=params.get("n_fft"),
hop_length=params.get("hop_len")
)
H, P = librosa.decompose.hpss(D)
return H, P, D - (H + P)
def get_features(file_path: str, params: Dict):
audio, _ = librosa.load(file_path, sr=params.get("sample_rate"), mono=True)
audio = np.squeeze(audio)[:params.get("sample_rate") * params.get("clip_audio_at")]
h, p, r = get_hpr(audio, params)
h, p, r = np.abs(h).mean(axis=-1), np.abs(p).mean(axis=-1), np.abs(r).mean(axis=-1)
dim = h.shape[0]
hpss = np.concatenate([h, p, r], axis=-1)
hpss = np.reshape(hpss, (dim * 3, 1))
spec = get_mel_spectrogram(audio, params)
spec = np.clip(spec, params.get("clip_at"), np.max(spec))
return spec, hpss
| [
"[email protected]"
] | |
5bdcae03801bc9263730f63678c10f2052be98f5 | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/py全排列4.py | b1e46e018cad4170fe7d76313c34805ed586b0ef | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,133 | py |
# Implementation of the full permutation of a list.
# Full permutation algorithm
class Solution(object):
def permutations(self, nums):
if nums is None:
return []
res = []
def helper(start):
if start == len(nums):
res.append(nums[:])
for i in range(start, len(nums)):
nums[i], nums[start] = nums[start], nums[i]
helper(start + 1)
nums[i], nums[start] = nums[start], nums[i]
helper(0)
return res
if __name__ == "__main__":
s = Solution()
list2 = [1, 2, 3]
print(s.permutations(list2))
# Combination (subsets) algorithm implementation
from typing import List
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
if not nums:
return []
res = []
N = len(nums)
def helper(idx, temp_list):
res.append(temp_list)
for i in range(idx, N):
helper(i + 1, temp_list + [nums[i]])
helper(0, [])
return res
if __name__ == "__main__":
s = Solution()
list2 = [1, 2, 3,]
print(s.subsets(list2))
| [
"[email protected]"
] | |
a1c16962e511343f6654c076de283096891c70f9 | 8e24e8bba2dd476f9fe612226d24891ef81429b7 | /geeksforgeeks/python/hard/3_1.py | 5e5a923d3f652d3bb692c335928a84af29e9c3c5 | [] | no_license | qmnguyenw/python_py4e | fb56c6dc91c49149031a11ca52c9037dc80d5dcf | 84f37412bd43a3b357a17df9ff8811eba16bba6e | refs/heads/master | 2023-06-01T07:58:13.996965 | 2021-06-15T08:39:26 | 2021-06-15T08:39:26 | 349,059,725 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,994 | py | GUI to Shutdown, Restart and Logout from the PC using Python
In this article, we are going to write a Python script to shut down, restart, or log out of your system, and bind it to a GUI application.
The **OS module** in Python provides functions for interacting with the operating system. OS is an inbuilt Python library.
**Syntax:**
> **For shutting down your system:** os.system("shutdown /s /t 1")
>
> **For restarting your system:** os.system("shutdown /r /t 1")
>
> **For logging out of your system:** os.system("shutdown -l")
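Here `/s` requests a shutdown, `/r` a restart, `/t 1` sets a one-second delay before the action, and `-l` logs the current user off; these flags belong to the Windows `shutdown` command, so the script below is Windows-specific. A quick way to sanity-check a command before wiring it into the GUI is to schedule it with a longer delay and then cancel it (hypothetical snippet, not part of the original article):

import os
# Schedule a shutdown 60 seconds out, then abort it with /a (Windows only).
os.system("shutdown /s /t 60")
os.system("shutdown /a")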
**Implementation of the GUI application using Tkinter:**
## Python3
# import modules
from tkinter import *
import os
# user defined functions
def shutdown():
return os.system("shutdown /s /t 1")
def restart():
return os.system("shutdown /r /t 1")
def logout():
return os.system("shutdown -l")
# tkinter object
master = Tk()
# background set to grey
master.configure(bg='light grey')
# creating buttons using the Button widget;
# each button calls the corresponding function defined above
Button(master, text="Shutdown",
command=shutdown).grid(row=0)
Button(master, text="Restart", command=restart).grid(row=1)
Button(master, text="Log out", command=logout).grid(row=2)
mainloop()
**Output:**

**Note:** _Please ensure that you save and close all programs before running this code in IDLE, as it will immediately shut down or restart your computer._
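Given that warning, a cautious variant (not part of the original article) is to ask for confirmation before each action, using the standard tkinter.messagebox dialog; the function below would replace `shutdown` in the `Button(..., command=...)` call above:

from tkinter import messagebox
def confirm_and_shutdown():
    # askyesno returns True only when the user clicks "Yes".
    if messagebox.askyesno("Confirm", "Really shut down the computer?"):
        os.system("shutdown /s /t 1")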
| [
"[email protected]"
] | |
b4b694bd0706b269f741b4e7b90bca506b194cc9 | 885c89c56923565117b6244afa6c16664e25094f | /vspk/v5_0/nugateway.py | 3158028e3193886ae359bc573db55e0e6968fa7a | [
"BSD-3-Clause"
] | permissive | ikbalcam/vspk-python | 0017f5c7c4f9aaca604fb4da42884eddc497ee00 | 1c6d28c8f8f7bcadbd1722cdc3046b01dbf1d9e8 | refs/heads/master | 2021-10-01T16:14:00.380613 | 2017-11-02T21:43:41 | 2017-11-02T21:43:41 | 115,129,333 | 0 | 0 | null | 2017-12-22T15:52:11 | 2017-12-22T15:52:11 | null | UTF-8 | Python | false | false | 19,056 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPATNATPoolsFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUWANServicesFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUEnterprisePermissionsFetcher
from .fetchers import NUJobsFetcher
from .fetchers import NUPortsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUGateway(NURESTObject):
""" Represents a Gateway in the VSD
Notes:
Represents Gateway object.
"""
__rest_name__ = "gateway"
__resource_name__ = "gateways"
## Constants
CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
CONST_PERSONALITY_VSA = "VSA"
CONST_PERMITTED_ACTION_USE = "USE"
CONST_PERSONALITY_VSG = "VSG"
CONST_PERMITTED_ACTION_READ = "READ"
CONST_PERSONALITY_OTHER = "OTHER"
CONST_PERSONALITY_NSG = "NSG"
CONST_PERSONALITY_VRSB = "VRSB"
CONST_PERMITTED_ACTION_ALL = "ALL"
CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"
CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"
CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_PERSONALITY_DC7X50 = "DC7X50"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PERSONALITY_VRSG = "VRSG"
def __init__(self, **kwargs):
""" Initializes a Gateway instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> gateway = NUGateway(id=u'xxxx-xxx-xxx-xxx', name=u'Gateway')
>>> gateway = NUGateway(data=my_dict)
"""
super(NUGateway, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._redundancy_group_id = None
self._peer = None
self._template_id = None
self._pending = None
self._permitted_action = None
self._personality = None
self._description = None
self._enterprise_id = None
self._entity_scope = None
self._use_gateway_vlanvnid = None
self._vtep = None
self._auto_disc_gateway_id = None
self._external_id = None
self._system_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="redundancy_group_id", remote_name="redundancyGroupID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="pending", remote_name="pending", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'HARDWARE_VTEP', u'NSG', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VRSB', u'VRSG', u'VSA', u'VSG'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="use_gateway_vlanvnid", remote_name="useGatewayVLANVNID", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_disc_gateway_id", remote_name="autoDiscGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.patnat_pools = NUPATNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.wan_services = NUWANServicesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ports = NUPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the Gateway
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Gateway
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def redundancy_group_id(self):
""" Get redundancy_group_id value.
Notes:
The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute
This attribute is named `redundancyGroupID` in VSD API.
"""
return self._redundancy_group_id
@redundancy_group_id.setter
def redundancy_group_id(self, value):
""" Set redundancy_group_id value.
Notes:
The Redundancy Gateway Group associated with this Gateway Instance. This is a read only attribute
This attribute is named `redundancyGroupID` in VSD API.
"""
self._redundancy_group_id = value
@property
def peer(self):
""" Get peer value.
Notes:
The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
"""
return self._peer
@peer.setter
def peer(self, value):
""" Set peer value.
Notes:
The System ID of the peer gateway associated with this Gateway instance when it is discovered by the network manager (VSD) as being redundant.
"""
self._peer = value
@property
def template_id(self):
""" Get template_id value.
Notes:
The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway
This attribute is named `templateID` in VSD API.
"""
return self._template_id
@template_id.setter
def template_id(self, value):
""" Set template_id value.
Notes:
The ID of the template that this Gateway was created from. This should be set when instantiating a Gateway
This attribute is named `templateID` in VSD API.
"""
self._template_id = value
@property
def pending(self):
""" Get pending value.
Notes:
                Indicates that this gateway is in pending state or not. When in pending state it cannot be modified from REST.
"""
return self._pending
@pending.setter
def pending(self, value):
""" Set pending value.
Notes:
                Indicates that this gateway is in pending state or not. When in pending state it cannot be modified from REST.
"""
self._pending = value
@property
def permitted_action(self):
""" Get permitted_action value.
Notes:
The permitted action to USE/EXTEND this Gateway.
This attribute is named `permittedAction` in VSD API.
"""
return self._permitted_action
@permitted_action.setter
def permitted_action(self, value):
""" Set permitted_action value.
Notes:
The permitted action to USE/EXTEND this Gateway.
This attribute is named `permittedAction` in VSD API.
"""
self._permitted_action = value
@property
def personality(self):
""" Get personality value.
Notes:
Personality of the Gateway, cannot be changed after creation.
"""
return self._personality
@personality.setter
def personality(self, value):
""" Set personality value.
Notes:
Personality of the Gateway, cannot be changed after creation.
"""
self._personality = value
@property
def description(self):
""" Get description value.
Notes:
A description of the Gateway
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the Gateway
"""
self._description = value
@property
def enterprise_id(self):
""" Get enterprise_id value.
Notes:
The enterprise associated with this Gateway. This is a read only attribute
This attribute is named `enterpriseID` in VSD API.
"""
return self._enterprise_id
@enterprise_id.setter
def enterprise_id(self, value):
""" Set enterprise_id value.
Notes:
The enterprise associated with this Gateway. This is a read only attribute
This attribute is named `enterpriseID` in VSD API.
"""
self._enterprise_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def use_gateway_vlanvnid(self):
""" Get use_gateway_vlanvnid value.
Notes:
When set, VLAN-VNID mapping must be unique for all the vports of the gateway
This attribute is named `useGatewayVLANVNID` in VSD API.
"""
return self._use_gateway_vlanvnid
@use_gateway_vlanvnid.setter
def use_gateway_vlanvnid(self, value):
""" Set use_gateway_vlanvnid value.
Notes:
When set, VLAN-VNID mapping must be unique for all the vports of the gateway
This attribute is named `useGatewayVLANVNID` in VSD API.
"""
self._use_gateway_vlanvnid = value
@property
def vtep(self):
""" Get vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
return self._vtep
@vtep.setter
def vtep(self, value):
""" Set vtep value.
Notes:
Represent the system ID or the Virtual IP of a service used by a Gateway (VSG for now) to establish a tunnel with a remote VSG or hypervisor. The format of this field is consistent with an IP address.
"""
self._vtep = value
@property
def auto_disc_gateway_id(self):
""" Get auto_disc_gateway_id value.
Notes:
The Auto Discovered Gateway associated with this Gateway Instance
This attribute is named `autoDiscGatewayID` in VSD API.
"""
return self._auto_disc_gateway_id
@auto_disc_gateway_id.setter
def auto_disc_gateway_id(self, value):
""" Set auto_disc_gateway_id value.
Notes:
The Auto Discovered Gateway associated with this Gateway Instance
This attribute is named `autoDiscGatewayID` in VSD API.
"""
self._auto_disc_gateway_id = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
@property
def system_id(self):
""" Get system_id value.
Notes:
Identifier of the Gateway, cannot be modified after creation
This attribute is named `systemID` in VSD API.
"""
return self._system_id
@system_id.setter
def system_id(self, value):
""" Set system_id value.
Notes:
Identifier of the Gateway, cannot be modified after creation
This attribute is named `systemID` in VSD API.
"""
self._system_id = value
## Custom methods
def is_template(self):
""" Verify that the object is a template
Returns:
(bool): True if the object is a template
"""
return False
def is_from_template(self):
""" Verify if the object has been instantiated from a template
Note:
The object has to be fetched. Otherwise, it does not
have information from its parent
Returns:
(bool): True if the object is a template
"""
return self.template_id
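    # Illustrative usage sketch (assumes an authenticated vspk/bambou session;
    # the gateway ID below is a placeholder):
    #
    #   gateway = NUGateway(id="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx")
    #   gateway.fetch()        # populate name, personality, etc. from VSD
    #   gateway.ports.get()    # child fetcher declared in __init__ above
    #   gateway.description = "updated from the SDK"
    #   gateway.save()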
| [
"[email protected]"
] | |
f987c95714b3b19c0d798755b64d27ae114cc266 | 6268a19db5d7806b3a91d6350ec2777b3e13cee6 | /old_stuff/code/hpe-feb2019/qi2016/huawei/preprocess/load_data.py | 76844a4293e4bbe7d395115563869a694f4be590 | [] | no_license | aaronlws95/phd_2019 | 3ae48b4936f039f369be3a40404292182768cf3f | 22ab0f5029b7d67d32421d06caaf3e8097a57772 | refs/heads/master | 2023-03-22T14:38:18.275184 | 2021-03-21T11:39:29 | 2021-03-21T11:39:29 | 186,387,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,463 | py | from __future__ import print_function
from sklearn.utils import shuffle
from PIL import Image
import numpy
import matplotlib.pyplot as plt
from ..utils import xyz_uvd
from skimage.transform import resize
def get_filenames_labels(dataset_dir):
xyz_jnt_gt=[]
file_name = []
our_index = [0,1,6,7,8,2,9,10,11,3,12,13,14,4,15,16,17,5,18,19,20]
with open('%s/Training_Annotation.txt'%(dataset_dir), mode='r',encoding='utf-8',newline='') as f:
for line in f:
part = line.split('\t')
file_name.append(part[0])
xyz_jnt_gt.append(part[1:64])
f.close()
xyz_jnt_gt=numpy.array(xyz_jnt_gt,dtype='float64')
xyz_jnt_gt.shape=(xyz_jnt_gt.shape[0],21,3)
xyz_jnt_gt=xyz_jnt_gt[:,our_index,:]
uvd_jnt_gt =xyz_uvd.xyz2uvd(xyz=xyz_jnt_gt,setname='mega')
return uvd_jnt_gt,xyz_jnt_gt,numpy.array(file_name)
def generate_fullimg_mask_from_file_unet(path,img_file_name,uvd,batch_size):
centerU=315.944855
phyMargin=50.0
padWidth=200
img_rows=480
img_cols=640
num_imgs=len(img_file_name)
idx = numpy.arange(len(img_file_name))
num = (batch_size - num_imgs%batch_size)%batch_size
idx = numpy.concatenate([idx,idx[0:num]],axis=0)
n_batches = int(idx.shape[0]/batch_size)
x0 = numpy.zeros((batch_size,img_rows,img_cols,1),dtype='float32')
y = numpy.zeros((batch_size,img_rows,img_cols,1),dtype='uint8')
while 1:
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
x0[mi,:,:,0]=depth/2000.0
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
y[mi,:,:,0] = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
yield (x0,y)
def generate_train(path,img_file_name,uvd,batch_size):
num_imgs=uvd.shape[0]
idx = numpy.arange(num_imgs)
print('train.num',num_imgs)
n_batches=int(idx.shape[0]/batch_size)
phyMargin=50.0
padWidth=200
centerU=315.944855
setname='mega'
while True:
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
all_input = numpy.zeros((batch_size,128,160,1),dtype='float32')
all_mask=numpy.zeros((batch_size,128,160,1),dtype='uint8')
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
orimask = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
orimask = resize(orimask,(120,160), order=3,preserve_range=True)
orimask[numpy.where(orimask>0)]=1
all_mask[mi,4:124,:,0]=orimask
all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
yield (all_input,all_mask)
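# Illustrative training-loop sketch: generate_train() yields (depth, mask) batches
# endlessly, so it can feed a Keras-style fit_generator loop. `model`, `train_idx`
# and the steps/epochs values below are assumptions, not defined in this module.
#
#   train_gen = generate_train(source_dir, file_name[train_idx], uvd[train_idx], batch_size=32)
#   model.fit_generator(train_gen, steps_per_epoch=len(train_idx) // 32, epochs=10)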
def generate_downsample_img_mask_from_file_unet_aug(path,img_file_name,uvd,batch_size):
num_imgs=uvd.shape[0]
idx = numpy.arange(num_imgs)
print('train.num',num_imgs)
n_batches=int(idx.shape[0]/batch_size)
phyMargin=50.0
padWidth=200
centerU=315.944855
setname='mega'
while True:
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
all_input = numpy.zeros((batch_size*2,128,160,1),dtype='float32')
all_mask=numpy.zeros((batch_size*2,128,160,1),dtype='uint8')
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
tmpMask = numpy.zeros_like(tmpDepth,dtype='uint8')
tmpMask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
orimask = tmpMask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
orimask = resize(orimask,(120,160), order=3,preserve_range=True)
orimask[numpy.where(orimask>0)]=1
all_mask[mi,4:124,:,0]=orimask
all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
jiter_width = numpy.random.randint(low=-padWidth,high=padWidth,size=1)[0]
# jiter_width = numpy.random.randint(low=-int(padWidth/2),high=int(padWidth/2),size=1)[0]
# print(jiter_width)
jiter_mask = tmpMask[jiter_width+padWidth:padWidth+depth.shape[0]-jiter_width,jiter_width+padWidth:padWidth+depth.shape[1]-jiter_width]
jiter_depth = tmpDepth[jiter_width+padWidth:padWidth+depth.shape[0]-jiter_width,jiter_width+padWidth:padWidth+depth.shape[1]-jiter_width]
orimask = resize(jiter_mask,(120,160), order=3,preserve_range=True)
orimask[numpy.where(orimask>0)]=1
all_mask[mi+batch_size,4:124,:,0]=orimask
all_input[mi+batch_size,4:124,:,0]=resize(jiter_depth,(120,160), order=3,preserve_range=True)/2000.0
#
# fig = plt.figure()
# ax=fig.add_subplot(221)
# ax.imshow(all_input[mi,4:124,:,0])
# ax=fig.add_subplot(222)
# ax.imshow(all_mask[mi,4:124,:,0])
#
# ax=fig.add_subplot(223)
# ax.imshow(all_input[mi+batch_size,4:124,:,0])
# ax=fig.add_subplot(224)
# ax.imshow(all_mask[mi+batch_size,4:124,:,0])
# plt.show()
yield (all_input,all_mask)
def generate_fullimg_mask_from_file_unet_for_test(path,img_file_name,uvd,batch_size,n_batches):
centerU=315.944855
phyMargin=50.0
padWidth=200
img_rows=480
img_cols=640
num_imgs=len(img_file_name)
    idx = numpy.arange(num_imgs)
num = (batch_size - num_imgs%batch_size)%batch_size
idx = numpy.concatenate([idx,idx[0:num]],axis=0)
x0 = numpy.zeros((batch_size*n_batches,img_rows,img_cols,1),dtype='float32')
y = numpy.zeros((batch_size*n_batches,img_rows,img_cols,1),dtype='uint8')
for mi, cur_idx in enumerate(list(idx)):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
x0[mi,:,:,0]=depth/2000.0
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
y[mi,:,:,0] = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
return x0,y
def generate_arrays_from_file_unet(path,img_file_name,uvd,batch_size):
output_down_ratio = 8.0
img_rows=120
img_cols=160
num_imgs=len(img_file_name)
idx = numpy.arange(len(img_file_name))
num = (batch_size - num_imgs%batch_size)%batch_size
idx = numpy.concatenate([idx,idx[0:num]],axis=0)
n_batches = int(idx.shape[0]/batch_size)
x0 = numpy.zeros((batch_size,img_rows+8,img_cols,1),dtype='float32')
y = numpy.zeros((batch_size,int((img_rows+8)/output_down_ratio),int(img_cols/output_down_ratio),1),dtype='float32')
# print('$'*20, 'validataion n_batches', n_batches)
target_rows = y[0].shape[0]
    target_cols = y[0].shape[1]
while 1:
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
roiDepth = numpy.asarray(roiDepth, dtype='uint16')/2000.0
depth = resize(roiDepth,(img_rows,img_cols), order=3,preserve_range=True)
u_norm = int(cur_uvd[9,0]/4/output_down_ratio)
v_norm = int((cur_uvd[9,1]/4.0+4)/output_down_ratio)
if v_norm>0 and u_norm >0 and v_norm<target_rows and u_norm < target_cols:
y[mi,v_norm,u_norm,0]=1
x0[mi,4:(4+img_rows),:,0]=depth
yield (x0,y)
def tmp(path,img_file_name,uvd,batch_size):
output_down_ratio = 4.0
img_rows=120
img_cols=160
num_imgs=uvd.shape[0]
idx = numpy.arange(num_imgs)
num = (batch_size - num_imgs%batch_size)%batch_size
idx = numpy.concatenate([idx,idx[0:num]],axis=0)
n_batches = int(idx.shape[0]/batch_size)
x0 = numpy.zeros((batch_size,img_rows+8,img_cols,1),dtype='float32')
y = numpy.zeros((batch_size,int((img_rows+8)/output_down_ratio),int(img_cols/output_down_ratio),1),dtype='float32')
# print('$'*20, 'validataion n_batches', n_batches)
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print minibatch_index
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
roiDepth = numpy.asarray(roiDepth, dtype='uint16')/2000.0
print(numpy.max(roiDepth))
depth = resize(roiDepth,(img_rows,img_cols), order=3,preserve_range=True)
u_norm = int(cur_uvd[9,0]/16)
v_norm = int((cur_uvd[9,1]/4.0+4)/4.0)
y[mi,v_norm,u_norm,0]=1
x0[mi,4:(4+img_rows),:,0]=depth
plt.imshow(x0[mi,:,:,0],'gray')
plt.figure()
tmp = resize(x0[mi,:,:,0],(y[0].shape[0],y[0].shape[1]), order=3,preserve_range=True)
plt.imshow(tmp,'gray')
plt.scatter(u_norm,v_norm)
plt.show()
# print('yield validataion minibatch_index ',minibatch_index)
def generate_fullimg_mask_from_file_unet_show(path,img_file_name,uvd,batch_size):
centerU=315.944855
phyMargin=50.0
padWidth=200
img_rows=480
img_cols=640
num_imgs=len(img_file_name)
idx = numpy.arange(len(img_file_name))
num = (batch_size - num_imgs%batch_size)%batch_size
idx = numpy.concatenate([idx,idx[0:num]],axis=0)
n_batches = int(idx.shape[0]/batch_size)
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname='mega',xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
mask= orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
plt.figure()
plt.imshow(depth,'gray')
plt.figure()
plt.imshow(mask,'gray')
plt.show()
def generate_train_tmp(path,img_file_name,uvd,batch_size):
num_imgs=uvd.shape[0]
idx = numpy.arange(num_imgs)
print(idx.shape)
n_batches=int(idx.shape[0]/batch_size)
phyMargin=50.0
padWidth=200
centerU=315.944855
setname='mega'
idx= shuffle(idx,random_state=0)
for minibatch_index in range(n_batches):
# print('minibatch_index',minibatch_index)
slice_idx = range(minibatch_index * batch_size, (minibatch_index + 1) * batch_size,1)
all_input = numpy.zeros((batch_size,128,160,1),dtype='float32')
all_mask=numpy.zeros((batch_size,128,160,1),dtype='uint8')
for mi, cur_idx in enumerate(list(idx[slice_idx])):
cur_file_name = img_file_name[cur_idx]
cur_uvd = uvd[cur_idx]
bb = numpy.array([(phyMargin,phyMargin,cur_uvd[9,2])])
bbox_uvd = xyz_uvd.xyz2uvd(setname=setname,xyz=bb)
margin = int(numpy.ceil(bbox_uvd[0,0] - centerU))
roiDepth = Image.open('%s/images/%s' %(path,cur_file_name))
depth = numpy.asarray(roiDepth, dtype='uint16')
axis_bounds = numpy.array([numpy.min(cur_uvd[:, 0]), numpy.max(cur_uvd[:, 0]),
numpy.min(cur_uvd[:, 1]), numpy.max(cur_uvd[:, 1]),
numpy.min(cur_uvd[:, 2]), numpy.max(cur_uvd[:, 2])],dtype='int32')
tmpDepth = numpy.zeros((depth.shape[0]+padWidth*2,depth.shape[1]+padWidth*2))
tmpDepth[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]=depth
crop = tmpDepth[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth]
loc = numpy.where(numpy.logical_and(crop>axis_bounds[4]-phyMargin,crop<axis_bounds[5]+phyMargin))
cropmask=numpy.zeros_like(crop)
cropmask[loc]=1
orimask = numpy.zeros_like(tmpDepth,dtype='uint8')
orimask[axis_bounds[2]-margin+padWidth:axis_bounds[3]+margin+padWidth,
axis_bounds[0]-margin+padWidth:axis_bounds[1]+margin+padWidth] =cropmask
orimask = orimask[padWidth:padWidth+depth.shape[0],padWidth:padWidth+depth.shape[1]]
orimask = resize(orimask,(120,160), order=3,preserve_range=True)
orimask[numpy.where(orimask>0)]=1
all_mask[mi,4:124,:,0]=orimask
all_input[mi,4:124,:,0]=resize(depth,(120,160), order=3,preserve_range=True)/2000.0
plt.figure()
plt.imshow(depth,'gray')
plt.scatter(cur_uvd[:,0],cur_uvd[:,1])
plt.figure()
plt.imshow(orimask,'gray')
plt.figure()
plt.imshow(crop,'gray')
plt.show()
for i in range(batch_size):
plt.figure()
plt.imshow(all_input[i,:,:,0],'gray')
plt.figure()
plt.imshow(all_mask[i,:,:,0],'gray')
plt.show()
import h5py
if __name__ == '__main__':
source_dir = 'F:/BigHand_Challenge/Training/'
save_dir = 'F:/HuaweiProj/data/mega'
f = h5py.File('%s/source/test_mask.h5'%save_dir, 'r')
filename = f['filename'][...]
uvd = f['uvd'][...]
f.close()
generate_train_tmp(path=source_dir,img_file_name=filename,uvd=uvd,batch_size=32)
# base_dir = 'D:/Project/3DHandPose/Data_3DHandPoseDataset/NYU_dataset/NYU_dataset/'
# generate_arrays_from_file_unet(path=base_dir,
# dataset='test',num_imgs=8252,num_classes=17,batch_size=1)
# # create_data(dataset='train',num_imgs = 72757)
# # create_data(dataset='test',num_imgs = 8252)
# uvd_jnt_gt,_,file_name=get_filenames_labels(dataset_dir=source_dir)
# num_img=len(file_name)
# idx = shuffle(numpy.arange(num_img),random_state=0)
# img_idx_train = idx[:int(num_img*0.9)]
# img_idx_test = idx[int(num_img*0.9):]
# generate_train_tmp(path=source_dir,img_file_name=file_name[img_idx_test],uvd=uvd_jnt_gt[img_idx_test],batch_size=32)
#
# generate_fullimg_mask_from_file_unet_show(path=source_dir,img_file_name=file_name[img_idx_test],uvd=uvd_jnt_gt[img_idx_test],batch_size=32) | [
"[email protected]"
] | |
c97d5c8534d89a3098f1408d6927557520a716a0 | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/xfel/command_line/xtc_dump.py | cf748f55c06c41577045989d90fdb4ed5880b085 | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 10,862 | py | from __future__ import absolute_import, division, print_function
from six.moves import range
# -*- Mode: Python; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
#
# LIBTBX_SET_DISPATCHER_NAME cctbx.xfel.xtc_dump
#
import psana
from xfel.cftbx.detector import cspad_cbf_tbx
from xfel.cxi.cspad_ana import cspad_tbx, rayonix_tbx
import os, sys
import libtbx.load_env
from libtbx.utils import Sorry, Usage
from dials.util.options import OptionParser
from libtbx.phil import parse
from libtbx import easy_pickle
phil_scope = parse('''
dispatch {
max_events = None
.type = int
.help = If not specified, process all events. Otherwise, only process this many
selected_events = False
.type = bool
.help = If True, only dump events specified in input.event scopes
}
input {
cfg = None
.type = str
.help = Path to psana config file. Genearlly not needed for CBFs. For image pickles, \
the psana config file should have a mod_image_dict module.
experiment = None
.type = str
.help = Experiment identifier, e.g. cxi84914
run_num = None
.type = int
.help = Run number or run range to process
address = None
.type = str
.help = Detector address, e.g. CxiDs2.0:Cspad.0 or detector alias, e.g. Ds1CsPad
calib_dir = None
.type = str
.help = Non-standard calib directory location
xtc_dir = None
.type = str
.help = Non-standard xtc directory location
timestamp = None
.type = str
.multiple = True
.help = Event timestamp(s) of event(s) in human-readable format of images to
.help = dump (must also specify dispatch.selected_events=True.)
}
format {
file_format = *cbf pickle
.type = choice
.help = Output file format, 64 tile segmented CBF or image pickle
pickle {
out_key = cctbx.xfel.image_dict
.type = str
.help = Key name that mod_image_dict uses to put image data in each psana event
}
cbf {
detz_offset = None
.type = float
.help = Distance from back of detector rail to sample interaction region (CXI) \
or actual detector distance (XPP/MFX)
override_energy = None
.type = float
.help = If not None, use the input energy for every event instead of the energy \
from the XTC stream
mode = *cspad rayonix
.type = choice
cspad {
gain_mask_value = None
.type = float
.help = If not None, use the gain mask for the run to multiply the low-gain pixels by this number
}
rayonix {
bin_size = 2
.type = int
.help = Detector binning mode
override_beam_x = None
.type = float
.help = If set, override the beam X position
override_beam_y = None
.type = float
.help = If set, override the beam Y position
}
}
}
output {
output_dir = .
.type = str
.help = Directory output files will be placed
tmp_output_dir = None
.type = str
.help = Directory for CBFlib tmp output files
}
''', process_includes=True)
class Script(object):
""" Script to process dump XFEL data at LCLS """
def __init__(self):
""" Set up the option parser. Arguments come from the command line or a phil file """
self.usage = """
%s input.experiment=experimentname input.run_num=N input.address=address
format.file_format=cbf format.cbf.detz_offset=N
%s input.experiment=experimentname input.run_num=N input.address=address
format.file_format=pickle format.pickle.cfg=path
"""%(libtbx.env.dispatcher_name, libtbx.env.dispatcher_name)
self.parser = OptionParser(
usage = self.usage,
phil = phil_scope)
def run(self):
""" Process all images assigned to this thread """
params, options = self.parser.parse_args(
show_diff_phil=True)
if params.input.experiment is None or \
params.input.run_num is None or \
params.input.address is None:
raise Usage(self.usage)
if params.format.file_format == "cbf":
if params.format.cbf.detz_offset is None:
raise Usage(self.usage)
elif params.format.file_format == "pickle":
if params.input.cfg is None:
raise Usage(self.usage)
else:
raise Usage(self.usage)
if not os.path.exists(params.output.output_dir):
raise Sorry("Output path not found:" + params.output.output_dir)
#Environment variable redirect for CBFLib temporary CBF_TMP_XYZ file output
if params.format.file_format == "cbf":
if params.output.tmp_output_dir is None:
tmp_dir = os.path.join(params.output.output_dir, '.tmp')
else:
tmp_dir = os.path.join(params.output.tmp_output_dir, '.tmp')
if not os.path.exists(tmp_dir):
try:
os.makedirs(tmp_dir)
except Exception as e:
if not os.path.exists(tmp_dir):
halraiser(e)
os.environ['CBF_TMP_DIR'] = tmp_dir
# Save the paramters
self.params = params
self.options = options
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
# set up psana
if params.input.cfg is not None:
psana.setConfigFile(params.input.cfg)
if params.input.calib_dir is not None:
psana.setOption('psana.calib-dir',params.input.calib_dir)
dataset_name = "exp=%s:run=%s:idx"%(params.input.experiment,params.input.run_num)
if params.input.xtc_dir is not None:
dataset_name = "exp=%s:run=%s:idx:dir=%s"%(params.input.experiment,params.input.run_num,params.input.xtc_dir)
ds = psana.DataSource(dataset_name)
if params.format.file_format == "cbf":
src = psana.Source('DetInfo(%s)'%params.input.address)
psana_det = psana.Detector(params.input.address, ds.env())
# set this to sys.maxint to analyze all events
if params.dispatch.max_events is None:
max_events = sys.maxsize
else:
max_events = params.dispatch.max_events
for run in ds.runs():
if params.format.file_format == "cbf":
if params.format.cbf.mode == "cspad":
# load a header only cspad cbf from the slac metrology
base_dxtbx = cspad_cbf_tbx.env_dxtbx_from_slac_metrology(run, params.input.address)
if base_dxtbx is None:
raise Sorry("Couldn't load calibration file for run %d"%run.run())
elif params.format.cbf.mode == "rayonix":
# load a header only rayonix cbf from the input parameters
detector_size = rayonix_tbx.get_rayonix_detector_dimensions(ds.env())
base_dxtbx = rayonix_tbx.get_dxtbx_from_params(params.format.cbf.rayonix, detector_size)
# list of all events
times = run.times()
if params.dispatch.selected_events:
times = [t for t in times if cspad_tbx.evt_timestamp((t.seconds(),t.nanoseconds()/1e6)) in params.input.timestamp]
nevents = min(len(times),max_events)
# chop the list into pieces, depending on rank. This assigns each process
# events such that the get every Nth event where N is the number of processes
mytimes = [times[i] for i in range(nevents) if (i+rank)%size == 0]
for i in range(len(mytimes)):
evt = run.event(mytimes[i])
id = evt.get(psana.EventId)
print("Event #",i," has id:",id)
timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format
if timestamp is None:
print("No timestamp, skipping shot")
continue
if evt.get("skip_event") or "skip_event" in [key.key() for key in evt.keys()]:
print("Skipping event",timestamp)
continue
t = timestamp
s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]
print("Processing shot", s)
if params.format.file_format == "pickle":
if evt.get("skip_event"):
print("Skipping event",id)
continue
# the data needs to have already been processed and put into the event by psana
data = evt.get(params.format.pickle.out_key)
if data is None:
print("No data")
continue
# set output paths according to the templates
path = os.path.join(params.output.output_dir, "shot-" + s + ".pickle")
print("Saving", path)
easy_pickle.dump(path, data)
elif params.format.file_format == "cbf":
if params.format.cbf.mode == "cspad":
# get numpy array, 32x185x388
data = cspad_cbf_tbx.get_psana_corrected_data(psana_det, evt, use_default=False, dark=True,
common_mode=None,
apply_gain_mask=params.format.cbf.cspad.gain_mask_value is not None,
gain_mask_value=params.format.cbf.cspad.gain_mask_value,
per_pixel_gain=False)
distance = cspad_tbx.env_distance(params.input.address, run.env(), params.format.cbf.detz_offset)
elif params.format.cbf.mode == "rayonix":
data = rayonix_tbx.get_data_from_psana_event(evt, params.input.address)
distance = params.format.cbf.detz_offset
if distance is None:
print("No distance, skipping shot")
continue
if self.params.format.cbf.override_energy is None:
wavelength = cspad_tbx.evt_wavelength(evt)
if wavelength is None:
print("No wavelength, skipping shot")
continue
else:
wavelength = 12398.4187/self.params.format.cbf.override_energy
# stitch together the header, data and metadata into the final dxtbx format object
if params.format.cbf.mode == "cspad":
image = cspad_cbf_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, params.input.address, round_to_int=False)
elif params.format.cbf.mode == "rayonix":
image = rayonix_tbx.format_object_from_data(base_dxtbx, data, distance, wavelength, timestamp, params.input.address)
path = os.path.join(params.output.output_dir, "shot-" + s + ".cbf")
print("Saving", path)
# write the file
import pycbf
image._cbf_handle.write_widefile(path, pycbf.CBF,\
pycbf.MIME_HEADERS|pycbf.MSG_DIGEST|pycbf.PAD_4K, 0)
run.end()
ds.end()
if __name__ == "__main__":
from dials.util import halraiser
try:
script = Script()
script.run()
except Exception as e:
halraiser(e)
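# Illustrative invocation, following the usage string above (experiment, run number
# and detector address are placeholders):
#
#   cctbx.xfel.xtc_dump input.experiment=cxid9114 input.run_num=95 \
#     input.address=CxiDs1.0:Cspad.0 \
#     format.file_format=cbf format.cbf.detz_offset=572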
| [
"[email protected]"
] | |
13a4905ae7077bf34c1cfcef8d53ed482623a436 | 2ff4a38b86dfee4115c0a9280e95ff042d36f8bd | /programmers/lv2/emergency_boat_lv2.py | 4e7c867c41c62802da3a4d88574770e17a8f9e71 | [] | no_license | ohtaehyun/algo_study | 5e0adc6d18a186d959f0ad191af0d916f5c99793 | 87ac40b89b5ddbba09e8b3dd86ed0a3defc0590b | refs/heads/master | 2023-02-24T13:52:37.323111 | 2021-01-25T09:32:55 | 2021-01-25T09:32:55 | 284,712,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | from collections import deque
def solution(people, limit):
answer = 0
people.sort()
    p = deque(people)
while p :
weight = p.popleft()
remain_weight = limit - weight
while p :
w = p.pop()
if w <= remain_weight:
break
else :
answer += 1
answer += 1
return answer
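# Quick sanity check: sorted weights [50, 50, 70, 80] with limit 100 ->
# (50, 50) share one boat, 70 and 80 each go alone, so 3 boats in total.
assert solution([70, 50, 80, 50], 100) == 3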
| [
"[email protected]"
] | |
3c2aab3ecac9f232f388ff9061c30d4daeb22e65 | c8b3882a09347076e78324dd106b40579b380f32 | /medical_web/manage.py | d91730869c305c0dd87957564ee1888837fde57b | [] | no_license | medical-weetech-co/medical.github.io | ec63323937a61ca08b04ea04070ba7c180a2cab1 | ed0324e104195a4b100aedc5da03d70a9982bac1 | refs/heads/master | 2021-07-05T12:51:37.400915 | 2017-01-25T08:54:06 | 2017-01-25T08:54:06 | 59,278,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "medical_web.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
dd1ed5bd20b5a60fd53bd43317230eb05bda02ff | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch80_2020_06_17_20_39_13_688401.py | d534bc9a3759e6fe0eb67eb6874f60c857066930 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | def interseccao_chaves(dicio1, dicio2):
lista_chaves = []
    for i in dicio1.keys():
        if i in dicio2.keys():
            lista_chaves.append(i)
return lista_chaves
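# Example: only 'b' appears in both dictionaries.
assert interseccao_chaves({'a': 1, 'b': 2}, {'b': 3, 'c': 4}) == ['b']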
| [
"[email protected]"
] | |
e1d364e8012b8e88a5aa8ea7ea24b49307bae086 | 5064d0a44fb1e1af0205ae0bfa711bdbf2a33cc6 | /test/main_json.py | 495de07bbef5c2a94bb17969b852bb609d084a3b | [] | no_license | lmxwade/DSCI_551 | 4e157ae87f370a5e0195ea64c1afb2cf385c2418 | eecdc9222ae0e3441c167525609dfd54ed4134a8 | refs/heads/master | 2023-02-10T15:48:38.755414 | 2020-07-04T16:24:35 | 2020-07-04T16:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | #
__author__ = 'Aaron Yang'
__email__ = '[email protected]'
__date__ = '9/6/2019 3:45 PM'
import json
if __name__ == "__main__":
data = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
    print(type(data))  # <class 'dict'>
# json.dumps 用于将 Python 对象编码成 JSON 字符串
jsonStr = json.dumps(data)
print(jsonStr) # <class 'str'>
jsonData = '{"a":1,"b":2,"c":3,"d":4,"e":{"1":23}}'
dict_obj = json.loads(jsonData)
print(dict_obj)
print(type(dict_obj))
# keys must be str, int, float, bool or None, not tuple
# print(json.dumps({(1,2): 3}))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
#print(json.loads("{'1': 3}"))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
# print(json.loads('{(1): 3}'))
# Expecting property name enclosed in double quotes: line 1 column 2 (char 1)
# print(json.loads('{1: 3}'))
print(json.loads('{"1": 3}'))
import urllib.parse
# str = 'l.a.a.c.-8th'
str = "HOUSE OF CURRY"
list = ' '.join(str.lower().split()).split(' ')
# list = str.lower().split(' ')
print(list)
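    # expected output of the line above: ['house', 'of', 'curry']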
| [
"[email protected]"
] | |
df2687be95865187cd182c14c35da780e63fbbda | abc1a497c41ddd8669c8c41da18af65d08ca54e4 | /AnalysisF/recon_wf/1ns/make_H1ns.py | 106466fe5adefdd90b89f9c759c167aade3faeb5 | [] | no_license | gerakolt/direxeno_privet | fcef5e3b654720e277c48935acc168472dfd8ecc | 75e88fb1ed44fce32fce02677f64106121259f6d | refs/heads/master | 2022-12-20T22:01:30.825891 | 2020-10-04T06:01:07 | 2020-10-04T06:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,645 | py | import numpy as np
import matplotlib.pyplot as plt
import time
import os
import sys
from scipy.optimize import minimize
from scipy.stats import poisson, binom
from scipy.special import erf as erf
pmts=[0,1,4,7,8,14]
path='/home/gerak/Desktop/DireXeno/190803/Cs137B/EventRecon/'
rec=np.load(path+'recon1ns98999.npz')['rec']
blw_cut=15
init_cut=20
chi2_cut=5000
left=700
right=1000
rec=rec[np.all(rec['init_wf']>20, axis=1)]
rec=rec[np.sqrt(np.sum(rec['blw']**2, axis=1))<blw_cut]
rec=rec[np.sqrt(np.sum(rec['chi2']**2, axis=1))<chi2_cut]
init=np.sum(np.sum(rec['h'][:,:10,:], axis=2), axis=1)
full=np.sum(np.sum(rec['h'], axis=2), axis=1)
rec=rec[init/full<0.5]
up=np.sum(rec['h'][:,:100,0], axis=1)+np.sum(rec['h'][:,:100,1], axis=1)
dn=np.sum(rec['h'][:,:100,-1], axis=1)+np.sum(rec['h'][:,:100,-2], axis=1)+np.sum(rec['h'][:,:100,-3], axis=1)
rec=rec[dn<3*up+18]
spectrum=np.histogram(np.sum(np.sum(rec['h'], axis=1), axis=1), bins=np.arange(1000)-0.5)[0]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)>left]
rec=rec[np.sum(np.sum(rec['h'], axis=1), axis=1)<right]
H=np.zeros((50, 200, len(pmts)))
G=np.zeros((300, 200))
for j in range(200):
G[:,j]=np.histogram(np.sum(rec['h'][:,j,:], axis=1), bins=np.arange(np.shape(G)[0]+1)-0.5)[0]
spectra=np.zeros((350, len(pmts)))
for i, pmt in enumerate(pmts):
h=rec['h'][:,:,i]
spectra[:,i]=np.histogram(np.sum(h[:,:100], axis=1), bins=np.arange(351)-0.5)[0]
for j in range(200):
H[:,j,i]=np.histogram(h[:,j], bins=np.arange(np.shape(H)[0]+1)-0.5)[0]
np.savez(path+'H', H=H, G=G, left=left, right=right, spectra=spectra, spectrum=spectrum, up_dn_cut='dn<3*up+18')
| [
"[email protected]"
] | |
2d7752b5248ca30de42447503f8cb51b06fd5d1f | 21e64f9410323a11d4550b889fd0bb0d68543fab | /config/munin/mongodb_conn | 93f39733e6ea84ca3aa106275b63a88e87de9375 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | quanganhdo/NewsBlur | a7eaa3c5bdb2e57998651d736db861f88fcd1e75 | cef29f01658c845564a5044b48b4cf19efcaa4d6 | refs/heads/master | 2021-03-05T23:56:27.976498 | 2020-02-27T15:23:23 | 2020-02-27T15:23:23 | 246,164,347 | 1 | 0 | MIT | 2020-03-09T23:34:18 | 2020-03-09T23:34:17 | null | UTF-8 | Python | false | false | 791 | #!/srv/newsblur/venv/newsblur/bin/python
# -*- coding: utf-8 -*-
from vendor.munin.mongodb import MuninMongoDBPlugin
class MongoDBConnectionsPlugin(MuninMongoDBPlugin):
args = "-l 0 --base 1000"
vlabel = "count"
title = "MongoDB current connections"
info = "Current connections"
fields = (
('connections', dict(
label = "# of Connections",
info = "connections",
type = "GAUGE",
min = "0",
)),
)
def execute(self):
status = self.connection.admin.command('serverStatus')
try:
value = status['connections']['current']
except KeyError:
value = "U"
return dict(connections=value)
if __name__ == "__main__":
MongoDBConnectionsPlugin().run()
| [
"[email protected]"
] | ||
004329b3ddea39cfcdec79380491743f3b906eb9 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/leap/5b53afc49b5f418cb7d6bbf495c8fdd9.py | 38933221b5f04fff5d26cc532c350159342a7cc9 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 239 | py | '''This module will take in a year (positive integer) and return True if the
year is a leap year, False if it is not.
Lesson: Refactor.
'''
def is_leap_year(year):
return (year%400 == 0) or ((year%100 != 0) and (year%4 == 0))
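# Quick self-checks:
assert is_leap_year(2000)       # divisible by 400
assert not is_leap_year(1900)   # divisible by 100 but not by 400
assert is_leap_year(2012)       # divisible by 4 and not by 100
assert not is_leap_year(2013)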
| [
"[email protected]"
] | |
e5187be6c2339cacd981c880a7bcc4f600452526 | e1112bb6d54acb76e6e991fc4c3fc0d3a1f7b0d6 | /02 - Sets and tuples/Exercise/02-Sets_of_elements.py | 7bbb5ad6a390172f3d0bbbf55241d1a067f2744d | [] | no_license | MiroslavPK/Python-Advanced | 0326209d98254d4578a63dcd4c32b49be183baf2 | 0c696a220aa587edb2505e8d986b041cc90a46f3 | refs/heads/master | 2023-01-12T10:46:06.590096 | 2020-11-18T19:08:55 | 2020-11-18T19:08:55 | 295,449,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | n, m = map(int, input().split())
n_set = set()
m_set = set()
for i in range(n+m):
if i < n:
n_set.add(input())
else:
m_set.add(input())
intersection = n_set & m_set
print('\n'.join(intersection))
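# Illustrative run: with "4 3" on the first line and the values 1 3 5 7 / 3 4 5 on
# the following lines, the intersection printed is 3 and 5 (set order is arbitrary).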
| [
"[email protected]"
] | |
d8b2df2337b5e5e71fcd5b02fe1fc120aa5d240b | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /devel/lib/python2.7/dist-packages/pal_device_msgs/msg/_DoTimedLedEffectGoal.py | 73f6163b853c14a328a4f720228d430539ed29a0 | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | /home/sandeepan/tiago_public_ws/devel/.private/pal_device_msgs/lib/python2.7/dist-packages/pal_device_msgs/msg/_DoTimedLedEffectGoal.py | [
"[email protected]"
] | |
6f1bb4ff7967bfd1652c3e845f0f639580fcd308 | a45b8075f3c3b247a3cac43cb12bf4d80103f608 | /glamazer/urls.py | 2df7d0d6d8a24ce521febc8454892b0cfa167c9e | [] | no_license | kahihia/glamfame | c890a8772aa92b8ed9e3c0bb664c5dae187d1c09 | af91d4d16d0c8847c42eb97be839bf08015274b6 | refs/heads/master | 2021-01-21T09:59:52.700945 | 2016-02-15T17:16:13 | 2016-02-15T17:16:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,906 | py | from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', 'glamazer.core.views.home', name='home'),
url(r'^hairstyle/', 'glamazer.core.views.base', name='hair'),
url(r'^nails-design/', 'glamazer.core.views.base', name='nails'),
url(r'^make-up/', 'glamazer.core.views.base', name='make_up'),
url(r'^style/', 'glamazer.core.views.base', name='style'),
url(r'^contest/', 'glamazer.core.views.base', name='contest'),
url(r'^leaderboards/', 'glamazer.core.views.base', name='leaderboards'),
url(r'^result/', 'glamazer.core.views.search', name='result'),
url(r'^get_notifications/', 'glamazer.notifications.views.get_notifications', name='short_notifications'),
url(r'^get_notifications_count/', 'glamazer.notifications.views.get_notification_count', name='notification_count'),
url(r'^autocomplete_tags/', 'glamazer.core.views.autocomplete_tags', name='autocomplete_tags'),
url(r'^sign_up/', TemplateView.as_view(template_name="sign_up.html"), name='signup'),
url(r'^terms/', TemplateView.as_view(template_name="core/terms.html"), name='terms'),
url(r'^imprint/', TemplateView.as_view(template_name="core/imprint.html"), name='imprint'),
url(r'^privacy/', TemplateView.as_view(template_name="core/privacy.html"), name='privacy'),
url(r'^faq/', TemplateView.as_view(template_name="core/faq.html"), name='faq'),
url(r'^about_us/', TemplateView.as_view(template_name="core/about_us.html"), name='about_us'),
url(r'^contacts/', TemplateView.as_view(template_name="core/contact_us.html"), name='contacts'),
url(r'^admin/', include(admin.site.urls)),
url(r'^users/', include('glamazer.users.urls')),
url(r'^artists/', include('glamazer.artists.urls')),
url(r'^salons/', include('glamazer.salons.urls')),
url(r'^listings/', include('glamazer.listings.urls')),
url(r'^favorites/', include('glamazer.favorites.urls')),
url(r'^reviews/', include('glamazer.reviews.urls')),
url(r'^widget/', include('glamazer.widget.urls')),
url(r'^success/', 'glamazer.payments.views.success_payment', name='success'),
url(r'^error/', 'glamazer.payments.views.error_payment', name='error'),
url(r'^cancel/', 'glamazer.payments.views.cancel_payment', name='cancel'),
url(r'^get_hint/', 'glamazer.core.views.get_hint', name='get_hint'),
url(r'^start_payment/', 'glamazer.payments.views.start_payment', name='paypal_payment'),
url(r'^send_feedback/$', 'glamazer.users.views.send_feedback', name='send_feedback'),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
2fb3622d8520b0df9fdbf0783f3a2333622c2c5b | 46ad22b772f0bb115e1192ca24c86b1593d51870 | /eclipsed/src/cursor.py | f67a0516f50351fa94c3799522d029fba269422b | [
"CC0-1.0",
"WTFPL",
"CC-BY-4.0"
] | permissive | cosmologicon/unifac | fb533abfbba7ebb33561a330f7be5d22dbc2a373 | e7668c6736cd4db66f8d56e945afb69ec03f2160 | refs/heads/master | 2022-06-15T10:46:28.448477 | 2022-05-30T20:26:55 | 2022-05-30T20:26:55 | 37,033,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py |
tobuild = None
pointingto = None
# These don't really need to go here since they're redundant with hud.mode
# but this module was looking a little lonely so here they are.
disable = False
unbuild = False
| [
"[email protected]"
] | |
74d8d9d2cc5d152126537573a927ca64f8afb791 | 5a7abc4537039860c49e9a80219efa759aad1b6f | /tests/providers/aws/services/ec2/ec2_securitygroup_from_launch_wizard/ec2_securitygroup_from_launch_wizard_test.py | a3c14ce6680594654bebaca336460ebb88319e50 | [
"Apache-2.0"
] | permissive | sec-js/prowler | d5a06c72f5d7e490bade1167966f83f7a5d7ed15 | f72be9a1e492ad593c9ac267d3ca07f626263ccd | refs/heads/master | 2023-08-31T22:48:33.983360 | 2022-12-22T16:02:28 | 2022-12-22T16:02:28 | 243,866,744 | 0 | 0 | Apache-2.0 | 2022-12-23T12:23:20 | 2020-02-28T22:37:02 | Python | UTF-8 | Python | false | false | 4,834 | py | from re import search
from unittest import mock
from boto3 import client, resource
from moto import mock_ec2
AWS_REGION = "us-east-1"
EXAMPLE_AMI_ID = "ami-12c6146b"
class Test_ec2_securitygroup_from_launch_wizard:
@mock_ec2
def test_ec2_default_sgs(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region
assert len(result) == 3
# All are compliant by default
assert result[0].status == "PASS"
@mock_ec2
def test_ec2_launch_wizard_sg(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
sg_id = ec2_client.create_security_group(
GroupName="launch-wizard-1", Description="launch wizard sg"
)["GroupId"]
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region + created one
assert len(result) == 4
# Search changed sg
for sg in result:
if sg.resource_id == sg_id:
assert sg.status == "FAIL"
assert search(
"was created using the EC2 Launch Wizard",
sg.status_extended,
)
@mock_ec2
def test_ec2_compliant_default_sg(self):
# Create EC2 Mocked Resources
ec2_client = client("ec2", region_name=AWS_REGION)
ec2_client.create_vpc(CidrBlock="10.0.0.0/16")
default_sg_id = ec2_client.describe_security_groups(GroupNames=["default"])[
"SecurityGroups"
][0]["GroupId"]
ec2 = resource("ec2", region_name=AWS_REGION)
ec2.create_instances(
ImageId=EXAMPLE_AMI_ID,
MinCount=1,
MaxCount=1,
SecurityGroupIds=[
default_sg_id,
],
)
from prowler.providers.aws.lib.audit_info.audit_info import current_audit_info
from prowler.providers.aws.services.ec2.ec2_service import EC2
current_audit_info.audited_partition = "aws"
current_audit_info.audited_regions = ["eu-west-1", "us-east-1"]
with mock.patch(
"prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard.ec2_client",
new=EC2(current_audit_info),
):
# Test Check
from prowler.providers.aws.services.ec2.ec2_securitygroup_from_launch_wizard.ec2_securitygroup_from_launch_wizard import (
ec2_securitygroup_from_launch_wizard,
)
check = ec2_securitygroup_from_launch_wizard()
result = check.execute()
# One default sg per region
assert len(result) == 3
# Search changed sg
for sg in result:
if sg.resource_id == default_sg_id:
assert sg.status == "PASS"
assert search(
"was not created using the EC2 Launch Wizard",
sg.status_extended,
)
| [
"[email protected]"
] | |
46c75cfc66b41a8d03c8a63219aa1d6fb596b2ba | c7c001c9011f559b8b1c85d1c3e0387a86a99628 | /y2018/day18/lumber_collection.py | ec0c9f79bbc2fe5449c89bab26476d8ddca92c8e | [] | no_license | ericbgarnick/AOC | 5ddfd18850b96f198e125f5d1f0978e852195ccf | a935faad3fcbbe3ac601e2583ed27b38bc60ef69 | refs/heads/main | 2023-04-12T18:50:09.926169 | 2023-04-09T12:47:35 | 2023-04-09T12:47:35 | 224,573,310 | 2 | 1 | null | 2021-12-15T14:25:33 | 2019-11-28T05:00:52 | Python | UTF-8 | Python | false | false | 1,890 | py | from typing import List
from acre import Acre
class LumberCollection:
def __init__(self, area_map: List[str]):
row_len = len(area_map[0])
num_rows = len(area_map)
self.collection_area = [] # type: List[Acre]
self.populate_area(area_map)
self.link_acres(row_len, num_rows)
def populate_area(self, area_map: List[str]):
for row in area_map:
for acre in row:
acre_type = Acre.TYPE_FOR_SYMBOL[acre]
self.collection_area.append(Acre(acre_type))
def link_acres(self, row_len: int, num_rows: int):
for i, acre in enumerate(self.collection_area):
if i % row_len:
# W
acre.neighbors.add(self.collection_area[i - 1])
if i >= row_len:
# NW
acre.neighbors.add(self.collection_area[i - row_len - 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SW
acre.neighbors.add(self.collection_area[i + row_len - 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
if i % row_len != row_len - 1:
# E
acre.neighbors.add(self.collection_area[i + 1])
if i >= row_len:
# NE
acre.neighbors.add(self.collection_area[i - row_len + 1])
# N
acre.neighbors.add(self.collection_area[i - row_len])
if i < row_len * (num_rows - 1):
# SE
acre.neighbors.add(self.collection_area[i + row_len + 1])
# S
acre.neighbors.add(self.collection_area[i + row_len])
| [
"[email protected]"
] | |
1c06d723254657701479f4b0179290148c45af44 | 0d76ba0da5446f20e500b7e31f53821b14cb49d8 | /Codility/python/abs_distinct.py | a14e108198517a0a7f73819039d873dfe2b9a69b | [] | no_license | filwaitman/playground | 948aa687be06d456c86b65ee3ab5fb9792149459 | dfdfab9002bff3a04f37e0c161363a864cd30f3e | refs/heads/master | 2021-01-12T12:59:49.057832 | 2020-01-26T18:51:02 | 2020-01-26T18:51:02 | 68,865,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
# https://codility.com/programmers/task/abs_distinct
# 100/100
def solution(A):
positives = map(abs, A)
return len(set(positives))
assert solution([-5, -3, -1, 0, 3, 6]) == 5
assert solution([42]) == 1
assert solution(range(-1000, 0) + range(1, 1001)) == 1000
| [
"[email protected]"
] | |
70e7af3e0751be27c0879cdd30eb63c48c35d1d0 | a38670ee08ea64af33477899a68ee22936f70ce7 | /luffy/第三模块/第6章网络编程/第6章每小节/2 加上循环/04 模拟ssh远程执行命令/客户端.py | a62f5c2f2197ac5dd07315df58754ce788d23051 | [] | no_license | foremostxiao/d | 40ed37215f411e8b081a4cb92c8ecbd335cd9d76 | fe80672adc6b2406365b05d5cedd02c6abf66c11 | refs/heads/master | 2020-03-29T13:51:19.589004 | 2018-09-23T09:29:56 | 2018-09-23T09:29:56 | 149,985,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import socket
phone = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
phone.connect(('127.0.0.1',8080))
while True:
    # 1. send the command
    cmd = input('>>>').strip()# read a command string from the user
    if not cmd:continue
    phone.send(cmd.encode('utf-8')) # send() hands the bytes to the operating system
    # 2. receive the command's result and print it
    data = phone.recv(1024) # 1024 is a pitfall: the result may exceed 1024 bytes; to be handled later
print(data.decode('gbk'))
phone.close() | [
"[email protected]"
] | |
c0dfa6271b2327073a0a168b47640c937cbeee81 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/287/71629/submittedfiles/testes.py | 967c3e410a7b976e11bd707dcfdc03122824963f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | # -*- coding: utf-8 -*-
#START HERE
a=int(input('Que horas são? [0-23]'))
if a > 3 and a < 12:
print('Bom diiiia!')
elif a >= 12 and a < 18:
print('Boa tarde')
else:
print('Boa noite, ate amanha!') | [
"[email protected]"
] | |
e80a8d81e6392f1d7934470081943b1bf032f8fd | d53479a3a5efab85a065b4a7c08cb38b6246f0eb | /python-division.py | 66b9ee6a6492eb7b5fa6987137dcbe09a4d4af61 | [] | no_license | Snehal6697/hackerrank | 0242f927f630e652d6dcca901af8d8bd737b671f | c418fb24e08e5c57a1bd0d91f95ab2af32f01c64 | refs/heads/master | 2022-12-26T12:35:47.586007 | 2020-07-07T22:14:39 | 2020-07-07T22:14:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!/usr/bin/env python2.7
from __future__ import division
def main():
a = int(raw_input())
b = int(raw_input())
print a // b
print a / b
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
259cd5eaaa20071850043e7d7215f3ce6aebb6c9 | 02447b317690827683dc329153e74f1599e4db92 | /wazimap_ng/general/views.py | cf915a8c7c7477aca9df7290d0a6c06d7c62058d | [
"Apache-2.0"
] | permissive | neoromantique/wazimap-ng | fc8ca6704851db8d9941d3bcb9c06e367c2e1e94 | c19e9450655f5d404c60e2b4d214715ec8a0b1d9 | refs/heads/master | 2021-02-11T00:02:54.001820 | 2020-02-13T20:36:40 | 2020-02-13T20:36:40 | 244,431,358 | 0 | 0 | Apache-2.0 | 2020-03-02T17:23:51 | 2020-03-02T17:23:50 | null | UTF-8 | Python | false | false | 1,482 | py | from rest_framework.response import Response
from rest_framework.decorators import api_view
from ..datasets import models as dataset_models
from ..datasets import views as dataset_views
from ..boundaries import models as boundaries_models
from ..boundaries import views as boundaries_views
from ..utils import cache_decorator
@cache_decorator("consolidated_profile")
def consolidated_profile_helper(profile_id, code):
profile_js = dataset_views.profile_geography_data_helper(profile_id, code)
boundary_js = boundaries_views.geography_item_helper(code)
children_boundary_js = boundaries_views.geography_children_helper(code)
parent_layers = []
parents = profile_js["geography"]["parents"]
children_levels = [p["level"] for p in parents[1:]] + [profile_js["geography"]["level"]]
pairs = zip(parents, children_levels)
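    # For each ancestor geography, pull its children at the next level down to
    # build map layers from the top-most parent to the current geography.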
for parent, children_level in pairs:
layer = boundaries_views.geography_children_helper(parent["code"])
parent_layers.append(layer[children_level])
return ({
"profile": profile_js,
"boundary": boundary_js,
"children": children_boundary_js,
"parent_layers": parent_layers,
})
@api_view()
def consolidated_profile(request, profile_id, code):
js = consolidated_profile_helper(profile_id, code)
return Response(js)
@api_view()
def consolidated_profile_test(request, profile_id, code):
js = consolidated_profile_helper(profile_id, code)
return Response("test")
| [
"[email protected]"
] | |
3e7c790c56f14ea782d02ca44526b8f07db60168 | f76bdfd886ce116fdfeea408d7251142ed73d7c4 | /dash/_validate.py | 76047242a6a08c325706708be192ed92f0639e4a | [
"MIT"
] | permissive | pikhovkin/dj-plotly-dash | 73a4a679472eddfbb56c44ca054040b64b6a57a4 | 25efb612ead04bf3564c25b994dc633929eec457 | refs/heads/master | 2023-08-10T22:08:30.363654 | 2022-05-03T12:23:56 | 2022-05-03T12:23:56 | 151,003,269 | 53 | 16 | MIT | 2023-09-04T20:56:53 | 2018-09-30T20:18:22 | JavaScript | UTF-8 | Python | false | false | 12,126 | py | import collections
import re
from .development.base_component import Component
from . import exceptions
from ._utils import patch_collections_abc, _strings, stringify_id
def validate_callback(output, inputs, state, extra_args, types):
is_multi = isinstance(output, (list, tuple))
outputs = output if is_multi else [output]
Input, Output, State = types
if extra_args:
if not isinstance(extra_args[0], (Output, Input, State)):
raise exceptions.IncorrectTypeException(
"""
Callback arguments must be `Output`, `Input`, or `State` objects,
optionally wrapped in a list or tuple. We found (possibly after
unwrapping a list or tuple):
{}
""".format(
repr(extra_args[0])
)
)
raise exceptions.IncorrectTypeException(
"""
In a callback definition, you must provide all Outputs first,
then all Inputs, then all States. After this item:
{}
we found this item next:
{}
""".format(
repr((outputs + inputs + state)[-1]), repr(extra_args[0])
)
)
for args in [outputs, inputs, state]:
for arg in args:
validate_callback_arg(arg)
def validate_callback_arg(arg):
if not isinstance(getattr(arg, "component_property", None), _strings):
raise exceptions.IncorrectTypeException(
"""
component_property must be a string, found {!r}
""".format(
arg.component_property
)
)
if hasattr(arg, "component_event"):
raise exceptions.NonExistentEventException(
"""
Events have been removed.
Use the associated property instead.
"""
)
if isinstance(arg.component_id, dict):
validate_id_dict(arg)
elif isinstance(arg.component_id, _strings):
validate_id_string(arg)
else:
raise exceptions.IncorrectTypeException(
"""
component_id must be a string or dict, found {!r}
""".format(
arg.component_id
)
)
def validate_id_dict(arg):
arg_id = arg.component_id
for k in arg_id:
# Need to keep key type validation on the Python side, since
# non-string keys will be converted to strings in json.dumps and may
# cause unwanted collisions
if not isinstance(k, _strings):
raise exceptions.IncorrectTypeException(
"""
Wildcard ID keys must be non-empty strings,
found {!r} in id {!r}
""".format(
k, arg_id
)
)
def validate_id_string(arg):
arg_id = arg.component_id
invalid_chars = ".{"
invalid_found = [x for x in invalid_chars if x in arg_id]
if invalid_found:
raise exceptions.InvalidComponentIdError(
"""
The element `{}` contains `{}` in its ID.
Characters `{}` are not allowed in IDs.
""".format(
arg_id, "`, `".join(invalid_found), "`, `".join(invalid_chars)
)
)
def validate_multi_return(outputs_list, output_value, callback_id):
if not isinstance(output_value, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"""
The callback {} is a multi-output.
Expected the output type to be a list or tuple but got:
{}.
""".format(
callback_id, repr(output_value)
)
)
if len(output_value) != len(outputs_list):
raise exceptions.InvalidCallbackReturnValue(
"""
Invalid number of output values for {}.
Expected {}, got {}
""".format(
callback_id, len(outputs_list), len(output_value)
)
)
for i, outi in enumerate(outputs_list):
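        # A pattern-matching (wildcard) output shows up as a nested list here, so
        # the corresponding returned value must be a list/tuple of the same length.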
if isinstance(outi, list):
vi = output_value[i]
if not isinstance(vi, (list, tuple)):
raise exceptions.InvalidCallbackReturnValue(
"""
The callback {} output {} is a wildcard multi-output.
Expected the output type to be a list or tuple but got:
{}.
output spec: {}
""".format(
callback_id, i, repr(vi), repr(outi)
)
)
if len(vi) != len(outi):
raise exceptions.InvalidCallbackReturnValue(
"""
Invalid number of output values for {} item {}.
Expected {}, got {}
output spec: {}
output value: {}
""".format(
callback_id, i, len(vi), len(outi), repr(outi), repr(vi)
)
)
def fail_callback_output(output_value, output):
valid = _strings + (dict, int, float, type(None), Component)
def _raise_invalid(bad_val, outer_val, path, index=None, toplevel=False):
bad_type = type(bad_val).__name__
outer_id = (
"(id={:s})".format(outer_val.id) if getattr(outer_val, "id", False) else ""
)
outer_type = type(outer_val).__name__
if toplevel:
location = """
The value in question is either the only value returned,
or is in the top level of the returned list,
"""
else:
index_string = "[*]" if index is None else "[{:d}]".format(index)
location = """
The value in question is located at
{} {} {}
{},
""".format(
index_string, outer_type, outer_id, path
)
raise exceptions.InvalidCallbackReturnValue(
"""
The callback for `{output}`
returned a {object:s} having type `{type}`
which is not JSON serializable.
{location}
and has string representation
`{bad_val}`
In general, Dash properties can only be
dash components, strings, dictionaries, numbers, None,
or lists of those.
""".format(
output=repr(output),
object="tree with one value" if not toplevel else "value",
type=bad_type,
location=location,
bad_val=bad_val,
)
)
def _value_is_valid(val):
return isinstance(val, valid)
def _validate_value(val, index=None):
# val is a Component
if isinstance(val, Component):
# pylint: disable=protected-access
for p, j in val._traverse_with_paths():
# check each component value in the tree
if not _value_is_valid(j):
_raise_invalid(bad_val=j, outer_val=val, path=p, index=index)
# Children that are not of type Component or
# list/tuple not returned by traverse
child = getattr(j, "children", None)
if not isinstance(child, (tuple, collections.MutableSequence)):
if child and not _value_is_valid(child):
_raise_invalid(
bad_val=child,
outer_val=val,
path=p + "\n" + "[*] " + type(child).__name__,
index=index,
)
# Also check the child of val, as it will not be returned
child = getattr(val, "children", None)
if not isinstance(child, (tuple, collections.MutableSequence)):
if child and not _value_is_valid(child):
_raise_invalid(
bad_val=child,
outer_val=val,
path=type(child).__name__,
index=index,
)
# val is not a Component, but is at the top level of tree
elif not _value_is_valid(val):
_raise_invalid(
bad_val=val,
outer_val=type(val).__name__,
path="",
index=index,
toplevel=True,
)
if isinstance(output_value, list):
for i, val in enumerate(output_value):
_validate_value(val, index=i)
else:
_validate_value(output_value)
# if we got this far, raise a generic JSON error
raise exceptions.InvalidCallbackReturnValue(
"""
The callback for property `{property:s}` of component `{id:s}`
returned a value which is not JSON serializable.
In general, Dash properties can only be dash components, strings,
dictionaries, numbers, None, or lists of those.
""".format(
property=output.component_property, id=output.component_id
)
)
def check_obsolete(kwargs):
for key in kwargs:
if key in ["components_cache_max_age", "static_folder"]:
raise exceptions.ObsoleteKwargException(
"""
{} is no longer a valid keyword argument in Dash since v1.0.
See https://dash.plotly.com for details.
""".format(
key
)
)
# any other kwarg mimic the built-in exception
raise TypeError("Dash() got an unexpected keyword argument '" + key + "'")
def validate_js_path(registered_paths, package_name, path_in_package_dist):
if package_name not in registered_paths:
raise exceptions.DependencyException(
"""
Error loading dependency. "{}" is not a registered library.
Registered libraries are:
{}
""".format(
package_name, list(registered_paths.keys())
)
)
if path_in_package_dist not in registered_paths[package_name]:
raise exceptions.DependencyException(
"""
"{}" is registered but the path requested is not valid.
The path requested: "{}"
List of registered paths: {}
""".format(
package_name, path_in_package_dist, registered_paths
)
)
def validate_index(name, checks, index):
missing = [i for check, i in checks if not re.compile(check).search(index)]
if missing:
plural = "s" if len(missing) > 1 else ""
raise exceptions.InvalidIndexException(
"Missing item{pl} {items} in {name}.".format(
items=", ".join(missing), pl=plural, name=name
)
)
def validate_layout_type(value):
if not isinstance(value, (Component, patch_collections_abc("Callable"))):
raise exceptions.NoLayoutException(
"Layout must be a dash component "
"or a function that returns a dash component."
)
def validate_layout(layout, layout_value):
if layout is None:
raise exceptions.NoLayoutException(
"""
The layout was `None` at the time that `run_server` was called.
Make sure to set the `layout` attribute of your application
before running the server.
"""
)
layout_id = stringify_id(getattr(layout_value, "id", None))
component_ids = {layout_id} if layout_id else set()
for component in layout_value._traverse(): # pylint: disable=protected-access
component_id = stringify_id(getattr(component, "id", None))
if component_id and component_id in component_ids:
raise exceptions.DuplicateIdError(
"""
Duplicate component id found in the initial layout: `{}`
""".format(
component_id
)
)
component_ids.add(component_id)
| [
"[email protected]"
] | |
2b66f779ad34d216561b67a4a62e5d69750079e3 | 869d917ef14fb8e4bb899a192903dd1f64028d2b | /train/train_street_view_regression.py | faa00a2a48b49ee081de9cbc395048edd88abcef | [] | no_license | andreiqv/rotnet_not_my | bbd7fadba9c2e000d324e931d4fddc95ad8e4e25 | ce0ea3f80aba263ae5fc54549c5d3d571d02ef59 | refs/heads/master | 2020-04-26T11:00:21.724905 | 2019-03-02T22:50:31 | 2019-03-02T22:50:31 | 173,502,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,469 | py | from __future__ import print_function
import os
import sys
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from keras.applications.resnet50 import ResNet50
from keras.applications.imagenet_utils import preprocess_input
from keras.models import Model
from keras.layers import Dense, Flatten
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import angle_error_regression, RotNetDataGenerator
from data.street_view import get_filenames as get_street_view_filenames
data_path = os.path.join('data', 'street_view')
train_filenames, test_filenames = get_street_view_filenames(data_path)
print(len(train_filenames), 'train samples')
print(len(test_filenames), 'test samples')
model_name = 'rotnet_street_view_resnet50_regression'
# input image shape
input_shape = (224, 224, 3)
# load base model
base_model = ResNet50(weights='imagenet', include_top=False,
input_shape=input_shape)
# append classification layer
x = base_model.output
x = Flatten()(x)
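# regression head: a single sigmoid unit keeps the predicted value in [0, 1]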
final_output = Dense(1, activation='sigmoid', name='fc1')(x)
# create the new model
model = Model(inputs=base_model.input, outputs=final_output)
model.summary()
# model compilation
model.compile(loss=angle_error_regression,
optimizer='adam')
# training parameters
batch_size = 16 # was 64
nb_epoch = 50
output_folder = 'models'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# callbacks
checkpointer = ModelCheckpoint(
filepath=os.path.join(output_folder, model_name + '.hdf5'),
save_best_only=True
)
early_stopping = EarlyStopping(patience=0)
tensorboard = TensorBoard()
# training loop
model.fit_generator(
RotNetDataGenerator(
train_filenames,
input_shape=input_shape,
batch_size=batch_size,
one_hot=False,
preprocess_func=preprocess_input,
crop_center=True,
crop_largest_rect=True,
shuffle=True
),
steps_per_epoch=len(train_filenames) / batch_size,
epochs=nb_epoch,
validation_data=RotNetDataGenerator(
test_filenames,
input_shape=input_shape,
batch_size=batch_size,
one_hot=False,
preprocess_func=preprocess_input,
crop_center=True,
crop_largest_rect=True
),
validation_steps=len(test_filenames) / batch_size,
callbacks=[checkpointer, early_stopping, tensorboard],
nb_worker=10,
pickle_safe=True,
verbose=1
)
| [
"[email protected]"
] | |
fdb8fc4c86a750baa500c7ee03cbb74671b28f35 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/minigame/RepairGlobals.py | 8027e62d79153e2436b77a14e3c56012b7f68cec | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,950 | py | from pandac.PandaModules import Vec3, Vec4, Point3
class VariableContainer:
def __init__(self):
pass
AI = VariableContainer()
AI.goldRewardRange = (15, 35)
AI.goldRewardMultiplier = [
(14.0, 3.0),
(18.0, 2.5),
(24.0, 2.0),
(36.0, 1.6000000000000001),
(52.0, 1.3),
(72.0, 1.1499999999999999)]
AI.repairRewardRange = (5000, 1000)
AI.grapeshotEffectCooldown = 2.0
AI.grapeshotEffectProbability = 0.5
AI.kickedTimestampLife = 60.0 * 60.0
AI.inactiveClientKickTime = 60.0 * 2.0 + 2.0
AI.numTimesKickedBeforeBlacklisted = 3
AI.maxPlayersPerBench = 5
AI.baseRepairAmount = 0.5
AI.maxRepairCount = 30
AI.reductionAtFullRepair = 0.5
AI.maxCombatCount = 20
AI.reductionAtFullCombat = 0.5
AI.critGrapeshotCombatDebuff = 3
AI.grapeshotCombatDebuff = 3
AI.regularCombatDebuff = 1
AI.totalDifficulty = AI.maxRepairCount + AI.maxCombatCount
AI.difficultyIncreasePoint = AI.totalDifficulty / 10.0
AI.repairDebuffPerModelClass = {
1: 1.0,
2: 1.0,
3: 1.0,
11: 1.0,
12: 1.0,
13: 1.0,
21: 1.0,
22: 1.0,
23: 1.0,
24: 1.0,
25: 1.0,
26: 1.0,
27: 1.0 }
AI.sailRepairPercent = 0.14999999999999999
AI.armorRepairPercent = 0.14999999999999999
AI.hpRepairPercent = 0.40000000000000002
AI.hpTertiaryDecay = 0.0
Common = VariableContainer()
Common.guiShakeCooldownTime = 2.0
Common.youWinPos = {
'careening': (-0.12, 0.0, 0.22),
'pumping': (0.0, 0.0, 0.14999999999999999),
'sawing': (0.0, 0.0, 0.14999999999999999),
'bracing': (0.0, 0.0, 0.22),
'hammering': (0.0, 0.0, 0.38),
'pitching': (0.0, 0.0, 0.22) }
Common.scorePos = {
'careening': (-0.12, 0.0, 0.089999999999999997),
'pumping': (0.0, 0.0, 0.02),
'sawing': (0.0, 0.0, 0.02),
'bracing': (0.0, 0.0, 0.089999999999999997),
'hammering': (0.0, 0.0, 0.25),
'pitching': (0.0, 0.0, 0.089999999999999997) }
Common.speedThresholds = {
'careening': [
(5.0, 15.0),
(10.0, 30.0),
(20.0, 90.0)],
'pumping': [
(10.0, 13.0),
(20.0, 40.0),
(40.0, 90.0)],
'sawing': [
(6.0, 9.0),
(12.0, 18.0),
(30.0, 45.0)],
'bracing': [
(5.0, 15.0),
(30.0, 45.0),
(90.0, 180.0)],
'hammering': [
(5.0, 10.0),
(10.0, 20.0),
(20.0, 40.0)],
'pitching': [
(8.0, 16.0),
(16.0, 32.0),
(32.0, 64.0)] }
Careening = VariableContainer()
Careening.barnacleCountRange = (15, 30)
Careening.superScrubMultiplier = 4.0
Careening.superScrubDecreaseRate = 0.40000000000000002
Careening.superScrubIncreaseRate = 0.80000000000000004
Careening.barnacleHPRange = (30, 70)
Careening.barnacleHPScaleRange = (1.0, 3.0)
Careening.xRange = (-0.61499999999999999, 0.375)
Careening.yRange = (-0.16500000000000001, 0.51500000000000001)
Careening.barnacleRadius = 0.040000000000000001
Careening.mossPercentage = 0.75
Careening.mossPosVariance = 0.01
Careening.mossEdgeRestrictionAmount = 0.10000000000000001
Careening.showBarnacleHP = False
Pumping = VariableContainer()
Pumping.pumpPowerRange = (0.059999999999999998, 0.02)
Pumping.hitRange = (0.17999999999999999, 0.17999999999999999)
Pumping.barStartRange = (1.2, 1.0)
Pumping.barSpeedMin = 2.0
Pumping.barSpeedMax = 0.29999999999999999
Pumping.barSpeedIncrease = 1.25
Pumping.barSpeedDecrease = 0.80000000000000004
Pumping.chainMultiplier = 0.080000000000000002
Sawing = VariableContainer()
Sawing.difficultySets = ((3, 3, 1, 1), (3, 1, 1, 2), (1, 2, 1, 2), (3, 1, 2, 2), (2, 2, 1, 2), (3, 2, 1, 4), (2, 4, 3, 2), (4, 2, 1, 2), (4, 1, 1, 5), (2, 2, 4, 5))
Sawing.waypointRange = (0.080000000000000002, 0.080000000000000002, 0.080000000000000002, 0.11, 0.10000000000000001)
Sawing.sawlineColor = Vec4(0.75, 0.75, 0.75, 0.69999999999999996)
Sawing.sawlineLineThickness = 4.0
Sawing.sawlineLinespawnDist = 0.02
Sawing.testWaypointDelta = 0.040000000000000001
Sawing.playSawingSoundDelta = 0.10000000000000001
Sawing.totalPoints = 20.0
Sawing.pointsPerBoard = 7.0
Sawing.pointsLostForZone1 = 4.0
Sawing.pointsLostForZone2 = 1.0
Sawing.cutColor = (0.29999999999999999, 0.29999999999999999, 0.29999999999999999, 1.0)
Sawing.zone1Color = (0.75, 0.75, 0.75, 1.0)
Sawing.zone2Color = (0.75, 0.75, 0.75, 1.0)
Sawing.sawTurnSpeed = 1000
Sawing.newBoardAnimTime = 0.25
Sawing.splitBoardAnimTime = 0.5
Sawing.activeBoardPosition = (0.0, 0.0, 0.10000000000000001)
Sawing.boardYDist = 1.3
from RepairGridPiece import GOAL_HORIZ_1, GOAL_HORIZ_2, GOAL_VERT_1
Bracing = VariableContainer()
Bracing.difficultyLevels = ((8, (GOAL_HORIZ_1,)), (7, (GOAL_HORIZ_1,)), (6, (GOAL_HORIZ_1,)), (7, (GOAL_HORIZ_1, GOAL_VERT_1)), (6, (GOAL_HORIZ_1, GOAL_VERT_1)), (5, (GOAL_HORIZ_1, GOAL_VERT_1)), (4, (GOAL_HORIZ_1, GOAL_VERT_1)), (5, (GOAL_HORIZ_1, GOAL_HORIZ_2)), (4, (GOAL_HORIZ_1, GOAL_HORIZ_2)), (3, (GOAL_HORIZ_1, GOAL_HORIZ_2)))
Bracing.moveTime = 0.080000000000000002
Bracing.fadeTime = 0.14999999999999999
Bracing.movePieceThreshold = 0.080000000000000002
Bracing.pushPieceThreshold = 0.01
Bracing.repairTimeframe = 20
Hammering = VariableContainer()
Hammering.reticleScaleRange = (0.20000000000000001, 1.0)
Hammering.reticleScaleRate = 1.0
Hammering.recoveryTime = 4.0
Hammering.nailCountRange = (4, 8)
Hammering.rankingThresholds = (5, 4, 3, 2, 1)
Hammering.hitForgiveness = 0.10000000000000001
Hammering.useReticleColor = True
Pitching = VariableContainer()
Pitching.leakScaleRange = (0.10000000000000001, 0.27500000000000002)
Pitching.spawnDelayRange = (0.5, 0.10000000000000001, 2.0, 1.0)
Pitching.leakCountRange = (16, 32)
Pitching.maxLeaksRange = (2, 5)
Pitching.useReticle = True
Pitching.ratingGive = 0
REPAIR_AT_SEA_REWARD_RATING = [
0,
1,
1,
1.5,
2.0]
REPAIR_AT_SEA_GAME_MULTIPLIER = [
20,
60,
200,
40,
20]
def getAtSeaRepairRating(rating, gameType):
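    # Clamp out-of-range ratings to 0, then scale the reward by the game-type multiplier.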
if rating > 4 or rating < 0:
rating = 0
return REPAIR_AT_SEA_REWARD_RATING[rating] * REPAIR_AT_SEA_GAME_MULTIPLIER[gameType]
| [
"[email protected]"
] | |
bce6368fc8a866dd4bff9c0a271687bdaea848c1 | 5e014f95b49f376b34d20760c41f09bdca094247 | /flask_ide/auth/models.py | 2fe1fcdca8701cfe3cf45972adb5b95603c108eb | [] | no_license | jstacoder/flask-ide | 34ae304c211c7b263f37b2fcf0660ae76053c0a2 | 3890756c094b4b7872bad7d915e764e3e32dcb2d | refs/heads/master | 2023-02-12T11:22:24.412680 | 2020-07-20T17:21:55 | 2020-07-20T17:21:55 | 29,079,246 | 50 | 10 | null | 2023-02-02T07:17:40 | 2015-01-11T02:51:35 | JavaScript | UTF-8 | Python | false | false | 3,478 | py | from flask_xxl.basemodels import BaseMixin
from flask import url_for
from LoginUtils import encrypt_password, check_password
from sqlalchemy.ext.declarative import declared_attr
#import sqlalchemy to global namespace
from sqlalchemy import (
UnicodeText,func,Enum,UniqueConstraint,DateTime,Text,Column,Integer,
ForeignKey,Boolean,String,Table
)
from sqlalchemy.orm import relationship, backref
class UnknownUser(object):
is_unknown = True
class Role(BaseMixin):
__tablename__ = 'roles'
name = Column(String(255))
can_view = Column(Boolean,default=True,nullable=False)
can_add = Column(Boolean,default=False,nullable=False)
can_edit = Column(Boolean,default=False,nullable=False)
can_delete = Column(Boolean,default=False,nullable=False)
class User(BaseMixin):
__tablename__ = 'users'
first_name = Column(String(255),default="")
last_name = Column(String(255),default="")
email = Column(String(255),nullable=False,unique=True)
role_id = Column(Integer,ForeignKey('roles.id'))
role = relationship('Role',backref=backref(
'users',lazy='dynamic'))
add_date = Column(DateTime,default=func.now())
_pw_hash = Column(UnicodeText,nullable=False)
age = Column(Integer)
def __init__(self,*args,**kwargs):
if 'first_name' in kwargs:
self.first_name = kwargs.pop('first_name')
if 'last_name' in kwargs:
self.last_name = kwargs.pop('last_name')
if 'email' in kwargs:
self.email = kwargs.pop('email')
if 'role' in kwargs:
self.role = kwargs.pop('role')
if 'role_id' in kwargs:
self.role_id = kwargs.pop('role_id')
if 'password' in kwargs:
self.password = kwargs.pop('password')
def _to_json(self):
import json
return json.dumps(
{
'first_name':self.first_name,
'last_name':self.last_name,
'email':self.email,
'age':self.age,
'date_added':self.add_date,
}
)
@declared_attr
def __table_args__(cls):
return (UniqueConstraint('email','first_name','last_name'),{})
@property
def is_unknown(self):
return False
def check_password(self, pw):
return check_password(pw,self._pw_hash)
@classmethod
def get_by_email(cls, email):
return cls.query().filter_by(email=email).first()
@property
def password(self):
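        # Reads never expose the stored hash; they only return a placeholder.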
return 'private'
raise ValueError('Private Value!!!!')
@password.setter
def password(self,pw):
self._pw_hash = encrypt_password(pw)
@property
def full_name(self):
return '{} {}'.format(self.first_name.title(),self.last_name.title())
@property
def name(self):
return str(self.first_name)
def __str__(self):
if self.first_name != "":
rtn = self.full_name
else:
rtn = self.email
return rtn
def __repr__(self):
        return 'User<{} {}>'.format(self.email, self.first_name)
def _get_absolute_url(self):
return url_for('member.profile',member_id=str(int(self.id)))
@property
def absolute_url(self):
return str(self._get_absolute_url())
def _get_edit_url(self):
return '#'
@property
def edit_url(self):
return str(self._get_edit_url())
| [
"[email protected]"
] | |
87c7524501017490341a86012b5d7364f04aacde | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_54/78.py | 1e0afea1344679e1079ae74d8bb54a891e5ad167 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | def gcd(a,b):
while (b != 0):
c = a%b
a = b
b = c
return a
def get_gcd(line):
g = line[0]
cnt = len(line)
for i in range(1,cnt):
g = gcd(g,line[i])
return g
def solve(line):
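    # The gcd of the pairwise differences divides a_i - a_j for all i, j; the
    # answer is the smallest non-negative T that makes line[0] + T a multiple
    # of that gcd.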
N = int(line.pop(0))
for i in range(0,N):
line[i] = int(line[i])
line.sort()
diffs = list()
for i in range(0,N-1):
diff = line[i+1] - line[i]
diffs.append(diff)
g = pg = get_gcd(diffs)
if g < line[0]:
g = line[0] / pg * pg
if line[0] % pg != 0:
g += pg
ans = g - line[0]
return ans
AnsT = ""
myfile = open("B.in")
T = int(myfile.readline())
for i in range(0,T):
line = myfile.readline()
line = line.split("\n")
print i
ans = solve(line[0].split(" "))
AnsT = AnsT + "Case #"+ str(i+1) +": "+str(ans) + "\n"
outfile = open("B.out","w")
outfile.write(AnsT)
outfile.close()
| [
"[email protected]"
] | |
9b041b73b4058ed94e12ca2a03153ad4b7767547 | 3f911aca38f91e56890f5034b31ed81edb31b000 | /protein/FDR 구현실습/test.py | 2f29ade236cf8f8f9d1525d161b6fe892a63d725 | [] | no_license | sochic2/kis | 5dd83fd474176981f49cde967f49763405ed27b3 | 3ab07710c987110224b3fad0cb1ce3a0d6df6d1a | refs/heads/master | 2022-11-07T00:58:37.427148 | 2020-06-18T14:37:01 | 2020-06-18T14:37:01 | 257,481,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | a = 'abcdefg'
b = a[0:3]
print(b) | [
"[email protected]"
] | |
d5b659372a216b999b788a1e5dbe6d3852e2a1f3 | 474525154a4e1d48ef5242d1f44164d05399b145 | /tensorflow_probability/python/experimental/distributions/mvn_precision_factor_linop_test.py | 47676d4d6f31be7ebf0b5ac98d233982286579c7 | [
"Apache-2.0"
] | permissive | svshivapuja/probability | 9855737790f74a39169688fbfec9671deef804d9 | af7ccb22d972329633530c3b754ed1f49472f6a7 | refs/heads/main | 2023-07-17T04:14:53.703622 | 2021-08-30T17:47:06 | 2021-08-30T17:47:06 | 400,983,015 | 1 | 0 | Apache-2.0 | 2021-08-29T07:51:29 | 2021-08-29T07:51:29 | null | UTF-8 | Python | false | false | 8,157 | py | # Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.experimental.distributions.mvn_precision_factor_linop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
tfd_e = tfp.experimental.distributions
@test_util.test_all_tf_execution_regimes
class MVNPrecisionFactorLinOpTest(test_util.TestCase):
def _random_constant_spd_linop(
self,
event_size,
batch_shape=(),
conditioning=1.2,
dtype=np.float32,
):
"""Randomly generate a constant SPD LinearOperator."""
# The larger conditioning is, the better posed the matrix is.
# With conditioning = 1, it will be on the edge of singular, and likely
# numerically singular if event_size is large enough.
# Conditioning on the small side is best, since then the matrix is not so
# diagonally dominant, and we therefore test use of transpositions better.
assert conditioning >= 1
scale_wishart = tfd.WishartLinearOperator(
df=dtype(conditioning * event_size),
scale=tf.linalg.LinearOperatorIdentity(event_size, dtype=dtype),
input_output_cholesky=False,
)
# Make sure to evaluate here. This ensures that the linear operator is a
# constant rather than a random operator.
matrix = self.evaluate(
scale_wishart.sample(batch_shape, seed=test_util.test_seed()))
return tf.linalg.LinearOperatorFullMatrix(
matrix, is_positive_definite=True, is_self_adjoint=True)
@test_combinations.generate(
test_combinations.combine(
use_loc=[True, False],
use_precision=[True, False],
event_size=[3],
batch_shape=[(), (2,)],
n_samples=[5000],
dtype=[np.float32, np.float64],
),
)
def test_log_prob_and_sample(
self,
use_loc,
use_precision,
event_size,
batch_shape,
dtype,
n_samples,
):
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision = cov.inverse()
precision_factor = precision.cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
if use_loc:
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
else:
loc = None
mvn_scale = tfd.MultivariateNormalTriL(
loc=loc, scale_tril=cov.cholesky().to_dense())
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor,
precision=precision if use_precision else None,
)
point = tf.random.normal(
batch_shape + (event_size,), dtype=dtype, seed=test_util.test_seed())
mvn_scale_log_prob, mvn_precision_log_prob = self.evaluate(
[mvn_scale.log_prob(point),
mvn_precision.log_prob(point)])
self.assertAllClose(
mvn_scale_log_prob, mvn_precision_log_prob, atol=5e-4, rtol=5e-4)
batch_point = tf.random.normal(
(2,) + batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed())
mvn_scale_log_prob, mvn_precision_log_prob = self.evaluate(
[mvn_scale.log_prob(batch_point),
mvn_precision.log_prob(batch_point)])
self.assertAllClose(
mvn_scale_log_prob, mvn_precision_log_prob, atol=5e-4, rtol=5e-4)
samples = mvn_precision.sample(n_samples, seed=test_util.test_seed())
arrs = self.evaluate({
'stddev': tf.sqrt(cov.diag_part()),
'var': cov.diag_part(),
'cov': cov.to_dense(),
'sample_mean': tf.reduce_mean(samples, axis=0),
'sample_var': tfp.stats.variance(samples, sample_axis=0),
'sample_cov': tfp.stats.covariance(samples, sample_axis=0),
})
self.assertAllClose(
arrs['sample_mean'],
loc if loc is not None else np.zeros_like(arrs['cov'][..., 0]),
atol=5 * np.max(arrs['stddev']) / np.sqrt(n_samples))
self.assertAllClose(
arrs['sample_var'],
arrs['var'],
atol=5 * np.sqrt(2) * np.max(arrs['var']) / np.sqrt(n_samples))
self.assertAllClose(
arrs['sample_cov'],
arrs['cov'],
atol=5 * np.sqrt(2) * np.max(arrs['var']) / np.sqrt(n_samples))
def test_dynamic_shape(self):
x = tf.Variable(ps.ones([7, 3]), shape=[7, None])
self.evaluate(x.initializer)
# Check that the shape is actually `None`.
if not tf.executing_eagerly():
last_shape = x.shape[-1]
if last_shape is not None: # This is a `tf.Dimension` in tf1.
last_shape = last_shape.value
self.assertIsNone(last_shape)
dynamic_dist = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(tf.ones_like(x)))
static_dist = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
precision_factor=tf.linalg.LinearOperatorDiag(tf.ones([7, 3])))
in_ = tf.zeros([7, 3])
self.assertAllClose(self.evaluate(dynamic_dist.log_prob(in_)),
static_dist.log_prob(in_))
@test_combinations.generate(
test_combinations.combine(
batch_shape=[(), (2,)],
dtype=[np.float32, np.float64],
),
)
def test_mean_and_mode(self, batch_shape, dtype):
event_size = 3
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision_factor = cov.inverse().cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor)
self.assertAllClose(mvn_precision.mean(), loc)
self.assertAllClose(mvn_precision.mode(), loc)
@test_combinations.generate(
test_combinations.combine(
batch_shape=[(), (2,)],
use_precision=[True, False],
dtype=[np.float32, np.float64],
),
)
def test_cov_var_stddev(self, batch_shape, use_precision, dtype):
event_size = 3
cov = self._random_constant_spd_linop(
event_size, batch_shape=batch_shape, dtype=dtype)
precision = cov.inverse()
precision_factor = precision.cholesky()
# Make sure to evaluate here, else you'll have a random loc vector!
loc = self.evaluate(
tf.random.normal(
batch_shape + (event_size,),
dtype=dtype,
seed=test_util.test_seed()))
mvn_precision = tfd_e.MultivariateNormalPrecisionFactorLinearOperator(
loc=loc,
precision_factor=precision_factor,
precision=precision if use_precision else None)
self.assertAllClose(mvn_precision.covariance(), cov.to_dense(), atol=1e-4)
self.assertAllClose(mvn_precision.variance(), cov.diag_part(), atol=1e-4)
self.assertAllClose(mvn_precision.stddev(), tf.sqrt(cov.diag_part()),
atol=1e-5)
if __name__ == '__main__':
test_util.main()
| [
"[email protected]"
] | |
bd374ed841b18e22b1108b9e8b2c12dac786d446 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Anscombe/trend_MovingMedian/cycle_30/ar_12/test_artificial_128_Anscombe_MovingMedian_30_12_100.py | ccc8235516ad5f149b1dacaabb8d05d4860cb57f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 269 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 30, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12); | [
"[email protected]"
] | |
f0c8e4a7e7eedd40041bc507e96e9ebd1d7c55c0 | 3e713a67f370d1cc1ba0882159a03b673bd22f9a | /DataStructure and Alogorithms/[HACKERRANK]-cats and a mouse .py | d1edad7dc3eaf287f6fbb70ca5520a5f5a091571 | [] | no_license | s-abhishek2399/competitive-progamming--PYTHON | 739797ffea0b92cc2781559e7d4eed1d274678a6 | 29f9e63cfc05c01fa605c14fb8a3a55920296d43 | refs/heads/master | 2023-03-08T02:40:00.962109 | 2021-02-16T15:07:52 | 2021-02-16T15:07:52 | 328,732,345 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | n = int(input())
for i in range(n):
l=[int(x) for x in input().split()]
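    # l = [cat A position, cat B position, mouse position]; compare absolute distances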
a = l[0]-l[2]
b = l[1]-l[2]
if abs(a)<abs(b):
print("Cat A")
elif abs(b)<abs(a):
print("Cat B")
else:
print("Mouse C")
| [
"[email protected]"
] | |
a6ca45275323f2440e95e9be09e07f653e6250ef | f9e4c2e9cd4a95dc228b384e2e8abadc9f1b0bda | /fratevents/settings.py | 22d7c2df522fd15d68bce7043a05c6b6fa4c9fe0 | [] | no_license | sanchitbareja/fratevents | 227adddd77c9a0055ccd74d5e0bf6f771790f8d3 | f50c8ccb40b8c9124b40e70d90c9190ef27a2fb7 | refs/heads/master | 2016-09-06T15:36:45.443412 | 2013-02-16T21:13:36 | 2013-02-16T21:13:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,645 | py | # Django settings for fratevents project.
import os, os.path, social_auth
if os.environ.has_key('DATABASE_URL'):
DEBUG = True
else:
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Sanchit Bareja', '[email protected]'),
)
MANAGERS = ADMINS
if os.environ.has_key('DATABASE_URL'):
import dj_database_url
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'fratevents', # Or path to database file if using sqlite3.
'USER': 'postgres', # Not used with sqlite3.
'PASSWORD': 'root', # Not used with sqlite3.
'HOST': 'localhost', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/static/'
SEND_BROKEN_LINK_EMAILS = True
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = os.path.join(os.path.dirname(__file__), 'static/').replace('\\','/')
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'static/').replace('\\','/'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'rsx9)l1^_bsmeyipfk9u#t#gdt%@po-i-hr+#8ensmg012!kpn'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'social_auth.middleware.SocialAuthExceptionMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'fratevents.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'fratevents.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'views').replace('\\','/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'south',
'gunicorn',
'events',
'clubs',
'rage',
'userprofile',
'social_auth',
'storages',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# EMAIL SETTINGS
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'qwaszx12,'
EMAIL_PORT = 587
EVENT_MASTERS = ['[email protected]','[email protected]','[email protected]']
# Facebook Integration Settings
AUTHENTICATION_BACKENDS = (
'social_auth.backends.facebook.FacebookBackend',
'django.contrib.auth.backends.ModelBackend',
)
# userprofile creation
AUTH_PROFILE_MODULE = 'userprofile.UserProfile'
FACEBOOK_APP_ID = '343708889077375'
FACEBOOK_API_SECRET = '0bd34d3dbb482579fb990805860267bd'
FACEBOOK_EXTENDED_PERMISSIONS = ['email', 'user_birthday', 'user_interests', 'user_events', 'manage_pages']
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.contrib.messages.context_processors.messages',
'social_auth.context_processors.social_auth_by_type_backends',
)
LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_PIPELINE = (
'social_auth.backends.pipeline.social.social_auth_user',
#'social_auth.backends.pipeline.associate.associate_by_email',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
'fratevents.pipeline.create_user_profile',
'fratevents.pipeline.get_user_profile_pic',
'fratevents.pipeline.get_user_events',
'fratevents.pipeline.get_user_network',
'fratevents.pipeline.get_user_pages',
)
SOCIAL_AUTH_CREATE_USERS = True
SOCIAL_AUTH_FORCE_RANDOM_USERNAME = False
SOCIAL_AUTH_DEFAULT_USERNAME = 'socialauth_user'
SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete'
LOGIN_ERROR_URL = '/login/error/'
SOCIAL_AUTH_ERROR_KEY = 'socialauth_error'
SOCIAL_AUTH_FORCE_POST_DISCONNECT = True
#AWS S3 Credentials - django-storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = 'AKIAISDEISAIY3LRYY3Q'
AWS_SECRET_ACCESS_KEY = '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'
AWS_STORAGE_BUCKET_NAME = 'calevents'
BUCKET_NAME = 'calevents'
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
AWS_UPLOAD_DESTINATION = "http://s3.amazonaws.com/"+str(BUCKET_NAME)+"/"
| [
"[email protected]"
] | |
090f82f82fdc3d02c3bb17c3ee32ed6c85c8c08e | 0a25ea42bd8aff27c939b7de9d9a8ea036b0c66f | /thrift/thrift-utils/test/ezpz/__init__.py | 6b1fb9fe954c52e463b0d180312d8dccde9dae94 | [
"Apache-2.0"
] | permissive | ezbake/ezbake-common-python | 118a20e2f88aaa29f95459b6bb163d0a828407d0 | fc82fb71852750cc2cfcbd7af0cb6843fad13b89 | refs/heads/master | 2021-01-01T05:38:30.502302 | 2015-03-02T20:08:32 | 2015-03-02T20:08:32 | 31,560,413 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | # Copyright (C) 2013-2015 Computer Sciences Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [
"[email protected]"
] | |
3fa07e5008b46020f7867d26769152465c99df3f | 07ffe8db66fbd50f87315df34074e20b3ce67f0e | /about/models.py | 80a8e89e5bba77662e330b6c74d3a6e0a8d8a48a | [] | no_license | jakiiii/jtro-ecommerce | 9acc6d37797e409a79921358958e50d66f20a0b4 | e6e5ae04c7756e99f862634ad21f1d3877b501ab | refs/heads/master | 2023-01-22T09:44:47.891286 | 2020-12-01T23:32:19 | 2020-12-01T23:32:19 | 316,202,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
from jtro_ecommerce.utils import upload_image_path
class About(models.Model):
title = models.CharField(max_length=150)
image = models.ImageField(upload_to=upload_image_path, null=True, blank=True)
description = RichTextUploadingField()
timestamp = models.DateField(auto_now_add=True)
update = models.DateField(auto_now=True)
def __str__(self):
return "ABOUT US"
| [
"[email protected]"
] | |
03668cd8657241fcab646595058f80c9f4125756 | c3aad901e32f735625f938b4c26cdfa307254a6b | /biothings_explorer/api_preprocess/reasoner.py | b89e427492a7a016d9355ed1ccfbe18fd59cd9d8 | [
"Apache-2.0"
] | permissive | andrewgcodes/biothings_explorer | 73c598fae2171e8b61687325fa1c1ee1a625fbe1 | b54aa195bbed19ff5be09ed24dee869b24bb3c16 | refs/heads/master | 2022-12-23T18:06:34.061346 | 2022-08-18T20:23:17 | 2022-08-18T20:23:17 | 279,000,723 | 0 | 0 | Apache-2.0 | 2020-07-12T05:49:16 | 2020-07-12T05:49:15 | null | UTF-8 | Python | false | false | 939 | py | from itertools import groupby
def restructure_reasoner_response(json_doc):
"""Restructure the API output from reasoner API.
:param: json_doc: json output from reasoner API
"""
edges = json_doc['knowledge_graph']['edges']
if not edges:
return {}
res = {}
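    # itertools.groupby only groups consecutive items, so sort by edge type first.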
edges = sorted(edges, key=lambda x: x['type'])
for k, g in groupby(edges, lambda x: x['type']):
res[k] = []
for _item in g:
if _item['target_id'].startswith("PANTHER.FAMILY"):
_item['panther'] = _item['target_id'][15:]
if _item['target_id'].startswith("CHEBI"):
_item['chebi'] = _item['target_id']
if _item['target_id'].startswith("CHEMBL:"):
_item['chembl'] = _item['target_id'][7:]
if _item['target_id'].startswith("MONDO:"):
_item['mondo'] = _item['target_id'][6:]
res[k].append(_item)
return res | [
"[email protected]"
] | |
57839fbdaf39ce151f280eecf2ac06516ded4c83 | 0123229ac84c057b188f6b17c1131ec630ecaf25 | /stochastic_gradient_descent/test_sire_offset/offset_fix_phiandpsi/extract_frcmod.py | 4598f60b3d2efdd919bfb1c52e5dd461d50b8d9e | [] | no_license | michellab/paramfit-tests | 689851ab95406aad7160403c4a70d3ec6be91981 | 39598e93936beff48aefff1604483fd265a5f46a | refs/heads/master | 2021-01-13T05:47:23.287857 | 2017-04-24T10:58:21 | 2017-04-24T10:58:21 | 76,249,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | from parmed.amber import *
import parmed
import os
base = AmberParm("orig.prmtop", "fit.rst7")
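# Write all force-field parameters to a temporary frcmod file, then scan it for
# the backbone phi (C -N -CT-C ) and psi (N -CT-C -N) dihedral terms.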
parmed.tools.writeFrcmod(base,"test.frcmod").execute()
frcmod_file = open("test.frcmod","r").readlines()
for fr in frcmod_file:
if "C -N -CT-C " in fr: # this is phi
print("value of Phi")
print(fr)
elif "N -CT-C -N" in fr:
print("value of Psi")
print(fr)
else:
continue
cmd = "rm test.frcmod"
os.system(cmd)
| [
"[email protected]"
] | |
35a457296554b87038a7ebfa03198c4b1c60e697 | ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f | /Sourcem8/pirates/effects/VoodooAura2.py | 852b91918310ef820ba576e0b80105d5ea24b395 | [] | no_license | BrandonAlex/Pirates-Online-Retribution | 7f881a64ec74e595aaf62e78a39375d2d51f4d2e | 980b7448f798e255eecfb6bd2ebb67b299b27dd7 | refs/heads/master | 2020-04-02T14:22:28.626453 | 2018-10-24T15:33:17 | 2018-10-24T15:33:17 | 154,521,816 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | # File: V (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from otp.otpbase import OTPRender
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class VoodooAura2(PooledEffect, EffectController):
cardScale = 128.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/battleEffects')
self.card = model.find('**/effectVoodooShockwave')
if not self.particleDummy:
self.particleDummy = self.attachNewNode(ModelNode('VoodooAura2ParticleDummy'))
self.particleDummy.setDepthWrite(0)
self.particleDummy.setLightOff()
self.particleDummy.hide(OTPRender.ShadowCameraBitmask)
self.effectColor = Vec4(1, 1, 1, 1)
self.f = ParticleEffect.ParticleEffect('VoodooAura2')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('PointParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('PointEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(64)
self.p0.setBirthRate(0.02)
self.p0.setLitterSize(1)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(0)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(1.0)
self.p0.factory.setLifespanSpread(0.0)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
self.p0.renderer.setUserAlpha(0.5)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(0)
self.p0.renderer.setInitialXScale(0.0050000000000000001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.012 * self.cardScale)
self.p0.renderer.setInitialYScale(0.0050000000000000001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.012 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1), self.effectColor, 1)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.20000000000000001)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
def createTrack(self, rate = 1):
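        # Start emitting, hold for the effect's duration, then choke off the birth
        # rate so nothing new spawns while the last particles fade before cleanup.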
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.029999999999999999), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Wait(1.0), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(0.75), self.endEffect)
def setEffectColor(self, color):
self.effectColor = color
self.p0.renderer.getColorInterpolationManager().clearToInitial()
self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(0.80000000000000004, 0.80000000000000004, 0.80000000000000004, 1), self.effectColor, 1)
def cleanUpEffect(self):
self.detachNode()
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| [
"[email protected]"
] | |
f67e1e6de3d56e55471bc879166edec1c32ba813 | 8da79aedfb20c9798de0f4db4c5d85929a32f82b | /boo/columns.py | 200ff1a19478b1dd373b0d3bbfd9b11bfc79fc79 | [
"MIT"
] | permissive | nasingfaund/boo | a94e941ca8d3251fbb320c2e2f63e439f7ef4d59 | 96d08857abd790bc44f48256e7be7da130543a84 | refs/heads/master | 2023-07-01T00:33:33.085311 | 2021-08-03T21:23:03 | 2021-08-03T21:23:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,429 | py | """Преобразование сырых названий столбцов в названия переменных.
Описания полей отчетности можно посмотреть например в:
http://info.avtovaz.ru/files/avtovaz_ras_fs_2012_rus_secured.pdf
Более подробно о публикуемой форме отчетности:
http://www.consultant.ru/document/cons_doc_LAW_103394/b990bf4a13bd23fda86e0bba50c462a174c0d123/#dst100515
"""
from collections import OrderedDict
from dataclasses import dataclass
import numpy
import pandas as pd
# Column names as provided at Rosstat web site
TTL_COLUMNS = [
"Наименование",
"ОКПО",
"ОКОПФ",
"ОКФС",
"ОКВЭД",
"ИНН",
"Код единицы измерения",
"Тип отчета",
"11103",
"11104",
"11203",
"11204",
"11303",
"11304",
"11403",
"11404",
"11503",
"11504",
"11603",
"11604",
"11703",
"11704",
"11803",
"11804",
"11903",
"11904",
"11003",
"11004",
"12103",
"12104",
"12203",
"12204",
"12303",
"12304",
"12403",
"12404",
"12503",
"12504",
"12603",
"12604",
"12003",
"12004",
"16003",
"16004",
"13103",
"13104",
"13203",
"13204",
"13403",
"13404",
"13503",
"13504",
"13603",
"13604",
"13703",
"13704",
"13003",
"13004",
"14103",
"14104",
"14203",
"14204",
"14303",
"14304",
"14503",
"14504",
"14003",
"14004",
"15103",
"15104",
"15203",
"15204",
"15303",
"15304",
"15403",
"15404",
"15503",
"15504",
"15003",
"15004",
"17003",
"17004",
"21103",
"21104",
"21203",
"21204",
"21003",
"21004",
"22103",
"22104",
"22203",
"22204",
"22003",
"22004",
"23103",
"23104",
"23203",
"23204",
"23303",
"23304",
"23403",
"23404",
"23503",
"23504",
"23003",
"23004",
"24103",
"24104",
"24213",
"24214",
"24303",
"24304",
"24503",
"24504",
"24603",
"24604",
"24003",
"24004",
"25103",
"25104",
"25203",
"25204",
"25003",
"25004",
"32003",
"32004",
"32005",
"32006",
"32007",
"32008",
"33103",
"33104",
"33105",
"33106",
"33107",
"33108",
"33117",
"33118",
"33125",
"33127",
"33128",
"33135",
"33137",
"33138",
"33143",
"33144",
"33145",
"33148",
"33153",
"33154",
"33155",
"33157",
"33163",
"33164",
"33165",
"33166",
"33167",
"33168",
"33203",
"33204",
"33205",
"33206",
"33207",
"33208",
"33217",
"33218",
"33225",
"33227",
"33228",
"33235",
"33237",
"33238",
"33243",
"33244",
"33245",
"33247",
"33248",
"33253",
"33254",
"33255",
"33257",
"33258",
"33263",
"33264",
"33265",
"33266",
"33267",
"33268",
"33277",
"33278",
"33305",
"33306",
"33307",
"33406",
"33407",
"33003",
"33004",
"33005",
"33006",
"33007",
"33008",
"36003",
"36004",
"41103",
"41113",
"41123",
"41133",
"41193",
"41203",
"41213",
"41223",
"41233",
"41243",
"41293",
"41003",
"42103",
"42113",
"42123",
"42133",
"42143",
"42193",
"42203",
"42213",
"42223",
"42233",
"42243",
"42293",
"42003",
"43103",
"43113",
"43123",
"43133",
"43143",
"43193",
"43203",
"43213",
"43223",
"43233",
"43293",
"43003",
"44003",
"44903",
"61003",
"62103",
"62153",
"62203",
"62303",
"62403",
"62503",
"62003",
"63103",
"63113",
"63123",
"63133",
"63203",
"63213",
"63223",
"63233",
"63243",
"63253",
"63263",
"63303",
"63503",
"63003",
"64003",
"Дата актуализации",
]
# -- Text fields
MAPPER = OrderedDict(
[
("Наименование", "name"),
("ОКПО", "okpo"),
("ОКОПФ", "okopf"),
("ОКФС", "okfs"),
("ОКВЭД", "okved"),
("ИНН", "inn"),
("Код единицы измерения", "unit"),
("Тип отчета", "report_type"),
("Дата актуализации", "date_published"),
        # -- Balance sheet
        # -- Non-current assets
("1100", "ta_fix"),
("1150", "of"),
("1170", "ta_fix_fin"),
        # -- Current assets
("1200", "ta_nonfix"),
("1210", "inventory"),
("1230", "receivables"),
("1240", "ta_nonfix_fin"),
("1250", "cash"),
("1600", "ta"),
        # -- Liabilities and equity
("1300", "tp_capital"),
("1360", "retained_earnings"),
("1400", "tp_long"),
("1410", "debt_long"),
("1500", "tp_short"),
("1510", "debt_short"),
("1520", "payables"),
("1700", "tp"),
        # -- Income statement (OPU)
("2110", "sales"),
("2120", "costs"),
("2200", "profit_oper"),
("2330", "exp_interest"),
("2300", "profit_before_tax"),
("2400", "profit_after_tax"),
        # -- Cash flow statement (ODDS)
("4400", "cf"),
        # -- Operating activities
("4100", "cf_oper"),
("4110", "cf_oper_in"),
("4111", "cf_oper_in_sales"),
("4120", "cf_oper_out"),
("4121", "paid_to_supplier"),
("4122", "paid_to_worker"),
("4123", "paid_interest"),
("4124", "paid_profit_tax"),
        # -- Investing activities
("4200", "cf_inv"),
("4210", "cf_inv_in"),
("4220", "cf_inv_out"),
("4221", "paid_fa_investment"),
        # -- Financing activities
("4300", "cf_fin"),
("4310", "cf_fin_in"),
("4311", "cf_loan_in"),
("4312", "cf_eq_in_1"),
("4313", "cf_eq_in_2"),
("4314", "cf_bond_in"),
("4320", "cf_fin_out"),
("4321", "cf_eq_out"),
("4322", "cf_div_out"),
("4323", "cf_debt_out"),
]
)
def ask(code):
return MAPPER.get(str(code))
def fst(text):
return text[0]
def last(text):
return text[-1]
def trim(text):
return text[0:-1]
NON_NUMERIC = "x"
# This type ensures missing integer values are converted to NaNs
# See https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html
# and https://github.com/ru-corporate/boo/issues/18
INT_TYPE = pd.Int64Dtype()
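# Illustrative note (added; not from the original source): the nullable dtype keeps
# missing values as <NA> instead of silently coercing the column to float, e.g.
#   pd.array([100, None], dtype=INT_TYPE)  ->  <IntegerArray> [100, <NA>]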
@dataclass
class Column:
code: str
section: str
lag: bool
def rename_with(self, mapper: dict):
new_code = mapper.get(self.code, self.code)
return Column(new_code, self.section, self.lag)
def is_numeric(self):
return self.section != NON_NUMERIC
@property
def label(self):
return self.code + ("_lag" if self.lag else "")
@property
def dtype(self):
return INT_TYPE if self.is_numeric() else str
def is_lagged(text):
if fst(text) == "3":
return False
if last(text) == "3":
return False
if last(text) == "4":
return True
return None
assert is_lagged("63243") is False
assert is_lagged("Дата актуализации") is None
assert is_lagged("23304") is True
def section(text):
num = text[0]
return {
"1": "Баланс",
"2": "ОПУ",
"3": "Изменения капитала",
"4": "ОДДС",
"6": "Extras",
}.get(num, NON_NUMERIC)
def code(text):
if fst(text) in ["1", "2", "4", "6"]:
return text[0:-1]
else:
return text
def column(text):
return Column(code(text), section(text), is_lagged(text))
columns = [column(x) for x in TTL_COLUMNS]
INDEX = [i for (i, c) in enumerate(columns) if c.rename_with(MAPPER) != c]
columns_short = [c.rename_with(MAPPER) for c in columns if c.rename_with(MAPPER) != c]
NAMES = {c.label: c.dtype for c in columns_short}
assert len(INDEX) == len(NAMES)
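# Illustrative checks (added for clarity; they only restate what the definitions
# above already imply about how codes, sections and short labels fit together):
assert ask(2110) == "sales"
assert column("21103") == Column("2110", "ОПУ", False)
assert column("21104").label == "2110_lag"
assert column("21104").rename_with(MAPPER).label == "sales_lag"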
| [
"[email protected]"
] | |
f771322752f5feab04cb77f3b2f35d3026f3513f | 8aa3069cd4840fd216b917187a9c96bd7d3e2367 | /Exercícios/binomiofatorial.py | 424d1e4b8b3bb4389d4000032efe0357afec0102 | [] | no_license | rafaelsaidbc/USP | b10a28f958a1af5670fe48061f7b0c8b9db5d5d0 | 8c077f392fccd814380ea0e1b5ec228a54d4f779 | refs/heads/master | 2020-03-24T00:41:12.718523 | 2018-07-25T18:31:47 | 2018-07-25T18:31:47 | 142,302,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | def fatorial(n):
    fat = 1  # fat starts at 1, because 1 is the identity value for multiplication
    while(n > 1):  # while n is greater than 1, the loop keeps running
        fat = fat * n  # multiply fat by n
        n = n - 1  # update n by subtracting 1
    return fat  # loop finished; return the accumulated factorial
def numero_binomial(n, k):
return fatorial(n) / (fatorial(k) * fatorial(n - k))
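# Example (illustrative): numero_binomial(4, 2) == fatorial(4) / (fatorial(2) * fatorial(2)) == 6.0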
def testa_fatorial():  # tests the fatorial function
if fatorial(1) == 1:
print("Funciona para 1")
else:
print("Não funciona para 1")
if fatorial(2) == 2:
print("Funciona para 2")
else:
print("Não funciona para 2")
if fatorial(0) == 1:
print("Funciona para 0")
else:
print("Não funciona para 0")
if fatorial(5) == 120:
print("Funciona para 5")
else:
print("Não funciona para 5")
| [
"[email protected]"
] | |
cb840373802f4a2f053aa9b6db014d5a830284dd | 404cb0431675327a751f7a6f422f53288a92b85b | /chirp/library/order_test.py | 33fccabf573816f97b45246bff10199393e598bb | [
"Apache-2.0"
] | permissive | chirpradio/chirpradio-machine | ade94d7ac9ded65f91e1b3845be408723c0501da | 6fea6a87f2eb3cfac2a47831892c9ce02163b03b | refs/heads/master | 2023-09-01T02:57:07.749370 | 2023-08-28T23:57:46 | 2023-08-28T23:57:46 | 2,330,078 | 9 | 10 | Apache-2.0 | 2018-03-16T01:26:29 | 2011-09-05T19:10:48 | Python | UTF-8 | Python | false | false | 3,525 | py | #!/usr/bin/env python
import unittest
import mutagen.id3
from chirp.library import order
class OrderTest(unittest.TestCase):
def test_decode(self):
test_cases = (("1", 1, None),
(" 6", 6, None),
("006", 6, None),
("1/2", 1, 2),
("3 of 7", 3, 7),
("03anything04", 3, 4))
for text, order_num, max_num in test_cases:
self.assertEqual((order_num, max_num), order.decode(text))
# These should not be parseable.
error_test_cases = ("", "xxx", "0", "-1", "0/3", "3/", "3/0", "6/5",
"-1/4", "2/-1", "2/-", "3-4", "3/0")
for text in error_test_cases:
self.assertRaises(order.BadOrderError, order.decode, text)
def test_encode(self):
test_cases = ((1, 3, "1/3"), (7, None, "7"))
for order_num, total_num, expected_text in test_cases:
self.assertEqual(expected_text, order.encode(order_num, total_num))
error_test_cases = ((7, 5), (0, 3), (-1, 3), (4, 0), (4, -1))
for order_num, total_num in error_test_cases:
self.assertRaises(order.BadOrderError,
order.encode, order_num, total_num)
def test_standardize_str(self):
self.assertEqual("3", order.standardize_str(" 3 "))
self.assertEqual("3/7", order.standardize_str("3 of 7"))
def test_standardize(self):
tag = mutagen.id3.TRCK(text=["3 of 7"])
order_num, max_num = order.standardize(tag)
self.assertEqual(["3/7"], tag.text)
self.assertEqual(3, order_num)
self.assertEqual(7, max_num)
def test_is_archival(self):
self.assertTrue(order.is_archival("3/7"))
self.assertFalse(order.is_archival("bad"))
self.assertFalse(order.is_archival("3"))
self.assertFalse(order.is_archival("3 of 7"))
self.assertFalse(order.is_archival("7/3"))
self.assertFalse(order.is_archival(" 3/7"))
def test_verify_and_standardize_str_list(self):
# Check the simplest valid case.
self.assertEqual(["1/1"], order.verify_and_standardize_str_list(["1"]))
# Check an already-standardized set.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(
["1/4", "3/4", "2/4", "4/4"]))
# Check strings without a max number.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(["1", "3", "2", "4"]))
# Check mixed formats.
self.assertEqual(
["1/4", "3/4", "2/4", "4/4"],
order.verify_and_standardize_str_list(["1", "3/4", "2", "4 of 4"]))
# Check empty list.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list, [])
# Check garbage in list.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list, ["xxx"])
# Check treatment of gaps.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list,
["1", "2", "4"])
# Check bad max number.
self.assertRaises(order.BadOrderError,
order.verify_and_standardize_str_list,
["1/5", "3/5", "2/5", "4/5"])
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
775d4e974bbace6a037417248f6885324aebea6a | 85764904e918310f9e4a209f64570dcdcf099818 | /loutilities/user/roles.py | 3df49ee5b5f5d63b1edda6261c2976dbd2e6b5e1 | [
"Apache-2.0"
] | permissive | louking/loutilities | 05bb20994ae06d2e68989cd6a779c350a9a430ad | aaf7410849d0167001cd5f06ab0dae6563e58ec7 | refs/heads/master | 2023-07-24T18:32:36.128102 | 2023-07-15T10:02:43 | 2023-07-15T10:02:43 | 5,824,315 | 2 | 2 | null | 2023-05-10T09:59:37 | 2012-09-15T21:29:29 | Python | UTF-8 | Python | false | false | 3,136 | py | ###########################################################################################
# roles - common location for utility role declarations
#
# Date Author Reason
# ---- ------ ------
# 03/11/20 Lou King Create
#
# Copyright 2020 Lou King. All rights reserved
###########################################################################################
from loutilities.user.model import APP_CONTRACTS, APP_MEMBERS, APP_ROUTES, APP_SCORES, APP_ALL
# common roles
ROLE_SUPER_ADMIN = 'super-admin'
ROLES_COMMON = [ROLE_SUPER_ADMIN]
roles_common = [
{'name': 'super-admin', 'description': 'allowed to do everything on all applications', 'apps': APP_ALL},
]
# members roles
ROLE_LEADERSHIP_ADMIN = 'leadership-admin'
ROLE_LEADERSHIP_MEMBER = 'leadership-member'
ROLE_MEMBERSHIP_ADMIN = 'membership-admin'
ROLE_MEETINGS_ADMIN = 'meetings-admin'
ROLE_MEETINGS_MEMBER = 'meetings-member'
ROLE_RACINGTEAM_ADMIN = 'racingteam-admin'
ROLE_RACINGTEAM_MEMBER = 'racingteam-member'
roles_members = [
{'name': ROLE_LEADERSHIP_ADMIN, 'description': 'access to leadership tasks for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_LEADERSHIP_MEMBER, 'description': 'user of leadership tasks for members application', 'apps':[APP_MEMBERS]},
    {'name': ROLE_MEMBERSHIP_ADMIN, 'description': 'access to membership administration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_MEETINGS_ADMIN, 'description': 'access to meetings administration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_MEETINGS_MEMBER, 'description': 'user of meetings for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_RACINGTEAM_ADMIN, 'description': 'access to racingteam administration for members application', 'apps':[APP_MEMBERS]},
{'name': ROLE_RACINGTEAM_MEMBER, 'description': 'user of racingteam module for members application', 'apps':[APP_MEMBERS]},
]
# routes roles
ROLE_ROUTES_ADMIN = 'routes-admin'
ROLE_ICON_ADMIN = 'icon-admin'
roles_routes = [{'name': ROLE_ROUTES_ADMIN, 'description': 'access to routes for routes application', 'apps':[APP_ROUTES]},
{'name': ROLE_ICON_ADMIN, 'description': 'access to icons for routes application', 'apps':[APP_ROUTES]}
]
# contracts roles
ROLE_EVENT_ADMIN = 'event-admin'
ROLE_SPONSOR_ADMIN = 'sponsor-admin'
roles_contracts = [{'name': ROLE_EVENT_ADMIN, 'description': 'access to events for contracts application', 'apps':[APP_CONTRACTS]},
{'name': ROLE_SPONSOR_ADMIN, 'description': 'access to sponsors/races for contracts application', 'apps':[APP_CONTRACTS]}
]
# scores roles
ROLE_SCORES_ADMIN = 'scores-admin'
ROLE_SCORES_VIEWER = 'scores-viewer'
roles_scores = [{'name': ROLE_SCORES_ADMIN, 'description': 'administer scores application', 'apps':[APP_SCORES]},
{'name': ROLE_SCORES_VIEWER, 'description': 'view scores application', 'apps':[APP_SCORES]},
]
all_roles = [roles_common, roles_contracts, roles_members, roles_routes, roles_scores] | [
"[email protected]"
] | |
c4fb0116985e3ace94fc0fe7bbfb80ab7f53d331 | 7edb6f64afb9a9d5fd2b712faae9841d45c3a3b3 | /monkeyAndPerformance/allCode/performanceTest/traffic/traffic.py | 9edb99221fc8f0b920e0abebe9a4f074378baddb | [] | no_license | Hanlen520/AppSpecialTest | 413babbbecbeaa8e25dd1fd70dd349a1de07eb5e | 06f69f116245162220985ad2632fbff3af72450c | refs/heads/master | 2023-04-22T19:59:35.523780 | 2019-08-08T09:48:28 | 2019-08-08T09:48:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,398 | py | import csv,os,time
from config.config import *
from monkeyAndPerformance.allCode.util.gettimestr import GetTimeStr
gettimestr = GetTimeStr()  # instantiate GetTimeStr
# controller class
class Controller(object):
    def __init__(self):
        self.counter = RunTrafficCount  # number of test runs
        # list used to collect the measurements
        self.alldata = [("deviceid","appversion","timestamp", "traffic")]  # data to save: device, app version, timestamp and traffic
    # a single measurement pass
    def TestProcessOnce(self):
        # run the command that looks up the app process
        cmd = 'adb shell "ps | grep %s"' % AppPackageName  # locate the app process
content = os.popen(cmd)
result = content.readlines()
print("result:%s"% result)
print("result.length:%s" % len(result))
if len(result):
            # get the process ID
# pid = result[0].split(" ")[5]
pid = result[0].split(" ")[3]
print("result[0].split():%s" % result[0].split(" "))
print("pid:%s"% pid)
self.DeleyTime(3)
            # query the traffic used by that process ID
            cmd = 'adb shell cat /proc/%s/net/dev'% pid  # read the traffic counters
content = os.popen(cmd)
traffic = content.readlines()
print("traffic:%s"% traffic)
            # parse the traffic figures from the output
for line in traffic:
print("line:%s" % line)
if "wlan0" in line:
                    # collapse all whitespace runs into "#"
                    line = "#".join(line.split())
                    print("line##:%s"% line)
                    # split on "#" to get the received and transmitted byte counts
receive = line.split("#")[1]
print("receive#:%s"%receive)
transmit = line.split("#")[9]
print("transmit##:%s"% transmit)
# if "eth0" in line:
# #将所有空行换成#
# line = "#".join(line.split())
# #按#号拆分,获取收到和发出的流量
# receive = line.split("#")[1]
# transmit = line.split("#")[9]
# elif "eth1" in line:
# # 将所有空行换成#
# line = "#".join(line.split())
# # 按#号拆分,获取收到和发出的流量
# receive2 = line.split("#")[1]
# transmit2 = line.split("#")[9]
            # sum up all of the traffic counters
            # alltraffic = int(receive) + int(transmit) + int(receive2) + int(transmit2)
            alltraffic = int(receive) + int(transmit)
            # convert the traffic value to KB
            alltraffic = alltraffic/1024
            currenttime = self.GetCurrentTime()  # get the current time
            # store the collected values in the data list
            self.alldata.append((TestDeviceID,AppVersion,currenttime,alltraffic))  # append the row to self.alldata
else:
print("没有获取到相应进程,请确定打开相应的app")
    # delay helper
def DeleyTime(self,delaytime):
delaytime = int(delaytime)
        time.sleep(delaytime)  # wait for the requested number of seconds
print("等待%s秒..."% delaytime)
    # run the measurement process multiple times
def RunMore(self):
        # put the phone into the non-charging state
cmd = 'adb shell dumpsys battery set status 1'
os.popen(cmd)
self.DeleyTime(3)
print("循环开始时间:%s" % self.GetCurrentTime() )
        while self.counter>0:  # while there are runs left
            self.TestProcessOnce()  # perform one measurement pass
            self.counter = self.counter -1  # decrement the remaining run count
            self.DeleyTime(5)  # sample once every 5 seconds
gettimestr.outPutMyLog("流量统计剩余运行次数为:%s" % self.counter)
print("循环结束时间:%s" % self.GetCurrentTime())
    # timestamp used when storing the data
def GetCurrentTime(self):
currenttime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()) # 获取当前时间
return currenttime # 返回当前时间
# 获取当前时间的字符串
def GetCurrentTimeString(self):
currenttime = time.strftime("%Y%m%d%H%M%S", time.localtime()) # 获取当前时间
return currenttime # 返回当前时间
#存储数据到CSV时间
def SaveDataToCSV(self,timestr):
basedir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) + "/" + "codeResult"
nyrsfmdir = gettimestr.createNYRSFMdir(basedir,timestr)
csvfile = "%s/%s_%s" % (nyrsfmdir,timestr,AppTrafficCSVFile)
        opencsvfile = open(csvfile, "w",newline="")  # newline="" avoids the blank lines Python 3 csv writing otherwise produces
        writercsv = csv.writer(opencsvfile)  # create the CSV writer
        writercsv.writerows(self.alldata)  # write the collected rows to the CSV file
        opencsvfile.close()  # close the file
print("数据:%s" % self.alldata)
print("数据保存路径:%s"% csvfile)
print("流量消耗:最后一次的流量值减去第一次的流量值,就是本次操作消耗的流量值")
    def run(self,timestr):  # entry point: run the measurements and save the results
self.RunMore()
self.SaveDataToCSV(timestr)
if __name__ == "__main__":
timestr = gettimestr.getTimeStr()
controller = Controller()
controller.run(timestr) | [
"[email protected]"
] | |
25e372cb14bdc5d7011802d05410d01a864a361a | 7f8d2288dc8d81275269bdb8e8f196339a52d30d | /code/1010_solution.py | c14133019520efb5a27564644e2a7e131773bfda | [] | no_license | ishaansharma/leetcode-3 | f9cab568c31322e2bf84768264f3c644182cd470 | 9081dd3ff86409d554b0298a8152ed40a6befa96 | refs/heads/master | 2023-03-25T15:36:04.235650 | 2021-03-30T20:15:45 | 2021-03-30T20:15:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | class Solution:
    def numPairsDivisibleBy60(self, time: List[int]) -> int:
        count = 0
        seen = [0] * 60  # seen[r] = how many earlier songs have duration % 60 == r
        for t in time:
            count += seen[-t % 60]  # pair t with earlier songs whose remainder complements it to a multiple of 60
            seen[t % 60] += 1
        return count
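# Usage sketch (illustrative): each unordered pair (i, j) with (time[i] + time[j]) % 60 == 0
# is counted exactly once, e.g.
#   Solution().numPairsDivisibleBy60([30, 20, 150, 100, 40]) -> 3
#   (the qualifying pairs are (30, 150), (20, 100) and (20, 40))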
| [
"[email protected]"
] | |
c383a0ab8b68f0818c9f72c933f6d128dad4b8a6 | 3de707e3e7f3fcbf46700e1bf8d6c394a71410a2 | /augpathlib/remotes.py | bc67fd0ba4611b6910d60347b5944246e289d464 | [
"MIT"
] | permissive | tmsincomb/augpathlib | 984f1c8418e7e8eaa5675a3c209cbd745cdee3e7 | ed9c0edff540741fca866780a3d043a3b7644f08 | refs/heads/master | 2022-11-17T12:03:13.852433 | 2020-06-29T10:10:23 | 2020-06-29T10:10:23 | 276,260,552 | 0 | 0 | MIT | 2020-07-01T02:47:31 | 2020-07-01T02:47:30 | null | UTF-8 | Python | false | false | 33,457 | py | import os
import sys
import atexit
import pathlib
import warnings
import subprocess
from augpathlib import exceptions as exc
from augpathlib.meta import PathMeta
from augpathlib import caches, LocalPath
from augpathlib.utils import _bind_sysid_, StatResult, cypher_command_lookup, log
if os.name != 'nt':
# pexpect on windows does not support pxssh
# because it is missing spawn
from pexpect import pxssh
class RemotePath:
""" Remote data about a remote object. """
_cache_class = None
_debug = False
    # ugh this is such a bad implementation, let the remote paths exist
    # and init, and then just check if they exist, a path is not an object
    # that always dereferences ... what the heck was I thinking when I did this ...
    # we use a PurePath because we still want to key off this being a local path
# but we don't want any of the local file system operations to work by accident
# so for example self.stat should return the remote value not the local value
# which is what would happen if we used a PosixPath as the base class
# need a way to pass the session information in independent of the actual path
# abstractly having remote.data(global_id_for_local, self)
# should be more than enough, the path object shouldn't need
# to know that it has a remote id, the remote manager should
# know that
@classmethod
def _new(cls, local_class, cache_class):
""" when constructing a new remote using _new you MUST
call init afterward to bind the remote api """
# FIXME 1:1ness issue from local -> cache
# probably best to force the type of the cache
# to switch if there are multiple remote mappings
# since there can be only 1 local file with the same
# path, a composite cache or a multi-remote cache
# seems a bit saner, or require explicit switching of
# the active remote if one-at-a-time semantics are desired
newcls = type(cls.__name__,
(cls,),
dict(_local_class=local_class,
_cache_class=cache_class))
local_class._remote_class = newcls
local_class._cache_class = cache_class
cache_class._remote_class = newcls
cache_class._local_class = local_class
newcls.weighAnchor()
cache_class.weighAnchor()
return newcls
@classmethod
def init(cls, identifier):
""" initialize the api from an identifier and bind the root """
if not hasattr(cls, '_api'):
cls._api = cls._api_class(identifier)
cls.root = cls._api.root
else:
raise ValueError(f'{cls} already bound an api to {cls._api}')
@classmethod
def anchorToCache(cls, cache_anchor, init=True):
# FIXME need to check for anchor after init and init after anchor
if not hasattr(cls, '_cache_anchor'):
if init:
if not hasattr(cls, '_api'):
cls.init(cache_anchor.id)
if hasattr(cls, 'root') and cls.root != cache_anchor.id:
raise ValueError('root and anchor ids do not match! '
f'{cls.root} != {cache_anchor.id}')
cls._cache_anchor = cache_anchor
return cls._cache_anchor
else:
raise ValueError(f'already anchored to {cls._cache_anchor}')
@classmethod
def anchorTo(cls, path, create=False):
""" You already know the rock you want and
you want the anchor stuck to it. """
# FIXME should we fail on create=True and exists?
if isinstance(path, caches.CachePath):
# FIXME the non-existence problem rears its head again
return cls.anchorToCache(path)
elif isinstance(path, LocalPath):
# FIXME the non-existence problem rears its head again
if path.cache:
return cls.anchorToCache(path.cache)
else:
root = cls.root if isinstance(cls.root, cls) else cls(cls.root)
if path.name != root.name:
# unlike git you cannot clone to a folder with a different
# name (for now ... maybe can figure out how in the future)
raise ValueError('Path name and root name do not match.'
f'{path.name} != {cls.root.name}')
if create:
return cls.dropAnchor(path.parent) # existing folder dealt with in dropAnchor
else:
raise ValueError(f'not creating {path} since create=False')
else:
raise TypeError(f"Don't know how to anchor to a {type(path)} {path}")
@classmethod
def _get_local_root_path(cls, parent_path=None):
if parent_path is None:
parent_path = cls._local_class.cwd()
else:
parent_path = cls._local_class(parent_path)
root = cls(cls.root) # FIXME formalize the use of root
path = parent_path / root.name
return root, path
@classmethod
def smartAnchor(cls, parent_path=None):
# work around the suspect logic
# in the implementation below
try:
return cls.dropAnchor(parent_path=parent_path)
except exc.RemoteAlreadyAnchoredError as e:
root, path = cls._get_local_root_path(parent_path)
if cls._cache_anchor == path.cache:
return cls._cache_anchor
else:
raise e # possibly check if the anchor is the same?
except exc.CacheExistsError as e:
root, path = cls._get_local_root_path(parent_path)
cls._cache_anchor = path.cache
return cls._cache_anchor
except exc.DirectoryNotEmptyError as e:
root, path = cls._get_local_root_path(parent_path)
if path.cache:
cls._cache_anchor = path.cache
return cls._cache_anchor
else:
raise e
@classmethod
def dropAnchor(cls, parent_path=None):
""" If a _cache_anchor does not exist then create it,
otherwise raise an error. If a local anchor already
exists do not use this method.
You know that the ship (path) is more or less in the right
place but you don't know for sure exactly which rock the
anchor will catch on (you don't know the name of the remote).
"""
if not hasattr(cls, '_cache_anchor'):
root, path = cls._get_local_root_path(parent_path)
if not path.exists():
if root.is_file():
raise NotImplementedError(
'Have not implemented mapping for individual files yet.')
elif root.is_dir():
path.mkdir()
else:
raise NotImplementedError(f'What\'s a {root}?!')
elif list(path.children):
raise exc.DirectoryNotEmptyError(f'has children {path}')
cls._cache_anchor = path.cache_init(root.id, anchor=True)
# we explicitly do not handle the possible CacheExistsError here
# so that there is a path where anchoring can fail loudly
# we may not need that at the end of the day, but we will see
return cls._cache_anchor
else:
raise exc.RemoteAlreadyAnchoredError(f'{cls} already anchored to '
f'{cls._cache_anchor}')
@classmethod
def weighAnchor(cls):
# TODO determine whether the current behavior is correct
# calling this will not cause the cache class to weigh anchor
# but there is a small chance that it should
# TODO is _abstract_class needed here? or do we not need it
# because remote paths don't have the crazy hierarchy that
# pathlib derived paths do? and will this change when we fix
# everything ...
if hasattr(cls, '_cache_anchor'):
delattr(cls, '_cache_anchor')
@classmethod
def setup(cls, local_class, cache_class):
""" call this once to bind everything together """
        cn = cls.__name__  # classmethod: use cls, not self (self is not defined here)
        warnings.warn(f'{cn}.setup is deprecated, please switch to RemotePath._new',
DeprecationWarning,
stacklevel=2)
cache_class.setup(local_class, cls)
def bootstrap(self, recursive=False, only=tuple(), skip=tuple(), sparse=tuple()):
#self.cache.remote = self # duh
# if you forget to tell the cache you exist of course it will go to
# the internet to look for you, it isn't quite smart enough and
# we're trying not to throw dicts around willy nilly here ...
return self.cache.bootstrap(self.meta, recursive=recursive, only=only, skip=skip, sparse=sparse)
def __init__(self, thing_with_id, cache=None):
if isinstance(thing_with_id, str):
id = thing_with_id
elif isinstance(thing_with_id, PathMeta):
id = thing_with_id.id
elif isinstance(thing_with_id, RemotePath):
id = thing_with_id.id
else:
raise TypeError(f'Don\'t know how to initialize a remote from {thing_with_id}')
self._id = id
if cache is not None:
self._cache = cache
self.cache._remote = self
self._errors = []
@property
def id(self):
return self._id
@property
def errors(self):
raise NotImplementedError
@property
def cache(self):
if hasattr(self, '_cache_anchor') and self._cache_anchor is not None:
return self._cache
else:
# cache is not real
class NullCache:
@property
def local(self, remote=self):
raise TypeError(f'No cache for {remote}')
@property
def _are_we_there_yet(self, remote=self):
                # this is useless since these classes are ephemeral
if hasattr(remote, '_cache_anchor') and remote._cache_anchor is not None:
remote.cache_init()
def __rtruediv__(self, other):
return None
def __truediv__(self, other):
return None
return NullCache()
def cache_init(self, parents=False):
try:
return self._cache_anchor / self
except FileNotFoundError:
if parents:
#parent, *rest = self.parent.cache_init(parents=parents)
#return (self.cache_init(), parent, *rest)
parent = self.parent
parent_cache = parent.cache_init(parents=parents)
parent_cache.local.cache_init(parent.meta) # FIXME hrm we shouldn't have to do this
# and it isn't working anyway ... the xattrs don't seem to be getting set
return self.cache_init()
else:
raise
@property
def _cache(self):
""" To catch a bad call to set ... """
if hasattr(self, '_c_cache'):
return self._c_cache
@_cache.setter
def _cache(self, cache):
if not isinstance(cache, caches.CachePath):
raise TypeError(f'cache is a {type(cache)} not a CachePath!')
#elif cache.meta is None: # useful for certain debugging situations
#raise ValueError(f'cache has no meta {cache}')
self._c_cache = cache
def _cache_setter(self, cache, update_meta=True):
cache._remote = self
# FIXME in principle
# setting cache needs to come before update_meta
# in the event that self.meta is missing file_id
# if meta updater fails we unset self._c_cache
self._cache = cache
if update_meta:
try:
cache._meta_updater(self.meta)
except BaseException as e:
self._c_cache = None
delattr(self, '_c_cache')
raise e
@property
def parent_id(self):
""" BEWARE if self.parent hits the network so will this.
In the event that it does, overwrite this method. """
return self.parent.id
def _parent_changed(self, cache):
return self.parent_id != cache.parent.id
def _on_cache_move_error(self, error, cache):
""" called after a failure to move a cached file to a new location """
raise error
def update_cache(self, cache=None, fetch=True):
""" Update a cache object using the metadata attached to this remote.
            This is different from _cache_setter in that it runs update_meta
by default, handles many more edge cases, and checks for consistency.
_cache_setter is usually invoked internally by a CachePath method that
            wants to register itself with a remote as an implementation detail. """
if cache is not None and self.cache is not None:
# TODO see if there are any exceptions to this behavior
raise TypeError('cannot accept cache kwarg when self.cache not None')
elif cache is None:
cache = self.cache
parent_changed = self._parent_changed(cache)
if self.cache is None:
# HACK test if cache is not None before it may have been reassigned
if cache.name != self.name:
msg = ('Cannot update the name and content of a file at the '
'same time.\nAre you sure you have passed the right '
f'cache object?\n{cache.name} != {self.name}')
raise ValueError(msg)
elif parent_changed:
msg = ('Cannot update the parent and content of a file at the '
'same time.\nAre you sure you have passed the right '
f'cache object?\n{cache.parent.id} != {self.parent_id}')
raise ValueError(msg)
log.debug(f'maybe updating cache for {self.name}')
file_is_different = cache._meta_updater(self.meta, fetch=fetch)
# update the cache first # FIXME this may be out of order ...
# then move to the new name if relevant
# prevents moving partial metadata onto existing files
if cache.name != self.name or parent_changed: # this is localy correct
# the issue is that move is now smarter
# and will detect if a parent path has changed
try:
cache.move(remote=self)
except exc.WhyDidntThisGetMovedBeforeError as e:
# AAAAAAAAAAAAAAAAAAAAAAAAAAAAA
# deal with the sadness that is non-unique filenames
# I am 99.999999999999999% certain that users do not
# expect this behavior ...
log.error(e)
self._on_cache_move_error(e, cache)
return file_is_different
@property
def local(self):
return self.cache.local # FIXME there are use cases for bypassing the cache ...
@property
def local_direct(self):
        # kind of uninstrumented ???
return self._local_class(self.as_path())
@property
def anchor(self):
""" the semantics of anchor for remote paths are a bit different
RemotePath code expects this function to return a RemotePath
NOT a string as is the case for core pathlib. """
raise NotImplementedError
@property
def _meta(self): # catch stragglers
raise NotImplementedError
def refresh(self):
""" Refresh the local in memory metadata for this remote.
Implement actual functionality in your subclass. """
raise NotImplementedError
# could be fetch or pull, but there are really multiple pulls as we know
# clear the cached value for _meta
if hasattr(self, '_meta'):
delattr(self, '_meta')
@property
def data(self):
raise NotImplementedError
self.cache.id
for chunk in chunks:
yield chunk
@property
def meta(self):
# on blackfynn this is the package id or object id
        # this will error if there is no implementation of self.id
raise NotImplementedError
#return PathMeta(id=self.id)
def _meta_setter(self, value):
raise NotImplementedError
@property
def annotations(self):
# these are models etc in blackfynn
yield from []
raise NotImplementedError
def as_path(self):
""" returns the relative path construction for the child so that local can make use of it """
return pathlib.PurePath(*self.parts)
def _parts_relative_to(self, remote, cache_parent=None):
        parent_names = []  # FIXME massively inefficient due to re-treading subpaths :/
# have a look at how pathlib implements parents
parent = self.parent
if parent != remote:
parent_names.append(parent.name)
# FIXME can this go stale? if so how?
#log.debug(cache_parent)
if cache_parent is not None and parent.id == cache_parent.id:
for c_parent in cache_parent.parents:
if c_parent is None:
continue
elif c_parent.name == remote.name: # FIXME trick to avoid calling id
parent_names.append(c_parent.name) # since be compare one earlier we add here
break
else:
parent_names.append(c_parent.name)
else:
for parent in parent.parents:
if parent == remote:
break
elif parent is None:
continue # value error incoming
else:
parent_names.append(parent.name)
else:
self._errors += ['file-deleted']
msg = f'{remote} is not one of {self}\'s parents'
log.error(msg)
#raise ValueError()
args = (*reversed(parent_names), self.name)
elif self == parent:
args = ('',)
else:
args = self.name,
return args
@property
def parts(self):
if self == self.anchor:
return tuple()
if not hasattr(self, '_parts'):
if self.cache:
cache_parent = self.cache.parent
else:
cache_parent = None
self._parts = tuple(self._parts_relative_to(self.anchor, cache_parent))
return self._parts
@property
def parent(self):
""" The atomic parent operation as understood by the remote. """
raise NotImplementedError
@property
def parents(self):
parent = self.parent
while parent:
yield parent
parent = parent.parent
@property
def children(self):
# uniform interface for retrieving remote hierarchies decoupled from meta
raise NotImplementedError
@property
def rchildren(self):
# uniform interface for retrieving remote hierarchies decoupled from meta
yield from self._rchildren()
def _rchildren(self, create_cache=True, sparse=False):
raise NotImplementedError
def children_pull(self, existing):
# uniform interface for asking the remote to
# update children using its own implementation
raise NotImplementedError
def iterdir(self):
# I'm guessing most remotes don't support this
raise NotImplementedError
def glob(self, pattern):
raise NotImplementedError
def rglob(self, pattern):
raise NotImplementedError
def __eq__(self, other):
return self.id == other.id
def __ne__(self, other):
return not self == other
def __repr__(self):
return f'{self.__class__.__name__}({self.id!r})'
class SshRemote(RemotePath, pathlib.PurePath):
""" Testing. To be used with ssh-agent.
StuFiS The stupid file sync. """
_cache_class = None # set when calling __new__
encoding = 'utf-8'
_meta = None # override RemotePath dragnet
_meta_maker = LocalPath._meta_maker
sysid = None
_bind_sysid = classmethod(_bind_sysid_)
@classmethod
def _new(cls, local_class, cache_class):
newcls = super()._new(local_class, cache_class)
# must run before we can get the sysid, which is a bit odd
# given that we don't actually sandbox the filesystem
newcls._bind_flavours()
return newcls
@classmethod
def _bind_flavours(cls, pos_helpers=tuple(), win_helpers=tuple()):
pos, win = cls._get_flavours()
if pos is None:
pos = type(f'{cls.__name__}Posix',
(*pos_helpers, cls, pathlib.PurePosixPath), {})
if win is None:
win = type(f'{cls.__name__}Windows',
(*win_helpers, cls, pathlib.PureWindowsPath), {})
cls.__abstractpath = cls
cls.__posixpath = pos
cls.__windowspath = win
@classmethod
def _get_flavours(cls):
pos, win = None, None
for subcls in cls.__subclasses__(): # direct only
if subcls._flavour is pathlib._posix_flavour:
pos = subcls
elif subcls._flavour is pathlib._windows_flavour:
win = subcls
else:
raise TypeError(f'unknown flavour for {cls} {cls._flavour}')
return pos, win
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_flavour'):
cls = cls.__windowspath if os.name == 'nt' else cls.__posixpath
if isinstance(args[0], str) and args[0].startswith(cls.host + ':'):
# FIXME not great but allows less verbose where possible ...
# also possibly an opportunity to check if hostnames match?
# ugh unix everything is a stream of bytes is annoying here
_, *args = (args[0].split(':', 1), *args[1:])
_self = pathlib.PurePath.__new__(cls, *args) # no kwargs since the only kwargs are for init
_self.remote_platform = _self._remote_platform
return _self
# TODO this isn't quite working yet due to bootstrapping issues as usual
# it also isn't working because we want access to all paths in many cases
# the root remains and the calculation against anchor remains for any
# relative path that is provided, and the default behavior for absolute
        # paths protects us from silliness
if _self.id != cls.root: #_cache_anchor.id:
self = _self.relative_to(_self.anchor)
else:
self = pathlib.PurePath.__new__(cls, '.') # FIXME make sure this is interpreted correctly ...
self._errors = []
return self
@classmethod
def init(cls, host_path):
""" should only be invoked after _new has bound local and cache classes """
if not hasattr(cls, '_anchor'):
cls.root = host_path # I think this is right ...
host, path = host_path.split(':', 1)
if not hasattr(cls, '_flavour'):
cls = cls.__windowspath if os.name == 'nt' else cls.__posixpath
cls._anchor = pathlib.PurePath.__new__(cls, path)
session = pxssh.pxssh(options=dict(IdentityAgent=os.environ.get('SSH_AUTH_SOCK')))
session.login(host, ssh_config=LocalPath('~/.ssh/config').expanduser().as_posix())
cls._rows = 200
cls._cols = 200
session.setwinsize(cls._rows, cls._cols) # prevent linewraps of long commands
session.prompt()
atexit.register(lambda:(session.sendeof(), session.close()))
cls.host = host
cls.session = session
cls._uid, *cls._gids = [int(i) for i in (cls._ssh('echo $(id -u) $(id -G)')
.decode().split(' '))]
else:
raise ValueError(f'{cls} already bound an remote to {cls._anchor}')
@classmethod
def anchorToCache(cls, cache_anchor, init=True):
anchor = super().anchorToCache(cache_anchor=cache_anchor, init=init)
# _cache_anchor has to be bound for _bind_sysid to work
# that binding happens after init so we do this here
cls._bind_sysid()
return anchor
def __init__(self, thing_with_id, cache=None):
if isinstance(thing_with_id, pathlib.PurePath):
thing_with_id = thing_with_id.as_posix()
super().__init__(thing_with_id, cache=cache)
@property
def anchor(self):
return self._anchor
#return self._cache_anchor.remote
# FIXME warning on relative paths ...
# also ... might be convenient to allow
# setting non-/ anchors, but perhaps for another day
#return self.__class__('/', host=self.host)
@property
def id(self):
return f'{self.host}:{self.rpath}'
#return self.host + ':' + self.as_posix() # FIXME relative to anchor?
@property
def cache_key(self):
""" since some systems have compound ids ... """
raise NotImplementedError
@property
def rpath(self):
# FIXME relative paths when the anchor is set differently
        # the anchor will have to be stored as well since there could
# be many possible anchors per host, thus, if an anchor relative
# identifier is supplied then we need to construct the full path
# conveniently in this case if self is a fully rooted path then
# it will overwrite the anchor path
# TODO make sure that the common path is the anchor ...
return (self.anchor / self).as_posix()
def _parts_relative_to(self, remote, cache_parent=None):
if remote == self.anchor:
# have to build from self.anchor._parts because it is the only
# place the keeps the original parts
remote = pathlib.PurePath(*self.anchor._parts)
return self.relative_to(remote).parts
def refresh(self):
# TODO probably not the best idea ...
raise NotImplementedError('This baby goes to the network every single time!')
def access(self, mode):
""" types are 'read', 'write', and 'execute' """
try:
st = self.stat()
except (PermissionError, FileNotFoundError) as e:
return False
r, w, x = 0x124, 0x92, 0x49
read = ((r & st.st_mode) >> 2) & (mode == 'read' or mode == os.R_OK) * x
write = ((w & st.st_mode) >> 1) & (mode == 'write' or mode == os.W_OK) * x
execute = (x & st.st_mode) & (mode == 'execute' or mode == os.X_OK) * x
current = read + write + execute
u, g, e = 0x40, 0x8, 0x1
return (u & current and st.st_uid == self._uid or
g & current and st.st_gid in self._gids or
e & current)
def open(self, mode='wt', buffering=-1, encoding=None,
errors=None, newline=None):
if mode not in ('wb', 'wt'):
raise TypeError('only w[bt] mode is supported') # TODO ...
#breakpoint()
return
class Hrm:
session = self.session
def write(self, value):
self.session
#cmd = ['ssh', self.host, f'"cat - > {self.rpath}"']
#self.session
#p = subprocess.Popen()
@property
def data(self):
cmd = ['scp', self.id, '/dev/stdout']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
while True:
data = p.stdout.read(4096) # TODO hinting
if not data:
break
yield data
p.communicate()
# reuse meta from local
# def meta (make it easier to search for this)
meta = LocalPath.meta # magic
#def _ssh(self, remote_cmd):
@classmethod
def _ssh(cls, remote_cmd):
#print(remote_cmd)
if len(remote_cmd) > cls._cols:
raise exc.CommandTooLongError
n_bytes = cls.session.sendline(remote_cmd)
cls.session.prompt()
raw = cls.session.before
out = raw[n_bytes + 1:].strip() # strip once here since we always will
#print(raw)
#print(out)
return out
@property
def _remote_platform(self):
remote_cmd = "uname -a | awk '{ print tolower($1) }'"
return self._ssh(remote_cmd).decode(self.encoding)
@property
def cypher_command(self):
# this one is a little backwards, because we can control
# whatever cypher we want, unlike in other cases
return cypher_command_lookup[self._cache_class.cypher]
def checksum(self):
remote_cmd = (f'{self.cypher_command} {self.rpath} | '
'awk \'{ print $1 }\';')
hex_ = self._ssh(remote_cmd).decode(self.encoding)
log.debug(hex_)
return bytes.fromhex(hex_)
def _stat_cmd(self, stat_format=StatResult.stat_format, path=None):
# TODO use _stat_format_darwin for cases where gstat is missing
cmd = 'gstat' if self.remote_platform == 'darwin' else 'stat'
if path is None:
path = self.rpath
if path == '':
_path = path
else:
_path = f' "{path}"'
return f'{cmd} -c {stat_format}{_path}'
def stat(self):
remote_cmd = self._stat_cmd()
out = self._ssh(remote_cmd)
try:
return StatResult(out)
except ValueError as e:
if out.endswith(b'Permission denied'):
raise PermissionError(out.decode())
elif out.endswith(b'No such file or directory'):
raise FileNotFoundError(out.decode())
else:
log.error(remote_cmd)
raise ValueError(out) from e
def exists(self):
try:
st = self.stat()
return bool(st) # FIXME
except FileNotFoundError: # FIXME there will be more types here ...
pass
@property
def __parent(self): # no longer needed since we inherit from path directly
# because the identifiers are paths if we move
# file.ext to another folder, we treat it as if it were another file
# at least for this SshRemote path, if we move a file on our end
        # then we had best update our cache
# if someone else moves the file on the remote, well, then
# that file simply vanishes since we weren't notified about it
# if there is a remote transaction log we can replay if there isn't
# we have to assume the file was deleted or check all the names and
# hashes of new files to see if it has moved (and not been changed)
# a move and change without a sync will be bad for us
# If you have an unanchored path then resolve()
# always operates under the assumption that the
        # current working directory is the right base, which I think is incorrect
# as soon as you start passing unresolved paths around
# the remote system doesn't know what context you are in
# so we need to fail loudly
# basically force people to manually resolve their paths
return self.__class__(self.cache.parent) # FIXME not right ...
def is_dir(self):
remote_cmd = self._stat_cmd(stat_format="%F")
out = self._ssh(remote_cmd)
return out == b'directory'
def is_file(self):
remote_cmd = self._stat_cmd(stat_format="%F")
out = self._ssh(remote_cmd)
return out == b'regular file'
@property
def children(self):
# this is amusingly bad, also children_recursive ... drop the maxdepth
#("find ~/files/blackfynn_local/SPARC\ Consortium -maxdepth 1 "
#"-exec stat -c \"'%n' %o %s %W %X %Y %Z %g %u %f\" {} \;")
        # checksums when listing children? maybe ...
#\"'%n' %o %s %W %X %Y %Z %g %u %f\"
if self.is_dir():
# no children if it is a file sadly
remote_cmd = (f"cd {self.rpath};"
f"{self._stat_cmd(path='')} {{.,}}*;"
"echo '----';"
f"{self.cypher_command} {{.,}}*;" # FIXME fails on directories destroying alignment
'cd "${OLDPWD}"')
out = self._ssh(remote_cmd)
stats, checks = out.split(b'\r\n----\r\n')
#print(stats)
stats = {sr.name:sr for s in stats.split(b'\r\n')
for sr in (StatResult(s),)}
checks = {fn:bytes.fromhex(cs) for l in checks.split(b'\r\n')
if not b'Is a directory' in l
for cs, fn in (l.decode(self.encoding).split(' ', 1),)}
return stats, checks # TODO
def _mkdir_child(self, child_name):
raise NotImplementedError('implement in subclass and/or fix instantiation/existence issues')
def __repr__(self):
return f'{self.__class__.__name__}({self.rpath!r}, host={self.host!r})'
SshRemote._bind_flavours()
| [
"[email protected]"
] | |
09c0fefdd010970f39b250148bf0b0160b5f65a1 | a00fdfc743262d3d9253bab1f2e8b10f99f013ee | /Bambu/bambuToNero.py | 88f058034181c1d5bdb4ff97c5bcf43358b2fc8b | [] | no_license | pdoming/NeroProducer | 2a97101002c626d7f23f3c80e1abfaacc5c81968 | 8082361fa0a05c83cc6c6aacb1bdd5de24f65115 | refs/heads/master | 2021-01-15T17:35:58.814592 | 2015-07-25T16:50:39 | 2015-07-25T16:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,400 | py | from MitAna.TreeMod.bambu import mithep, analysis
import os
mitdata = os.environ['MIT_DATA']
from MitPhysics.Mods.GoodPVFilterMod import goodPVFilterMod
from MitPhysics.Mods.JetCorrectionMod import jetCorrectionMod
from MitPhysics.Mods.JetIdMod import jetIdMod
from MitPhysics.Mods.MetCorrectionMod import metCorrectionMod
from MitPhysics.Mods.PFTauIdMod import pfTauIdMod
pfTauIdMod.AddCutDiscriminator(mithep.PFTau.kDiscriminationByRawCombinedIsolationDBSumPtCorr3Hits, 5., False)
from MitPhysics.Mods.ElectronIdMod import electronIdMod
from MitPhysics.Mods.MuonIdMod import muonIdMod
from MitPhysics.Mods.PhotonIdMod import photonIdMod
from MitPhysics.Mods.SeparatePileUpMod import separatePileUpMod
generatorMod = mithep.GeneratorMod(
IsData = False,
CopyArrays = False,
MCMETName = "GenMet"
)
electronTightId = electronIdMod.clone('ElectronTightId',
IsFilterMode = False,
InputName = electronIdMod.GetOutputName(),
OutputName = 'TightElectronId',
IdType = mithep.ElectronTools.kPhys14Tight,
IsoType = mithep.ElectronTools.kPhys14TightIso
)
muonTightId = muonIdMod.clone('MuonTightId',
IsFilterMode = False,
InputName = muonIdMod.GetOutputName(),
OutputName = 'TightMuonId',
IdType = mithep.MuonTools.kMuonPOG2012CutBasedIdTight,
IsoType = mithep.MuonTools.kPFIsoBetaPUCorrected
)
muonTightIdMask = mithep.MaskCollectionMod('TightMuons',
InputName = muonIdMod.GetOutputName(),
MaskName = muonTightId.GetOutputName(),
OutputName = 'TightMuons'
)
fatJetCorrectionMod = mithep.JetCorrectionMod('FatJetCorrection',
InputName = 'AKt8PFJetsCHS',
CorrectedJetsName = 'CorrectedFatJets',
RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
)
if analysis.isRealData:
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L1FastJet_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2Relative_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L3Absolute_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/74X_dataRun2_Prompt_v1_L2L3Residual_AK8PFchs.txt")
else:
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L1FastJet_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L2Relative_AK8PFchs.txt")
fatJetCorrectionMod.AddCorrectionFromFile(mitdata + "/MCRUN2_74_V9_L3Absolute_AK8PFchs.txt")
fatJetIdMod = jetIdMod.clone('FatJetId',
InputName = fatJetCorrectionMod.GetOutputName(),
OutputName = 'GoodFatJets',
MVATrainingSet = mithep.JetIDMVA.nMVATypes
)
photonMediumId = photonIdMod.clone('PhotonMediumId',
IsFilterMode = False,
InputName = photonIdMod.GetOutputName(),
OutputName = 'PhotonMediumId',
IdType = mithep.PhotonTools.kPhys14Medium,
IsoType = mithep.PhotonTools.kPhys14Medium
)
photonTightId = photonMediumId.clone('PhotonTightId',
OutputName = 'PhotonTightId',
IdType = mithep.PhotonTools.kPhys14Tight,
IsoType = mithep.PhotonTools.kPhys14Tight
)
head = 'HEAD'
tag = 'BAMBU_041'
fillers = []
fillers.append(mithep.nero.EventFiller(
RhoAlgo = mithep.PileupEnergyDensity.kFixedGridFastjetAll
))
fillers.append(mithep.nero.VertexFiller(
VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.JetsFiller(
JetsName = jetIdMod.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName(),
JetIDMVA = jetIdMod.GetJetIDMVA()
))
fillers.append(mithep.nero.TausFiller(
TausName = pfTauIdMod.GetOutputName()
))
fillers.append(mithep.nero.LeptonsFiller(
ElectronsName = electronIdMod.GetOutputName(),
MuonsName = muonIdMod.GetOutputName(),
ElectronIdsName = electronTightId.GetOutputName(),
MuonIdsName = muonTightId.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName(),
PFCandsName = mithep.Names.gkPFCandidatesBrn,
NoPUPFCandsName = separatePileUpMod.GetPFNoPileUpName(),
PUPFCandsName = separatePileUpMod.GetPFPileUpName()
))
fillers.append(mithep.nero.FatJetsFiller(
FatJetsName = fatJetIdMod.GetOutputName()
))
fillers.append(mithep.nero.MetFiller(
MetName = metCorrectionMod.GetOutputName(),
MuonsName = muonTightIdMask.GetOutputName(),
GenMetName = generatorMod.GetMCMETName()
))
fillers.append(mithep.nero.PhotonsFiller(
PhotonsName = photonIdMod.GetOutputName(),
MediumIdName = photonMediumId.GetOutputName(),
TightIdName = photonTightId.GetOutputName(),
VerticesName = goodPVFilterMod.GetOutputName()
))
fillers.append(mithep.nero.MonteCarloFiller())
fillers.append(mithep.nero.TriggerFiller())
fillers.append(mithep.nero.AllFiller())
neroMod = mithep.NeroMod(
Info = 'Nero',
Head = head,
Tag = tag,
FileName = 'nero.root',
PrintLevel = 0
)
for filler in fillers:
neroMod.AddFiller(filler)
sequence = goodPVFilterMod
if not analysis.isRealData:
sequence *= generatorMod
sequence *= separatePileUpMod * \
jetCorrectionMod * \
jetIdMod * \
metCorrectionMod * \
pfTauIdMod * \
electronIdMod * \
muonIdMod * \
photonIdMod * \
electronTightId * \
muonTightId * \
muonTightIdMask * \
fatJetCorrectionMod * \
fatJetIdMod * \
photonMediumId * \
photonTightId * \
neroMod
analysis.SetAllowNoHLTTree(True)
analysis.setSequence(sequence)
| [
"[email protected]"
] | |
feab3ebba8930e7e527605d29f696b086b58d027 | 4c3094a869f59be8836993469b28f088fef9fff1 | /Questions/Q_093_RentalCarLocations.py | 35739a04cd88935d0ee54e3e84963fad486f00b2 | [] | no_license | Bharadwaja92/DataInterviewQuestions | d885d40da4d546a164eee37e7250ddb519fc8954 | 5b002f34c3b1440f4347a098f7ce1db84fc80e7f | refs/heads/master | 2022-11-06T08:57:49.283013 | 2020-06-22T09:10:40 | 2020-06-22T09:10:40 | 269,247,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | """""""""
Suppose you're working for a car rental company, looking to model potential location distribution of their
cars at major airports. The company operates in LA, SF, and San Jose.
Customers regularly pickup a car in one of these 3 cities and drop it off in another.
The company is looking to compute how likely it is that a given car will end up in a given city.
You can model this as a Markov chain (where each time step corresponds to a new customer taking the car).
The transition probabilities of the company's car allocation by city is as follows:
SF | LA | San Jose
0.6 0.1 0.3 | SF
0.2 0.8 0.3 | LA
0.2 0.1 0.4 | San Jose
As shown, the probability a car stays in SF is 0.6, the probability it moves from SF to LA is 0.2,
SF to San Jose is 0.2, etc.
Using the information above, determine the probability a car will start in SF but move to LA right after.
"""
| [
"[email protected]"
] | |
a1b04624df6910adad210fe98bb6ade2e31d986b | b772048db1d84de6071dcb3978b6f548d2b42ae4 | /tests/test_ner.py | 25161ef7c203bccec745b1000a646113cac4af98 | [
"BSD-2-Clause"
] | permissive | yutanakamura-tky/MedNER-J | 46ca13d87b6c4977b4042915ff2105ab4dc62d88 | a0c68a32553bbbdb9f5ae5fd41584198951bc14c | refs/heads/master | 2023-08-21T23:05:22.645001 | 2021-08-10T02:34:45 | 2021-08-10T02:34:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | import unittest
from medner_j import Ner
class TestNer(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.model = Ner.from_pretrained(model_name="BERT", normalizer="dict")
cls.examples = ['それぞれの関節に関節液貯留は見られなかった', 'その後、左半身麻痺、CTにて右前側頭葉の出血を認める。 ']
cls.xmls = ['それぞれの関節に<CN value="かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留">関節液貯留</CN>は見られなかった', 'その後、<C value="ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺">左半身麻痺</C>、CTにて右前側頭葉の<C value="しゅっけつ;icd=R58;lv=S/freq=高;出血">出血</C>を認める。 ']
cls.dicts = [
[{"span": (8, 13), "type": "CN", "disease":"関節液貯留", "norm":"かんせつえきちょりゅう;icd=E877;lv=C/freq=高;体液貯留"}],
[{"span": (4, 9), "type": "C", "disease": "左半身麻痺", "norm": "ひだりはんしんまひ;icd=G819;lv=A/freq=高;片麻痺"}, {"span": (20, 22), "type": "C", "disease": "出血", "norm": "しゅっけつ;icd=R58;lv=S/freq=高;出血"}]
]
def test_xml(self):
results = self.model.predict(self.examples)
self.assertEqual(results, self.xmls)
def test_dict(self):
results = self.model.predict(self.examples, output_format="dict")
self.assertEqual(results, self.dicts)
@classmethod
def tearDownClass(cls):
del cls.model
del cls.examples
del cls.xmls
del cls.dicts
| [
"[email protected]"
] |