blob_id (stringlengths 40..40) | directory_id (stringlengths 40..40) | path (stringlengths 3..616) | content_id (stringlengths 40..40) | detected_licenses (sequencelengths 0..112) | license_type (stringclasses 2) | repo_name (stringlengths 5..115) | snapshot_id (stringlengths 40..40) | revision_id (stringlengths 40..40) | branch_name (stringclasses 777) | visit_date (timestamp[us] 2015-08-06 10:31:46 .. 2023-09-06 10:44:38) | revision_date (timestamp[us] 1970-01-01 02:38:32 .. 2037-05-03 13:00:00) | committer_date (timestamp[us] 1970-01-01 02:38:32 .. 2023-09-06 01:08:06) | github_id (int64 4.92k..681M, nullable) | star_events_count (int64 0..209k) | fork_events_count (int64 0..110k) | gha_license_id (stringclasses 22) | gha_event_created_at (timestamp[us] 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us] 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable) | gha_language (stringclasses 149) | src_encoding (stringclasses 26) | language (stringclasses 1) | is_vendor (bool) | is_generated (bool) | length_bytes (int64 3..10.2M) | extension (stringclasses 188) | content (stringlengths 3..10.2M) | authors (sequencelengths 1..1) | author_id (stringlengths 1..132) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ede6a7910e34d87a8089ec7a7a792cc145ae0a44 | ec700463d9af81f68a477535ac233646f4d262f7 | /python/__main__.py | 2966615ac56651c81b277b34316ddc91361aca73 | [] | no_license | gregjhansell97/grid-map | 36579afa7beadb78a4b8cc53e2c7f45c75ac28a2 | 7d4c25b583474ec45265b01e524ed0884aaa2937 | refs/heads/master | 2020-03-16T20:42:24.156940 | 2018-10-02T23:29:58 | 2018-10-02T23:29:58 | 132,969,337 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from grid_map import GridMap
import timeit

if __name__ == "__main__":
    gm = GridMap(5, bit_depth=10)
    for x in range(1000):
        for y in range(1000):
            gm.add(x, y, "loc:" + str((x, y)))
    gm = gm.sub_grids[1][0]
    print(gm)
    gm = gm.sub_grids[0][0]
    print(gm)
    gm = gm.sub_grids[0][0]
    print(gm)
    gm = gm.sub_grids[0][0]
    print(gm)
    gm = gm.sub_grids[0][0]
    print(gm)
    gm = gm.sub_grids[0][0]
| [
"[email protected]"
] | |
e35cce8f90ca96866959109432451fecbde33194 | e7515012ccb999a499947bea3ef01e82b9a2b15f | /plaso/parsers/winjob.py | f971a8822e0f8e1fe7a3a3157e3264dd4c4eaa56 | [
"Apache-2.0"
] | permissive | vonnopsled/plaso | ebfe5af84b955b9e40610bd76598671256ddea4f | c14e3a0c1db0b05280ff58219d33f487c0a40a6f | refs/heads/master | 2020-12-31T04:07:25.047511 | 2016-01-04T07:07:01 | 2016-01-04T07:07:01 | 46,817,224 | 0 | 0 | null | 2015-11-24T20:38:20 | 2015-11-24T20:38:20 | null | UTF-8 | Python | false | false | 10,311 | py | # -*- coding: utf-8 -*-
"""Parser for Windows Scheduled Task job files."""
import construct
from plaso.events import time_events
from plaso.lib import binary
from plaso.lib import errors
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = 'Brian Baskin ([email protected])'
class WinJobEvent(time_events.TimestampEvent):
"""Convenience class for a Windows Scheduled Task event.
Attributes:
application: string that contains the path to job executable.
comment: string that contains the job description.
parameter: string that contains the application command line parameters.
trigger: an integer that contains the event trigger, e.g. DAILY.
username: string that contains the username that scheduled the job.
working_dir: string that contains the working path for task.
"""
DATA_TYPE = u'windows:tasks:job'
def __init__(
self, timestamp, timestamp_description, application, parameter,
working_dir, username, trigger, description):
"""Initializes the event object.
Args:
timestamp: the timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
timestamp_description: the usage string for the timestamp value.
application: string that contains the path to job executable.
parameter: string that contains the application command line parameters.
working_dir: string that contains the working path for task.
username: string that contains the username that scheduled the job.
trigger: an integer that contains the event trigger, e.g. DAILY.
description: string that contains the job description.
"""
super(WinJobEvent, self).__init__(timestamp, timestamp_description)
self.application = application
self.comment = description
self.parameter = parameter
self.trigger = trigger
self.username = username
self.working_dir = working_dir
class WinJobParser(interface.FileObjectParser):
"""Parse Windows Scheduled Task files for job events."""
NAME = u'winjob'
DESCRIPTION = u'Parser for Windows Scheduled Task job (or At-job) files.'
_PRODUCT_VERSIONS = {
0x0400: u'Windows NT 4.0',
0x0500: u'Windows 2000',
0x0501: u'Windows XP',
0x0600: u'Windows Vista',
0x0601: u'Windows 7',
0x0602: u'Windows 8',
0x0603: u'Windows 8.1'
}
_JOB_FIXED_STRUCT = construct.Struct(
u'job_fixed',
construct.ULInt16(u'product_version'),
construct.ULInt16(u'format_version'),
construct.Bytes(u'job_uuid', 16),
construct.ULInt16(u'application_length_offset'),
construct.ULInt16(u'trigger_offset'),
construct.ULInt16(u'error_retry_count'),
construct.ULInt16(u'error_retry_interval'),
construct.ULInt16(u'idle_deadline'),
construct.ULInt16(u'idle_wait'),
construct.ULInt32(u'priority'),
construct.ULInt32(u'max_run_time'),
construct.ULInt32(u'exit_code'),
construct.ULInt32(u'status'),
construct.ULInt32(u'flags'),
construct.ULInt16(u'ran_year'),
construct.ULInt16(u'ran_month'),
construct.ULInt16(u'ran_weekday'),
construct.ULInt16(u'ran_day'),
construct.ULInt16(u'ran_hour'),
construct.ULInt16(u'ran_minute'),
construct.ULInt16(u'ran_second'),
construct.ULInt16(u'ran_millisecond'),
)
# Using Construct's utf-16 encoding here will create strings with their
# null terminators exposed. Instead, we'll read these variables raw and
# convert them using Plaso's ReadUTF16() for proper formatting.
_JOB_VARIABLE_STRUCT = construct.Struct(
u'job_variable',
construct.ULInt16(u'running_instance_count'),
construct.ULInt16(u'application_length'),
construct.String(
u'application',
lambda ctx: ctx.application_length * 2),
construct.ULInt16(u'parameter_length'),
construct.String(
u'parameter',
lambda ctx: ctx.parameter_length * 2),
construct.ULInt16(u'working_dir_length'),
construct.String(
u'working_dir',
lambda ctx: ctx.working_dir_length * 2),
construct.ULInt16(u'username_length'),
construct.String(
u'username',
lambda ctx: ctx.username_length * 2),
construct.ULInt16(u'comment_length'),
construct.String(
u'comment',
lambda ctx: ctx.comment_length * 2),
construct.ULInt16(u'userdata_length'),
construct.String(
u'userdata',
lambda ctx: ctx.userdata_length),
construct.ULInt16(u'reserved_length'),
construct.String(
u'reserved',
lambda ctx: ctx.reserved_length),
construct.ULInt16(u'test'),
construct.ULInt16(u'trigger_size'),
construct.ULInt16(u'trigger_reserved1'),
construct.ULInt16(u'sched_start_year'),
construct.ULInt16(u'sched_start_month'),
construct.ULInt16(u'sched_start_day'),
construct.ULInt16(u'sched_end_year'),
construct.ULInt16(u'sched_end_month'),
construct.ULInt16(u'sched_end_day'),
construct.ULInt16(u'sched_start_hour'),
construct.ULInt16(u'sched_start_minute'),
construct.ULInt32(u'sched_duration'),
construct.ULInt32(u'sched_interval'),
construct.ULInt32(u'trigger_flags'),
construct.ULInt32(u'trigger_type'),
construct.ULInt16(u'trigger_arg0'),
construct.ULInt16(u'trigger_arg1'),
construct.ULInt16(u'trigger_arg2'),
construct.ULInt16(u'trigger_padding'),
construct.ULInt16(u'trigger_reserved2'),
construct.ULInt16(u'trigger_reserved3'))
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a Windows job file-like object.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
header_struct = self._JOB_FIXED_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Windows Task Job file with error: {0:s}'.format(
exception))
if not header_struct.product_version in self._PRODUCT_VERSIONS:
raise errors.UnableToParseFile((
u'Unsupported product version in: 0x{0:04x} Scheduled Task '
u'file').format(header_struct.product_version))
if not header_struct.format_version == 1:
raise errors.UnableToParseFile(
u'Unsupported format version in: {0:d} Scheduled Task file'.format(
header_struct.format_version))
try:
job_variable_struct = self._JOB_VARIABLE_STRUCT.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
u'Unable to parse Windows Task Job file with error: {0:s}'.format(
exception))
try:
last_run_date = timelib.Timestamp.FromTimeParts(
header_struct.ran_year,
header_struct.ran_month,
header_struct.ran_day,
header_struct.ran_hour,
header_struct.ran_minute,
header_struct.ran_second,
microseconds=header_struct.ran_millisecond * 1000,
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
last_run_date = None
parser_mediator.ProduceParseError(
u'unable to determine last run date with error: {0:s}'.format(
exception))
try:
scheduled_date = timelib.Timestamp.FromTimeParts(
job_variable_struct.sched_start_year,
job_variable_struct.sched_start_month,
job_variable_struct.sched_start_day,
job_variable_struct.sched_start_hour,
job_variable_struct.sched_start_minute,
0, # Seconds are not stored.
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
scheduled_date = None
parser_mediator.ProduceParseError(
u'unable to determine scheduled date with error: {0:s}'.format(
exception))
application = binary.ReadUTF16(job_variable_struct.application)
description = binary.ReadUTF16(job_variable_struct.comment)
parameter = binary.ReadUTF16(job_variable_struct.parameter)
username = binary.ReadUTF16(job_variable_struct.username)
working_dir = binary.ReadUTF16(job_variable_struct.working_dir)
if last_run_date is not None:
event_object = WinJobEvent(
last_run_date, eventdata.EventTimestamp.LAST_RUNTIME, application,
parameter, working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
if scheduled_date is not None:
event_object = WinJobEvent(
scheduled_date, u'Scheduled To Start', application, parameter,
working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
# TODO: create a timeless event object if last_run_date and scheduled_date
# are None? What should be the description of this event?
if job_variable_struct.sched_end_year:
try:
scheduled_end_date = timelib.Timestamp.FromTimeParts(
job_variable_struct.sched_end_year,
job_variable_struct.sched_end_month,
job_variable_struct.sched_end_day,
0, # Hours are not stored.
0, # Minutes are not stored.
0, # Seconds are not stored.
timezone=parser_mediator.timezone)
except errors.TimestampError as exception:
scheduled_end_date = None
parser_mediator.ProduceParseError(
u'unable to determine scheduled end date with error: {0:s}'.format(
exception))
if scheduled_end_date is not None:
event_object = WinJobEvent(
scheduled_end_date, u'Scheduled To End', application, parameter,
working_dir, username, job_variable_struct.trigger_type,
description)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(WinJobParser)
| [
"[email protected]"
] | |
d2856e764575cdb8308c02b69d2303ddf1692b83 | c6d852e5842cf6f74123445d20ff03876377ae26 | /lemon/python22/lemon_14_190918_测试框架_unittest/test_练习相减02.py | 447882bd4b22fb5aed635fbc7eb95a77abf6e076 | [] | no_license | songyongzhuang/PythonCode_office | 0b3d35ca5d58bc305ae90fea8b1e8c7214619979 | cfadd3132c2c7c518c784589e0dab6510a662a6c | refs/heads/master | 2023-02-13T14:06:10.610935 | 2021-01-14T09:11:32 | 2021-01-14T09:11:32 | 327,183,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | # --*-- coding : utf-8 --*--
# Project : python22
# Current file : test_练习相减02.py
# Author : Administrator
# Create time : 2019-09-19 10:22
# IDE : PyCharm
# TODO Growing up is hard, progress is sweet, keep going!
import unittest


def minus(a, b):  # subtract b from a
    """ Subtraction """
    return a - b


x = 3
y = 5
expected = -2


class TestMinus(unittest.TestCase):
    # Class-level fixtures: run only once per test class
    @classmethod
    def setUpClass(cls):
        print('Runs only once, before the whole test class')

    @classmethod
    def tearDownClass(cls):
        print('Runs only once, after the whole test class')

    # Designing the test cases
    # Precondition
    def setUp(self):
        """Precondition:
        setUp runs automatically before every test method"""
        print('Precondition executed before each test case')

    # Postcondition
    def tearDown(self):
        """Postcondition:
        tearDown runs automatically after every test method"""
        print('Postcondition executed after each test case')

    def test_add_success(self):
        """ Check that the expression is true """
        self.assertTrue(expected == minus(x, y))

    def test_add_error(self):
        """Fail if the two objects are determined to be unequal."""
        try:
            self.assertEqual(-2, minus(x, y))
        except SyntaxError:
            pass


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
4cb569f1636bfc4eae939e6f9a0744d37db16326 | 20899d453bc61c169153338ac9d22d324df089c1 | /abc/abc162/B.py | 9eb9826bfab9e83ccd7c92096c9c66a9611d1f39 | [] | no_license | mui-nyan/AtCoder | b2d926b113963915426af679bf9b28430569707c | a702280f11a5b0b1b29dd099dbfc7b1c31fb89fd | refs/heads/master | 2022-07-04T16:32:41.164564 | 2022-06-19T07:24:11 | 2022-06-19T07:24:11 | 182,425,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import math
from functools import reduce
from collections import deque
import sys
sys.setrecursionlimit(10**7)

# Read space-separated input and return it as a list of ints.
def get_nums_l():
    return [int(s) for s in input().split(" ")]

# Read n lines of newline-separated input and return them as a list of ints.
def get_nums_n(n):
    return [int(input()) for _ in range(n)]

# Read all newline- or space-separated input and return an iterator of ints.
def get_all_int():
    return map(int, open(0).read().split())

def log(*args):
    print("DEBUG:", *args, file=sys.stderr)

n = int(input())
ans = 0
for i in range(1, n+1):
    if i % 3 == 0 or i % 5 == 0:
        continue
    ans += i
print(ans)
| [
"[email protected]"
] | |
85e88feb381eeaebe8cd19e82b3cf2a9e88051bc | c8d7f2da5ff9e13a5bb6f92b9387a336e7059644 | /dolo/numeric/matrix_equations.py | 0d3eb87483d5360957fdf884ec03b391a427d468 | [
"BSD-2-Clause"
] | permissive | TomAugspurger/dolo | 675e5c051e7fdcc8d0af441335d526408128b71f | 5d9f0f772860eadf3b9df79e47d158155835bd6b | refs/heads/master | 2020-12-25T12:47:30.156775 | 2013-02-11T20:13:56 | 2013-02-11T20:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,339 | py | from dolo.numeric.tensor import sdot,mdot
import numpy as np
TOL = 1e-10
# credits : second_order_solver is adapted from Sven Schreiber's port of Uhlig's Toolkit.
def second_order_solver(FF,GG,HH):
from scipy.linalg import qz
from dolo.numeric.extern.qz import qzdiv
from numpy import array,mat,c_,r_,eye,zeros,real_if_close,diag,allclose,where,diagflat
from numpy.linalg import solve
Psi_mat = array(FF)
Gamma_mat = array(-GG)
Theta_mat = array(-HH)
m_states = FF.shape[0]
Xi_mat = r_[c_[Gamma_mat, Theta_mat],
c_[eye(m_states), zeros((m_states, m_states))]]
Delta_mat = r_[c_[Psi_mat, zeros((m_states, m_states))],
c_[zeros((m_states, m_states)), eye(m_states)]]
AAA,BBB,Q,Z = qz(Delta_mat, Xi_mat)
Delta_up,Xi_up,UUU,VVV = [real_if_close(mm) for mm in (AAA,BBB,Q,Z)]
Xi_eigval = diag(Xi_up)/where(diag(Delta_up)>TOL, diag(Delta_up), TOL)
Xi_sortindex = abs(Xi_eigval).argsort()
# (Xi_sortabs doesn't really seem to be needed)
Xi_sortval = Xi_eigval[Xi_sortindex]
Xi_select = slice(0, m_states)
stake = (abs(Xi_sortval[Xi_select])).max() + TOL
Delta_up,Xi_up,UUU,VVV = qzdiv(stake,Delta_up,Xi_up,UUU,VVV)
try:
# check that all unused roots are unstable
assert abs(Xi_sortval[m_states]) > (1-TOL)
# check that all used roots are stable
assert abs(Xi_sortval[Xi_select]).max() < 1+TOL
except:
raise BKError('generic')
# check for unit roots anywhere
# assert (abs((abs(Xi_sortval) - 1)) > TOL).all()
Lambda_mat = diagflat(Xi_sortval[Xi_select])
VVVH = VVV.T
VVV_2_1 = VVVH[m_states:2*m_states, :m_states]
VVV_2_2 = VVVH[m_states:2*m_states, m_states:2*m_states]
UUU_2_1 = UUU[m_states:2*m_states, :m_states]
PP = - solve(VVV_2_1, VVV_2_2)
# slightly different check than in the original toolkit:
assert allclose(real_if_close(PP), PP.real)
PP = PP.real
## end of solve_qz!
print(PP.__class__)
return [Xi_sortval[Xi_select],PP]
def solve_sylvester(A,B,C,D,Ainv = None):
# Solves equation : A X + B X [C,...,C] + D = 0
# where X is a multilinear function whose dimension is determined by D
# inverse of A can be optionally specified as an argument
import slycot
n_d = D.ndim - 1
n_v = C.shape[1]
n_c = D.size/n_v**n_d
# import dolo.config
# opts = dolo.config.use_engine
# if opts['sylvester']:
# DD = D.flatten().reshape( n_c, n_v**n_d)
# [err,XX] = dolo.config.engine.engine.feval(2,'gensylv',n_d,A,B,C,-DD)
# X = XX.reshape( (n_c,)+(n_v,)*(n_d))
DD = D.reshape( n_c, n_v**n_d )
if n_d == 1:
CC = C
else:
CC = np.kron(C,C)
for i in range(n_d-2):
CC = np.kron(CC,C)
if Ainv != None:
Q = sdot(Ainv,B)
S = sdot(Ainv,DD)
else:
Q = np.linalg.solve(A,B)
S = np.linalg.solve(A,DD)
n = n_c
m = n_v**n_d
XX = slycot.sb04qd(n,m,Q,CC,-S)
X = XX.reshape( (n_c,)+(n_v,)*(n_d) )
return X
class BKError(Exception):
def __init__(self,type):
self.type = type
def __str__(self):
return 'Blanchard-Kahn error ({0})'.format(self.type)
| [
"[email protected]"
] | |
9eb155ab168b320e301794c6d06721d8159379c8 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/329/usersdata/297/91364/submittedfiles/dec2bin.py | f499b6f8e6c0b866d68629df150aa2c83d3d617b | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | # -*- coding: utf-8 -*-
while True:
    p = int(input('digite um numero p: '))
    q = int(input('digite um numero q: '))
    if q >= p:
        break
if str(p) in str(q):
    print('S')
else:
    print('N') | [
"[email protected]"
] | |
91ed919fe4f82d66d4c1e181233dc01892ee1182 | 420376c5a1fbf8a4572545a9c891a0f8f204ed5b | /scrapy_amazon/items.py | d2aeed20eb2ea2833ebfb79da6fce00b903d6891 | [] | no_license | kishoresurana/scrapy_amazon | 946fb8fe198736ba4233a2f3727ca1a1873ae937 | bbb72cdb5f468d5c8b605d273bb5c93b9a2b249a | refs/heads/master | 2020-12-25T21:55:35.192394 | 2014-07-27T20:09:24 | 2014-07-27T20:09:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapyAmazonItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    price = scrapy.Field()
    condition = scrapy.Field()
    seller = scrapy.Field()
    delivery = scrapy.Field()
    title = scrapy.Field()
    date = scrapy.Field()
| [
"[email protected]"
] | |
c4e8389d93f36f8805d8c3cdf58cabc747343f84 | 91fe8f479fa921fa84111d19222a5c6aa6eff030 | /basis/execute-unit/aiohttp-and-asyncio-test.py | 25312be5c6ecba564f33a7ed14ddc40b68021a95 | [] | no_license | romanticair/python | 2055c9cdaa46894c9788d5797643283786ed46dd | 6f91fe5e7cbedcdf4b8f7baa7641fd615b4d6141 | refs/heads/master | 2022-11-03T17:17:17.608786 | 2019-07-05T07:07:29 | 2019-07-05T07:07:29 | 195,356,190 | 0 | 1 | null | 2022-10-14T20:51:14 | 2019-07-05T07:00:33 | Python | UTF-8 | Python | false | false | 1,355 | py | """
asyncio enables single-threaded concurrent IO. Used only on the client side, its power is limited.
If asyncio is used on the server side, e.g. in a web server, then since every HTTP connection is an
IO operation, a single thread plus coroutines can support many concurrent users.
asyncio implements protocols such as TCP, UDP and SSL; aiohttp is an HTTP framework built on top of asyncio.
The initialization function init() below is itself a coroutine, and loop.create_server() uses asyncio to create the TCP service.
Write an HTTP server that handles the following URLs:
1. / - the index page returns b'<h1>Index</h1>';
2. /hello/{name} - returns the text "hello, %s!" built from the URL parameter.
"""
import asyncio
from aiohttp import web


async def index(request):
    await asyncio.sleep(0.5)
    return web.Response(body=b'<h1>Index</h1>')


async def hello(request):
    await asyncio.sleep(0.5)
    text = '<h1>hello, %s!</h1>' % request.match_info['name']
    return web.Response(body=text.encode('utf-8'))


async def init(loop):
    app = web.Application(loop=loop)
    app.router.add_route('GET', '/', index)
    app.router.add_route('GET', '/hello/{name}', hello)
    srv = await loop.create_server(app.make_handler(), '127.0.0.1', 3000)
    print('Server started at http://127.0.0.1:3000...')
    return srv


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(init(loop))
    loop.run_forever()
| [
"[email protected]"
] | |
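The row above defines an aiohttp server; a quick way to exercise it is sketched below. This is an editorial addition, not part of the original file: it assumes the server is already listening on 127.0.0.1:3000 and uses the standard aiohttp client API (`aiohttp.ClientSession`).

```python
# Editorial sketch: minimal client for the server defined in the row above.
# Assumes the server is already running on 127.0.0.1:3000.
import asyncio
import aiohttp

async def fetch_hello():
    async with aiohttp.ClientSession() as session:
        async with session.get('http://127.0.0.1:3000/hello/world') as resp:
            print(resp.status)        # expected: 200
            print(await resp.text())  # expected: <h1>hello, world!</h1>

asyncio.get_event_loop().run_until_complete(fetch_hello())
```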
6a42d49d7d83b0b0520c6e6d394d79b1e6c4fd48 | b8302a17ad124b2432380c7274e4780ec5adfe55 | /exercises/de/solution_04_03.py | e63f7c7a9d4320eaae8436a4c058573e32639ff4 | [
"MIT",
"CC-BY-NC-4.0"
] | permissive | FrankGrimm/spacy-course | 10da4ebf976d93aec50aa1b200019b4217f4043e | 5e09ef9d296dad2b0fd5ff1945f4cf9a55109906 | refs/heads/master | 2022-04-24T18:18:06.202131 | 2020-04-21T19:17:09 | 2020-04-21T19:17:09 | 257,692,388 | 1 | 0 | MIT | 2020-04-21T19:14:21 | 2020-04-21T19:14:20 | null | UTF-8 | Python | false | false | 650 | py | import json
from spacy.matcher import Matcher
from spacy.lang.de import German
with open("exercises/de/iphone.json") as f:
TEXTS = json.loads(f.read())
nlp = German()
matcher = Matcher(nlp.vocab)
# Zwei Tokens, deren kleingeschriebene Formen "iphone" und "x" sind
pattern1 = [{"LOWER": "iphone"}, {"LOWER": "x"}]
# Token mit der kleingeschriebenen Form "iphone" und eine Ziffer
pattern2 = [{"LOWER": "iphone"}, {"IS_DIGIT": True}]
# Füge Patterns zum Matcher hinzu und überprüfe die Resultate
matcher.add("GADGET", None, pattern1, pattern2)
for doc in nlp.pipe(TEXTS):
print([doc[start:end] for match_id, start, end in matcher(doc)])
| [
"[email protected]"
] | |
e9056dcc8a8628a344e0ddf4e9add6e257ddabae | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_310/ch25_2019_03_01_00_00_25_791523.py | 70bb03eaebe4809ffcc0bcea7e9b4073d6f8312b | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | km = int(input('distancia: '))
if km <= 200:
    preco = km*0.5
    print("{:.2f}".format(preco))
else:
    preco = km*0.45
    print("{:.2f}".format(preco)) | [
"[email protected]"
] | |
3cc7dc94fdb029bb70bc409a3dc8ffef0368bf06 | 2cec0797981b73c497866a75fb6d33f4c3a4c06c | /brain_tumor_classification/modules/data/utils.py | e5cd18bf3458f2de6aa299ac09b545c77cfc04b4 | [] | no_license | Vadbeg/brain_tumor_classification | ed44e50076627a0682e2eca13cf115716c510ed1 | ba87b65717cd1fe75871f3108db1394de271c62d | refs/heads/master | 2023-08-01T13:46:27.176780 | 2021-09-19T15:14:32 | 2021-09-19T15:14:32 | 397,667,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | """Module with utilities for dataset"""
from pathlib import Path
from typing import List, Optional, Tuple, Union
import numpy as np
from monai.transforms import (
AddChanneld,
Compose,
LoadImaged,
Resized,
ScaleIntensityRanged,
Transform,
)
from torch.utils.data import DataLoader, Dataset
def get_train_val_paths(
train_path: Union[str, Path],
train_split_percent: float = 0.7,
ct_file_extension: str = '*.nii.gz',
item_limit: Optional[int] = None,
shuffle: bool = True,
) -> Tuple[List[Path], List[Path]]:
train_path = Path(train_path)
list_of_paths = list(train_path.glob(ct_file_extension))
if shuffle:
np.random.shuffle(list_of_paths)
edge_value = int(train_split_percent * len(list_of_paths))
train_list_of_paths = list_of_paths[:edge_value]
val_list_of_paths = list_of_paths[edge_value:]
if item_limit:
train_list_of_paths = train_list_of_paths[:item_limit]
val_list_of_paths = val_list_of_paths[:item_limit]
return train_list_of_paths, val_list_of_paths
def create_data_loader(
dataset: Dataset, batch_size: int = 1, shuffle: bool = True, num_workers: int = 2
) -> DataLoader:
data_loader = DataLoader(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
pin_memory=True,
)
return data_loader
def get_load_transforms(
img_key: str,
original_min: float = 0.0,
original_max: float = 200.0,
res_min: float = 0.0,
res_max: float = 1.0,
spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
preprocessing_transforms = get_preprocessing_transforms(
img_key=img_key,
original_min=original_min,
original_max=original_max,
res_min=res_min,
res_max=res_max,
spatial_size=spatial_size,
)
load_transforms = Compose(
[LoadImaged(keys=[img_key], dtype=np.float32), preprocessing_transforms]
)
return load_transforms
def get_preprocessing_transforms(
img_key: str,
original_min: float = 0.0,
original_max: float = 200.0,
res_min: float = 0.0,
res_max: float = 1.0,
spatial_size: Tuple[int, int, int] = (196, 196, 128),
) -> Compose:
preprocessing_transforms = Compose(
[
AddChanneld(keys=[img_key]),
ScaleIntensityRanged(
keys=[img_key],
a_min=original_min,
a_max=original_max,
b_min=res_min,
b_max=res_max,
clip=True,
),
Resized(keys=[img_key], spatial_size=spatial_size),
]
)
return preprocessing_transforms
| [
"[email protected]"
] | |
e3f9b9ccd9704d797def23c50f582b8c877f8f37 | 9059d9cbad4188ed2980f551151b9678ffb68b44 | /Chapter12_logging/12-3.logging_config_example.py | 0262db2fa4267b523bc6fa234849422e7c5042d2 | [] | no_license | mhee4321/python_basic | ad0e64fa21ecfab231a6627ba6abeea82d725690 | 86031975a9121efe5785e83f663255a7b4e4ba77 | refs/heads/master | 2023-02-11T20:31:54.353219 | 2021-01-07T05:44:31 | 2021-01-07T05:44:31 | 326,850,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | import logging # 로깅 모듈 탑재
import logging.config  # load the logging configuration module
# Read the configuration file
logging.config.fileConfig('12-2.logging.conf')
# Create a logger
logger = logging.getLogger(__name__)  # create the logger
# Emit log messages
logger.debug('Only developers will understand this message.')  # DEBUG log output
logger.info('Everything is working as expected.')  # INFO log output
logger.warning('A problem is very likely to occur soon.')  # WARNING log output
logger.error('Something went wrong. The feature does not work.')  # ERROR log output
logger.critical('The system is going down!!!!')  # CRITICAL log output
| [
"[email protected]"
] | |
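The script in the row above loads a file named `12-2.logging.conf`, which is not included in this row. For readers unfamiliar with `logging.config.fileConfig`, a minimal configuration in that format might look like the sketch below; this is an assumed example, not the repository's actual config file.

```ini
; Assumed example only -- not the actual 12-2.logging.conf from the repository.
[loggers]
keys=root

[handlers]
keys=consoleHandler

[formatters]
keys=simpleFormatter

[logger_root]
level=DEBUG
handlers=consoleHandler

[handler_consoleHandler]
class=StreamHandler
level=DEBUG
formatter=simpleFormatter
args=(sys.stdout,)

[formatter_simpleFormatter]
format=%(asctime)s %(name)s %(levelname)s: %(message)s
```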
8a4871b4d661ef4a0a122394b00d6b5f55566f2e | 9d2bafb07baf657c447d09a6bc5a6e551ba1806d | /ros2_ws/build/std_msgs/rosidl_generator_py/std_msgs/msg/_multi_array_layout.py | e830a59dc03efc5d1893c4f8d32f97cabca4ecd6 | [] | no_license | weidafan/ros2_dds | f65c4352899a72e1ade662b4106e822d80a99403 | c0d9e6ff97cb7cc822fe25a62c0b1d56f7d12c59 | refs/heads/master | 2021-09-05T20:47:49.088161 | 2018-01-30T21:03:59 | 2018-01-30T21:03:59 | 119,592,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | # generated from rosidl_generator_py/resource/_msg.py.em
# generated code does not contain a copyright notice
import logging
import traceback
class Metaclass(type):
"""Metaclass of message 'MultiArrayLayout'."""
_CONVERT_FROM_PY = None
_CONVERT_TO_PY = None
_DESTROY_ROS_MESSAGE = None
_TYPE_SUPPORT = None
__constants = {
}
@classmethod
def __import_type_support__(cls):
try:
from rosidl_generator_py import import_type_support
module = import_type_support('std_msgs')
except ImportError:
logger = logging.getLogger('rosidl_generator_py.MultiArrayLayout')
logger.debug(
'Failed to import needed modules for type support:\n' + traceback.format_exc())
else:
cls._CONVERT_FROM_PY = module.convert_from_py_msg_multi_array_layout
cls._CONVERT_TO_PY = module.convert_to_py_msg_multi_array_layout
cls._TYPE_SUPPORT = module.type_support_msg_multi_array_layout
cls._DESTROY_ROS_MESSAGE = module.destroy_ros_message_msg_multi_array_layout
from std_msgs.msg import MultiArrayDimension
if MultiArrayDimension.__class__._TYPE_SUPPORT is None:
MultiArrayDimension.__class__.__import_type_support__()
@classmethod
def __prepare__(cls, name, bases, **kwargs):
# list constant names here so that they appear in the help text of
# the message class under "Data and other attributes defined here:"
# as well as populate each message instance
return {
}
class MultiArrayLayout(metaclass=Metaclass):
"""Message class 'MultiArrayLayout'."""
__slots__ = [
'_dim',
'_data_offset',
]
def __init__(self, **kwargs):
assert all(['_' + key in self.__slots__ for key in kwargs.keys()]), \
'Invalid arguments passed to constructor: %r' % kwargs.keys()
self.dim = kwargs.get('dim', list())
self.data_offset = kwargs.get('data_offset', int())
def __repr__(self):
typename = self.__class__.__module__.split('.')
typename.pop()
typename.append(self.__class__.__name__)
args = [s[1:] + '=' + repr(getattr(self, s, None)) for s in self.__slots__]
return '%s(%s)' % ('.'.join(typename), ', '.join(args))
@property
def dim(self):
"""Message field 'dim'."""
return self._dim
@dim.setter
def dim(self, value):
from std_msgs.msg import MultiArrayDimension
from collections import Sequence
from collections import Set
from collections import UserList
from collections import UserString
assert \
((isinstance(value, Sequence) or
isinstance(value, Set) or
isinstance(value, UserList)) and
not isinstance(value, str) and
not isinstance(value, UserString) and
all([isinstance(v, MultiArrayDimension) for v in value]) and
True), \
"The 'dim' field must be a set or sequence and each value of type 'MultiArrayDimension'"
self._dim = value
@property
def data_offset(self):
"""Message field 'data_offset'."""
return self._data_offset
@data_offset.setter
def data_offset(self, value):
assert \
isinstance(value, int), \
"The 'data_offset' field must of type 'int'"
assert value >= 0 and value < 4294967296, \
"The 'data_offset' field must be an unsigned integer in [0, 4294967296)"
self._data_offset = value
| [
"[email protected]"
] | |
4b32a00c650bafd26ad85ee0f76ed96d200dfce0 | d99ac626d62c663704444a9cce7e7fc793a9e75e | /crypto_implementations/virgil-crypto-c/wrappers/python/virgil_crypto_lib/foundation/_c_bridge/_vscf_alg_info_der_serializer.py | 222936908c80c90638db7d52f3cdf4d1a644e7ae | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Experiment5X/CryptoFunctionDetection | 3ab32d5573a249d24db1faf772721bc80b8d905d | dac700193e7e84963943593e36844b173211a8a1 | refs/heads/master | 2023-04-19T09:12:35.828268 | 2021-05-13T22:39:27 | 2021-05-13T22:39:27 | 355,299,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,174 | py | # Copyright (C) 2015-2020 Virgil Security, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Lead Maintainer: Virgil Security Inc. <[email protected]>
from virgil_crypto_lib._libs import *
from ctypes import *
from ._vscf_impl import vscf_impl_t
from virgil_crypto_lib.common._c_bridge import vsc_buffer_t
class vscf_alg_info_der_serializer_t(Structure):
pass
class VscfAlgInfoDerSerializer(object):
"""Provide DER serializer of algorithm information."""
def __init__(self):
"""Create underlying C context."""
self._ll = LowLevelLibs()
self._lib = self._ll.foundation
def vscf_alg_info_der_serializer_new(self):
vscf_alg_info_der_serializer_new = self._lib.vscf_alg_info_der_serializer_new
vscf_alg_info_der_serializer_new.argtypes = []
vscf_alg_info_der_serializer_new.restype = POINTER(vscf_alg_info_der_serializer_t)
return vscf_alg_info_der_serializer_new()
def vscf_alg_info_der_serializer_delete(self, ctx):
vscf_alg_info_der_serializer_delete = self._lib.vscf_alg_info_der_serializer_delete
vscf_alg_info_der_serializer_delete.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_delete.restype = None
return vscf_alg_info_der_serializer_delete(ctx)
def vscf_alg_info_der_serializer_use_asn1_writer(self, ctx, asn1_writer):
vscf_alg_info_der_serializer_use_asn1_writer = self._lib.vscf_alg_info_der_serializer_use_asn1_writer
vscf_alg_info_der_serializer_use_asn1_writer.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_use_asn1_writer.restype = None
return vscf_alg_info_der_serializer_use_asn1_writer(ctx, asn1_writer)
def vscf_alg_info_der_serializer_serialized_len(self, ctx, alg_info):
"""Return buffer size enough to hold serialized algorithm."""
vscf_alg_info_der_serializer_serialized_len = self._lib.vscf_alg_info_der_serializer_serialized_len
vscf_alg_info_der_serializer_serialized_len.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_serialized_len.restype = c_size_t
return vscf_alg_info_der_serializer_serialized_len(ctx, alg_info)
def vscf_alg_info_der_serializer_serialize(self, ctx, alg_info, out):
"""Serialize algorithm info to buffer class."""
vscf_alg_info_der_serializer_serialize = self._lib.vscf_alg_info_der_serializer_serialize
vscf_alg_info_der_serializer_serialize.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t), POINTER(vsc_buffer_t)]
vscf_alg_info_der_serializer_serialize.restype = None
return vscf_alg_info_der_serializer_serialize(ctx, alg_info, out)
def vscf_alg_info_der_serializer_setup_defaults(self, ctx):
"""Setup predefined values to the uninitialized class dependencies."""
vscf_alg_info_der_serializer_setup_defaults = self._lib.vscf_alg_info_der_serializer_setup_defaults
vscf_alg_info_der_serializer_setup_defaults.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_setup_defaults.restype = None
return vscf_alg_info_der_serializer_setup_defaults(ctx)
def vscf_alg_info_der_serializer_serialize_inplace(self, ctx, alg_info):
"""Serialize by using internal ASN.1 writer.
Note, that caller code is responsible to reset ASN.1 writer with
an output buffer."""
vscf_alg_info_der_serializer_serialize_inplace = self._lib.vscf_alg_info_der_serializer_serialize_inplace
vscf_alg_info_der_serializer_serialize_inplace.argtypes = [POINTER(vscf_alg_info_der_serializer_t), POINTER(vscf_impl_t)]
vscf_alg_info_der_serializer_serialize_inplace.restype = c_size_t
return vscf_alg_info_der_serializer_serialize_inplace(ctx, alg_info)
def vscf_alg_info_der_serializer_shallow_copy(self, ctx):
vscf_alg_info_der_serializer_shallow_copy = self._lib.vscf_alg_info_der_serializer_shallow_copy
vscf_alg_info_der_serializer_shallow_copy.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_shallow_copy.restype = POINTER(vscf_alg_info_der_serializer_t)
return vscf_alg_info_der_serializer_shallow_copy(ctx)
def vscf_alg_info_der_serializer_impl(self, ctx):
vscf_alg_info_der_serializer_impl = self._lib.vscf_alg_info_der_serializer_impl
vscf_alg_info_der_serializer_impl.argtypes = [POINTER(vscf_alg_info_der_serializer_t)]
vscf_alg_info_der_serializer_impl.restype = POINTER(vscf_impl_t)
return vscf_alg_info_der_serializer_impl(ctx)
| [
"[email protected]"
] | |
519e6d1ad5bda54f6ed5b6ff5dc4202c57d10141 | 6f0d8416daeb787b13938d5fa49c3d2e08d15e02 | /tests/test_cam.py | 5a5dbc61b10d60caf62b858b4f880f2bed62d9ec | [
"MIT"
] | permissive | MartinHjelmare/matrixscreener | cbfc0ba95614c7dd6e152bb63a24b67ed03045ca | b6e93d9c96139cf5f2b8942d61681e45d7b6b4e5 | refs/heads/master | 2021-01-22T14:21:16.758654 | 2015-02-19T11:53:46 | 2015-02-19T11:53:46 | 57,959,734 | 0 | 0 | null | 2016-05-03T10:03:40 | 2016-05-03T10:03:40 | null | UTF-8 | Python | false | false | 1,526 | py | from matrixscreener.cam import *
import pytest
class EchoSocket:
"Dummy echo socket for mocking."
msg = ''
def send(self, msg):
self.msg = msg
return len(msg)
def recv(self, buffer_size):
return self.msg[0:buffer_size]
def connect(self, where):
pass
def settimeout(self, timeout):
pass
# TEST
#- key (here cli) overrided if defined several times
#- prefix added
#- types (integer, float) should be converted to strings
def test_echo(monkeypatch):
"Prefix + command sent should be same as echoed socket message."
# mock socket
monkeypatch.setattr("socket.socket", EchoSocket)
# setup cam
cam = CAM()
cmd = [('cli', 'custom'), ('cmd', 'enableall'), ('value', 'true'),
('integer', 1234), ('float', 0.00234)]
# monkeypathced EchoSocket will never flush
def flush():
pass
cam.flush = flush
echoed = cam.send(cmd)[0]
sent = tuples_as_dict(cam.prefix + cmd)
assert sent == echoed
def test_commands(monkeypatch):
"short hand commands should work as intended"
# mock socket
monkeypatch.setattr("socket.socket", EchoSocket)
# setup cam
cam = CAM()
# monkeypathced EchoSocket will never flush
def flush():
pass
cam.flush = flush
# get_information
cmd = cam.prefix + [
('cmd', 'getinfo'),
('dev', 'stage')
]
information = cam.get_information()
should_be = tuples_as_dict(cmd)
assert information == should_be
| [
"[email protected]"
] | |
20d215ab84216efee4da368d5a8ad6e24ed57fc4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03679/s358798230.py | 083bf4ccd4da704fe0bfff938691cf5dbc1ec004 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 133 | py | X, A, B = map(int, input().split())
if A >= B:
    print('delicious')
elif A + X < B:
    print('dangerous')
else:
    print('safe') | [
"[email protected]"
] | |
da878145baa16b59947043420038f917d29d43bd | e7b483d88f80703c89553e1b9e2f5dd0322f7e38 | /sketch/util/http.py | e69fe5f151af3818aae7e26ffc6a7d32826a3f52 | [
"BSD-2-Clause"
] | permissive | nikcub/Sketch | 0f559ff9948bd355407257c25c261c1e0f237021 | 5d2d5f7e51c3eed374a8b12441dc8577b16c101e | refs/heads/master | 2016-09-09T23:32:10.243530 | 2011-11-04T13:56:03 | 2011-11-04T13:56:03 | 2,592,091 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,451 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=2:sw=2:expandtab
#
# Copyright (c) 2011, Nik Cubrilovic. All rights reserved.
#
# <[email protected]> <http://nikcub.appspot.com>
#
# Licensed under a BSD license. You may obtain a copy of the License at
#
# http://nikcub.appspot.com/bsd-license
#
"""
Sketch - TM_FILENAME}
desc
"""
import posixpath
import urllib
import urlparse

import webob
def extract_dataurl(dataurl):
if not dataurl[:5] == 'data:':
return (None, None)
img_index = dataurl.index(',')
if not img_index:
return (None, None)
img_type = dataurl[5:img_index].split(';')[0]
img_dat_enc = dataurl[img_index + 1:]
import base64
img_dat = base64.decodestring(img_dat_enc)
return (img_dat, img_type)
def urlunsplit(scheme=None, netloc=None, path=None, query=None, fragment=None):
"""Similar to ``urlparse.urlunsplit``, but will escape values and
urlencode and sort query arguments.
:param scheme:
URL scheme, e.g., `http` or `https`.
:param netloc:
Network location, e.g., `localhost:8080` or `www.google.com`.
:param path:
URL path.
:param query:
URL query as an escaped string, or a dictionary or list of key-values
tuples to build a query.
:param fragment:
Fragment identifier, also known as "anchor".
:returns:
An assembled absolute or relative URL.
"""
if not scheme or not netloc:
scheme = None
netloc = None
if path:
path = urllib.quote(to_utf8(path))
if query and not isinstance(query, basestring):
if isinstance(query, dict):
query = query.items()
query_args = []
for key, values in query:
if isinstance(values, basestring):
values = (values,)
for value in values:
query_args.append((to_utf8(key), to_utf8(value)))
# Sorting should be optional? Sorted args are commonly needed to build
# URL signatures for services.
query_args.sort()
query = urllib.urlencode(query_args)
if fragment:
fragment = urllib.quote(to_utf8(fragment))
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
def test_normalize_url():
urls = [
# 'example.com',
# 'example.com/',
# 'http://example.com/',
# 'http://example.com',
# 'http://example.com?',
# 'http://example.com/?',
# 'http://example.com//',
# 'http://example.com/a',
# 'http://example.com/a/',
# 'http://example.com/a/?',
# 'http://example.com/a/../',
# 'http://example.com/a/../?',
# 'http://example.com/a/b/../?',
# 'http://example.com/a/../',
# 'http://example.com/a/b/?z=1',
'http://example.com/a/?',
'http://@example.com/a/?',
'http://example.com:/a/?',
'http://@example.com:/a/?',
'http://example.com:80/a/?',
]
for url in urls:
print "%s \t\t\t\t\t\tclean: %s" % (url, normalize_url(url))
def normalize_url(s, charset='utf-8'):
"""
function that attempts to mimic browser URL normalization.
Partly taken from werkzeug.utils
<http://www.bitbucket.org/mitsuhiko/werkzeug-main/src/tip/werkzeug/utils.py>
There is a lot to URL normalization, see:
<http://en.wikipedia.org/wiki/URL_normalization>
:param charset: The target charset for the URL if the url was
given as unicode string.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlparse.urlsplit(s)
# print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
path = urllib.unquote(path)
if not netloc:
netloc = path.strip("/\\:?&")
path = '/'
if not scheme:
scheme = "http"
if not path:
path = '/'
netloc = netloc.strip("/\\:@?&")
path = posixpath.normpath(path)
path = urlparse.urljoin('/', path)
# path = urllib.quote(path, '/%')
qs = urllib.quote_plus(qs, ':&=')
# print "scheme: %s\n netloc:%s\n path:%s\n qs:%s\n anchor:%s\n" % (scheme, netloc, path, qs, anchor)
return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
def redirect(location, code = 302):
assert code in (301, 302, 303, 305, 307), 'invalid code'
from sketch import Response
display_location = location
response = Response(
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
'<title>Redirecting...</title>\n'
'<h1>Redirecting...</h1>\n'
'<p>You should be redirected automatically to target URL: '
'<a href="%s">%s</a>. If not click the link.' %
(location, display_location), code, mimetype='text/html')
response.headers['Location'] = location
return response
def abort_old(code, *args, **kwargs):
"""Raises an ``HTTPException``. The exception is instantiated passing
*args* and *kwargs*.
:param code:
A valid HTTP error code from ``webob.exc.status_map``, a dictionary
mapping status codes to subclasses of ``HTTPException``.
:param args:
Arguments to be used to instantiate the exception.
:param kwargs:
Keyword arguments to be used to instantiate the exception.
"""
cls = webob.exc.status_map.get(code)
if not cls:
raise KeyError('No exception is defined for code %r.' % code)
raise cls(*args, **kwargs)
def get_valid_methods(handler):
"""Returns a list of HTTP methods supported by a handler.
:param handler:
A :class:`RequestHandler` instance.
:returns:
A list of HTTP methods supported by the handler.
"""
return [method for method in Application.ALLOWED_METHODS if getattr(handler,
method.lower().replace('-', '_'), None)]
| [
"[email protected]"
] | |
2182531e49175062ac8b030e998b5c2c6ca3ae8d | cad91ae76d2746a6c28ddda0f33a58f9d461378f | /PyTorch/Recommendation/NCF/feature_spec.py | 40d56a0e310d345e17261e9bbfbd4618f5acb691 | [
"Apache-2.0"
] | permissive | NVIDIA/DeepLearningExamples | fe677521e7e2a16e3cb0b77e358f9aab72f8c11a | a5388a45f71a949639b35cc5b990bd130d2d8164 | refs/heads/master | 2023-08-31T20:57:08.798455 | 2023-08-23T10:09:12 | 2023-08-23T10:09:12 | 131,881,622 | 11,838 | 3,124 | null | 2023-08-28T16:57:33 | 2018-05-02T17:04:05 | Jupyter Notebook | UTF-8 | Python | false | false | 1,943 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import os
from typing import List, Dict
class FeatureSpec:
    def __init__(self, feature_spec, source_spec, channel_spec, metadata, base_directory):
        self.feature_spec: Dict = feature_spec
        self.source_spec: Dict = source_spec
        self.channel_spec: Dict = channel_spec
        self.metadata: Dict = metadata
        self.base_directory: str = base_directory

    @classmethod
    def from_yaml(cls, path):
        with open(path, 'r') as feature_spec_file:
            base_directory = os.path.dirname(path)
            feature_spec = yaml.safe_load(feature_spec_file)
            return cls.from_dict(feature_spec, base_directory=base_directory)

    @classmethod
    def from_dict(cls, source_dict, base_directory):
        return cls(base_directory=base_directory, **source_dict)

    def to_dict(self) -> Dict:
        attributes_to_dump = ['feature_spec', 'source_spec', 'channel_spec', 'metadata']
        return {attr: self.__dict__[attr] for attr in attributes_to_dump}

    def to_string(self):
        return yaml.dump(self.to_dict())

    def to_yaml(self, output_path=None):
        if not output_path:
            output_path = self.base_directory + '/feature_spec.yaml'
        with open(output_path, 'w') as output_file:
            print(yaml.dump(self.to_dict()), file=output_file)
| [
"[email protected]"
] | |
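A small usage sketch for the `FeatureSpec` class above (editorial addition, not part of the NVIDIA file). The dictionary keys mirror the four sections the constructor expects; the concrete values and the output directory are placeholders.

```python
# Editorial sketch: round-tripping a FeatureSpec. Values and paths are placeholders,
# and /tmp/ncf_dataset is assumed to exist before to_yaml() is called.
spec = FeatureSpec.from_dict(
    {
        "feature_spec": {"user": {}, "item": {}, "label": {}},
        "source_spec": {},
        "channel_spec": {},
        "metadata": {},
    },
    base_directory="/tmp/ncf_dataset",
)
print(spec.to_string())  # YAML rendering of the four sections
spec.to_yaml()           # writes /tmp/ncf_dataset/feature_spec.yaml
```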
ac4cec9c23d857374e16c812fac948e0c272797e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03108/s870352488.py | 0b87a41dcc411c3fbc8ae14366e08bef4bb0f7fc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | import sys
import collections
readline = sys.stdin.readline
class UnionFind():
    def __init__(self, n):
        self.n = n
        self.parents = [-1]*n
        self.rank = [0]*n
        self.size = [1]*n

    def find(self, x):
        if self.parents[x] < 0:
            return x
        else:
            self.parents[x] = self.find(self.parents[x])
            return self.parents[x]

    def union(self, x, y):
        x = self.find(x)
        y = self.find(y)
        if x == y:
            return
        if self.rank[x] < self.rank[y]:
            self.size[y] += self.size[x]
            self.parents[x] = y
        else:
            self.size[x] += self.size[y]
            self.parents[y] = x
            if self.rank[x] == self.rank[y]:
                self.rank[x] += 1

    def msize(self, x):
        return -self.size[self.find(x)]


def main():
    N, M = map(int, readline().split())
    nodelist = []
    for _ in range(M):
        A, B = map(int, readline().split())
        A -= 1; B -= 1
        nodelist.append((A, B))
    uf = UnionFind(N)
    anstmp = (N*(N-1))//2
    anslist = [anstmp]
    for _ in range(M):
        node = nodelist.pop()
        n0 = uf.find(node[0])
        n1 = uf.find(node[1])
        if n0 != n1:
            n0size = uf.size[n0]
            n1size = uf.size[n1]
        else:
            n0size = 0; n1size = 0
        uf.union(node[0], node[1])
        anstmp = anslist[-1]
        ans = anstmp - n0size*n1size
        anslist.append(ans)
    anslist = anslist[:-1]
    for _ in range(len(anslist)):
        ans = anslist.pop()
        print(ans)


if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
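An editorial sketch exercising the `UnionFind` class from the submission above outside of the contest I/O loop; the element count and the unions chosen here are arbitrary.

```python
# Editorial sketch: using the UnionFind class above directly.
uf = UnionFind(5)                 # five singleton components: {0} {1} {2} {3} {4}
uf.union(0, 1)
uf.union(1, 2)
print(uf.find(0) == uf.find(2))   # True: 0, 1, 2 now share a root
print(uf.size[uf.find(0)])        # 3: size of the merged component
print(uf.find(3) == uf.find(0))   # False: 3 is still separate
```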
9d31dd701cf90d929170893cddab05db06011ba7 | c4544c22c0618451746795090e07c80bc85a0877 | /file_upload/article/forms.py | fd00ffba0492b96c7d39b7f2448d488bfccf1d67 | [] | no_license | RelaxedDong/Django_course | 35f7027dc552ad148d2dc8679a19a1ffb12b8d14 | 2965089d15e4c80cd6402d362ee37f8cc675c08b | refs/heads/master | 2022-01-09T14:28:40.503099 | 2019-05-24T07:07:03 | 2019-05-24T07:07:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 379 | py | #encoding:utf-8
# __author__ = 'donghao'
# __time__ = 2019/5/13 21:52
from django import forms
from .models import Book
from django.core import validators
class BookForm(forms.ModelForm):
    cover_url = forms.FileField(validators=[validators.FileExtensionValidator(allowed_extensions=['jpg','jpeg'])])

    class Meta:
        model = Book
        fields = ['title','cover_url'] | [
"[email protected]"
] | |
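A short editorial view sketch showing how `BookForm` above would typically validate an upload; the URL name `book_list` and the template `upload.html` are placeholders, not names taken from the original project.

```python
# Editorial sketch: a Django view using BookForm. 'book_list' and 'upload.html' are placeholders.
from django.shortcuts import render, redirect

def upload_book(request):
    if request.method == 'POST':
        form = BookForm(request.POST, request.FILES)  # bind form fields and the uploaded file
        if form.is_valid():                           # runs the FileExtensionValidator
            form.save()                               # creates the Book row
            return redirect('book_list')
    else:
        form = BookForm()
    return render(request, 'upload.html', {'form': form})
```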
bbb6268281ee09c15af62c26d0df2d1c6065e735 | f9d5bc590bd6c6274d7a6efec0f60cac1d8286b2 | /assets/coins/monero/moneroImportPrices.py | 6a92df9ceca004c233c3ecc5ce2799c0931dad42 | [] | no_license | pr0logas/grepblockBEpython | 35c83c1bf2114fc9417bedff6cf2a6e2ad2e667e | bbeaa290d13d80f993d843c7f1dbbfd373eee332 | refs/heads/master | 2022-10-03T23:35:44.600740 | 2020-03-09T08:24:53 | 2020-03-09T08:24:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | #:: By GrepBlock.com developers // pr0logas
#:: Modified date: 2019-11-30
#:: Description: This file is a workspace for Prices importation.
import sys, time
from time import gmtime, strftime
from monero import *
sys.path.append('../../../')
from mongoDB import *
from parsePrices import parseCoinGeckoPrices
db = database
col = collectionForPricesUSD
# Init Classes;
MC = mongoConnection(mongoAuth, db, col)
PP = parseCoinGeckoPrices(apiProvider, vsCurrencyUSD, assetName)
# CoinGecko
result = PP.parsePrice()
# Insert Unix Time
aggregatedData = PP.aggregateInsertUnixTime(result)
#Insert to MongoDB
res = MC.insertPricesData(collectionForPricesUSD, aggregatedData)
timeSet = strftime("%Y-%m-%d %H:%M:%S", gmtime())
print timeSet + " Successfully inserted asset price: $" + res
| [
"[email protected]"
] | |
f6d2ffae909f5992e8ceea3bdc223d04acc73d4b | 2c3da6e0bddf55d64d650040bbf286c47b31811a | /学习路线/1.python基础/day05/02-for-else语句.py | ec56422c4833eede814e9a25e0dca957f39f600e | [
"MIT"
] | permissive | Bngzifei/PythonNotes | 76bd53db3033a9c51ab4bdd727842cd89607b584 | 01590e1b6c1bc0f04aa2d355fa2553c04cce27f2 | refs/heads/master | 2023-02-04T06:49:00.725463 | 2020-12-15T09:26:40 | 2020-12-15T09:26:40 | 155,154,662 | 1 | 2 | MIT | 2020-09-08T01:30:19 | 2018-10-29T05:02:48 | Python | UTF-8 | Python | false | false | 670 | py | list1 = ["zhansan", "lisi1", 'ww']
# for name in list1:  # runs twice and produces the wrong logic
#     if name == 'lisi':
#         print('found it')
#     else:
#         print("did not find it")
"""After the for loop finishes, the else attached to the for runs once by default; if you do not want the for's else to run, put a break inside the for."""
for name in list1:  # batch lookup: use for + break to find and return the wanted item, and the for's else to report that it is missing
    if name == 'lisi':
        print('found it')
        break
else:
    print('not found')
# for ... else ... belongs to the loop; it is used for batch lookups that print a single "not found" style message
| [
"[email protected]"
] | |
e5029b3854dbaef24fb6cce6c6025ff4d71cca34 | e8e2f3cb21e3f3c289b890dcf3cde567bb92dc32 | /venv/bin/chardetect | a471d60fdc696af75d4b511e1d3b9a0af3f271c1 | [] | no_license | Timur597/Feliz | a0071b93a87eab015dd205e14cba88bcb5f34926 | 6f712ded791c84dee71f75934fb77d0ae101f5e6 | refs/heads/master | 2023-05-27T15:54:54.782528 | 2021-06-09T16:34:45 | 2021-06-09T16:34:45 | 373,058,036 | 0 | 1 | null | 2021-06-09T16:47:59 | 2021-06-02T06:07:12 | Python | UTF-8 | Python | false | false | 262 | #!/home/timur/PyCharmProjects/feeliz-master/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
b66f70766f6fe3c97d830918ab3d7c33e5f9c1d4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/59/usersdata/161/49113/submittedfiles/testes.py | 3ede0d9001c5c08b41881d224976a6c2ae167e4c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | n=int(input('numero:'))
soma=0
for i in range(1,n+1,1):
    soma=4*(-1*i/(2*i+1))
print('%.4f' %soma)
| [
"[email protected]"
] | |
fea402ed06f40785cacbf954f34865f10e62de55 | 76dba08689db40edf2d01a98856fa2a20d98d679 | /甲鱼python/课程代码/第11讲/第11讲课后作业.py | f38d6087bebb08ecebe94960c7ce4388591454c7 | [] | no_license | pangfeiyo/PythonLearn | ce0747d75b53eb21acb6199acfe10934778420b2 | b514b3d7baa62fa7b801d26ff49266f02cb9cbd2 | refs/heads/master | 2021-05-11T10:20:14.818774 | 2020-01-16T15:47:16 | 2020-01-16T15:47:16 | 118,096,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 515 | py | # 从列表末尾取出一个元素,并将这个元素插入列表最前边
member = ['一','甲鱼','玩笑']
member.insert(0,member.pop())
print(member)
# Python supports negative indices; the last element of a list is index -1
list2 = [1,3,2,9,7,8]
print(list2[-3:-1])

# The difference between slicing and assignment
# A slice is effectively a copy
sy1 = [1,3,2,9,7,8]
sy2 = sy1[:]  # slicing copies sy1's contents into sy2
sy3 = sy1  # assignment binds sy3 to the same list as sy1
sy1.sort()  # sort sy1 in place
print('sy1:',sy1)
print('sy2:',sy2)
print('sy3:',sy3)
| [
"[email protected]"
] | |
8edf7add9dd89a5a59c9d84008f56f0adbe83abc | b7b40fffd7d192b89a7ad3bdb791a7dbd072ac64 | /axelrod/tests/test_memoryone.py | 44167991b5bf6387399275371a16858e90bad540 | [
"MIT"
] | permissive | DEFALT303/Axelrod | f91911ad7a404c30edfef38afd02319fcd12bc15 | e59fc40ebb705afe05cea6f30e282d1e9c621259 | refs/heads/master | 2020-09-24T08:39:49.107919 | 2015-04-16T16:15:42 | 2015-04-16T16:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,640 | py | """Test for the memoryone strategies."""
import random
import axelrod
from test_player import TestPlayer
class TestWinStayLostShift(TestPlayer):
name = "Win-Stay Lose-Shift"
player = axelrod.WinStayLoseShift
def test_strategy(self):
"""Starts by cooperating"""
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
def test_effect_of_strategy(self):
"""Check that switches if does not get best payoff."""
P1 = self.player()
P2 = axelrod.Player()
P1.history = ['C']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'C')
class TestGTFT(TestPlayer):
name = "Generous Tit-For-Tat"
player = axelrod.GTFT
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticCooperator(TestPlayer):
name = "Stochastic Cooperator"
player = axelrod.StochasticCooperator
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(15)
# With probability .065 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(1)
# With probability .229 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(3)
# With probability .266 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(13)
# With probability .42 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
class TestStochasticWSLS(TestPlayer):
name = "Stochastic WSLS"
player = axelrod.StochasticWSLS
stochastic = True
def test_strategy(self):
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['C']
random.seed(31)
# With probability .05 will cooperate
self.assertEqual(P1.strategy(P2), 'C')
# But otherwise will defect
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
P1.history = ['D']
P2.history = ['D']
random.seed(2)
# With probability .05 will defect
self.assertEqual(P1.strategy(P2), 'D')
# But otherwise will cooperate
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
class TestZDChi(TestPlayer):
name = "ZDChi"
player = axelrod.ZDChi
stochastic = True
def test_four_vector(self):
P1 = self.player()
expected_dictionary = {('C', 'D'): 0.5, ('D', 'C'): 0.75, ('D', 'D'): 0.0, ('C', 'C'): 1.1666666666666667}
for key in sorted(expected_dictionary.keys()):
self.assertAlmostEqual(P1._four_vector[key],
expected_dictionary[key])
def test_strategy(self):
# Testing the expected value is difficult here so these just ensure that
# future changes that break these tests will be examined carefully.
P1 = self.player()
P2 = axelrod.Player()
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['C']
random.seed(2)
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['C']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['C']
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
self.assertEqual(P1.strategy(P2), 'C')
P1.history = ['D']
P2.history = ['D']
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
self.assertEqual(P1.strategy(P2), 'D')
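# Illustrative sketch of how a four-vector like the one checked in
# test_four_vector is typically used by a memory-one strategy: the pair
# (my last move, opponent's last move) selects a cooperation probability.
# The function name is an assumption, not part of the package; values above 1
# simply behave as certain cooperation.
def memory_one_move(four_vector, my_last, opponent_last):
    p_cooperate = four_vector[(my_last, opponent_last)]
    return 'C' if random.random() < p_cooperate else 'D'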
| [
"[email protected]"
] | |
2ca40c9745cafec57f504ad00865b8a15eb016d0 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_15419.py | b16cd84c9dd57f33adebb10f84dbec1286edafa9 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | # Python comparing individual lists of lists elements
if x[i][0] > y[i][0]:
| [
"[email protected]"
] | |
46a2e88f482b70548c82568f1d10bf2234d6b0e0 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flasharray/FA_2_20/models/array.py | b454053102b55f917520181b04db56e7ba183f91 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 7,115 | py | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.20
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_20 import models
class Array(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'banner': 'str',
'capacity': 'int',
'console_lock_enabled': 'bool',
'encryption': 'ArrayEncryption',
'eradication_config': 'EradicationConfig',
'idle_timeout': 'int',
'ntp_servers': 'list[str]',
'os': 'str',
'parity': 'float',
'scsi_timeout': 'int',
'space': 'Space',
'version': 'str'
}
attribute_map = {
'id': 'id',
'name': 'name',
'banner': 'banner',
'capacity': 'capacity',
'console_lock_enabled': 'console_lock_enabled',
'encryption': 'encryption',
'eradication_config': 'eradication_config',
'idle_timeout': 'idle_timeout',
'ntp_servers': 'ntp_servers',
'os': 'os',
'parity': 'parity',
'scsi_timeout': 'scsi_timeout',
'space': 'space',
'version': 'version'
}
required_args = {
}
def __init__(
self,
id=None, # type: str
name=None, # type: str
banner=None, # type: str
capacity=None, # type: int
console_lock_enabled=None, # type: bool
encryption=None, # type: models.ArrayEncryption
eradication_config=None, # type: models.EradicationConfig
idle_timeout=None, # type: int
ntp_servers=None, # type: List[str]
os=None, # type: str
parity=None, # type: float
scsi_timeout=None, # type: int
space=None, # type: models.Space
version=None, # type: str
):
"""
Keyword args:
id (str): A globally unique, system-generated ID. The ID cannot be modified and cannot refer to another resource.
name (str): A user-specified name. The name must be locally unique and can be changed.
banner (str)
capacity (int): The usable capacity in bytes.
console_lock_enabled (bool)
encryption (ArrayEncryption)
eradication_config (EradicationConfig)
idle_timeout (int): The idle timeout in milliseconds. Valid values include `0` and any multiple of `60000` in the range of `300000` and `10800000`. Any other values are rounded down to the nearest multiple of `60000`.
ntp_servers (list[str])
os (str): Specifies the operating system. Valid values are `Purity`, `Purity//FA`, and `Purity//FB`.
parity (float): A representation of data redundancy on the array. Data redundancy is rebuilt automatically by the system whenever parity is less than `1.0`.
scsi_timeout (int): The SCSI timeout. If not specified, defaults to `60s`.
space (Space)
version (str)
"""
if id is not None:
self.id = id
if name is not None:
self.name = name
if banner is not None:
self.banner = banner
if capacity is not None:
self.capacity = capacity
if console_lock_enabled is not None:
self.console_lock_enabled = console_lock_enabled
if encryption is not None:
self.encryption = encryption
if eradication_config is not None:
self.eradication_config = eradication_config
if idle_timeout is not None:
self.idle_timeout = idle_timeout
if ntp_servers is not None:
self.ntp_servers = ntp_servers
if os is not None:
self.os = os
if parity is not None:
self.parity = parity
if scsi_timeout is not None:
self.scsi_timeout = scsi_timeout
if space is not None:
self.space = space
if version is not None:
self.version = version
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def __getitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
return object.__getattribute__(self, key)
def __setitem__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
object.__setattr__(self, key, value)
def __delitem__(self, key):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `Array`".format(key))
object.__delattr__(self, key)
def keys(self):
return self.attribute_map.keys()
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Array, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Array):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ec1a5719f569715605b75d20d9dea2e9ea1a20ef | eee741a9d6d55357fb597e0cc3379085f47c2c13 | /processData.py | 85071304b5d9fe473ea285664cbd0cd5dac57f28 | [] | no_license | mbstacy/gdal_ok_mesonet_data_process | 6505be783056eeade9664782035c284d76f29e1c | 18fe989560d54cc0fff336462c26897778daeaef | refs/heads/master | 2021-01-10T07:32:55.865328 | 2016-02-23T22:42:48 | 2016-02-23T22:42:48 | 52,396,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,487 | py | #!/usr/bin/env python
'''
Created on Feb 2, 2016
@author: ledapsTwo
'''
from osgeo import gdal,osr
from os import path
from csv import DictReader
import shlex,sys
import pandas as pd
import numpy as np
class raster:
def __init__(self,inFile):
gf = gdal.Open(inFile)
self.raster = gf
self.grid = gf.ReadAsArray()
#get number of rows and columns in the shape
self.numGrids = 1
if len(self.grid.shape) == 3:
self.numGrids,self.numRows,self.numCols = self.grid.shape
else:
self.numRows,self.numCols = self.grid.shape
#get projection and spatial reference infomation
srs = osr.SpatialReference()
srs.ImportFromWkt(gf.GetProjection())
srsLatLong = srs.CloneGeogCS()
self.srs = srs ; self.srsLatLong = srsLatLong
#create coordinate transform object for sample/line to lon/lat conversion
self.ct = osr.CoordinateTransformation(srs, srsLatLong)
#create coordinate transform object for lon/lat to sample/line conversion
self.ctInv = osr.CoordinateTransformation(srsLatLong, srs)
#get geographic transform information in cartesian space
self.geoMatrix = gf.GetGeoTransform()
#with no north correction this is equal to (pixel height * pixel width) = -900
dev = (self.geoMatrix[1] * self.geoMatrix[5]) - (self.geoMatrix[2] * self.geoMatrix[4])
#divide height/width components by this -900 to get a decimal degrees value
self.gtinv = (self.geoMatrix[0], self.geoMatrix[5]/dev, -1 * self.geoMatrix[2]/dev, self.geoMatrix[3], -1 * self.geoMatrix[4]/dev, self.geoMatrix[1]/dev)
def parseMesonetFile():
mesoCSV = "{0}.csv".format(mesoFile.split('.')[0]) #path.join(curDir,'%s.csv'%path.basename(mesoFile).split('.')[0])
if not path.exists(mesoCSV):
with open(mesoFile,'r') as f1:
data = f1.read()
data_list=data.split('\n')
table = []
for line in data_list[2:-1]:
table.append(shlex.split(line))
headers = table.pop(0)
df = pd.DataFrame(table,columns=headers)
outFile = path.basename(mesoFile).split('.')[0]
df.to_csv("%s.csv" % (outFile),index=False)
f = open(mesoCSV,'r')
aSites = DictReader(f)
return aSites
def convertLatLontoPixelLine(inGrid,lat,lon):
#convert lon/lat to cartesian coordinates
x,y,z = inGrid.ctInv.TransformPoint(lon,lat,0)
#subtract out upper left pixel coordinates to move origin to upper-left corner of the grid
u = x - inGrid.gtinv[0]
v = y - inGrid.gtinv[3]
#print lon,lat,x,y,u,v
#multiply u & v by 0.333333 or -0.333333 to convert cartesian to pixel/line combo
col = (inGrid.gtinv[1] * u) + (inGrid.gtinv[2] * v)
row = (inGrid.gtinv[4] * u) + (inGrid.gtinv[5] * v)
#print lon,lat,x,y,u,v,col,row
return row,col
def convertPixelLinetoLatLong(inGrid,row,col):
X = (inGrid.geoMatrix[0] + (inGrid.geoMatrix[1] * col) + (inGrid.geoMatrix[2] * row)) + inGrid.geoMatrix[1]/2.0
Y = (inGrid.geoMatrix[3] + (inGrid.geoMatrix[4] * col) + (inGrid.geoMatrix[5] * row)) + inGrid.geoMatrix[5]/2.0
(lon, lat, height) = inGrid.ct.TransformPoint(X,Y)
lon = round(lon,11) ; lat = round(lat,11)
return lat,lon
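# Illustrative round trip (hypothetical file name; needs a real GeoTIFF on disk):
# a cell centre converted to lat/lon should map back into the same cell.
#
#   demo = raster('example.tif')
#   lat, lon = convertPixelLinetoLatLong(demo, row=10, col=20)
#   row, col = convertLatLontoPixelLine(demo, lat, lon)
#   assert (int(row), int(col)) == (10, 20)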
def main():
#read in TIF file as a raster object
tif = raster(tifFile)
#read in mesonet data and break at each new line
aSites = parseMesonetFile()
#print(aSites)
aOut = []
#walk through each site, pull the lat/lon and determine point on raster grid
for mesoSite in aSites:
#print (mesoSite)
siteID = mesoSite["STID"] #the site ID from the CSV
stNum = mesoSite["STNM"] #station number
stTime = mesoSite["TIME"] #station time
lat = float(mesoSite["LATT"]) #the latitude from the CSV
lon = float(mesoSite["LONG"]) #the longitude from the CSV
#the row and column on the raster above this mesonet site
rasterRow,rasterColumn = convertLatLontoPixelLine(tif, lat, lon)
#the value on the raster at this grid point
rasterValue = tif.grid[int(rasterRow),int(rasterColumn)] #cast to int so the pixel/line floats can index the numpy grid
#build skeleton for header and station lines
header = "STID,STNM,TIME,LATT,LONG,RASTERVAL"
strOut = "%s,%s,%s,%s,%s,%s"%(siteID,stNum,stTime,lat,lon,rasterValue)
#walk through all attributes and place into above strings
for param in sorted(mesoSite.keys()):
#skip any of these as they have already been defined above
if param in ["STID","STNM","TIME","LATT","LONG"]: continue
header += ",%s"%param
strOut += ",%s"%mesoSite[param]
#add header first so it will be at the top of the output file
if header not in aOut: aOut.append(header)
#append station attributes to list
aOut.append(strOut)
#convert list to block of text and write to file
outFile = open("summary%s.csv"%ext,'w')
outFile.write("\n".join(aOut))
outFile.close()
print ("DONE")
if __name__ == "__main__":
#global curDir ; curDir = path.dirname(path.realpath(__file__))
global tifFile ; tifFile = sys.argv[1] #path.join(curDir,'y12.modisSSEBopET.tif')
global mesoFile ; mesoFile = sys.argv[2] #path.join(curDir,'2012_annual.mdf')
global ext; ext = ""
main()
| [
"[email protected]"
] | |
044ef7733d33340e7cf093fa5b1b04a826c31548 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_summary.py | 18d09be192ac1b4023f64ab173806411d3dcea87 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py |
#class header
class _SUMMARY():
def __init__(self,):
self.name = "SUMMARY"
self.definitions = [u'done suddenly, without discussion or legal arrangements: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
a0d550e2fdb493ba6c99d7490c06e07da09bcdde | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/aws-xray-sdk/aws_xray_sdk/core/sampling/reservoir.pyi | 322d1d38c3d821602e3e08cb5f590e0f85608dd7 | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 337 | pyi | from enum import Enum
class Reservoir:
def __init__(self) -> None: ...
def borrow_or_take(self, now, can_borrow): ...
def load_quota(self, quota, TTL, interval) -> None: ...
@property
def quota(self): ...
@property
def TTL(self): ...
class ReservoirDecision(Enum):
TAKE: str
BORROW: str
NO: str
| [
"[email protected]"
] | |
60a71622737aa6e8a866253cebae37379422f533 | 7d84000f2977def7118b4c93a47b9d71c4ee38f8 | /app/src/utils/log_streamer.py | ad37f010c1610fdbb84800feadcdb0afd9627020 | [] | no_license | tensorci/core | d405d17099987163dfc589711345ce414ace406e | 50d18bb43f73b1d5d47fefad543c2554e87a6520 | refs/heads/master | 2021-03-19T13:27:26.219591 | 2020-12-03T01:14:57 | 2020-12-03T01:14:57 | 110,917,313 | 0 | 0 | null | 2020-12-03T01:15:26 | 2017-11-16T03:20:09 | Python | UTF-8 | Python | false | false | 3,800 | py | import log_formatter
from src import logger, dbi
from pyredis import redis
from src.helpers.definitions import tci_keep_alive
# TODO: This file is disgusting -- make it less disgusting
def should_complete_stream(data, deployment):
# Check if last_entry was specified in the log. Complete the stream if so.
complete = data.get('last_entry') == 'True'
# Check to see if this was an error log. Complete the stream if so.
if data.get('level') == 'error':
# Fail the deployment and log that this happened internally
logger.error('DEPLOYMENT FAILED: uid={}'.format(deployment.uid))
deployment.fail()
complete = True
return complete
def stream_deploy_logs(deployment, stream_key=None, block=30000):
complete = False
first_log = redis.xrange(stream_key, count=1)
# If logs already exist, yield the first one and then
# iterate over timestamps to continue yielding
if first_log:
ts, data = first_log[0]
first_log_yielded = False
while not complete:
try:
# yield the first log and continue
if not first_log_yielded:
first_log_yielded = True
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
continue
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
except:
break
else:
ts = '0-0'
while not complete:
try:
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
complete = should_complete_stream(data, deployment)
yield log_formatter.deploy_log(data)
except:
break
def stream_train_logs(deployment, block=30000):
stream_key = deployment.train_log()
first_log = redis.xrange(stream_key, count=1)
# If logs already exist, yield the first one and then
# iterate over timestamps to continue yielding
if first_log:
ts, data = first_log[0]
first_log_yielded = False
while True:
try:
# yield the first log and continue
if not first_log_yielded:
first_log_yielded = True
yield log_formatter.training_log(data, with_color=True)
continue
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
yield log_formatter.training_log(data, with_color=True)
except:
break
else:
ts = '0-0'
while True:
try:
# Get all logs since timestamp=ts
result = redis.xread(block=block, **{stream_key: ts})
if not result:
yield tci_keep_alive + '\n'
continue
items = result.get(stream_key)
if not items:
yield tci_keep_alive + '\n'
continue
for item in items:
ts, data = item
yield log_formatter.training_log(data, with_color=True)
except:
break | [
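# Minimal consumption sketch (the Flask-style response wrapper and the route
# name are illustrative assumptions, not part of this module): each generator
# above yields formatted log lines plus periodic keep-alive strings, so it can
# be handed straight to a streaming HTTP response.
#
#   def deploy_logs_route(deployment):
#       return Response(stream_deploy_logs(deployment, stream_key=deployment.uid),
#                       mimetype='text/event-stream')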
"[email protected]"
] | |
ee13787901e1cb2cb22e3ad0a896df200708d570 | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_ddos_custom_policies_operations.py | 374762f783c3c434a29e76a5bebf00abdd56790d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 20,334 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> "_models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs
) -> "_models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.DdosCustomPolicy",
**kwargs
) -> AsyncLROPoller["_models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.DdosCustomPolicy":
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
| [
"[email protected]"
] | |
a42658845c9f20032a391940e548d739fa593468 | c8453f83242cd525a98606f665d9f5d9e84c6335 | /lib/googlecloudsdk/third_party/apis/bigquery/v2/bigquery_v2_messages.py | c9af6411f99770bce94fba3d09d11478fa6e7675 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | paulfoley/GCP-Cloud_SDK | 5188a04d8d80a2709fa3dba799802d57c7eb66a1 | bec7106686e99257cb91a50f2c1b1a374a4fc66f | refs/heads/master | 2021-06-02T09:49:48.309328 | 2017-07-02T18:26:47 | 2017-07-02T18:26:47 | 96,041,222 | 1 | 1 | NOASSERTION | 2020-07-26T22:40:49 | 2017-07-02T18:19:52 | Python | UTF-8 | Python | false | false | 98,916 | py | """Generated message classes for bigquery version v2.
A data platform for customers to create, manage, share and query data.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'bigquery'
class BigqueryDatasetsDeleteRequest(_messages.Message):
"""A BigqueryDatasetsDeleteRequest object.
Fields:
datasetId: Dataset ID of dataset being deleted
deleteContents: If True, delete all the tables in the dataset. If False
and the dataset contains tables, the request will fail. Default is False
projectId: Project ID of the dataset being deleted
"""
datasetId = _messages.StringField(1, required=True)
deleteContents = _messages.BooleanField(2)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsDeleteResponse(_messages.Message):
"""An empty BigqueryDatasetsDelete response."""
class BigqueryDatasetsGetRequest(_messages.Message):
"""A BigqueryDatasetsGetRequest object.
Fields:
datasetId: Dataset ID of the requested dataset
projectId: Project ID of the requested dataset
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsInsertRequest(_messages.Message):
"""A BigqueryDatasetsInsertRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
projectId: Project ID of the new dataset
"""
dataset = _messages.MessageField('Dataset', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryDatasetsListRequest(_messages.Message):
"""A BigqueryDatasetsListRequest object.
Fields:
all: Whether to list all datasets, including hidden ones
filter: An expression for filtering the results of the request by label.
The syntax is "labels.<name>[:<value>]". Multiple filters can be ANDed
together by connecting with a space. Example:
"labels.department:receiving labels.active". See Filtering datasets
using labels for details.
maxResults: The maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the datasets to be listed
"""
all = _messages.BooleanField(1)
filter = _messages.StringField(2)
maxResults = _messages.IntegerField(3, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(4)
projectId = _messages.StringField(5, required=True)
class BigqueryDatasetsPatchRequest(_messages.Message):
"""A BigqueryDatasetsPatchRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryDatasetsUpdateRequest(_messages.Message):
"""A BigqueryDatasetsUpdateRequest object.
Fields:
dataset: A Dataset resource to be passed as the request body.
datasetId: Dataset ID of the dataset being updated
projectId: Project ID of the dataset being updated
"""
dataset = _messages.MessageField('Dataset', 1)
datasetId = _messages.StringField(2, required=True)
projectId = _messages.StringField(3, required=True)
class BigqueryJobsCancelRequest(_messages.Message):
"""A BigqueryJobsCancelRequest object.
Fields:
jobId: [Required] Job ID of the job to cancel
projectId: [Required] Project ID of the job to cancel
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsGetQueryResultsRequest(_messages.Message):
"""A BigqueryJobsGetQueryResultsRequest object.
Fields:
jobId: [Required] Job ID of the query job
maxResults: Maximum number of results to read
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: [Required] Project ID of the query job
startIndex: Zero-based index of the starting row
timeoutMs: How long to wait for the query to complete, in milliseconds,
before returning. Default is 10 seconds. If the timeout passes before
the job completes, the 'jobComplete' field in the response will be false
"""
jobId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
timeoutMs = _messages.IntegerField(6, variant=_messages.Variant.UINT32)
class BigqueryJobsGetRequest(_messages.Message):
"""A BigqueryJobsGetRequest object.
Fields:
jobId: [Required] Job ID of the requested job
projectId: [Required] Project ID of the requested job
"""
jobId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsInsertRequest(_messages.Message):
"""A BigqueryJobsInsertRequest object.
Fields:
job: A Job resource to be passed as the request body.
projectId: Project ID of the project that will be billed for the job
"""
job = _messages.MessageField('Job', 1)
projectId = _messages.StringField(2, required=True)
class BigqueryJobsListRequest(_messages.Message):
"""A BigqueryJobsListRequest object.
Enums:
ProjectionValueValuesEnum: Restrict information returned to a set of
selected fields
StateFilterValueValuesEnum: Filter for job state
Fields:
allUsers: Whether to display jobs owned by all users in the project.
Default false
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the jobs to list
projection: Restrict information returned to a set of selected fields
stateFilter: Filter for job state
"""
class ProjectionValueValuesEnum(_messages.Enum):
"""Restrict information returned to a set of selected fields
Values:
full: Includes all job data
minimal: Does not include the job configuration
"""
full = 0
minimal = 1
class StateFilterValueValuesEnum(_messages.Enum):
"""Filter for job state
Values:
done: Finished jobs
pending: Pending jobs
running: Running jobs
"""
done = 0
pending = 1
running = 2
allUsers = _messages.BooleanField(1)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
stateFilter = _messages.EnumField('StateFilterValueValuesEnum', 6, repeated=True)
class BigqueryJobsQueryRequest(_messages.Message):
"""A BigqueryJobsQueryRequest object.
Fields:
projectId: Project ID of the project billed for the query
queryRequest: A QueryRequest resource to be passed as the request body.
"""
projectId = _messages.StringField(1, required=True)
queryRequest = _messages.MessageField('QueryRequest', 2)
class BigqueryProjectsListRequest(_messages.Message):
"""A BigqueryProjectsListRequest object.
Fields:
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(2)
class BigqueryTabledataInsertAllRequest(_messages.Message):
"""A BigqueryTabledataInsertAllRequest object.
Fields:
datasetId: Dataset ID of the destination table.
projectId: Project ID of the destination table.
tableDataInsertAllRequest: A TableDataInsertAllRequest resource to be
passed as the request body.
tableId: Table ID of the destination table.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableDataInsertAllRequest = _messages.MessageField('TableDataInsertAllRequest', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTabledataListRequest(_messages.Message):
"""A BigqueryTabledataListRequest object.
Fields:
datasetId: Dataset ID of the table to read
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, identifying the result
set
projectId: Project ID of the table to read
startIndex: Zero-based index of the starting row to read
tableId: Table ID of the table to read
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
startIndex = _messages.IntegerField(5, variant=_messages.Variant.UINT64)
tableId = _messages.StringField(6, required=True)
class BigqueryTablesDeleteRequest(_messages.Message):
"""A BigqueryTablesDeleteRequest object.
Fields:
datasetId: Dataset ID of the table to delete
projectId: Project ID of the table to delete
tableId: Table ID of the table to delete
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesDeleteResponse(_messages.Message):
"""An empty BigqueryTablesDelete response."""
class BigqueryTablesGetRequest(_messages.Message):
"""A BigqueryTablesGetRequest object.
Fields:
datasetId: Dataset ID of the requested table
projectId: Project ID of the requested table
tableId: Table ID of the requested table
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
tableId = _messages.StringField(3, required=True)
class BigqueryTablesInsertRequest(_messages.Message):
"""A BigqueryTablesInsertRequest object.
Fields:
datasetId: Dataset ID of the new table
projectId: Project ID of the new table
table: A Table resource to be passed as the request body.
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
class BigqueryTablesListRequest(_messages.Message):
"""A BigqueryTablesListRequest object.
Fields:
datasetId: Dataset ID of the tables to list
maxResults: Maximum number of results to return
pageToken: Page token, returned by a previous call, to request the next
page of results
projectId: Project ID of the tables to list
"""
datasetId = _messages.StringField(1, required=True)
maxResults = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
pageToken = _messages.StringField(3)
projectId = _messages.StringField(4, required=True)
class BigqueryTablesPatchRequest(_messages.Message):
"""A BigqueryTablesPatchRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigqueryTablesUpdateRequest(_messages.Message):
"""A BigqueryTablesUpdateRequest object.
Fields:
datasetId: Dataset ID of the table to update
projectId: Project ID of the table to update
table: A Table resource to be passed as the request body.
tableId: Table ID of the table to update
"""
datasetId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
table = _messages.MessageField('Table', 3)
tableId = _messages.StringField(4, required=True)
class BigtableColumn(_messages.Message):
"""A BigtableColumn object.
Fields:
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. 'encoding' can also be set at
the column family level. However, the setting at this level takes
precedence if 'encoding' is set at both levels.
fieldName: [Optional] If the qualifier is not a valid BigQuery field
identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier
must be provided as the column field name and is used as field name in
queries.
onlyReadLatest: [Optional] If this is set, only the latest version of
value in this column are exposed. 'onlyReadLatest' can also be set at
the column family level. However, the setting at this level takes
precedence if 'onlyReadLatest' is set at both levels.
qualifierEncoded: [Required] Qualifier of the column. Columns in the
parent column family that has this exact qualifier are exposed as .
field. If the qualifier is valid UTF-8 string, it can be specified in
the qualifier_string field. Otherwise, a base-64 encoded value must be
set to qualifier_encoded. The column field name is the same as the
column qualifier. However, if the qualifier is not a valid BigQuery
field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid
identifier must be provided as field_name.
qualifierString: A string attribute.
type: [Optional] The type to convert the value in cells of this column.
The values are expected to be encoded using HBase Bytes.toBytes function
when using the BINARY encoding value. Following BigQuery types are
allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
type is BYTES. 'type' can also be set at the column family level.
However, the setting at this level takes precedence if 'type' is set at
both levels.
"""
encoding = _messages.StringField(1)
fieldName = _messages.StringField(2)
onlyReadLatest = _messages.BooleanField(3)
qualifierEncoded = _messages.BytesField(4)
qualifierString = _messages.StringField(5)
type = _messages.StringField(6)
class BigtableColumnFamily(_messages.Message):
"""A BigtableColumnFamily object.
Fields:
columns: [Optional] Lists of columns that should be exposed as individual
fields as opposed to a list of (column name, value) pairs. All columns
whose qualifier matches a qualifier in this list can be accessed as ..
Other columns can be accessed as a list through .Column field.
encoding: [Optional] The encoding of the values when the type is not
STRING. Acceptable encoding values are: TEXT - indicates values are
alphanumeric text strings. BINARY - indicates values are encoded using
HBase Bytes.toBytes family of functions. This can be overridden for a
specific column by listing that column in 'columns' and specifying an
encoding for it.
familyId: Identifier of the column family.
onlyReadLatest: [Optional] If this is set only the latest version of value
are exposed for all columns in this column family. This can be
overridden for a specific column by listing that column in 'columns' and
specifying a different setting for that column.
type: [Optional] The type to convert the value in cells of this column
family. The values are expected to be encoded using HBase Bytes.toBytes
function when using the BINARY encoding value. Following BigQuery types
are allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN
Default type is BYTES. This can be overridden for a specific column by
listing that column in 'columns' and specifying a type for it.
"""
columns = _messages.MessageField('BigtableColumn', 1, repeated=True)
encoding = _messages.StringField(2)
familyId = _messages.StringField(3)
onlyReadLatest = _messages.BooleanField(4)
type = _messages.StringField(5)
class BigtableOptions(_messages.Message):
"""A BigtableOptions object.
Fields:
columnFamilies: [Optional] List of column families to expose in the table
schema along with their types. This list restricts the column families
that can be referenced in queries and specifies their value types. You
can use this list to do type conversions - see the 'type' field for more
details. If you leave this list empty, all column families are present
in the table schema and their values are read as BYTES. During a query
only the column families referenced in that query are read from
Bigtable.
ignoreUnspecifiedColumnFamilies: [Optional] If field is true, then the
column families that are not specified in columnFamilies list are not
exposed in the table schema. Otherwise, they are read with BYTES type
values. The default value is false.
readRowkeyAsString: [Optional] If field is true, then the rowkey column
families will be read and converted to string. Otherwise they are read
with BYTES type values and users need to manually cast them with CAST if
necessary. The default value is false.
"""
columnFamilies = _messages.MessageField('BigtableColumnFamily', 1, repeated=True)
ignoreUnspecifiedColumnFamilies = _messages.BooleanField(2)
readRowkeyAsString = _messages.BooleanField(3)
class CsvOptions(_messages.Message):
"""A CsvOptions object.
Fields:
allowJaggedRows: [Optional] Indicates if BigQuery should accept rows that
are missing trailing optional columns. If true, BigQuery treats missing
trailing columns as null values. If false, records with missing trailing
columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. The default
value is false.
allowQuotedNewlines: [Optional] Indicates if BigQuery should allow quoted
data sections that contain newline characters in a CSV file. The default
value is false.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when reading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
encoding = _messages.StringField(3)
fieldDelimiter = _messages.StringField(4)
quote = _messages.StringField(5, default=u'"')
skipLeadingRows = _messages.IntegerField(6)
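

# Illustrative sketch (not part of the generated API): constructing a
# CsvOptions message for a comma-delimited file with a single header row.
# The helper name is hypothetical and the function is never called here.
def _example_csv_options():
  """Builds a sample CsvOptions message (illustrative sketch only)."""
  return CsvOptions(
      allowJaggedRows=False,
      allowQuotedNewlines=False,
      encoding='UTF-8',
      fieldDelimiter=',',
      skipLeadingRows=1)
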
class Dataset(_messages.Message):
"""A Dataset object.
Messages:
AccessValueListEntry: A AccessValueListEntry object.
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets. You can set this
property when inserting or updating a dataset. See Labeling Datasets for
more information.
Fields:
access: [Optional] An array of objects that define dataset access for one
or more entities. You can set this property when inserting or updating a
dataset in order to control who is allowed to access the data. If
unspecified at dataset creation time, BigQuery adds default dataset
access for the following entities: access.specialGroup: projectReaders;
access.role: READER; access.specialGroup: projectWriters; access.role:
WRITER; access.specialGroup: projectOwners; access.role: OWNER;
access.userByEmail: [dataset creator email]; access.role: OWNER;
creationTime: [Output-only] The time when this dataset was created, in
milliseconds since the epoch.
datasetReference: [Required] A reference that identifies the dataset.
defaultTableExpirationMs: [Optional] The default lifetime of all tables in
the dataset, in milliseconds. The minimum value is 3600000 milliseconds
(one hour). Once this property is set, all newly-created tables in the
dataset will have an expirationTime property set to the creation time
plus the value in this property, and changing the value will only affect
new tables, not existing ones. When the expirationTime for a given table
is reached, that table will be deleted automatically. If a table's
expirationTime is modified or removed before the table expires, or if
you provide an explicit expirationTime when creating a table, that value
takes precedence over the default expiration time indicated by this
property.
description: [Optional] A user-friendly description of the dataset.
etag: [Output-only] A hash of the resource.
friendlyName: [Optional] A descriptive name for the dataset.
id: [Output-only] The fully-qualified unique name of the dataset in the
format projectId:datasetId. The dataset name without the project name is
given in the datasetId field. When creating a new dataset, leave this
field blank, and instead specify the datasetId field.
kind: [Output-only] The resource type.
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets. You can set this property
when inserting or updating a dataset. See Labeling Datasets for more
information.
lastModifiedTime: [Output-only] The date when this dataset or any of its
tables was last modified, in milliseconds since the epoch.
location: [Experimental] The geographic location where the dataset should
reside. Possible values include EU and US. The default value is US.
selfLink: [Output-only] A URL that can be used to access the resource
again. You can use this URL in Get or Update requests to the resource.
"""
class AccessValueListEntry(_messages.Message):
"""A AccessValueListEntry object.
Fields:
domain: [Pick one] A domain to grant access to. Any users signed in with
the domain specified will be granted the specified access. Example:
"example.com".
groupByEmail: [Pick one] An email address of a Google Group to grant
access to.
role: [Required] Describes the rights granted to the user specified by
the other member of the access object. The following string values are
supported: READER, WRITER, OWNER.
specialGroup: [Pick one] A special group to grant access to. Possible
values include: projectOwners: Owners of the enclosing project.
projectReaders: Readers of the enclosing project. projectWriters:
Writers of the enclosing project. allAuthenticatedUsers: All
authenticated BigQuery users.
userByEmail: [Pick one] An email address of a user to grant access to.
For example: [email protected].
view: [Pick one] A view from a different dataset to grant access to.
Queries executed against that view will have read access to tables in
this dataset. The role field is not required when this field is set.
If that view is updated by any user, access to the view needs to be
granted again via an update operation.
"""
domain = _messages.StringField(1)
groupByEmail = _messages.StringField(2)
role = _messages.StringField(3)
specialGroup = _messages.StringField(4)
userByEmail = _messages.StringField(5)
view = _messages.MessageField('TableReference', 6)
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets. You can set this property when
inserting or updating a dataset. See Labeling Datasets for more
information.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
access = _messages.MessageField('AccessValueListEntry', 1, repeated=True)
creationTime = _messages.IntegerField(2)
datasetReference = _messages.MessageField('DatasetReference', 3)
defaultTableExpirationMs = _messages.IntegerField(4)
description = _messages.StringField(5)
etag = _messages.StringField(6)
friendlyName = _messages.StringField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 10)
lastModifiedTime = _messages.IntegerField(11)
location = _messages.StringField(12)
selfLink = _messages.StringField(13)
class DatasetList(_messages.Message):
"""A DatasetList object.
Messages:
DatasetsValueListEntry: A DatasetsValueListEntry object.
Fields:
datasets: An array of the dataset resources in the project. Each resource
contains basic information. For full information about a particular
dataset resource, use the Datasets: get method. This property is omitted
when there are no datasets in the project.
etag: A hash value of the results page. You can use this property to
determine if the page has changed since the last request.
kind: The list type. This property always returns the value
"bigquery#datasetList".
nextPageToken: A token that can be used to request the next results page.
This property is omitted on the final results page.
"""
class DatasetsValueListEntry(_messages.Message):
"""A DatasetsValueListEntry object.
Messages:
LabelsValue: [Experimental] The labels associated with this dataset. You
can use these to organize and group your datasets.
Fields:
datasetReference: The dataset reference. Use this property to access
specific parts of the dataset's ID, such as project ID or dataset ID.
friendlyName: A descriptive name for the dataset, if one exists.
id: The fully-qualified, unique, opaque ID of the dataset.
kind: The resource type. This property always returns the value
"bigquery#dataset".
labels: [Experimental] The labels associated with this dataset. You can
use these to organize and group your datasets.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this dataset. You can use
these to organize and group your datasets.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
datasetReference = _messages.MessageField('DatasetReference', 1)
friendlyName = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default=u'bigquery#dataset')
labels = _messages.MessageField('LabelsValue', 5)
datasets = _messages.MessageField('DatasetsValueListEntry', 1, repeated=True)
etag = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#datasetList')
nextPageToken = _messages.StringField(4)
class DatasetReference(_messages.Message):
"""A DatasetReference object.
Fields:
datasetId: [Required] A unique ID for this dataset, without the project
name. The ID must contain only letters (a-z, A-Z), numbers (0-9), or
underscores (_). The maximum length is 1,024 characters.
projectId: [Optional] The ID of the project containing this dataset.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
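

# Illustrative sketch (not part of the generated API): a minimal Dataset
# message tied together with its DatasetReference and one access entry. The
# project/dataset IDs and the domain are placeholders, and the helper is
# never called by this module.
def _example_dataset():
  """Builds a sample Dataset message (illustrative sketch only)."""
  return Dataset(
      datasetReference=DatasetReference(
          projectId='my-project', datasetId='my_dataset'),
      friendlyName='Example dataset',
      defaultTableExpirationMs=7 * 24 * 3600 * 1000,  # one week
      access=[Dataset.AccessValueListEntry(
          role='READER', domain='example.com')])
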
class ErrorProto(_messages.Message):
"""A ErrorProto object.
Fields:
debugInfo: Debugging information. This property is internal to Google and
should not be used.
location: Specifies where the error occurred, if present.
message: A human-readable description of the error.
reason: A short error code that summarizes the error.
"""
debugInfo = _messages.StringField(1)
location = _messages.StringField(2)
message = _messages.StringField(3)
reason = _messages.StringField(4)
class ExplainQueryStage(_messages.Message):
"""A ExplainQueryStage object.
Fields:
computeRatioAvg: Relative amount of time the average shard spent on CPU-
bound tasks.
computeRatioMax: Relative amount of time the slowest shard spent on CPU-
bound tasks.
id: Unique ID for stage within plan.
name: Human-readable name for stage.
readRatioAvg: Relative amount of time the average shard spent reading
input.
readRatioMax: Relative amount of time the slowest shard spent reading
input.
recordsRead: Number of records read into the stage.
recordsWritten: Number of records written by the stage.
status: Current status for the stage.
steps: List of operations within the stage in dependency order
(approximately chronological).
waitRatioAvg: Relative amount of time the average shard spent waiting to
be scheduled.
waitRatioMax: Relative amount of time the slowest shard spent waiting to
be scheduled.
writeRatioAvg: Relative amount of time the average shard spent on writing
output.
writeRatioMax: Relative amount of time the slowest shard spent on writing
output.
"""
computeRatioAvg = _messages.FloatField(1)
computeRatioMax = _messages.FloatField(2)
id = _messages.IntegerField(3)
name = _messages.StringField(4)
readRatioAvg = _messages.FloatField(5)
readRatioMax = _messages.FloatField(6)
recordsRead = _messages.IntegerField(7)
recordsWritten = _messages.IntegerField(8)
status = _messages.StringField(9)
steps = _messages.MessageField('ExplainQueryStep', 10, repeated=True)
waitRatioAvg = _messages.FloatField(11)
waitRatioMax = _messages.FloatField(12)
writeRatioAvg = _messages.FloatField(13)
writeRatioMax = _messages.FloatField(14)
class ExplainQueryStep(_messages.Message):
"""A ExplainQueryStep object.
Fields:
kind: Machine-readable operation type.
substeps: Human-readable stage descriptions.
"""
kind = _messages.StringField(1)
substeps = _messages.StringField(2, repeated=True)
class ExternalDataConfiguration(_messages.Message):
"""A ExternalDataConfiguration object.
Fields:
autodetect: [Experimental] Try to detect schema and format options
automatically. Any option specified explicitly will be honored.
bigtableOptions: [Optional] Additional options if sourceFormat is set to
BIGTABLE.
compression: [Optional] The compression type of the data source. Possible
values include GZIP and NONE. The default value is NONE. This setting is
ignored for Google Cloud Bigtable, Google Cloud Datastore backups and
Avro formats.
csvOptions: Additional properties to set if sourceFormat is set to CSV.
googleSheetsOptions: [Optional] Additional options if sourceFormat is set
to GOOGLE_SHEETS.
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
Google Cloud Bigtable: This setting is ignored. Google Cloud Datastore
backups: This setting is ignored. Avro: This setting is ignored.
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when reading data. If the number of bad records exceeds this
value, an invalid error is returned in the job result. The default value
is 0, which requires that all records are valid. This setting is ignored
for Google Cloud Bigtable, Google Cloud Datastore backups and Avro
formats.
schema: [Optional] The schema for the data. Schema is required for CSV and
JSON formats. Schema is disallowed for Google Cloud Bigtable, Cloud
Datastore backups, and Avro formats.
sourceFormat: [Required] The data format. For CSV files, specify "CSV".
For Google sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON,
specify "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For
Google Cloud Datastore backups, specify "DATASTORE_BACKUP".
[Experimental] For Google Cloud Bigtable, specify "BIGTABLE". Please
note that reading from Google Cloud Bigtable is experimental and has to
be enabled for your project. Please contact Google Cloud Support to
enable this for your project.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud. For Google Cloud Storage URIs: Each URI can contain one
'*' wildcard character and it must come after the 'bucket' name. Size
limits related to load jobs apply to external data sources. For Google
      Cloud Bigtable URIs: Exactly one URI can be specified and it has to be a
fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
For Google Cloud Datastore backups, exactly one URI can be specified,
and it must end with '.backup_info'. Also, the '*' wildcard character is
not allowed.
"""
autodetect = _messages.BooleanField(1)
bigtableOptions = _messages.MessageField('BigtableOptions', 2)
compression = _messages.StringField(3)
csvOptions = _messages.MessageField('CsvOptions', 4)
googleSheetsOptions = _messages.MessageField('GoogleSheetsOptions', 5)
ignoreUnknownValues = _messages.BooleanField(6)
maxBadRecords = _messages.IntegerField(7, variant=_messages.Variant.INT32)
schema = _messages.MessageField('TableSchema', 8)
sourceFormat = _messages.StringField(9)
sourceUris = _messages.StringField(10, repeated=True)
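

# Illustrative sketch (not part of the generated API): an
# ExternalDataConfiguration describing a CSV file in Google Cloud Storage.
# The bucket/object path is a placeholder and the helper is never called by
# this module.
def _example_external_csv_config():
  """Builds a sample ExternalDataConfiguration (illustrative sketch only)."""
  return ExternalDataConfiguration(
      sourceFormat='CSV',
      sourceUris=['gs://my-bucket/data/*.csv'],
      csvOptions=CsvOptions(fieldDelimiter=',', skipLeadingRows=1),
      ignoreUnknownValues=True,
      maxBadRecords=0)
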
class GetQueryResultsResponse(_messages.Message):
"""A GetQueryResultsResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
etag: A hash of this response.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the BigQuery Job that was created to run the
query. This field will be present even if the original request timed
out, in which case GetQueryResults can be used to read the results once
the query has completed. Since this API only returns the first page of
results, subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type of the response.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above. Present
only when the query completes successfully.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results. Present only when the query completes successfully.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
etag = _messages.StringField(3)
jobComplete = _messages.BooleanField(4)
jobReference = _messages.MessageField('JobReference', 5)
kind = _messages.StringField(6, default=u'bigquery#getQueryResultsResponse')
numDmlAffectedRows = _messages.IntegerField(7)
pageToken = _messages.StringField(8)
rows = _messages.MessageField('TableRow', 9, repeated=True)
schema = _messages.MessageField('TableSchema', 10)
totalBytesProcessed = _messages.IntegerField(11)
totalRows = _messages.IntegerField(12, variant=_messages.Variant.UINT64)
class GoogleSheetsOptions(_messages.Message):
"""A GoogleSheetsOptions object.
Fields:
    skipLeadingRows: [Optional] The number of rows at the top of a sheet that
      BigQuery will skip when reading the data. The default value is 0. This
      property is useful if you have header rows that should be skipped.
      When autodetect is on, the behavior is the following:
      * skipLeadingRows unspecified - Autodetect tries to detect headers in
        the first row. If they are not detected, the row is read as data.
        Otherwise data is read starting from the second row.
      * skipLeadingRows is 0 - Instructs autodetect that there are no headers
        and data should be read starting from the first row.
      * skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to
        detect headers in row N. If headers are not detected, row N is just
        skipped. Otherwise row N is used to extract column names for the
        detected schema.
"""
skipLeadingRows = _messages.IntegerField(1)
class Job(_messages.Message):
"""A Job object.
Fields:
configuration: [Required] Describes the job configuration.
etag: [Output-only] A hash of this resource.
id: [Output-only] Opaque ID field of the job
jobReference: [Optional] Reference describing the unique-per-user name of
the job.
kind: [Output-only] The type of the resource.
selfLink: [Output-only] A URL that can be used to access this resource
again.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Output-only] The status of this job. Examine this value when
polling an asynchronous job to see if the job is complete.
user_email: [Output-only] Email address of the user who ran the job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
selfLink = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
class JobCancelResponse(_messages.Message):
"""A JobCancelResponse object.
Fields:
job: The final state of the job.
kind: The resource type of the response.
"""
job = _messages.MessageField('Job', 1)
kind = _messages.StringField(2, default=u'bigquery#jobCancelResponse')
class JobConfiguration(_messages.Message):
"""A JobConfiguration object.
Messages:
LabelsValue: [Experimental] The labels associated with this job. You can
use these to organize and group your jobs. Label keys and values can be
no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
Fields:
copy: [Pick one] Copies a table.
dryRun: [Optional] If set, don't actually run this job. A valid query will
return a mostly empty response with some processing statistics, while an
invalid query will return the same error it would if it wasn't a dry
run. Behavior of non-query jobs is undefined.
extract: [Pick one] Configures an extract job.
labels: [Experimental] The labels associated with this job. You can use
these to organize and group your jobs. Label keys and values can be no
longer than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
load: [Pick one] Configures a load job.
query: [Pick one] Configures a query job.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this job. You can use these
to organize and group your jobs. Label keys and values can be no longer
than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter and each
label in the list must have a different key.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
copy = _messages.MessageField('JobConfigurationTableCopy', 1)
dryRun = _messages.BooleanField(2)
extract = _messages.MessageField('JobConfigurationExtract', 3)
labels = _messages.MessageField('LabelsValue', 4)
load = _messages.MessageField('JobConfigurationLoad', 5)
query = _messages.MessageField('JobConfigurationQuery', 6)
class JobConfigurationExtract(_messages.Message):
"""A JobConfigurationExtract object.
Fields:
compression: [Optional] The compression type to use for exported files.
Possible values include GZIP and NONE. The default value is NONE.
destinationFormat: [Optional] The exported file format. Possible values
include CSV, NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV.
Tables with nested or repeated fields cannot be exported as CSV.
destinationUri: [Pick one] DEPRECATED: Use destinationUris instead,
passing only one URI as necessary. The fully-qualified Google Cloud
Storage URI where the extracted table should be written.
destinationUris: [Pick one] A list of fully-qualified Google Cloud Storage
URIs where the extracted table should be written.
fieldDelimiter: [Optional] Delimiter to use between fields in the exported
      data. Default is ','.
printHeader: [Optional] Whether to print out a header row in the results.
Default is true.
sourceTable: [Required] A reference to the table being exported.
"""
compression = _messages.StringField(1)
destinationFormat = _messages.StringField(2)
destinationUri = _messages.StringField(3)
destinationUris = _messages.StringField(4, repeated=True)
fieldDelimiter = _messages.StringField(5)
printHeader = _messages.BooleanField(6, default=True)
sourceTable = _messages.MessageField('TableReference', 7)
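

# Illustrative sketch (not part of the generated API): an extract job
# configuration that exports a table to compressed newline-delimited JSON in
# Cloud Storage. Table and bucket names are placeholders, and the
# TableReference field names (projectId, datasetId, tableId) are assumed from
# the BigQuery API; the helper is never called by this module.
def _example_extract_config():
  """Builds a sample JobConfigurationExtract (illustrative sketch only)."""
  return JobConfigurationExtract(
      sourceTable=TableReference(
          projectId='my-project', datasetId='my_dataset', tableId='my_table'),
      destinationUris=['gs://my-bucket/export/part-*.json'],
      destinationFormat='NEWLINE_DELIMITED_JSON',
      compression='GZIP')
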
class JobConfigurationLoad(_messages.Message):
"""A JobConfigurationLoad object.
Fields:
allowJaggedRows: [Optional] Accept rows that are missing trailing optional
columns. The missing values are treated as nulls. If false, records with
missing trailing columns are treated as bad records, and if there are
too many bad records, an invalid error is returned in the job result.
The default value is false. Only applicable to CSV, ignored for other
formats.
allowQuotedNewlines: Indicates if BigQuery should allow quoted data
sections that contain newline characters in a CSV file. The default
value is false.
autodetect: [Experimental] Indicates if we should automatically infer the
options and schema for CSV and JSON sources.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table to load the data into.
encoding: [Optional] The character encoding of the data. The supported
values are UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery
decodes the data after the raw, binary data has been split using the
values of the quote and fieldDelimiter properties.
fieldDelimiter: [Optional] The separator for fields in a CSV file. The
separator can be any ISO-8859-1 single-byte character. To use a
character in the range 128-255, you must encode the character as UTF8.
BigQuery converts the string to ISO-8859-1 encoding, and then uses the
first byte of the encoded string to split the data in its raw, binary
state. BigQuery also supports the escape sequence "\t" to specify a tab
separator. The default value is a comma (',').
ignoreUnknownValues: [Optional] Indicates if BigQuery should allow extra
values that are not represented in the table schema. If true, the extra
values are ignored. If false, records with extra columns are treated as
bad records, and if there are too many bad records, an invalid error is
returned in the job result. The default value is false. The sourceFormat
property determines what BigQuery treats as an extra value: CSV:
Trailing columns JSON: Named values that don't match any column names
maxBadRecords: [Optional] The maximum number of bad records that BigQuery
can ignore when running the job. If the number of bad records exceeds
this value, an invalid error is returned in the job result. The default
value is 0, which requires that all records are valid.
nullMarker: [Optional] Specifies a string that represents a null value in
a CSV file. For example, if you specify "\N", BigQuery interprets "\N"
as a null value when loading a CSV file. The default value is the empty
string. If you set this property to a custom value, BigQuery still
interprets the empty string as a null value for all data types except
for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets
the empty string as an empty value.
projectionFields: [Experimental] If sourceFormat is set to
"DATASTORE_BACKUP", indicates which entity properties to load into
BigQuery from a Cloud Datastore backup. Property names are case
sensitive and must be top-level properties. If no properties are
specified, BigQuery loads all properties. If any named property isn't
found in the Cloud Datastore backup, an invalid error is returned in the
job result.
quote: [Optional] The value that is used to quote data sections in a CSV
file. BigQuery converts the string to ISO-8859-1 encoding, and then uses
the first byte of the encoded string to split the data in its raw,
binary state. The default value is a double-quote ('"'). If your data
does not contain quoted sections, set the property value to an empty
string. If your data contains quoted newline characters, you must also
set the allowQuotedNewlines property to true.
schema: [Optional] The schema for the destination table. The schema can be
omitted if the destination table already exists, or if you're loading
data from Google Cloud Datastore.
schemaInline: [Deprecated] The inline schema. For CSV schemas, specify as
"Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
baz:FLOAT".
schemaInlineFormat: [Deprecated] The format of the schemaInline property.
    schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the load job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
skipLeadingRows: [Optional] The number of rows at the top of a CSV file
that BigQuery will skip when loading the data. The default value is 0.
This property is useful if you have header rows in the file that should
be skipped.
sourceFormat: [Optional] The format of the data files. For CSV files,
specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". For
newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro,
specify "AVRO". The default value is CSV.
sourceUris: [Required] The fully-qualified URIs that point to your data in
Google Cloud Storage. Each URI can contain one '*' wildcard character
and it must come after the 'bucket' name.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_APPEND. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
allowJaggedRows = _messages.BooleanField(1)
allowQuotedNewlines = _messages.BooleanField(2)
autodetect = _messages.BooleanField(3)
createDisposition = _messages.StringField(4)
destinationTable = _messages.MessageField('TableReference', 5)
encoding = _messages.StringField(6)
fieldDelimiter = _messages.StringField(7)
ignoreUnknownValues = _messages.BooleanField(8)
maxBadRecords = _messages.IntegerField(9, variant=_messages.Variant.INT32)
nullMarker = _messages.StringField(10)
projectionFields = _messages.StringField(11, repeated=True)
quote = _messages.StringField(12, default=u'"')
schema = _messages.MessageField('TableSchema', 13)
schemaInline = _messages.StringField(14)
schemaInlineFormat = _messages.StringField(15)
schemaUpdateOptions = _messages.StringField(16, repeated=True)
skipLeadingRows = _messages.IntegerField(17, variant=_messages.Variant.INT32)
sourceFormat = _messages.StringField(18)
sourceUris = _messages.StringField(19, repeated=True)
writeDisposition = _messages.StringField(20)
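

# Illustrative sketch (not part of the generated API): a load job
# configuration for appending CSV data from Cloud Storage into an existing
# table. Identifiers are placeholders, and the TableReference field names are
# assumed from the BigQuery API; the helper is never called by this module.
def _example_load_config():
  """Builds a sample JobConfigurationLoad (illustrative sketch only)."""
  return JobConfigurationLoad(
      sourceUris=['gs://my-bucket/data/*.csv'],
      sourceFormat='CSV',
      destinationTable=TableReference(
          projectId='my-project', datasetId='my_dataset', tableId='my_table'),
      skipLeadingRows=1,
      writeDisposition='WRITE_APPEND',
      createDisposition='CREATE_NEVER')
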
class JobConfigurationQuery(_messages.Message):
"""A JobConfigurationQuery object.
Messages:
TableDefinitionsValue: [Optional] If querying an external data source
outside of BigQuery, describes the data format, location and other
properties of the data source. By defining these properties, the data
source can then be queried as if it were a standard BigQuery table.
Fields:
allowLargeResults: If true, allows the query to produce arbitrarily large
result tables at a slight cost in performance. Requires destinationTable
to be set.
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
defaultDataset: [Optional] Specifies the default dataset to use for
unqualified table names in the query.
destinationTable: [Optional] Describes the table where the query results
should be stored. If not present, a new table will be created to store
the results.
flattenResults: [Optional] Flattens all nested and repeated fields in the
query results. The default value is true. allowLargeResults must be true
if this is set to false.
maximumBillingTier: [Optional] Limits the billing tier for this job.
Queries that have resource usage beyond this tier will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
maximumBytesBilled: [Optional] Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail (without
incurring a charge). If unspecified, this will be set to your project
default.
parameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to use
positional (?) query parameters or to NAMED to use named (@myparam)
query parameters in this query.
preserveNulls: [Deprecated] This property is deprecated.
priority: [Optional] Specifies a priority for the query. Possible values
include INTERACTIVE and BATCH. The default value is INTERACTIVE.
query: [Required] BigQuery SQL query to execute.
queryParameters: Query parameters for standard SQL queries.
schemaUpdateOptions: [Experimental] Allows the schema of the destination
table to be updated as a side effect of the query job. Schema update
options are supported in two cases: when writeDisposition is
WRITE_APPEND; when writeDisposition is WRITE_TRUNCATE and the
destination table is a partition of a table, specified by partition
decorators. For normal tables, WRITE_TRUNCATE will always overwrite the
schema. One or more of the following values are specified:
ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema.
ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original
schema to nullable.
tableDefinitions: [Optional] If querying an external data source outside
of BigQuery, describes the data format, location and other properties of
the data source. By defining these properties, the data source can then
be queried as if it were a standard BigQuery table.
useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
this query. The default value is true. If set to false, the query will
use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ When useLegacySql is set to false, the values of
allowLargeResults and flattenResults are ignored; query will be run as
if allowLargeResults is true and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. Moreover, the query cache is
only available when a query does not have a destination table specified.
The default value is true.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class TableDefinitionsValue(_messages.Message):
"""[Optional] If querying an external data source outside of BigQuery,
describes the data format, location and other properties of the data
source. By defining these properties, the data source can then be queried
as if it were a standard BigQuery table.
Messages:
AdditionalProperty: An additional property for a TableDefinitionsValue
object.
Fields:
additionalProperties: Additional properties of type
TableDefinitionsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a TableDefinitionsValue object.
Fields:
key: Name of the additional property.
value: A ExternalDataConfiguration attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('ExternalDataConfiguration', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
allowLargeResults = _messages.BooleanField(1)
createDisposition = _messages.StringField(2)
defaultDataset = _messages.MessageField('DatasetReference', 3)
destinationTable = _messages.MessageField('TableReference', 4)
flattenResults = _messages.BooleanField(5, default=True)
maximumBillingTier = _messages.IntegerField(6, variant=_messages.Variant.INT32, default=1)
maximumBytesBilled = _messages.IntegerField(7)
parameterMode = _messages.StringField(8)
preserveNulls = _messages.BooleanField(9)
priority = _messages.StringField(10)
query = _messages.StringField(11)
queryParameters = _messages.MessageField('QueryParameter', 12, repeated=True)
schemaUpdateOptions = _messages.StringField(13, repeated=True)
tableDefinitions = _messages.MessageField('TableDefinitionsValue', 14)
useLegacySql = _messages.BooleanField(15)
useQueryCache = _messages.BooleanField(16, default=True)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 17, repeated=True)
writeDisposition = _messages.StringField(18)
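

# Illustrative sketch (not part of the generated API): a standard-SQL query
# job configuration with a named query parameter. Identifiers are
# placeholders and the parameter type name follows standard SQL conventions;
# the helper is never called by this module.
def _example_query_config():
  """Builds a sample JobConfigurationQuery (illustrative sketch only)."""
  return JobConfigurationQuery(
      query='SELECT name FROM `my_dataset.users` WHERE age > @min_age',
      useLegacySql=False,
      parameterMode='NAMED',
      queryParameters=[QueryParameter(
          name='min_age',
          parameterType=QueryParameterType(type='INT64'),
          parameterValue=QueryParameterValue(value='18'))],
      writeDisposition='WRITE_TRUNCATE')
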
class JobConfigurationTableCopy(_messages.Message):
"""A JobConfigurationTableCopy object.
Fields:
createDisposition: [Optional] Specifies whether the job is allowed to
create new tables. The following values are supported: CREATE_IF_NEEDED:
If the table does not exist, BigQuery creates the table. CREATE_NEVER:
The table must already exist. If it does not, a 'notFound' error is
returned in the job result. The default value is CREATE_IF_NEEDED.
Creation, truncation and append actions occur as one atomic update upon
job completion.
destinationTable: [Required] The destination table
sourceTable: [Pick one] Source table to copy.
sourceTables: [Pick one] Source tables to copy.
writeDisposition: [Optional] Specifies the action that occurs if the
destination table already exists. The following values are supported:
WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
table data. WRITE_APPEND: If the table already exists, BigQuery appends
the data to the table. WRITE_EMPTY: If the table already exists and
contains data, a 'duplicate' error is returned in the job result. The
default value is WRITE_EMPTY. Each action is atomic and only occurs if
BigQuery is able to complete the job successfully. Creation, truncation
and append actions occur as one atomic update upon job completion.
"""
createDisposition = _messages.StringField(1)
destinationTable = _messages.MessageField('TableReference', 2)
sourceTable = _messages.MessageField('TableReference', 3)
sourceTables = _messages.MessageField('TableReference', 4, repeated=True)
writeDisposition = _messages.StringField(5)
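

# Illustrative sketch (not part of the generated API): a table-copy job
# configuration that copies one table onto another, overwriting any existing
# data. Identifiers are placeholders, and the TableReference field names are
# assumed from the BigQuery API; the helper is never called by this module.
def _example_copy_config():
  """Builds a sample JobConfigurationTableCopy (illustrative sketch only)."""
  return JobConfigurationTableCopy(
      sourceTable=TableReference(
          projectId='my-project', datasetId='my_dataset', tableId='src_table'),
      destinationTable=TableReference(
          projectId='my-project', datasetId='my_dataset', tableId='dst_table'),
      writeDisposition='WRITE_TRUNCATE',
      createDisposition='CREATE_IF_NEEDED')
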
class JobList(_messages.Message):
"""A JobList object.
Messages:
JobsValueListEntry: A JobsValueListEntry object.
Fields:
etag: A hash of this page of results.
jobs: List of jobs that were requested.
kind: The resource type of the response.
nextPageToken: A token to request the next page of results.
"""
class JobsValueListEntry(_messages.Message):
"""A JobsValueListEntry object.
Fields:
configuration: [Full-projection-only] Specifies the job configuration.
errorResult: A result object that will be present only if the job has
failed.
id: Unique opaque ID of the job.
jobReference: Job reference uniquely identifying the job.
kind: The resource type.
state: Running state of the job. When the state is DONE, errorResult can
be checked to determine whether the job succeeded or failed.
statistics: [Output-only] Information about the job, including starting
time and ending time of the job.
status: [Full-projection-only] Describes the state of the job.
user_email: [Full-projection-only] Email address of the user who ran the
job.
"""
configuration = _messages.MessageField('JobConfiguration', 1)
errorResult = _messages.MessageField('ErrorProto', 2)
id = _messages.StringField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#job')
state = _messages.StringField(6)
statistics = _messages.MessageField('JobStatistics', 7)
status = _messages.MessageField('JobStatus', 8)
user_email = _messages.StringField(9)
etag = _messages.StringField(1)
jobs = _messages.MessageField('JobsValueListEntry', 2, repeated=True)
kind = _messages.StringField(3, default=u'bigquery#jobList')
nextPageToken = _messages.StringField(4)
class JobReference(_messages.Message):
"""A JobReference object.
Fields:
jobId: [Required] The ID of the job. The ID must contain only letters
(a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum
length is 1,024 characters.
projectId: [Required] The ID of the project containing this job.
"""
jobId = _messages.StringField(1)
projectId = _messages.StringField(2)
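

# Illustrative sketch (not part of the generated API): a Job message that
# wraps a query configuration and a per-project job reference. The job and
# project IDs are placeholders; the helper is never called by this module.
def _example_job():
  """Builds a sample Job message (illustrative sketch only)."""
  return Job(
      jobReference=JobReference(
          projectId='my-project', jobId='job_20240101_001'),
      configuration=JobConfiguration(
          query=JobConfigurationQuery(
              query='SELECT 1',
              useLegacySql=False)))
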
class JobStatistics(_messages.Message):
"""A JobStatistics object.
Fields:
creationTime: [Output-only] Creation time of this job, in milliseconds
since the epoch. This field will be present on all jobs.
endTime: [Output-only] End time of this job, in milliseconds since the
epoch. This field will be present whenever a job is in the DONE state.
extract: [Output-only] Statistics for an extract job.
load: [Output-only] Statistics for a load job.
query: [Output-only] Statistics for a query job.
startTime: [Output-only] Start time of this job, in milliseconds since the
epoch. This field will be present when the job transitions from the
PENDING state to either RUNNING or DONE.
totalBytesProcessed: [Output-only] [Deprecated] Use the bytes processed in
the query statistics instead.
"""
creationTime = _messages.IntegerField(1)
endTime = _messages.IntegerField(2)
extract = _messages.MessageField('JobStatistics4', 3)
load = _messages.MessageField('JobStatistics3', 4)
query = _messages.MessageField('JobStatistics2', 5)
startTime = _messages.IntegerField(6)
totalBytesProcessed = _messages.IntegerField(7)
class JobStatistics2(_messages.Message):
"""A JobStatistics2 object.
Fields:
billingTier: [Output-only] Billing tier for the job.
cacheHit: [Output-only] Whether the query result was fetched from the
query cache.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
queryPlan: [Output-only, Experimental] Describes execution plan for the
query.
referencedTables: [Output-only, Experimental] Referenced tables for the
job. Queries that reference more than 50 tables will not have a complete
list.
schema: [Output-only, Experimental] The schema of the results. Present
only for successful dry run of non-legacy SQL queries.
statementType: [Output-only, Experimental] The type of query statement, if
valid.
totalBytesBilled: [Output-only] Total bytes billed for the job.
totalBytesProcessed: [Output-only] Total bytes processed for the job.
undeclaredQueryParameters: [Output-only, Experimental] Standard SQL only:
list of undeclared query parameters detected during a dry run
validation.
"""
billingTier = _messages.IntegerField(1, variant=_messages.Variant.INT32)
cacheHit = _messages.BooleanField(2)
numDmlAffectedRows = _messages.IntegerField(3)
queryPlan = _messages.MessageField('ExplainQueryStage', 4, repeated=True)
referencedTables = _messages.MessageField('TableReference', 5, repeated=True)
schema = _messages.MessageField('TableSchema', 6)
statementType = _messages.StringField(7)
totalBytesBilled = _messages.IntegerField(8)
totalBytesProcessed = _messages.IntegerField(9)
undeclaredQueryParameters = _messages.MessageField('QueryParameter', 10, repeated=True)
class JobStatistics3(_messages.Message):
"""A JobStatistics3 object.
Fields:
inputFileBytes: [Output-only] Number of bytes of source data in a load
job.
inputFiles: [Output-only] Number of source files in a load job.
outputBytes: [Output-only] Size of the loaded data in bytes. Note that
while a load job is in the running state, this value may change.
outputRows: [Output-only] Number of rows imported in a load job. Note that
while an import job is in the running state, this value may change.
"""
inputFileBytes = _messages.IntegerField(1)
inputFiles = _messages.IntegerField(2)
outputBytes = _messages.IntegerField(3)
outputRows = _messages.IntegerField(4)
class JobStatistics4(_messages.Message):
"""A JobStatistics4 object.
Fields:
destinationUriFileCounts: [Output-only] Number of files per destination
URI or URI pattern specified in the extract configuration. These values
will be in the same order as the URIs specified in the 'destinationUris'
field.
"""
destinationUriFileCounts = _messages.IntegerField(1, repeated=True)
class JobStatus(_messages.Message):
"""A JobStatus object.
Fields:
errorResult: [Output-only] Final error result of the job. If present,
indicates that the job has completed and was unsuccessful.
errors: [Output-only] All errors encountered during the running of the
job. Errors here do not necessarily mean that the job has completed or
was unsuccessful.
state: [Output-only] Running state of the job.
"""
errorResult = _messages.MessageField('ErrorProto', 1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
state = _messages.StringField(3)
@encoding.MapUnrecognizedFields('additionalProperties')
class JsonObject(_messages.Message):
"""Represents a single JSON object.
Messages:
AdditionalProperty: An additional property for a JsonObject object.
Fields:
additionalProperties: Additional properties of type JsonObject
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a JsonObject object.
Fields:
key: Name of the additional property.
value: A JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('JsonValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
JsonValue = extra_types.JsonValue
class ProjectList(_messages.Message):
"""A ProjectList object.
Messages:
ProjectsValueListEntry: A ProjectsValueListEntry object.
Fields:
etag: A hash of the page of results
kind: The type of list.
nextPageToken: A token to request the next page of results.
projects: Projects to which you have at least READ access.
totalItems: The total number of projects in the list.
"""
class ProjectsValueListEntry(_messages.Message):
"""A ProjectsValueListEntry object.
Fields:
friendlyName: A descriptive name for this project.
id: An opaque ID of this project.
kind: The resource type.
numericId: The numeric ID of this project.
projectReference: A unique reference to this project.
"""
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#project')
numericId = _messages.IntegerField(4, variant=_messages.Variant.UINT64)
projectReference = _messages.MessageField('ProjectReference', 5)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#projectList')
nextPageToken = _messages.StringField(3)
projects = _messages.MessageField('ProjectsValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class ProjectReference(_messages.Message):
"""A ProjectReference object.
Fields:
projectId: [Required] ID of the project. Can be either the numeric ID or
the assigned ID of the project.
"""
projectId = _messages.StringField(1)
class QueryParameter(_messages.Message):
"""A QueryParameter object.
Fields:
name: [Optional] If unset, this is a positional parameter. Otherwise,
should be unique within a query.
parameterType: [Required] The type of this parameter.
parameterValue: [Required] The value of this parameter.
"""
name = _messages.StringField(1)
parameterType = _messages.MessageField('QueryParameterType', 2)
parameterValue = _messages.MessageField('QueryParameterValue', 3)
class QueryParameterType(_messages.Message):
"""A QueryParameterType object.
Messages:
StructTypesValueListEntry: A StructTypesValueListEntry object.
Fields:
arrayType: [Optional] The type of the array's elements, if this is an
array.
structTypes: [Optional] The types of the fields of this struct, in order,
if this is a struct.
type: [Required] The top level type of this field.
"""
class StructTypesValueListEntry(_messages.Message):
"""A StructTypesValueListEntry object.
Fields:
description: [Optional] Human-oriented description of the field.
name: [Optional] The name of this field.
type: [Required] The type of this field.
"""
description = _messages.StringField(1)
name = _messages.StringField(2)
type = _messages.MessageField('QueryParameterType', 3)
arrayType = _messages.MessageField('QueryParameterType', 1)
structTypes = _messages.MessageField('StructTypesValueListEntry', 2, repeated=True)
type = _messages.StringField(3)
class QueryParameterValue(_messages.Message):
"""A QueryParameterValue object.
Messages:
StructValuesValue: [Optional] The struct field values, in order of the
struct type's declaration.
Fields:
arrayValues: [Optional] The array values, if this is an array type.
structValues: [Optional] The struct field values, in order of the struct
type's declaration.
value: [Optional] The value of this value, if a simple scalar type.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class StructValuesValue(_messages.Message):
"""[Optional] The struct field values, in order of the struct type's
declaration.
Messages:
AdditionalProperty: An additional property for a StructValuesValue
object.
Fields:
additionalProperties: Additional properties of type StructValuesValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a StructValuesValue object.
Fields:
key: Name of the additional property.
value: A QueryParameterValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('QueryParameterValue', 2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
arrayValues = _messages.MessageField('QueryParameterValue', 1, repeated=True)
structValues = _messages.MessageField('StructValuesValue', 2)
value = _messages.StringField(3)
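

# Illustrative sketch (not part of the generated API): a named array-typed
# query parameter built from the three parameter classes above. The parameter
# name and values are placeholders; the helper is never called by this module.
def _example_array_parameter():
  """Builds a sample array-valued QueryParameter (illustrative sketch only)."""
  return QueryParameter(
      name='allowed_states',
      parameterType=QueryParameterType(
          type='ARRAY',
          arrayType=QueryParameterType(type='STRING')),
      parameterValue=QueryParameterValue(
          arrayValues=[QueryParameterValue(value='NY'),
                       QueryParameterValue(value='CA')]))
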
class QueryRequest(_messages.Message):
"""A QueryRequest object.
Fields:
defaultDataset: [Optional] Specifies the default datasetId and projectId
to assume for any unqualified table names in the query. If not set, all
table names in the query string must be qualified in the format
'datasetId.tableId'.
dryRun: [Optional] If set to true, BigQuery doesn't run the job. Instead,
if the query is valid, BigQuery returns statistics about the job such as
how many bytes would be processed. If the query is invalid, an error
returns. The default value is false.
kind: The resource type of the request.
maxResults: [Optional] The maximum number of rows of data to return per
page of results. Setting this flag to a small value such as 1000 and
then paging through results might improve reliability when the query
result set is large. In addition to this limit, responses are also
limited to 10 MB. By default, there is no maximum row count, and only
the byte limit applies.
parameterMode: [Experimental] Standard SQL only. Set to POSITIONAL to use
positional (?) query parameters or to NAMED to use named (@myparam)
query parameters in this query.
preserveNulls: [Deprecated] This property is deprecated.
query: [Required] A query string, following the BigQuery query syntax, of
the query to execute. Example: "SELECT count(f1) FROM
[myProjectId:myDatasetId.myTableId]".
queryParameters: [Experimental] Query parameters for Standard SQL queries.
timeoutMs: [Optional] How long to wait for the query to complete, in
milliseconds, before the request times out and returns. Note that this
is only a timeout for the request, not the query. If the query takes
longer to run than the timeout value, the call returns without any
results and with the 'jobComplete' flag set to false. You can call
GetQueryResults() to wait for the query to complete and read the
results. The default value is 10000 milliseconds (10 seconds).
useLegacySql: Specifies whether to use BigQuery's legacy SQL dialect for
this query. The default value is true. If set to false, the query will
use BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ When useLegacySql is set to false, the values of
allowLargeResults and flattenResults are ignored; query will be run as
if allowLargeResults is true and flattenResults is false.
useQueryCache: [Optional] Whether to look for the result in the query
cache. The query cache is a best-effort cache that will be flushed
whenever tables in the query are modified. The default value is true.
"""
defaultDataset = _messages.MessageField('DatasetReference', 1)
dryRun = _messages.BooleanField(2)
kind = _messages.StringField(3, default=u'bigquery#queryRequest')
maxResults = _messages.IntegerField(4, variant=_messages.Variant.UINT32)
parameterMode = _messages.StringField(5)
preserveNulls = _messages.BooleanField(6)
query = _messages.StringField(7)
queryParameters = _messages.MessageField('QueryParameter', 8, repeated=True)
timeoutMs = _messages.IntegerField(9, variant=_messages.Variant.UINT32)
useLegacySql = _messages.BooleanField(10, default=True)
useQueryCache = _messages.BooleanField(11, default=True)
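

# Illustrative sketch (not part of the generated API): a synchronous
# QueryRequest with a default dataset and a 30-second timeout. Identifiers
# are placeholders; the helper is never called by this module.
def _example_query_request():
  """Builds a sample QueryRequest (illustrative sketch only)."""
  return QueryRequest(
      query='SELECT COUNT(*) FROM users',
      defaultDataset=DatasetReference(
          projectId='my-project', datasetId='my_dataset'),
      useLegacySql=False,
      maxResults=1000,
      timeoutMs=30000)
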
class QueryResponse(_messages.Message):
"""A QueryResponse object.
Fields:
cacheHit: Whether the query result was fetched from the query cache.
errors: [Output-only] All errors and warnings encountered during the
running of the job. Errors here do not necessarily mean that the job has
completed or was unsuccessful.
jobComplete: Whether the query has completed or not. If rows or totalRows
are present, this will always be true. If this is false, totalRows will
not be available.
jobReference: Reference to the Job that was created to run the query. This
field will be present even if the original request timed out, in which
case GetQueryResults can be used to read the results once the query has
completed. Since this API only returns the first page of results,
subsequent pages can be fetched via the same mechanism
(GetQueryResults).
kind: The resource type.
numDmlAffectedRows: [Output-only, Experimental] The number of rows
affected by a DML statement. Present only for DML statements INSERT,
UPDATE or DELETE.
pageToken: A token used for paging results.
rows: An object with as many results as can be contained within the
maximum permitted reply size. To get any additional rows, you can call
GetQueryResults and specify the jobReference returned above.
schema: The schema of the results. Present only when the query completes
successfully.
totalBytesProcessed: The total number of bytes processed for this query.
If this query was a dry run, this is the number of bytes that would be
processed if the query were run.
totalRows: The total number of rows in the complete query result set,
which can be more than the number of rows in this single page of
results.
"""
cacheHit = _messages.BooleanField(1)
errors = _messages.MessageField('ErrorProto', 2, repeated=True)
jobComplete = _messages.BooleanField(3)
jobReference = _messages.MessageField('JobReference', 4)
kind = _messages.StringField(5, default=u'bigquery#queryResponse')
numDmlAffectedRows = _messages.IntegerField(6)
pageToken = _messages.StringField(7)
rows = _messages.MessageField('TableRow', 8, repeated=True)
schema = _messages.MessageField('TableSchema', 9)
totalBytesProcessed = _messages.IntegerField(10)
totalRows = _messages.IntegerField(11, variant=_messages.Variant.UINT64)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters. Overrides userIp if both are provided.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
userIp: IP address of the site where the request originates. Use this if
you want to enforce per-user limits.
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default=u'json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
class Streamingbuffer(_messages.Message):
"""A Streamingbuffer object.
Fields:
estimatedBytes: [Output-only] A lower-bound estimate of the number of
bytes currently in the streaming buffer.
estimatedRows: [Output-only] A lower-bound estimate of the number of rows
currently in the streaming buffer.
oldestEntryTime: [Output-only] Contains the timestamp of the oldest entry
in the streaming buffer, in milliseconds since the epoch, if the
streaming buffer is available.
"""
estimatedBytes = _messages.IntegerField(1, variant=_messages.Variant.UINT64)
estimatedRows = _messages.IntegerField(2, variant=_messages.Variant.UINT64)
oldestEntryTime = _messages.IntegerField(3, variant=_messages.Variant.UINT64)
class Table(_messages.Message):
"""A Table object.
Messages:
LabelsValue: [Experimental] The labels associated with this table. You can
use these to organize and group your tables. Label keys and values can
be no longer than 63 characters, can only contain lowercase letters,
numeric characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
Fields:
creationTime: [Output-only] The time when this table was created, in
milliseconds since the epoch.
description: [Optional] A user-friendly description of this table.
etag: [Output-only] A hash of this resource.
expirationTime: [Optional] The time when this table expires, in
milliseconds since the epoch. If not present, the table will persist
indefinitely. Expired tables will be deleted and their storage
reclaimed.
externalDataConfiguration: [Optional] Describes the data format, location,
and other properties of a table stored outside of BigQuery. By defining
these properties, the data source can then be queried as if it were a
standard BigQuery table.
friendlyName: [Optional] A descriptive name for this table.
id: [Output-only] An opaque ID uniquely identifying the table.
kind: [Output-only] The type of the resource.
labels: [Experimental] The labels associated with this table. You can use
these to organize and group your tables. Label keys and values can be no
longer than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are
allowed. Label values are optional. Label keys must start with a letter
and each label in the list must have a different key.
lastModifiedTime: [Output-only] The time when this table was last
modified, in milliseconds since the epoch.
location: [Output-only] The geographic location where the table resides.
This value is inherited from the dataset.
numBytes: [Output-only] The size of this table in bytes, excluding any
data in the streaming buffer.
numLongTermBytes: [Output-only] The number of bytes in the table that are
considered "long-term storage".
numRows: [Output-only] The number of rows of data in this table, excluding
any data in the streaming buffer.
schema: [Optional] Describes the schema of this table.
selfLink: [Output-only] A URL that can be used to access this resource
again.
streamingBuffer: [Output-only] Contains information regarding this table's
streaming buffer, if one is present. This field will be absent if the
table is not being streamed to or if there is no data in the streaming
buffer.
tableReference: [Required] Reference describing the ID of this table.
timePartitioning: [Experimental] If specified, configures time-based
partitioning for this table.
type: [Output-only] Describes the table type. The following values are
supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
by a SQL query. EXTERNAL: A table that references data stored in an
external storage system, such as Google Cloud Storage. The default value
is TABLE.
view: [Optional] The view definition.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this table. You can use these
to organize and group your tables. Label keys and values can be no longer
than 63 characters, can only contain lowercase letters, numeric
characters, underscores and dashes. International characters are allowed.
Label values are optional. Label keys must start with a letter and each
label in the list must have a different key.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
creationTime = _messages.IntegerField(1)
description = _messages.StringField(2)
etag = _messages.StringField(3)
expirationTime = _messages.IntegerField(4)
externalDataConfiguration = _messages.MessageField('ExternalDataConfiguration', 5)
friendlyName = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default=u'bigquery#table')
labels = _messages.MessageField('LabelsValue', 9)
lastModifiedTime = _messages.IntegerField(10, variant=_messages.Variant.UINT64)
location = _messages.StringField(11)
numBytes = _messages.IntegerField(12)
numLongTermBytes = _messages.IntegerField(13)
numRows = _messages.IntegerField(14, variant=_messages.Variant.UINT64)
schema = _messages.MessageField('TableSchema', 15)
selfLink = _messages.StringField(16)
streamingBuffer = _messages.MessageField('Streamingbuffer', 17)
tableReference = _messages.MessageField('TableReference', 18)
timePartitioning = _messages.MessageField('TimePartitioning', 19)
type = _messages.StringField(20)
view = _messages.MessageField('ViewDefinition', 21)
class TableCell(_messages.Message):
"""A TableCell object.
Fields:
v: A extra_types.JsonValue attribute.
"""
v = _messages.MessageField('extra_types.JsonValue', 1)
class TableDataInsertAllRequest(_messages.Message):
"""A TableDataInsertAllRequest object.
Messages:
RowsValueListEntry: A RowsValueListEntry object.
Fields:
ignoreUnknownValues: [Optional] Accept rows that contain values that do
not match the schema. The unknown values are ignored. Default is false,
which treats unknown values as errors.
kind: The resource type of the response.
rows: The rows to insert.
skipInvalidRows: [Optional] Insert all valid rows of a request, even if
invalid rows exist. The default value is false, which causes the entire
request to fail if any invalid rows exist.
templateSuffix: [Experimental] If specified, treats the destination table
as a base template, and inserts the rows into an instance table named
"{destination}{templateSuffix}". BigQuery will manage creation of the
instance table, using the schema of the base template table. See
https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-
tables for considerations when working with templates tables.
"""
class RowsValueListEntry(_messages.Message):
"""A RowsValueListEntry object.
Fields:
insertId: [Optional] A unique ID for each row. BigQuery uses this
property to detect duplicate insertion requests on a best-effort
basis.
json: [Required] A JSON object that contains a row of data. The object's
properties and values must match the destination table's schema.
"""
insertId = _messages.StringField(1)
json = _messages.MessageField('JsonObject', 2)
ignoreUnknownValues = _messages.BooleanField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllRequest')
rows = _messages.MessageField('RowsValueListEntry', 3, repeated=True)
skipInvalidRows = _messages.BooleanField(4)
templateSuffix = _messages.StringField(5)
class TableDataInsertAllResponse(_messages.Message):
"""A TableDataInsertAllResponse object.
Messages:
    InsertErrorsValueListEntry: An InsertErrorsValueListEntry object.
Fields:
insertErrors: An array of errors for rows that were not inserted.
kind: The resource type of the response.
"""
class InsertErrorsValueListEntry(_messages.Message):
"""A InsertErrorsValueListEntry object.
Fields:
errors: Error information for the row indicated by the index property.
index: The index of the row that error applies to.
"""
errors = _messages.MessageField('ErrorProto', 1, repeated=True)
index = _messages.IntegerField(2, variant=_messages.Variant.UINT32)
insertErrors = _messages.MessageField('InsertErrorsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default=u'bigquery#tableDataInsertAllResponse')
class TableDataList(_messages.Message):
"""A TableDataList object.
Fields:
etag: A hash of this page of results.
kind: The resource type of the response.
pageToken: A token used for paging results. Providing this token instead
of the startIndex parameter can help you retrieve stable results when an
underlying table is changing.
rows: Rows of results.
totalRows: The total number of rows in the complete table.
"""
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableDataList')
pageToken = _messages.StringField(3)
rows = _messages.MessageField('TableRow', 4, repeated=True)
totalRows = _messages.IntegerField(5)
class TableFieldSchema(_messages.Message):
"""A TableFieldSchema object.
Fields:
description: [Optional] The field description. The maximum length is 16K
characters.
fields: [Optional] Describes the nested schema fields if the type property
is set to RECORD.
mode: [Optional] The field mode. Possible values include NULLABLE,
REQUIRED and REPEATED. The default value is NULLABLE.
name: [Required] The field name. The name must contain only letters (a-z,
A-Z), numbers (0-9), or underscores (_), and must start with a letter or
underscore. The maximum length is 128 characters.
type: [Required] The field data type. Possible values include STRING,
BYTES, INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT),
BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD
(where RECORD indicates that the field contains a nested schema) or
STRUCT (same as RECORD).
"""
description = _messages.StringField(1)
fields = _messages.MessageField('TableFieldSchema', 2, repeated=True)
mode = _messages.StringField(3)
name = _messages.StringField(4)
type = _messages.StringField(5)
class TableList(_messages.Message):
"""A TableList object.
Messages:
TablesValueListEntry: A TablesValueListEntry object.
Fields:
etag: A hash of this page of results.
kind: The type of list.
nextPageToken: A token to request the next page of results.
tables: Tables in the requested dataset.
totalItems: The total number of tables in the dataset.
"""
class TablesValueListEntry(_messages.Message):
"""A TablesValueListEntry object.
Messages:
LabelsValue: [Experimental] The labels associated with this table. You
can use these to organize and group your tables.
ViewValue: Additional details for a view.
Fields:
friendlyName: The user-friendly name for this table.
id: An opaque ID of the table
kind: The resource type.
labels: [Experimental] The labels associated with this table. You can
use these to organize and group your tables.
tableReference: A reference uniquely identifying the table.
type: The type of table. Possible values are: TABLE, VIEW.
view: Additional details for a view.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
"""[Experimental] The labels associated with this table. You can use
these to organize and group your tables.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class ViewValue(_messages.Message):
"""Additional details for a view.
Fields:
useLegacySql: True if view is defined in legacy SQL dialect, false if
in standard SQL.
"""
useLegacySql = _messages.BooleanField(1)
friendlyName = _messages.StringField(1)
id = _messages.StringField(2)
kind = _messages.StringField(3, default=u'bigquery#table')
labels = _messages.MessageField('LabelsValue', 4)
tableReference = _messages.MessageField('TableReference', 5)
type = _messages.StringField(6)
view = _messages.MessageField('ViewValue', 7)
etag = _messages.StringField(1)
kind = _messages.StringField(2, default=u'bigquery#tableList')
nextPageToken = _messages.StringField(3)
tables = _messages.MessageField('TablesValueListEntry', 4, repeated=True)
totalItems = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class TableReference(_messages.Message):
"""A TableReference object.
Fields:
datasetId: [Required] The ID of the dataset containing this table.
projectId: [Required] The ID of the project containing this table.
tableId: [Required] The ID of the table. The ID must contain only letters
(a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is
1,024 characters.
"""
datasetId = _messages.StringField(1)
projectId = _messages.StringField(2)
tableId = _messages.StringField(3)
class TableRow(_messages.Message):
"""A TableRow object.
Fields:
f: Represents a single row in the result set, consisting of one or more
fields.
"""
f = _messages.MessageField('TableCell', 1, repeated=True)
class TableSchema(_messages.Message):
"""A TableSchema object.
Fields:
fields: Describes the fields in a table.
"""
fields = _messages.MessageField('TableFieldSchema', 1, repeated=True)
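# NOTE: illustrative sketch only -- building a schema with a nested RECORD
# field, mirroring the TableFieldSchema documentation above.  The field names
# are placeholders.
def _example_schema():
  """Returns a TableSchema with a required integer and a nested record."""
  return TableSchema(fields=[
      TableFieldSchema(name='id', type='INTEGER', mode='REQUIRED'),
      TableFieldSchema(name='address', type='RECORD', mode='NULLABLE', fields=[
          TableFieldSchema(name='city', type='STRING'),
          TableFieldSchema(name='zip', type='STRING')])])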
class TimePartitioning(_messages.Message):
"""A TimePartitioning object.
Fields:
expirationMs: [Optional] Number of milliseconds for which to keep the
storage for a partition.
type: [Required] The only type supported is DAY, which will generate one
partition per day based on data loading time.
"""
expirationMs = _messages.IntegerField(1)
type = _messages.StringField(2)
class UserDefinedFunctionResource(_messages.Message):
"""A UserDefinedFunctionResource object.
Fields:
inlineCode: [Pick one] An inline resource that contains code for a user-
      defined function (UDF). Providing an inline code resource is equivalent
to providing a URI for a file containing the same code.
resourceUri: [Pick one] A code resource to load from a Google Cloud
Storage URI (gs://bucket/path).
"""
inlineCode = _messages.StringField(1)
resourceUri = _messages.StringField(2)
class ViewDefinition(_messages.Message):
"""A ViewDefinition object.
Fields:
query: [Required] A query that BigQuery executes when the view is
referenced.
useLegacySql: Specifies whether to use BigQuery's legacy SQL for this
view. The default value is true. If set to false, the view will use
BigQuery's standard SQL: https://cloud.google.com/bigquery/sql-
reference/ Queries and views that reference this view must use the same
flag value.
userDefinedFunctionResources: [Experimental] Describes user-defined
function resources used in the query.
"""
query = _messages.StringField(1)
useLegacySql = _messages.BooleanField(2)
userDefinedFunctionResources = _messages.MessageField('UserDefinedFunctionResource', 3, repeated=True)
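# NOTE: illustrative sketch only -- a Table resource whose contents are defined
# by a standard-SQL view, combining the Table, TableReference and
# ViewDefinition messages above.  The identifiers are placeholders.
def _example_view_table(project_id, dataset_id, table_id):
  """Returns a Table message describing a logical view."""
  return Table(
      tableReference=TableReference(
          projectId=project_id, datasetId=dataset_id, tableId=table_id),
      view=ViewDefinition(query='SELECT 1 AS x', useLegacySql=False))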
| [
"[email protected]"
] | |
67d8405dae494c985db55a7991291fe6a81e390a | 38c10c01007624cd2056884f25e0d6ab85442194 | /third_party/chromite/cbuildbot/autotest_rpc_errors.py | 1ee19f4a5238f93886962b5f9968b1f009275cf6 | [
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 670 | py | # Copyright 2015 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Error codes used for the Autotest RPC Client, Proxy, and Server.
This is a copy of scripts/slave-internal/autotest_rpc/autotest_rpc_errors.py
from https://chrome-internal.googlesource.com/chrome/tools/build.
"""
CLIENT_CANNOT_CONNECT = 1
CLIENT_HTTP_CODE = 2
CLIENT_EMPTY_RESPONSE = 3
CLIENT_NO_RETURN_CODE = 4
PROXY_CANNOT_SEND_REQUEST = 11
PROXY_CONNECTION_LOST = 12
PROXY_TIMED_OUT = 13
SERVER_NO_COMMAND = 21
SERVER_NO_ARGUMENTS = 22
SERVER_UNKNOWN_COMMAND = 23
SERVER_BAD_ARGUMENT_COUNT = 24
| [
"[email protected]"
] | |
46b142b96d6ec205f215bf65fe76cf618722fad6 | 7236d1d4873faa9735fd5e2d4598b211a370f731 | /project/n/projects/projects/ecommapp/users/migrations/0007_myuser_date_join.py | d2f2c4be22f4cc171f14f93f40710f105bb9009e | [] | no_license | Dreambigxz/my_first_django_app | 05f5a5d330d72084489f9306fca9ca232af13999 | 9e21ebcbe63c7394280558d2977ef8a796960e0d | refs/heads/main | 2023-01-03T18:45:20.712074 | 2020-10-23T09:05:47 | 2020-10-23T09:05:47 | 306,180,592 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | # Generated by Django 3.0.8 on 2020-09-03 16:55
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20200903_1752'),
]
operations = [
migrations.AddField(
model_name='myuser',
name='date_join',
field=models.DateField(default=django.utils.timezone.now),
),
]
| [
"[email protected]"
] | |
116d387dd717fabe096b4ea161ad403d2870e88a | 33976fddb32feae0b6b5d38b0a8994490fc4b1db | /src/ar6/constants/gases.py | 4df95e992cbd6ed95181fc2ed1bf4bafd19e54c8 | [
"MIT"
] | permissive | chrisroadmap/ar6 | e72e4bad8d1c1fa2751513dbecddb8508711859c | 2f948c862dbc158182ba47b863395ec1a4aa7998 | refs/heads/main | 2023-04-16T22:57:02.280787 | 2022-09-27T13:31:38 | 2022-09-27T13:31:38 | 305,981,969 | 27 | 20 | MIT | 2022-09-27T13:31:38 | 2020-10-21T10:02:03 | Jupyter Notebook | UTF-8 | Python | false | false | 6,315 | py | """
Gas properties
"""
# Number of bromine atoms
br_atoms = {
'CCl4': 0,
'CFC11': 0,
'CFC113': 0,
'CFC114': 0,
'CFC115': 0,
'CFC12': 0,
'CH2Cl2': 0,
'CH3Br': 1,
'CH3CCl3': 0,
'CH3Cl': 0,
'CHCl3': 0,
'HCFC141b': 0,
'HCFC142b': 0,
'HCFC22': 0,
'Halon1211': 1,
'Halon1301': 1,
'Halon2402': 2,
}
# Number of chlorine atoms
cl_atoms = {
'CCl4': 4,
'CFC11': 3,
'CFC113': 3,
'CFC114': 2,
'CFC115': 1,
'CFC12': 2,
'CH2Cl2': 2,
'CH3Br': 0,
'CH3CCl3': 3,
'CH3Cl': 1,
'CHCl3': 3,
'HCFC141b': 2,
'HCFC142b': 1,
'HCFC22': 1,
'Halon1211': 0,
'Halon1301': 0,
'Halon2402': 0,
}
# Fractional release (for ozone depletion)
# References:
# Daniel, J. and Velders, G.: A focus on information and options for
# policymakers, in: Scientific Assessment of Ozone Depletion, WMO, 2011
# Newman et al., 2007: A new formulation of equivalent effective stratospheric
# chlorine (EESC)
fracrel = {
'CCl4': 0.56,
'CFC11': 0.47,
'CFC113': 0.29,
'CFC114': 0.12,
'CFC115': 0.04,
'CFC12': 0.23,
'CH2Cl2': 0, # no literature value available
'CH3Br': 0.60,
'CH3CCl3': 0.67,
'CH3Cl': 0.44,
'CHCl3': 0, # no literature value available
'HCFC141b': 0.34,
'HCFC142b': 0.17,
'HCFC22': 0.13,
'Halon1211': 0.62,
'Halon1301': 0.28,
'Halon2402': 0.65,
}
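# NOTE: illustrative sketch only -- a simplified EESC-style weighting in the
# spirit of Newman et al. (2007), combining the dictionaries above.  The
# bromine efficiency factor (alpha ~ 60) and the flat treatment of mixing
# ratios are assumptions made purely for illustration.
def _equivalent_halogen_sketch(mixing_ratios, alpha=60.0):
    """Return the fractional-release-weighted Cl + alpha*Br equivalent."""
    total = 0.0
    for species, mixing_ratio in mixing_ratios.items():
        weight = cl_atoms[species] + alpha * br_atoms[species]
        total += fracrel[species] * weight * mixing_ratio
    return total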
# Conversion between GHG names in GHG spreadsheet and RCMIP.
ghg_to_rcmip_names={
'HFC-125': 'HFC125',
'HFC-134a': 'HFC134a',
'HFC-143a': 'HFC143a',
'HFC-152a': 'HFC152a',
'HFC-227ea': 'HFC227ea',
'HFC-23': 'HFC23',
'HFC-236fa': 'HFC236fa',
'HFC-245fa': 'HFC245fa',
'HFC-32': 'HFC32',
'HFC-365mfc': 'HFC365mfc',
'HFC-43-10mee': 'HFC4310mee',
'NF3': 'NF3',
'C2F6': 'C2F6',
'C3F8': 'C3F8',
'n-C4F10': 'C4F10',
'n-C5F12': 'C5F12',
'n-C6F14': 'C6F14',
'i-C6F14': None,
'C7F16': 'C7F16',
'C8F18': 'C8F18',
'CF4': 'CF4',
'c-C4F8': 'cC4F8',
'SF6': 'SF6',
'SO2F2': 'SO2F2',
'CCl4': 'CCl4',
'CFC-11': 'CFC11',
'CFC-112': 'CFC112',
'CFC-112a': None,
'CFC-113': 'CFC113',
'CFC-113a': None,
'CFC-114': 'CFC114',
'CFC-114a': None,
'CFC-115': 'CFC115',
'CFC-12': 'CFC12',
'CFC-13': None,
'CH2Cl2': 'CH2Cl2',
'CH3Br': 'CH3Br',
'CH3CCl3': 'CH3CCl3',
'CH3Cl': 'CH3Cl',
'CHCl3': 'CHCl3',
'HCFC-124': None,
'HCFC-133a': None,
'HCFC-141b': 'HCFC141b',
'HCFC-142b': 'HCFC142b',
'HCFC-22': 'HCFC22',
'HCFC-31': None,
'Halon-1211': 'Halon1211',
'Halon-1301': 'Halon1301',
'Halon-2402': 'Halon2402',
}
# Hodnebrog et al., 2020: https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
# unless stated
lifetimes = {
'CH4': 11.8, # chapter 6
'N2O': 109, # AR6 SOD
'HFC-125': 30,
'HFC-134a': 14,
'HFC-143a': 51,
'HFC-152a': 1.6,
'HFC-227ea': 36,
'HFC-23': 228,
'HFC-236fa': 213,
'HFC-245fa': 7.9,
'HFC-32': 5.4,
'HFC-365mfc': 8.9,
'HFC-43-10mee': 17,
'NF3': 569,
'C2F6': 10000,
'C3F8': 2600,
'n-C4F10': 2600,
'n-C5F12': 4100,
'n-C6F14': 3100,
'i-C6F14': 3100, # assumed
'C7F16': 3000,
'C8F18': 3000,
'CF4': 50000,
'c-C4F8': 3200,
'SF6': 3200,
'SO2F2': 36,
'CCl4': 32,
'CFC-11': 52,
'CFC-112': 63.6,
'CFC-112a': 52,
'CFC-113': 93,
'CFC-113a': 55,
'CFC-114': 189,
'CFC-114a': 105,
'CFC-115': 540,
'CFC-12': 102,
'CFC-13': 640,
'CH2Cl2': 0.4932,
'CH3Br': 0.8,
'CH3CCl3': 5,
'CH3Cl': 0.9,
'CHCl3': 0.5014,
'HCFC-124': 5.9,
'HCFC-133a': 4.6,
'HCFC-141b': 9.4,
'HCFC-142b': 18,
'HCFC-22': 11.9,
'HCFC-31': 1.2,
'Halon-1211': 16,
'Halon-1301': 72,
'Halon-2402': 28,
}
# Ozone depleting substances
ods_species = [
'CCl4',
'CFC11',
'CFC113',
'CFC114',
'CFC115',
'CFC12',
'CH2Cl2',
'CH3Br',
'CH3CCl3',
'CH3Cl',
'CHCl3',
'HCFC141b',
'HCFC142b',
'HCFC22',
'Halon1211',
'Halon1301',
'Halon2402',
]
# radiative efficiencies
# source: Hodnebrog et al 2020 https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019RG000691
radeff = {
'HFC-125': 0.23378,
'HFC-134a': 0.16714,
'HFC-143a': 0.168,
'HFC-152a': 0.10174,
'HFC-227ea': 0.27325,
'HFC-23': 0.19111,
'HFC-236fa': 0.25069,
'HFC-245fa': 0.24498,
'HFC-32': 0.11144,
'HFC-365mfc': 0.22813,
'HFC-43-10mee': 0.35731,
'NF3': 0.20448,
'C2F6': 0.26105,
'C3F8': 0.26999,
'n-C4F10': 0.36874,
'n-C5F12': 0.4076,
'n-C6F14': 0.44888,
'i-C6F14': 0.44888,
'C7F16': 0.50312,
'C8F18': 0.55787,
'CF4': 0.09859,
'c-C4F8': 0.31392,
'SF6': 0.56657,
'SO2F2': 0.21074,
'CCl4': 0.16616,
'CFC-11': 0.25941,
'CFC-112': 0.28192,
'CFC-112a': 0.24564,
'CFC-113': 0.30142,
'CFC-113a': 0.24094,
'CFC-114': 0.31433,
'CFC-114a': 0.29747,
'CFC-115': 0.24625,
'CFC-12': 0.31998,
'CFC-13': 0.27752,
'CH2Cl2': 0.02882,
'CH3Br': 0.00432,
'CH3CCl3': 0.06454,
'CH3Cl': 0.00466,
'CHCl3': 0.07357,
'HCFC-124': 0.20721,
'HCFC-133a': 0.14995,
'HCFC-141b': 0.16065,
'HCFC-142b': 0.19329,
'HCFC-22': 0.21385,
'HCFC-31': 0.068,
'Halon-1211': 0.30014,
'Halon-1301': 0.29943,
'Halon-2402': 0.31169,
}
rcmip_to_ghg_names = {v: k for k, v in ghg_to_rcmip_names.items()}
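# NOTE: illustrative sketch only -- converting RCMIP species names back to the
# GHG-spreadsheet naming used above; species without a mapping are skipped.
def _to_ghg_names(rcmip_species):
    """Return the spreadsheet names for the given RCMIP species names."""
    return [rcmip_to_ghg_names[name] for name in rcmip_species
            if name in rcmip_to_ghg_names]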
| [
"[email protected]"
] | |
2592cd0cd2bea747a58634eb2386b2e46bdca291 | a0015a3dc121c8fcdd5d2eadd522ece03b4ceec8 | /docs/cornell CS class/Lesson 29. Coroutines/demos/read2.py | fe3ecc58b16f7d80b45c890599a931e740dcc82a | [
"MIT"
] | permissive | LizzieDeng/kalman_fliter_analysis | fc40d475797dbddba5f9f2dfb224fbf68d77865f | 50e728f32c496c3fcbb8ca3ee00857b999b88d99 | refs/heads/main | 2023-03-03T02:46:19.020078 | 2021-02-05T07:53:10 | 2021-02-05T07:53:10 | 329,243,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,137 | py | """
A module to show off a long-running function as a coroutine.
This module shows another advantage of a coroutine. We can
interleave two functions as they load from a file. There are
reasons we might want to do this when working with large data,
but they are beyond the scope of this course.
Author: Walker M. White
Date: November 2, 2020
"""
def merge(dict1,dict2):
"""
Returns a new dictionary merging (joining keys) dict1
and dict2.
If a key appears in only one of dict1 or dict2, the
value is the value from that dictionary. If it is in
both, the value is the sum of values.
Example: merge({'a':1,'b':2},{'b':3,'c':4}) returns
{'a':1,'b':5,'c':4}
Parameter dict1: The first dictionary to merge
Precondition: dict1 a dictionary with int or float values
Parameter dict2: The second dictionary to merge
Precondition: dict2 a dictionary with int or float values
"""
result = dict(dict1) # Makes a (shallow) copy
for k in dict2:
if k in dict1:
            result[k] = result[k] + dict2[k]
        else:
            result[k] = dict2[k]
return result
def add_word(word,counts):
"""
Adds a word to a word-count dictionary.
The keys of the dictionaries are strings, and the values
are integers. If the word is already in the dictionary,
adding it will increase the value by 1. Otherwise it
will add the key and assign it a value for 1.
Example: If count = ['a':1,'b':1}, add_word('a',count)
alters count to be {'a':2,'b':1}
Parameter word: The word to add
Precondition: word is a string
Parameter counts: The word-count dictionary
Precondition: count is a dictionary with string keys
and integer values
"""
if word in counts:
counts[word] = counts[word]+1
else:
counts[word] = 1
def wordcount(fname):
"""
Returns a dictionary with the individual word count of
fname
    This function opens the specified text file and creates
a dictionary from it. The keys of the dictionaries are
words (i.e. adjacent letters with no spaces or
punctuation). For example, in the string 'Who are you?',
the words are 'Who', 'are', and 'you'. The values are
the number of times that word (paying attention to
capitalization) appears in the file.
This function is a generator-based coroutine that stops
at every 10% of the file to return its amount of progress
to the original caller (the function that calls next()).
Parameter fname: The file name
Precondition: fname is a string and the name of a text
file
"""
# Load the entire file into a single string
file = open(fname)
text = file.read()
file.close()
counts = {}
word = '' # Accumulator to build a word
for pos in range(len(text)):
# Yield every 10%
if pos % (len(text)//10) == 0:
# Indicate the amount of progress we made
yield round(100*pos/len(text))
# Build up the word, one letter at a time
x = text[pos]
if x.isalpha():
word = word+x
else: # Word ends
# Add it if not empty
if word != '':
add_word(word,counts)
word = '' # Reset the accumulator
# Add the last word
if word != '':
add_word(word,counts)
return counts
def loadfiles(fname1,fname2):
"""
Creates a word-count dictionary for fname1, fname2 and
prints the combined size
The size of the word-count dictionary is the number of
distinct words in the file.
This function is the parent of wordcount, pushing it
forward with the next() function until it is done
reading the file. This function creates two wordcount
coroutines and interleaves them.
Parameter fname1: The first file name
Precondition: fname1 is a string and the name of a text file
Parameter fname2: The second file name
Precondition: fname2 is a string and the name of a text file
"""
loader1 = wordcount(fname1)
loader2 = wordcount(fname2)
result = {}
# We keep going as long as either loader is working
while (not loader1 is None) or (not loader2 is None):
# Load the next batch from fname1
if not loader1 is None:
try:
amount = next(loader1)
print('Loaded '+str(amount)+'% of '+repr(fname1))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader1 = None # We are done
# Load the next batch from fname2
if not loader2 is None:
try:
amount = next(loader2)
print('Loaded '+str(amount)+'% of '+repr(fname2))
except StopIteration as e:
result = merge(result,e.args[0]) # Access the return value
loader2 = None # We are done
print('Read a total of '+str(len(result))+' words.')
if __name__ == '__main__':
loadfiles('warpeace10.txt','kingjames10.txt')
| [
"[email protected]"
] | |
2cb33275754ec783f5f546a411cf0fe226a579eb | f7982a468b6f76dc72c53e7c3644ae4e7e6f2f49 | /pyEX/refdata/ric.py | 6e4ab19987f1f4ec33a268a2b177446c705a78b6 | [
"Apache-2.0"
] | permissive | timkpaine/pyEX | 55002c3718214c6e207976ab3661a47108c6c114 | f678c791d05bc28911e25807241c392a9ee8134f | refs/heads/main | 2023-08-20T00:17:53.162803 | 2022-11-22T02:51:13 | 2022-11-22T02:51:13 | 109,551,372 | 350 | 95 | Apache-2.0 | 2023-09-11T12:26:54 | 2017-11-05T04:21:16 | Python | UTF-8 | Python | false | false | 1,129 | py | # *****************************************************************************
#
# Copyright (c) 2021, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
from functools import wraps
import pandas as pd
from ..common import _get
def ricLookup(ric, token="", version="stable", filter="", format="json"):
"""This call converts a RIC to an iex symbol
https://iexcloud.io/docs/api/#ric-mapping
8am, 9am, 12pm, 1pm UTC daily
Args:
ric (str): ric to lookup
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame or list: result
"""
return _get(
"ref-data/ric?ric={}".format(ric),
token=token,
version=version,
filter=filter,
format=format,
)
@wraps(ricLookup)
def ricLookupDF(*args, **kwargs):
return pd.DataFrame(ricLookup(*args, **kwargs))
| [
"[email protected]"
] | |
726083e657d4bfe7dfdd3ffc0d4860c40b2161b0 | 98d9305b1717642bcfb842eecd84d63b6eeaf759 | /Funtions/Passing_Information.py | 95f7621fc81d57dd2ebdb67a24a82da35ae5f6f4 | [] | no_license | er-aditi/Learning-Python | 5ceb020f4df8db9e34df78edfaecca3e1854c8a9 | 297eda435ee2e1cee643f94ea4c5de6a82e3c8a7 | refs/heads/master | 2020-03-24T17:22:22.129081 | 2019-06-19T05:47:26 | 2019-06-19T05:47:26 | 142,856,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | def greet_user(username):
print("Hello! " + username + ". ")
greet_user('jess')
greet_user('sarah')
| [
"[email protected]"
] | |
0f7d8ae5196b70d080e081d05be8478206494a1d | 82cd10c024f284555845f006e518924fed3254c7 | /Day-06[09-10-2021]/EmployeeProject/EmployeeProject/urls.py | 256d1ab7ebea77beebcb3a9ed2b40858b129c6a2 | [] | no_license | AP-Skill-Development-Corporation/APSSDC-Workshop2021 | 61acba18eb55ec2e4bb96ded95d339c73c8ea1ac | fe1f5517f99b17bd0ebcf07c70ee26bd23f262ea | refs/heads/main | 2023-08-12T16:29:53.208949 | 2021-10-16T15:47:22 | 2021-10-16T15:47:22 | 413,299,596 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,549 | py | """EmployeeProject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Employee import views
urlpatterns = [
path('admin/', admin.site.urls),
path('demo/',views.sample),
path('de/<str:name>/',views.dsname),
path('fe/<str:name>/<int:age>/',views.fname),
path('g/<str:fname>/<str:lname>/<int:sal>/',views.emp),
path('gt/<str:empname>/<int:sal>/',views.empdetials),
path('fy/<str:sname>/',views.dname),
path('sty/<str:stname>/<int:year>/<str:branch>/',views.stname),
path('reg/',views.regis),
path('se/',views.index,name="ind"),
path('about/',views.about,name="ab"),
path('contact/',views.contact,name="cnt"),
path('sam/',views.sample),
path('re/',views.register,name="rg"),
path('',views.operations,name="op"),
path('emv/<int:t>/',views.eview,name="ev"),
path('eup/<int:p>/',views.emup,name="ep"),
path('ed/<int:f>/',views.emdl,name="edl"),
]
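# NOTE: illustrative sketch only -- the views referenced above live in
# Employee/views.py (not shown here); each converter capture is passed to the
# view as a keyword argument.  Hypothetical signatures for two of the routes:
#
#     def emp(request, fname, lname, sal):   # g/<str:fname>/<str:lname>/<int:sal>/
#         ...
#
#     def eview(request, t):                 # emv/<int:t>/
#         ...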
| [
"[email protected]"
] | |
b53fb27016d732c08a7869d38d13162383b30b32 | 1e09bc56bf2904b349df1873e11da3d527437880 | /lecture-27/AdjListGraph.py | 8a03efffe1f2ce76c121133adbb645df489cf2d6 | [] | no_license | codeforcauseorg-archive/DSA-Live-Python-Jun-0621 | f3444f5671cb4985644c7432517477c3585c70fb | e4fe544178d7851c24755242390f39675b99fabe | refs/heads/main | 2023-08-09T08:31:41.449120 | 2021-09-14T16:44:39 | 2021-09-14T16:44:39 | 384,725,085 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,095 | py | class Vertex:
def __init__(self, value):
self.value = value
self.neighbours = {}
class AdjListGraph:
def __init__(self):
self.vertices = dict()
def add_vertex(self, value):
if value not in self.vertices:
self.vertices[value] = Vertex(value)
def add_edge(self, first, second, weight):
if (first in self.vertices) and (second in self.vertices):
vfirst = self.vertices[first]
vsecond = self.vertices[second]
vfirst.neighbours[vsecond] = weight
vsecond.neighbours[vfirst] = weight
    def min_spanning_tree(self):
        # Kruskal's algorithm: take edges in increasing weight order and keep
        # an edge only if its endpoints are still in different components.
        edges = []
        for vertex in self.vertices.values():
            for neighbour, weight in vertex.neighbours.items():
                edges.append([weight, vertex.value, neighbour.value])
        sorted_edges = sorted(edges)
        # Each vertex starts as the root of its own union-find component.
        parents = {value: None for value in self.vertices}
        acc = 0
        for [weight, source, dest] in sorted_edges:
            if self.union(parents, source, dest):
                acc += weight
        return acc
    def union(self, parents, first, second):
        # Merge the components containing first and second; return True when a
        # merge happened and False when the edge would close a cycle.
        first = self.find(parents, first)
        second = self.find(parents, second)
        if first == second:
            return False
        parents[first] = second
        return True
def find(self, parents, item):
while parents[item] != None:
item = parents[item]
return item
def represent(self):
for vertex in self.vertices.values():
print(vertex.value, end="-> ")
for neighbour in vertex.neighbours:
print("[{} : {}]".format(neighbour.value, vertex.neighbours[neighbour]), end=", ")
print()
graph = AdjListGraph()
graph.add_vertex("A")
graph.add_vertex("B")
graph.add_vertex("C")
graph.add_vertex("D")
graph.add_edge("A", "B", 10)
graph.add_edge("B", "C", 15)
graph.add_edge("D", "C", 10)
graph.add_edge("A", "D", 20)
graph.min_spanning_tree()
| [
"[email protected]"
] | |
cd3d97d846876037d74f4ccc46eefb915c555830 | 823b69dffc4a6e28b9e4c27ec176f8ce54d2e586 | /args/arg_parser.py | c2cea4c5d26614670271806fddc28b28fb6b4b19 | [] | no_license | potlee/pbt | 1f5af632aa100561da1c284b522a6ca181ea21c1 | 05160eca9f3a557a25d043502f90aca1a7b76b46 | refs/heads/master | 2020-03-25T23:48:47.867151 | 2018-06-23T19:40:16 | 2018-06-23T19:40:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,599 | py | import argparse
import util
class ArgParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser(description='Population-Based Training')
self.parser.add_argument('--gpu_ids', type=str, default='0',
help='Comma-separated list of GPUs to use.')
self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size.')
self.parser.add_argument('--num_workers', type=int, default=4, help='Number of workers per data loader.')
self.parser.add_argument('--num_epochs', type=int, default=30,
help='Number of epochs to train for. If 0, train forever.')
self.parser.add_argument('--population_size', type=int, default=3,
help='Number of models in a population.')
self.parser.add_argument('--dataset', type=str, default='CIFAR10', choices=('CIFAR10',),
help='Dataset to train on.')
self.parser.add_argument('--ckpt_dir', type=str, default='ckpts/',
help='Directory to save checkpoints and population info.')
self.parser.add_argument('--name', type=str, required=True, help='Experiment name.')
self.parser.add_argument('--model', type=str, default='resnet50', help='Model name.')
self.parser.add_argument('--metric_name', type=str, default='val_loss',
help='Metric to optimize during PBT. Make sure to also set --maximize_metric')
self.parser.add_argument('--maximize_metric', type=util.str_to_bool, default=False,
help='If true, maximize the metric. Else minimize.')
self.parser.add_argument('--max_eval', type=int, default=1000,
help='Max number of examples to evaluate from the training set.')
self.parser.add_argument('--max_ckpts', type=int, default=3,
help='Max number of recent checkpoints to keep per model.')
self.parser.add_argument('--save_dir', type=str, default='logs', help='Directory for saving logs.')
self.parser.add_argument('--learning_rate', type=float, default=1e-1, help='Initial learning rate.')
self.parser.add_argument('--optimizer', type=str, default='sgd', choices=('sgd', 'adam'), help='Optimizer.')
self.parser.add_argument('--sgd_momentum', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--sgd_dampening', type=float, default=0.9, help='SGD momentum (SGD only).')
self.parser.add_argument('--adam_beta_1', type=float, default=0.9, help='Adam beta 1 (Adam only).')
self.parser.add_argument('--adam_beta_2', type=float, default=0.999, help='Adam beta 2 (Adam only).')
self.parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (i.e., L2 regularization factor).')
self.parser.add_argument('--iters_per_print', type=int, default=4,
help='Number of iterations between printing loss to the console and TensorBoard.')
self.parser.add_argument('--search_space', type=str, default='lr,momentum,weight_decay')
def parse_args(self):
args = self.parser.parse_args()
args.gpu_ids = [int(i) for i in str(args.gpu_ids).split(',') if int(i) >= 0]
args.device = 'cpu' if len(args.gpu_ids) == 0 else 'cuda:{}'.format(args.gpu_ids[0])
args.search_space = str(args.search_space).split(',')
return args
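# NOTE: illustrative sketch only -- typical usage from a training entry point.
# The command line shown is hypothetical:
#     python train.py --name pbt_run --gpu_ids 0,1 --population_size 10
def _example_parse():
    """Parse CLI arguments and return the post-processed namespace."""
    args = ArgParser().parse_args()
    # e.g. args.gpu_ids == [0, 1], args.device == 'cuda:0',
    # args.search_space == ['lr', 'momentum', 'weight_decay']
    return args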
| [
"[email protected]"
] | |
1a5cc4dd4b02297aa61785f8fe17d28cdf7bae2c | 99e494d9ca83ebafdbe6fbebc554ab229edcbacc | /.history/Day 1/Test/Answers/NegativeMarking_20210304211811.py | d220b7261e6beb16198606a036f3688522eaee56 | [] | no_license | Datta2901/CCC | c0364caa1e4937bc7bce68e4847c8d599aef0f59 | 4debb2c1c70df693d0e5f68b5798bd9c7a7ef3dc | refs/heads/master | 2023-04-19T10:05:12.372578 | 2021-04-23T12:50:08 | 2021-04-23T12:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | t = int(input())
for i in range(t):
questions,requiredscore = map(int,input().split())
if questions * 4 < requiredscore:
print(-1)
continue
attempt = (requiredscore/questions) + 3
accuracy = attempt / 7
    print(format(accuracy*100, '.2f'))
#
# Here accuracy can be found by solving two linear equations:
# Total Score (Required Score) = 4 * x - 3 * y
# Total Questions = x + y
# Here x is the total number of correct answers
# y is the total number of wrong answers | [
"[email protected]"
] | |
39764d8d79f6697d5e9b2cffeb3f3e9487f9ea0a | 2eee2da886d2574f030b22771e707e32f56cbaed | /chaospy/distributions/collection/chi.py | cb04231c2d16b7f21de4aa90574562d6e927b4fc | [
"MIT"
] | permissive | lblonk/chaospy | 1759f050229d1365802320d9b13f6195ec55a72c | 1759a4307c6134b74ce63ff44973195f1e185f94 | refs/heads/master | 2022-11-12T19:50:15.108219 | 2020-07-03T11:13:42 | 2020-07-03T11:13:42 | 276,879,282 | 0 | 0 | MIT | 2020-07-03T11:03:14 | 2020-07-03T11:03:13 | null | UTF-8 | Python | false | false | 3,779 | py | """Chi distribution."""
import numpy
from scipy import special
from ..baseclass import Dist
from ..operators.addition import Add
class chi(Dist):
"""Chi distribution."""
def __init__(self, df=1):
Dist.__init__(self, df=df)
def _pdf(self, x, df):
return x**(df-1.)*numpy.exp(-x*x*0.5)/(2.0)**(df*0.5-1)\
/special.gamma(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return numpy.sqrt(2*special.gammaincinv(df*0.5, q))
def _lower(self, df):
return 0.
def _mom(self, k, df):
return 2**(.5*k)*special.gamma(.5*(df+k))\
/special.gamma(.5*df)
class Chi(Add):
"""
Chi distribution.
Args:
df (float, Dist):
Degrees of freedom
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Chi(2, 4, 1)
>>> distribution
Chi(df=2, scale=4, shift=1)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 1. , 4.0341, 5.7096, 7.6604, 28.1446])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.1422, 0.1472, 0.1041, 0. ])
>>> distribution.sample(4).round(4)
array([ 6.8244, 2.9773, 10.8003, 5.5892])
>>> distribution.mom(1).round(4)
6.0133
"""
def __init__(self, df=1, scale=1, shift=0):
self._repr = {"df": df, "scale": scale, "shift": shift}
Add.__init__(self, left=chi(df)*scale, right=shift)
class Maxwell(Add):
"""
Maxwell-Boltzmann distribution
Chi distribution with 3 degrees of freedom
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Maxwell(2, 3)
>>> distribution
Maxwell(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 5.2023, 6.0763, 7.0538, 17.0772])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2638, 0.2892, 0.2101, 0. ])
>>> distribution.sample(4).round(4)
array([6.6381, 4.6119, 8.5955, 6.015 ])
>>> distribution.mom(1).round(4)
6.1915
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(3)*scale, right=shift)
class Rayleigh(Add):
"""
Rayleigh distribution
Args:
scale (float, Dist):
Scaling parameter
shift (float, Dist):
Location parameter
Examples:
>>> distribution = chaospy.Rayleigh(2, 3)
>>> distribution
Rayleigh(scale=2, shift=3)
>>> q = numpy.linspace(0, 1, 5)
>>> distribution.inv(q).round(4)
array([ 3. , 4.5171, 5.3548, 6.3302, 16.5723])
>>> distribution.fwd(distribution.inv(q)).round(4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
>>> distribution.pdf(distribution.inv(q)).round(4)
array([0. , 0.2844, 0.2944, 0.2081, 0. ])
>>> distribution.sample(4).round(4)
array([5.9122, 3.9886, 7.9001, 5.2946])
>>> distribution.mom(1).round(4)
5.5066
"""
def __init__(self, scale=1, shift=0):
self._repr = {"scale": scale, "shift": shift}
Add.__init__(self, left=chi(2)*scale, right=shift)
| [
"[email protected]"
] | |
c0593805d9fcc7d217660376fbb2688f706642e2 | 0798277f2706998ab80442ac931579eb47f676e5 | /boundary/property_handler.py | 45635669e8b5a3731f321b2d7a0d6eb87f6a6557 | [
"Apache-2.0"
] | permissive | isabella232/pulse-api-cli | 49ed38b0694ab289802f69ee6df4911cf3378e3f | b01ca65b442eed19faac309c9d62bbc3cb2c098f | refs/heads/master | 2023-03-18T00:23:15.295727 | 2016-05-13T15:44:08 | 2016-05-13T15:44:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | #
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class PropertyHandler(object):
def __init__(self):
self._properties = None
def _process_properties(self, properties):
"""
Transforms the command line properties into python dictionary
:return:
"""
if properties is not None:
self._properties = {}
for p in properties:
d = p.split('=')
self._properties[d[0]] = d[1]
def _add_property_argument(self, parser, help_text):
parser.add_argument('-p', '--property', dest='properties', action='append',
required=False, metavar='property=value', help=help_text)
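# NOTE: illustrative sketch only -- wiring the two helpers to an argparse
# parser; the property names below are hypothetical.
def _example_usage():
    import argparse
    parser = argparse.ArgumentParser()
    handler = PropertyHandler()
    handler._add_property_argument(parser, 'metric property as property=value')
    args = parser.parse_args(['-p', 'hostname=web01', '-p', 'region=us-east'])
    handler._process_properties(args.properties)
    # handler._properties is now {'hostname': 'web01', 'region': 'us-east'}
    return handler._properties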
| [
"[email protected]"
] | |
87339e4385a890dc9a46c6e5efc4674cb85aefa2 | 4073f351551c2f73c5659cb3038a68360cc5b369 | /Arbetsbok/kap 14/övn 14.1, sid. 36 - söka tal.py | 9b318176e080635b41a000e7aeb4734430c42602 | [
"MIT"
] | permissive | Pharou/programmering1python | b9a5aca72354d3e7e91a5023a621d22a962ecd7c | 9b689027db1f7fbf06925f3094fcb126880453e4 | refs/heads/master | 2022-11-28T06:33:17.295157 | 2020-07-25T11:02:07 | 2020-07-25T11:02:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,437 | py | #!/usr/bin/python3.8
# Filename: övn 14.1, sid. 36 - söka tal.py
# Searching
# Programming exercises for chapter 14
# The program first generates 20 random numbers between 1 and 100, stores all
# of them in a list and then prints the list on the screen. The program then
# asks the user for a number to search for. Finally the program checks whether
# the number is in the list and, if so, prints the index where it was found.
# If the number is not in the list, the user is informed that it is missing.
# Search method: linear search
# Module import
from random import randint
# Function definitions
# Main program
def main():
    lista = []
    # Generate 20 random integers between 1 and 100 and append them to the list
    for c in range(20):
        lista.append(randint(1, 100))
    # Print the list
    print(lista)
    # Ask the user for the number to search for
    tal = int(input('Enter the number to search for: '))
    # Perform a linear search through the whole list
    # Assume the number is not in the list
    index = -1
    for i in range(len(lista)):
        if tal == lista[i]:
            # If the number is found, store its index and break out of the loop
            index = i
            break
    if index >= 0:
        print('The number ' + str(tal) + ' is at index ' + str(index) + ' in the list.')
    else:
        print('The number ' + str(tal) + ' is not in the list.')
## The main program is called
main() | [
"[email protected]"
] | |
4ad42cd6418d521ed2f275b7b73aaa4e7036fbea | 964b063c2461aad267ddd991fefaf7ab53b1ca94 | /6-kyu/iq-test/python/solution.py | 2221bc57375308dc79e1d3f085e299509f860057 | [] | no_license | lucasbflopes/codewars-solutions | 26c4e2cd1be19db50cc8c1d9fc117c51c82a2259 | 72ef2c02dde7bd0d5a691e04e3b2a383e892f84b | refs/heads/master | 2022-03-14T01:26:41.816498 | 2019-11-23T17:17:19 | 2019-11-23T17:17:19 | 114,834,447 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | def iq_test(numbers):
arr = [i % 2 == 0 for i in [int(j) for j in numbers.split()]]
if arr.count(True) > 1:
return arr.index(False)+1
else:
return arr.index(True)+1 | [
"[email protected]"
] | |
5be34879011c0f4d0308e93c05824f2a437ec963 | 44b87d9faad99d542914c35410ba7d354d5ba9cd | /1/collection/list/divisible by 8 using compre.py | 857a0b6ada0c2d9dc98bd9180ec1370a09173462 | [] | no_license | append-knowledge/pythondjango | 586292d1c7d0ddace3630f0d77ca53f442667e54 | 0e5dab580e8cc48e9940fb93a71bcd36e8e6a84e | refs/heads/master | 2023-06-24T07:24:53.374998 | 2021-07-13T05:55:25 | 2021-07-13T05:55:25 | 385,247,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | num=[i for i in range(1,1000) if i%8==0]
print(num)
print("length of num is",len(num))
# odd=[i for i in range(1000) if i%2!=0]
# print(odd) | [
"[email protected]"
] | |
00f065d20644809c36a60a0fbfe0ad0aa3cd6ef9 | 4a0f2cc27cd39b8b8901ade728f3b1dc20c2a2ee | /controller/qt_classes/UbNewDocumentViewDelegate.py | 096e2b7becda90dbfcb58540466702c64771dd6f | [] | no_license | teamlm2/lm2_mgis | 2c016423983a31fcdf15e34508401acf48177f35 | 9144b1234b25665737986995bd1da7492871151c | refs/heads/master | 2021-11-11T23:43:12.647749 | 2021-10-26T07:55:58 | 2021-10-26T07:55:58 | 155,568,182 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,303 | py | # coding=utf8
__author__ = 'B.Ankhbold'
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sqlalchemy.exc import SQLAlchemyError
from ...model import SettingsConstants
from ...model.SetOfficialDocument import SetOfficialDocument
from ...utils.FileUtils import FileUtils
from ...utils.PluginUtils import PluginUtils
from ...utils.SessionHandler import SessionHandler
from ...utils.DatabaseUtils import *
from ...utils.FilePath import *
from ftplib import *
import shutil
import codecs
NAME_COLUMN = 0
DESCRIPTION_COLUMN = 1
VIEW_COLUMN = 2
FILE_PDF = 'pdf'
FILE_IMAGE = 'png'
class UbNewDocumentViewDelegate(QStyledItemDelegate):
def __init__(self, widget, parent):
super(UbNewDocumentViewDelegate, self).__init__(parent)
self.widget = widget
self.parent = parent
self.session = SessionHandler().session_instance()
self.button = QPushButton("", parent)
self.button.hide()
self.viewIcon = QIcon(":/plugins/lm2/file.png")
def paint(self, painter, option, index):
if index.column() == VIEW_COLUMN:
self.button.setIcon(self.viewIcon)
else:
super(UbNewDocumentViewDelegate, self).paint(painter, option, index)
return
self.button.setGeometry(option.rect)
button_picture = QPixmap.grabWidget(self.button)
painter.drawPixmap(option.rect.x(), option.rect.y(), button_picture)
def editorEvent(self, event, model, option, index):
if index is not None:
if index.isValid() and event.type() == QEvent.MouseButtonRelease:
if event.button() == Qt.RightButton:
return False
if index.column() == VIEW_COLUMN:
ftp = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole)
file_name = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 1)
file_type = self.widget.item(index.row(), NAME_COLUMN).data(Qt.UserRole + 2)
# print file_name
# print file_type
# print ftp.pwd()
# print ftp.nlst()
view_pdf = open(FilePath.view_file_path(), 'wb')
view_png = open(FilePath.view_file_png_path(), 'wb')
if file_type == FILE_IMAGE:
ftp.retrbinary('RETR ' + file_name, view_png.write)
else:
ftp.retrbinary('RETR ' + file_name, view_pdf.write)
try:
if file_type == FILE_IMAGE:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_png_path()))
else:
QDesktopServices.openUrl(QUrl.fromLocalFile(FilePath.view_file_path()))
except SQLAlchemyError, e:
PluginUtils.show_error(self.parent, self.tr("File Error"), self.tr("Could not execute: {0}").format(e.message))
return True
elif index.column() == DESCRIPTION_COLUMN or index.column() == NAME_COLUMN:
return True
else:
index.model().setData(index, 0, Qt.EditRole)
return False
| [
"[email protected]"
] | |
116f6963b88edfdb0db9fda927ba4e4947b376fa | 5ec7d0bad8a77c79843a2813f5effcb3a2b7e288 | /lean/models/brokerages/cloud/tradier.py | fd5e10b9f48bced5ac4faae3e74d4fac7886ec50 | [
"Apache-2.0"
] | permissive | xdpknx/lean-cli | aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0 | c1051bd3e8851ae96f6e84f608a7116b1689c9e9 | refs/heads/master | 2023-08-08T02:30:09.827647 | 2021-09-21T21:36:24 | 2021-09-21T21:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,163 | py | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import click
from lean.components.util.logger import Logger
from lean.models.brokerages.cloud.base import CloudBrokerage
class TradierBrokerage(CloudBrokerage):
"""A CloudBrokerage implementation for Tradier."""
def __init__(self, account_id: str, access_token: str, environment: str) -> None:
self._account_id = account_id
self._access_token = access_token
self._environment = environment
@classmethod
def get_id(cls) -> str:
return "TradierBrokerage"
@classmethod
def get_name(cls) -> str:
return "Tradier"
@classmethod
def build(cls, logger: Logger) -> CloudBrokerage:
logger.info("""
Your Tradier account id and API token can be found on your Settings/API Access page (https://dash.tradier.com/settings/api).
The account id is the alpha-numeric code in a dropdown box on that page.
Your account details are not saved on QuantConnect.
""".strip())
account_id = click.prompt("Account id")
access_token = logger.prompt_password("Access token")
environment = click.prompt("Environment", type=click.Choice(["demo", "real"], case_sensitive=False))
return TradierBrokerage(account_id, access_token, environment)
def _get_settings(self) -> Dict[str, str]:
return {
"account": self._account_id,
"token": self._access_token,
"environment": "live" if self._environment == "real" else "paper"
}
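# NOTE: illustrative sketch only -- the settings payload produced for a demo
# (paper-trading) account; the identifiers below are made up.
def _example_settings():
    brokerage = TradierBrokerage("VA000000", "my-access-token", "demo")
    # Returns {"account": "VA000000", "token": "my-access-token",
    #          "environment": "paper"}
    return brokerage._get_settings()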
| [
"[email protected]"
] | |
19f3c8b7d94aae6549e86646e36334cb826a906e | 6e820756b82ffbe9837348937e53f1a0ce0e6cca | /Lib/site-packages/pandas_datareader/io/jsdmx.py | d602ca88beb058636aceaac714662ee2f457a6c4 | [] | no_license | AndreasPatsimas/pms_papei | c2afd941de6ae234dd37784d746e794183ebb8d3 | da10220ea468304c1066bed55b8f92ba9e5ada8a | refs/heads/master | 2023-02-01T23:33:39.221747 | 2020-12-19T12:17:59 | 2020-12-19T12:17:59 | 321,115,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,167 | py | # pylint: disable-msg=E1101,W0613,W0603
from __future__ import unicode_literals
from collections import OrderedDict
import itertools
import re
import sys
import numpy as np
import pandas as pd
from pandas_datareader.io.util import _read_content
def read_jsdmx(path_or_buf):
"""
    Convert an SDMX-JSON string to a pandas object
Parameters
----------
path_or_buf : a valid SDMX-JSON string or file-like
https://github.com/sdmx-twg/sdmx-json
Returns
-------
results : Series, DataFrame, or dictionary of Series or DataFrame.
"""
jdata = _read_content(path_or_buf)
try:
import simplejson as json
except ImportError:
if sys.version_info[:2] < (2, 7):
raise ImportError("simplejson is required in python 2.6")
import json
if isinstance(jdata, dict):
data = jdata
else:
data = json.loads(jdata, object_pairs_hook=OrderedDict)
structure = data["structure"]
index = _parse_dimensions(structure["dimensions"]["observation"])
columns = _parse_dimensions(structure["dimensions"]["series"])
dataset = data["dataSets"]
if len(dataset) != 1:
raise ValueError("length of 'dataSets' must be 1")
dataset = dataset[0]
values = _parse_values(dataset, index=index, columns=columns)
df = pd.DataFrame(values, columns=columns, index=index)
return df
def _get_indexer(index):
if index.nlevels == 1:
return [str(i) for i in range(len(index))]
else:
it = itertools.product(*[range(len(level)) for level in index.levels])
return [":".join(map(str, i)) for i in it]
def _fix_quarter_values(value):
"""Make raw quarter values Pandas-friendly (e.g. 'Q4-2018' -> '2018Q4')."""
m = re.match(r"Q([1-4])-(\d\d\d\d)", value)
if not m:
return value
quarter, year = m.groups()
value = "%sQ%s" % (quarter, year)
return value
def _parse_values(dataset, index, columns):
size = len(index)
series = dataset["series"]
values = []
# for s_key, s_value in iteritems(series):
for s_key in _get_indexer(columns):
try:
observations = series[s_key]["observations"]
observed = []
for o_key in _get_indexer(index):
try:
observed.append(observations[o_key][0])
except KeyError:
observed.append(np.nan)
except KeyError:
observed = [np.nan] * size
values.append(observed)
return np.transpose(np.array(values))
def _parse_dimensions(dimensions):
arrays = []
names = []
for key in dimensions:
values = [v["name"] for v in key["values"]]
role = key.get("role", None)
if role in ("time", "TIME_PERIOD"):
values = [_fix_quarter_values(v) for v in values]
values = pd.DatetimeIndex(values)
arrays.append(values)
names.append(key["name"])
midx = pd.MultiIndex.from_product(arrays, names=names)
if len(arrays) == 1 and isinstance(midx, pd.MultiIndex):
# Fix for panda >= 0.21
midx = midx.levels[0]
return midx
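# NOTE: illustrative sketch only -- parsing a saved SDMX-JSON response; the
# file name is a placeholder.  read_jsdmx accepts a file-like object or a raw
# SDMX-JSON string.
def _example(path="oecd_qna_response.json"):
    """Return the DataFrame parsed from a stored SDMX-JSON payload."""
    with open(path) as fh:
        return read_jsdmx(fh)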
| [
"[email protected]"
] | |
42242438bea8875d7471ea2ddf09291f67a15799 | 30a34b3503decf1b4516039df3106cd152631819 | /4AL17IS050_T_K_HARSHITH_PRASAD/19_05_2020/2.py | 90236ef15cb59e0d27deb74598351d1745cafda7 | [] | no_license | alvas-education-foundation/ISE_3rd_Year_Coding_challenge | 8ddb6c325bf6ab63e2f73d16573fa0b6e2484136 | b4074cab4a47aad07ed0fa426eacccbfafdef7f8 | refs/heads/master | 2022-11-23T20:52:19.204693 | 2020-07-23T11:28:15 | 2020-07-23T11:28:15 | 265,195,514 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | # This program adds two numbers
num1 = 1.5
num2 = 6.3
# Add two numbers
sum = float(num1) + float(num2)
# Display the sum
print('The sum of {0} and {1} is {2}'.format(num1, num2, sum)) | [
"[email protected]"
] | |
190c0b7174e3ee074dcee7447dd6149444d96d20 | 9030481ef925278a174cbbf58c74bc5058e8d302 | /contrib/testgen/base58.py | 0b6e6e1ae339c3c25f894b09b621c4777509d655 | [
"MIT"
] | permissive | hideoussquid/aureus-13-gui | 1b8f85f262cbc1970c3d8072b064956073bc4182 | 8865c958ba1680d4615128dabcc3cc4d47a24c51 | refs/heads/master | 2021-01-19T08:22:45.795165 | 2017-04-26T07:34:19 | 2017-04-26T07:34:19 | 87,622,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | # Copyright (c) 2012 The Aureus Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Aureus base58 encoding and decoding.
Based on https://aureustalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Aureus does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
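# Example (mirrors the self-test at the bottom of this file):
#   b58encode('o hai'.encode('ascii')) -> 'DYB3oMS'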
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
    h3 = checksum(result[:-4])
    if result[-4:] == h3:
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/aureus/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| [
"[email protected]"
] | |
58bb40f95b996bb5aaf4c9706c5271c0c5978cc2 | 25d8bac5635ac1cc3577a3593a4512e042ea7ecd | /scripts/asyncore-example-2.py | 27a4738c22e98525faf3534d4f880e283ad582e0 | [] | no_license | mtslong/demo | 2333fa571d6d9def7bdffc90f7bcb623b15e6e4b | a78b74e0eea7f84df489f5c70969b9b4797a4873 | refs/heads/master | 2020-05-18T18:28:48.237100 | 2013-11-11T16:10:11 | 2013-11-11T16:10:11 | 4,136,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 885 | py | import asyncore
import socket, time
# reference time
TIME1970 = 2208988800L
class TimeChannel(asyncore.dispatcher):
def handle_write(self):
t = int(time.time()) + TIME1970
t = chr(t>>24&255) + chr(t>>16&255) + chr(t>>8&255) + chr(t&255)
self.send(t)
self.close()
class TimeServer(asyncore.dispatcher):
def __init__(self, port=37):
self.port = port
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.bind(("", port))
self.listen(5)
print "listening on port", self.port
def handle_accept(self):
channel, addr = self.accept()
TimeChannel(channel)
server = TimeServer(8037)
asyncore.loop()
## log: adding channel <TimeServer at 8cb940>
## listening on port 8037
## log: adding channel <TimeChannel at 8b2fd0>
## log: closing channel 52:<TimeChannel connected at 8b2fd0>
| [
"[email protected]"
] | |
deece369baf689aed3e350790563652c99e1df4c | ca0d710ed0469beb7f87ae53f5efdef7bac19a27 | /MainView/migrations/0001_initial.py | c421c7915ab1a3ced242749c9b05288a7231a3c2 | [
"MIT"
] | permissive | CiganOliviu/wedding_invitation | 5d441d786f742d6a4baf5ff418370c0cfbb1b81e | 8b243b287b6577b4f5f899e33ade1fec651152f0 | refs/heads/main | 2023-03-03T08:12:36.345173 | 2021-02-08T15:37:04 | 2021-02-08T15:37:04 | 333,568,503 | 0 | 0 | MIT | 2021-02-08T15:37:05 | 2021-01-27T21:43:34 | null | UTF-8 | Python | false | false | 646 | py | # Generated by Django 3.0.8 on 2020-08-10 08:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ConfirmAnswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('submitted', models.BooleanField(default=True)),
('answer_sent', models.DateTimeField(auto_now_add=True)),
],
),
]
| [
"[email protected]"
] | |
86a20d0a802a3b77e91c16b62fb4c5702450b991 | dc69872f21492d34d7da6eee9f0d03f7c09a8a8d | /libraries/edge/opensearch/granuleisoresponse.py | fd3ed16eb03bd91778c8ff34354a963de13a58c8 | [
"Apache-2.0"
] | permissive | isabella232/incubator-sdap-edge | 125e9ba8cb1738d8407222f9d21f5452fc5fa840 | c725dad1098096048faed9a42a56f3cfc5c25bc5 | refs/heads/master | 2022-03-19T18:49:03.752184 | 2019-12-02T23:40:12 | 2019-12-02T23:40:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,127 | py | import datetime
from edge.opensearch.isoresponsebysolr import IsoResponseBySolr
class GranuleIsoResponse(IsoResponseBySolr):
def __init__(self, linkToGranule):
super(GranuleIsoResponse, self).__init__()
self.linkToGranule = linkToGranule.split(',')
def _populateChannel(self, solrResponse):
pass
def _populateItem(self, solrResponse, doc, item):
link = self._getLinkToGranule(doc)
if link is not None:
doc['link'] = link
def _getLinkToGranule(self, doc):
link = None
if 'GranuleReference-Type' in doc and len(self.linkToGranule) > 0:
granuleRefDict = dict(list(zip(doc['GranuleReference-Type'], list(zip(doc['GranuleReference-Path'], doc['GranuleReference-Status'])))))
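            # granuleRefDict maps each reference type to a (path, status) pair,
            # built from the parallel GranuleReference-* lists in the Solr doc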
for type in self.linkToGranule:
# check if reference type exists
if type in granuleRefDict:
# check if reference is online
if granuleRefDict[type][1] == 'ONLINE':
link = granuleRefDict[type][0]
break
return link
| [
"[email protected]"
] | |
22cce56ad1cf624ac9db09d203ea57c2bd8a72fe | e34d4bf879910b8f41068c1efb90915897e53d53 | /sprint/SquaresOfSortedArray.py | a58ff6bd16baa33b009ff18fbabf44af40766e9e | [] | no_license | ZhouningMan/LeetCodePython | 6cfc30f0b76f6162502410fef5639fde4801bd74 | cad9585c440efb329c9321648f94c58ded198438 | refs/heads/master | 2020-12-10T03:53:48.824344 | 2020-01-13T02:29:02 | 2020-01-13T02:29:02 | 233,494,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | class Solution:
def sortedSquares(self, A):
size = len(A)
squares = [0] * size
for i in range(size):
squares[i] = A[i] * A[i]
copy = [0] * size
begin = 0
end = size - 1
i = size - 1
while begin <= end:
if squares[begin] > squares[end]:
copy[i] = squares[begin]
begin += 1
else:
copy[i] = squares[end]
end -= 1
i -= 1
return copy
if __name__ == '__main__':
s = Solution()
ans = s.sortedSquares([-3,-3,-2,1])
print(ans) | [
"[email protected]"
] | |
81286eab7404c79ae264329c873fd324031b3ce5 | b7054c7dc39eeb79aa4aecb77a8de222400b19a7 | /object.py | deee2a4715df5ac355f73bac61921bfff028351c | [] | no_license | csuxh/python_fullstack | 89027133c7f9585931455a6a85a24faf41792379 | f78571976b3bef104309e95304892fdb89739d9e | refs/heads/master | 2023-05-11T09:36:40.482788 | 2019-06-12T14:21:26 | 2019-06-12T14:21:26 | 145,090,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#!@Author : jack.xia
#!@Time : 2018/5/29 21:56
#!@File : object.py
class Stuf(object):
count = 0
    __slots__ = ('__name', '__id', '__position')  # private names are mangled to match the __init__ assignments
def __init__(self, name, id, position):
self.__name = name
self.__id = id
        self.__position = position
        Stuf.count += 1   # each new instance bumps the class-level counter
def print_obj(self):
print('name: %s ;id: %d ;position %s ' %(self.__name, self.__id, self.__position))
class Account(Stuf):
pass
class IT(Stuf):
pass
if Stuf.count != 0:
    print('Test failed!')
else:
bart = Stuf('Bart', 12, '2-4')
if Stuf.count != 1:
        print('Test failed!')
Stuf.count +=1
print('%d' %(Stuf.count + 1) )
else:
lisa = Stuf('lisa', 11, '2-5')
if Stuf.count != 2:
            print('Test failed!')
else:
print('Stuf:', Stuf.count)
            print('Test passed!')
#stu1 = Stuf('jack', 13, '1-2')
#stu1.print_obj()
#print(stu1.id)
#print(stu1.__name) | [
"[email protected]"
] | |
08f4aced36fe56bcec48deaa99f0e5ad628d5792 | b978cf7f47c5cd6295f3c0c104752d3e1e9d89d6 | /test.py | f88b6b9a5b2b21a543c221161f595e2588fd53b5 | [] | no_license | sepidmnorozy/backup-crawler | 1e4cd62d5a48b6e3bf974f89d1d513765e5d9c5b | 73beddd2febd0dec3a0d1f5706557de073035a06 | refs/heads/master | 2022-11-18T19:56:43.507394 | 2020-07-22T13:11:53 | 2020-07-22T13:11:53 | 281,674,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from pymongo import MongoClient
from rss import rss_reader
import json
if rss_reader('https://www.khabaronline.ir/rss') == 'Success':
with open("links.json", 'r') as f:
urls = json.load(f)
else:
urls = []
client = MongoClient()
db = client['newsdb_week']
articles = db.weekarticles
start_urls = []
for url in urls:
if articles.find_one({"url": url}) is None:
start_urls.append(url)
print(start_urls)
print(len(start_urls))
| [
"[email protected]"
] | |
e286247caef6608e64d3f83668b0e57d5c35c469 | 07e6fc323f657d1fbfc24f861a278ab57338b80a | /python/test_chem_reaction.py | a45fb01f6793461a249921c48059b569c7d781b2 | [
"MIT"
] | permissive | ProkopHapala/SimpleSimulationEngine | 99cf2532501698ee8a03b2e40d1e4bedd9a12609 | 47543f24f106419697e82771289172d7773c7810 | refs/heads/master | 2022-09-05T01:02:42.820199 | 2022-08-28T10:22:41 | 2022-08-28T10:22:41 | 40,007,027 | 35 | 4 | null | null | null | null | UTF-8 | Python | false | false | 462 | py | #!/usr/bin/python
import re
import numpy as np
import sys
from pySimE import chemistry as ch
#print ch.str2composition( sys.argv[1] )
#sides = ch.parseReaction( 'Fe+O2=Fe2O3' )
#sides = ch.parseReaction( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print sides
#print ch.reaction2string( sides )
#print ch.balanceReactionString( 'Fe+O2=Fe2O3' )
print ch.balanceReactionString( 'C12H22O11+KNO3=H2O+CO2+K2CO3+N2' )
#print atomicBalance( reaction[0], reaction[1] )
| [
"[email protected]"
] | |
fb4d6144389ec8eb93a016186bb5908c2683cdc8 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_clattering.py | 3893e7f6289447dca25d947171005c4f61ce3729 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py |
# class header
class _CLATTERING():
    def __init__(self):
        self.name = "CLATTERING"
        self.definitions = 'clatter'  # base form of the word (quoted so the attribute holds a real value)
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['clatter']
| [
"[email protected]"
] | |
a0c529fe9ac1114d4ea620a3a09ab644868c12c2 | 7c59bbd4ff413a95dc9d25fbfccd11c6db60202a | /python_stack/full_stack_django/test_orm/apps/test_orm_app/migrations/0001_initial.py | ff84e3ca46db76c12c5baaeb018a42283bcbe193 | [] | no_license | soikatesc/DojoAssignments | 9a185a1164e42a985aea5e49d0ee270fd476d42a | c5c84bc9bd4aedd0fe6aa26bf75793e284edb248 | refs/heads/master | 2021-01-23T04:34:19.617679 | 2017-05-16T03:52:58 | 2017-05-16T03:52:58 | 86,211,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-04-19 00:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('blog', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.TextField(max_length=1000)),
('created_at', models.DateField(auto_now_add=True)),
('updated_at', models.DateField(auto_now=True)),
('blog', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='test_orm_app.Blog')),
],
),
]
| [
"[email protected]"
] | |
b6cd32dd7c58e44b484925d0981c527b8eb6d61f | ddd09683d9cbd681db5dae4e2d036d28bd4d24c1 | /PA3/BAL3.py | f82978400cd729be26ca286631abcea6caa2356a | [] | no_license | nivedn3/DL4CV-EE6132- | 41f9cd877a4c43db0a2f511a57df8b624fbc0a07 | 2cd97c7d2170a8e4fe36b6ccc8443c009e3d003a | refs/heads/master | 2021-01-20T05:41:37.019460 | 2017-11-22T10:17:16 | 2017-11-22T10:17:16 | 101,465,640 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,240 | py | import tensorflow as tf
import numpy as np
sess = tf.InteractiveSession()
def data(number,size):
a = []
b = []
out = []
for i in range(number):
a_in = np.random.choice([0,1],size)
a_in = a_in.tolist()
#a_in = [1,0,0,0,0]
b_in = np.random.choice([0,1],size)
b_in = b_in.tolist()
#b_in = [1,0,0,0,0]
a_str = ','.join(str(x) for x in a_in).replace(',','')
b_str = ','.join(str(x) for x in b_in).replace(',','')
c = bin(int(a_str,2) + int(b_str,2)).split('b')[1]
c = [int(i) for i in list(c)]
c_out = np.array(c)
if len(c_out) == size:
c_out = np.insert(c_out,0,0)
if len(c_out) < size:
while(len(c_out) != size+1):
c_out = np.insert(c_out,0,0)
test = []
for j in range(len(a_in)):
test.append(a_in[j])
test.append(b_in[j])
a.append(test)
#b.append(b_in)
out.append(c_out)
return a,out
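# One hand-worked sample from data() with size=3 (illustrative):
#   a_in=[1,0,1], b_in=[0,1,1] -> interleaved network input [1,0, 0,1, 1,1]
#   0b101 + 0b011 = 0b1000     -> target c_out uses size+1 bits: [1,0,0,0]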
size = 3
hs = 5
x = tf.placeholder(tf.float32,shape = [None,size,2])
y = tf.placeholder(tf.float32,shape = [None,size+1])
w = tf.Variable(tf.random_normal([hs,size+1]))
b = tf.Variable(tf.random_normal([size+1]))
rnn_inp = tf.unstack(x,size,1)
lstm = tf.contrib.rnn.BasicRNNCell(hs)
outputs, states = tf.contrib.rnn.static_rnn(lstm, rnn_inp, dtype=tf.float32)
logits = tf.sigmoid(tf.matmul(outputs[-1], w) + b)
# Threshold the sigmoid outputs at 0.5 to get hard 0/1 digit predictions:
# shift by -0.5, zero out negatives with relu, scale up, then clip to [0, 1].
logitst = tf.add(logits, tf.scalar_mul(-0.5, tf.ones_like(logits)))
logitst = tf.nn.relu(logitst)
logitst = tf.scalar_mul(1000000, logitst)
logitst = tf.clip_by_value(logitst, 0, 1)
logitsc = tf.cast(logitst,tf.int32)
yc = tf.cast(y,tf.int32)
with tf.name_scope("cross_entropy"):
#cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits,labels = y))
cross_entropy = tf.losses.mean_squared_error(labels = y, predictions = logits)
tf.summary.scalar('cross entropy',cross_entropy)
with tf.name_scope("train"):
train_step = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(logitsc,yc)
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
tf.summary.scalar('accuracy',accuracy)
merged_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter("/home/psycholearner/projects//DL4CV-EE6132-/PA3/2035")
writer.add_graph(sess.graph)
writer2 = tf.summary.FileWriter("/home/psycholearner/projects/DL4CV-EE6132-/PA3/20351")
writer2.add_graph(sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(20000):
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
#batch_y.astype(float)
if i % 25 == 0:
s = sess.run(merged_summary,feed_dict = {x: batch_x,y: batch_y})
writer.add_summary(s,i)
at,batch_yt = data(500,size)
batch_xt = np.array(at)
batch_xt = batch_xt.reshape(500,size,2)
batch_xt = [j[::-1] for j in batch_xt]
batch_xt = np.array(batch_xt)
batch_xt.astype(float)
batch_yt = np.array(batch_yt)
k = sess.run(merged_summary,feed_dict = {x: batch_xt,y: batch_yt})
writer2.add_summary(k,i)
#train_accuracy = sess.run(accuracy.eval(feed_dict={x: batch[0], y: batch[1]}))
#[train_accuracy] = sess.run([cross_entropy],feed_dict = {x: batch_x, y:batch_y})
#[test] = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#logits = sess.run([accuracy],feed_dict = {x: batch_x, y:batch_y})
#print('step %d, training accuracy %g %g' % (i, train_accuracy,test))
#[test_acc] = sess.run([test_accuracy],feed_dict = {x: mnist.test.images, y:mnist.test.labels})
#print('step %d, test accuracy %g' % (i, test_acc))
#saver.restore(sess, "/home/psycholearner/projects//DL4CV-EE6132-/PA2/model.ckpt")
sess.run(train_step,feed_dict = {x:batch_x,y:batch_y})
'''
test_data = mnist.test.images[:128].reshape((-1, 28, 28))
test_label = mnist.test.labels[:128]
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: test_data, y: test_label}))
'''
a,batch_y = data(500,size)
batch_x = np.array(a)
batch_x = batch_x.reshape(500,size,2)
batch_x = [j[::-1] for j in batch_x]
batch_x = np.array(batch_x)
batch_x.astype(float)
batch_y = np.array(batch_y)
print("Testing Accuracy:",sess.run([accuracy], feed_dict={x: batch_x, y: batch_y}))
| [
"[email protected]"
] | |
f0921f29f3f682945a8f671213dc391d565db088 | 9d41570295cc05b66fd52584a90fe87f29155943 | /src/crawler/delay.py | 649fb6282c26a77936487a5bcd18eeda56ff6aa7 | [
"MIT"
] | permissive | diegojromerolopez/relwrac | ed56feeb2a5e455e0fa58f6bc130445e5a0831bd | 23ee278ab4019b98269419c53feed2194f079c25 | refs/heads/master | 2022-12-11T08:06:19.888698 | 2019-11-16T12:35:34 | 2019-11-16T12:35:34 | 219,372,323 | 0 | 0 | MIT | 2022-12-08T06:49:05 | 2019-11-03T22:09:35 | Python | UTF-8 | Python | false | false | 294 | py | import random
class Delay(object):
@classmethod
def none(cls):
return None
@classmethod
def uniform(cls, lower_bound: float, upper_bound: float):
def uniform_delay_():
return random.uniform(lower_bound, upper_bound)
return uniform_delay_
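# Usage sketch (hypothetical caller):
#   wait = Delay.uniform(0.5, 2.0)  # returns a zero-argument callable
#   time.sleep(wait())              # sleeps for a random 0.5-2.0 second delay
# Delay.none() simply returns None, i.e. "no delay function configured".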
| [
"[email protected]"
] | |
5f1c2a99593a7553184a6e88dacd5cfddfa94dc2 | 11286e7989264134a8a8d610e0f609e6fbff9140 | /ch06/ch06_6.py | 611bb36abeda2b0457a21b95c8675ec3d9cc42ed | [] | no_license | p2c2e/machine_learning_with_python_cookbook | 04eeed2e00e0a3e9c0681d4b2f4125aa85485a1d | b176323a02f5b5722e312a579ad764a0276ec9c6 | refs/heads/main | 2023-01-30T06:54:34.138786 | 2020-12-13T05:02:07 | 2020-12-13T05:02:07 | 320,987,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | # Load libraries
import unicodedata
import sys
# Create text
text_data = ['Hi!!!! I. Love. This. Song....',
'10000% Agree!!!! #LoveIT',
'Right?!?!']
# Create a dictionary of punctuation characters
punctuation = dict.fromkeys(i for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith('P'))
# For each string, remove any punctuation characters
[string.translate(punctuation) for string in text_data] | [
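# The comprehension above should evaluate to:
#   ['Hi I Love This Song', '10000 Agree LoveIT', 'Right']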
"[email protected]"
] | |
2b59d2bc871b13882aa71629e364e5ee5cde3a00 | 186736f265fa7954e95198955546305ab1b9b981 | /notesApi/settings.py | d3fd465d97e808c8f69bde9fd61320c402413ffb | [] | no_license | nova-sangeeth/notes-api | 6449669870dfb69a72e1aad71c8859ca9de8bfbb | d5d15a4df615b0b276ccf8f49efc9e21eb177b65 | refs/heads/master | 2022-12-22T11:38:03.065884 | 2020-09-23T19:58:14 | 2020-09-23T19:58:14 | 298,022,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py | """
Django settings for notesApi project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "v1jk=4%^w9@)42-xumnuc3ho+7!&ug#q3*^y)x^@rlu#-96o*d"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
# crispy forms
"crispy_forms",
# all auth apps
"django.contrib.sites",
"allauth",
"allauth.account",
"allauth.socialaccount",
# apps
"rest_framework",
"api_notes",
]
SITE_ID = 1
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "notesApi.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "notesApi.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
ACCOUNT_EMAIL_VERIFICATION = "required"
ACCOUNT_AUTHENTICATED_LOGIN_REDIRECTS = True
ACCOUNT_EMAIL_REQUIRED = False
| [
"[email protected]"
] | |
4c92871a9b092599b369eba37b5e69ca438f451d | 3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2 | /src/Week4/Practice/Trace1.py | 6db80027484d73a47f843382e033603034f1470c | [] | no_license | theguyoverthere/CMU15-112-Spring17 | b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8 | b8287092b14e82d2a3aeac6c27bffbc95382eb34 | refs/heads/master | 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 267 | py | def onesDigit(n):
return n%10
def ct1(L):
for i in range(len(L)):
L[i] += sum(L) + max(L)
# The function onesDigit is called on each element before
# making comparison.
return sorted(L, key=onesDigit)
a = [2,1,0]
print(ct1(a))
print(a)
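# Worked trace (assuming the code above runs unchanged):
#   inside ct1, L becomes [7, 16, 39] (each element gains sum(L) + max(L) in turn);
#   sorting by ones digit gives [16, 7, 39], so the first print shows [16, 7, 39];
#   ct1 mutates the list in place, so the second print shows [7, 16, 39].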
| [
"[email protected]"
] | |
ddb617b3840deff9580b1979fa5f9a1accfb1906 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/you-get/2016/8/common.py | a5a0fbab63c9d5e6a52916b9ad5356b87ef836b7 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 46,179 | py | #!/usr/bin/env python
SITES = {
'163' : 'netease',
'56' : 'w56',
'acfun' : 'acfun',
'archive' : 'archive',
'baidu' : 'baidu',
'bandcamp' : 'bandcamp',
'baomihua' : 'baomihua',
'bigthink' : 'bigthink',
'bilibili' : 'bilibili',
'cctv' : 'cntv',
'cntv' : 'cntv',
'cbs' : 'cbs',
'dailymotion' : 'dailymotion',
'dilidili' : 'dilidili',
'dongting' : 'dongting',
'douban' : 'douban',
'douyu' : 'douyutv',
'ehow' : 'ehow',
'facebook' : 'facebook',
'fc2' : 'fc2video',
'flickr' : 'flickr',
'freesound' : 'freesound',
'fun' : 'funshion',
'google' : 'google',
'heavy-music' : 'heavymusic',
'huaban' : 'huaban',
'iask' : 'sina',
'ifeng' : 'ifeng',
'imgur' : 'imgur',
'in' : 'alive',
'infoq' : 'infoq',
'instagram' : 'instagram',
'interest' : 'interest',
'iqilu' : 'iqilu',
'iqiyi' : 'iqiyi',
'isuntv' : 'suntv',
'joy' : 'joy',
'jpopsuki' : 'jpopsuki',
'kankanews' : 'bilibili',
'khanacademy' : 'khan',
'ku6' : 'ku6',
'kugou' : 'kugou',
'kuwo' : 'kuwo',
'le' : 'le',
'letv' : 'le',
'lizhi' : 'lizhi',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
'miomio' : 'miomio',
'mixcloud' : 'mixcloud',
'mtv81' : 'mtv81',
'musicplayon' : 'musicplayon',
'naver' : 'naver',
'7gogo' : 'nanagogo',
'nicovideo' : 'nicovideo',
'panda' : 'panda',
'pinterest' : 'pinterest',
'pixnet' : 'pixnet',
'pptv' : 'pptv',
'qianmo' : 'qianmo',
'qq' : 'qq',
'showroom-live' : 'showroom',
'sina' : 'sina',
'smgbb' : 'bilibili',
'sohu' : 'sohu',
'soundcloud' : 'soundcloud',
'ted' : 'ted',
'theplatform' : 'theplatform',
'thvideo' : 'thvideo',
'tucao' : 'tucao',
'tudou' : 'tudou',
'tumblr' : 'tumblr',
'twimg' : 'twitter',
'twitter' : 'twitter',
'videomega' : 'videomega',
'vidto' : 'vidto',
'vimeo' : 'vimeo',
'wanmen' : 'wanmen',
'weibo' : 'miaopai',
'veoh' : 'veoh',
'vine' : 'vine',
'vk' : 'vk',
'xiami' : 'xiami',
'xiaokaxiu' : 'yixia',
'xiaojiadianvideo' : 'fc2video',
'yinyuetai' : 'yinyuetai',
'miaopai' : 'yixia',
'youku' : 'youku',
'youtu' : 'youtube',
'youtube' : 'youtube',
'zhanqi' : 'zhanqi',
}
import getopt
import json
import locale
import logging
import os
import platform
import re
import socket
import sys
import time
from urllib import request, parse, error
from http import cookiejar
from importlib import import_module
from .version import __version__
from .util import log, term
from .util.git import get_version
from .util.strings import get_filename, unescape_html
from . import json_output as json_output_
dry_run = False
json_output = False
force = False
player = None
extractor_proxy = None
cookies = None
output_filename = None
fake_headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'UTF-8,*;q=0.5',
'Accept-Encoding': 'gzip,deflate,sdch',
'Accept-Language': 'en-US,en;q=0.8',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding.lower()
else:
default_encoding = locale.getpreferredencoding().lower()
def maybe_print(*s):
try: print(*s)
except: pass
def tr(s):
if default_encoding == 'utf-8':
return s
else:
return s
#return str(s.encode('utf-8'))[2:-1]
# DEPRECATED in favor of match1()
def r1(pattern, text):
m = re.search(pattern, text)
if m:
return m.group(1)
# DEPRECATED in favor of match1()
def r1_of(patterns, text):
for p in patterns:
x = r1(p, text)
if x:
return x
def match1(text, *patterns):
"""Scans through a string for substrings matched some patterns (first-subgroups only).
Args:
text: A string to be scanned.
patterns: Arbitrary number of regex patterns.
Returns:
When only one pattern is given, returns a string (None if no match found).
        When more than one pattern is given, returns a list of strings ([] if no match is found).
"""
if len(patterns) == 1:
pattern = patterns[0]
match = re.search(pattern, text)
if match:
return match.group(1)
else:
return None
else:
ret = []
for pattern in patterns:
match = re.search(pattern, text)
if match:
ret.append(match.group(1))
return ret
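# Illustrative calls (hypothetical inputs):
#   match1('v=abc123', r'v=(\w+)')            -> 'abc123'
#   match1('v=abc123', r'v=(\w+)', r'(\d+)')  -> ['abc123', '123']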
def matchall(text, patterns):
"""Scans through a string for substrings matched some patterns.
Args:
text: A string to be scanned.
        patterns: A list of regex patterns.
    Returns:
        A list of all matched substrings; empty if nothing matched.
"""
ret = []
for pattern in patterns:
match = re.findall(pattern, text)
ret += match
return ret
def launch_player(player, urls):
import subprocess
import shlex
subprocess.call(shlex.split(player) + list(urls))
def parse_query_param(url, param):
"""Parses the query string of a URL and returns the value of a parameter.
Args:
url: A URL.
param: A string representing the name of the parameter.
Returns:
The value of the parameter.
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
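# Example (hypothetical URL):
#   parse_query_param('http://example.com/watch?v=42&t=10s', 'v') -> '42'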
def unicodize(text):
return re.sub(r'\\u([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])', lambda x: chr(int(x.group(0)[2:], 16)), text)
# DEPRECATED in favor of util.legitimize()
def escape_file_path(path):
path = path.replace('/', '-')
path = path.replace('\\', '-')
path = path.replace('*', '-')
path = path.replace('?', '-')
return path
def ungzip(data):
"""Decompresses data for Content-Encoding: gzip.
"""
from io import BytesIO
import gzip
buffer = BytesIO(data)
f = gzip.GzipFile(fileobj=buffer)
return f.read()
def undeflate(data):
"""Decompresses data for Content-Encoding: deflate.
(the zlib compression is used.)
"""
import zlib
decompressobj = zlib.decompressobj(-zlib.MAX_WBITS)
return decompressobj.decompress(data)+decompressobj.flush()
# DEPRECATED in favor of get_content()
def get_response(url, faker = False):
# install cookies
if cookies:
opener = request.build_opener(request.HTTPCookieProcessor(cookies))
request.install_opener(opener)
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
else:
response = request.urlopen(url)
data = response.read()
if response.info().get('Content-Encoding') == 'gzip':
data = ungzip(data)
elif response.info().get('Content-Encoding') == 'deflate':
data = undeflate(data)
response.data = data
return response
# DEPRECATED in favor of get_content()
def get_html(url, encoding = None, faker = False):
content = get_response(url, faker).data
return str(content, 'utf-8', 'ignore')
# DEPRECATED in favor of get_content()
def get_decoded_html(url, faker = False):
response = get_response(url, faker)
data = response.data
charset = r1(r'charset=([\w-]+)', response.headers['content-type'])
if charset:
return data.decode(charset, 'ignore')
else:
return data
def get_location(url):
response = request.urlopen(url)
# urllib will follow redirections and it's too much code to tell urllib
# not to do that
return response.geturl()
def get_content(url, headers={}, decoded=True):
"""Gets the content of a URL via sending a HTTP GET request.
Args:
url: A URL.
headers: Request headers used by the client.
decoded: Whether decode the response body using UTF-8 or the charset specified in Content-Type.
Returns:
The content as a string.
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
for i in range(10):
try:
response = request.urlopen(req)
break
except socket.timeout:
logging.debug('request attempt %s timeout' % str(i + 1))
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(response.getheader('Content-Type'), r'charset=([\w-]+)')
if charset is not None:
data = data.decode(charset)
else:
data = data.decode('utf-8')
return data
def url_size(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(url)
size = response.headers['content-length']
return int(size) if size!=None else float('inf')
def urls_size(urls, faker = False, headers = {}):
return sum([url_size(url, faker=faker, headers=headers) for url in urls])
def get_head(url, headers = {}):
if headers:
req = request.Request(url, headers = headers)
else:
req = request.Request(url)
req.get_method = lambda : 'HEAD'
res = request.urlopen(req)
return dict(res.headers)
def url_info(url, faker = False, headers = {}):
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
headers = response.headers
type = headers['content-type']
if type == 'image/jpg; charset=UTF-8' or type == 'image/jpg' : type = 'audio/mpeg' #fix for netease
mapping = {
'video/3gpp': '3gp',
'video/f4v': 'flv',
'video/mp4': 'mp4',
'video/MP2T': 'ts',
'video/quicktime': 'mov',
'video/webm': 'webm',
'video/x-flv': 'flv',
'video/x-ms-asf': 'asf',
'audio/mp4': 'mp4',
'audio/mpeg': 'mp3',
'image/jpeg': 'jpg',
'image/png': 'png',
'image/gif': 'gif',
'application/pdf': 'pdf',
}
if type in mapping:
ext = mapping[type]
else:
type = None
if headers['content-disposition']:
try:
filename = parse.unquote(r1(r'filename="?([^"]+)"?', headers['content-disposition']))
if len(filename.split('.')) > 1:
ext = filename.split('.')[-1]
else:
ext = None
except:
ext = None
else:
ext = None
if headers['transfer-encoding'] != 'chunked':
size = headers['content-length'] and int(headers['content-length'])
else:
size = None
return type, ext, size
def url_locations(urls, faker = False, headers = {}):
locations = []
for url in urls:
if faker:
response = request.urlopen(request.Request(url, headers = fake_headers), None)
elif headers:
response = request.urlopen(request.Request(url, headers = headers), None)
else:
response = request.urlopen(request.Request(url))
locations.append(response.url)
return locations
def url_save(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
file_size = url_size(url, faker = faker, headers = headers)
if os.path.exists(filepath):
if not force and file_size == os.path.getsize(filepath):
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(file_size)
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download' if file_size!=float('inf') else filepath
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if received < file_size:
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
try:
range_start = int(response.headers['content-range'][6:].split('/')[0].split('-')[0])
end_length = end = int(response.headers['content-range'][6:].split('/')[1])
range_length = end_length - range_start
except:
content_length = response.headers['content-length']
range_length = int(content_length) if content_length!=None else float('inf')
if file_size != received + range_length:
received = 0
if bar:
bar.received = 0
open_mode = 'wb'
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
if received == file_size: # Download finished
break
else: # Unexpected termination. Retry request
headers['Range'] = 'bytes=' + str(received) + '-'
response = request.urlopen(request.Request(url, headers = headers), None)
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
def url_save_chunked(url, filepath, bar, refer = None, is_part = False, faker = False, headers = {}):
if os.path.exists(filepath):
if not force:
if not is_part:
if bar:
bar.done()
print('Skipping %s: file already exists' % tr(os.path.basename(filepath)))
else:
if bar:
bar.update_received(os.path.getsize(filepath))
return
else:
if not is_part:
if bar:
bar.done()
print('Overwriting %s' % tr(os.path.basename(filepath)), '...')
elif not os.path.exists(os.path.dirname(filepath)):
os.mkdir(os.path.dirname(filepath))
temp_filepath = filepath + '.download'
received = 0
if not force:
open_mode = 'ab'
if os.path.exists(temp_filepath):
received += os.path.getsize(temp_filepath)
if bar:
bar.update_received(os.path.getsize(temp_filepath))
else:
open_mode = 'wb'
if faker:
headers = fake_headers
elif headers:
headers = headers
else:
headers = {}
if received:
headers['Range'] = 'bytes=' + str(received) + '-'
if refer:
headers['Referer'] = refer
response = request.urlopen(request.Request(url, headers = headers), None)
with open(temp_filepath, open_mode) as output:
while True:
buffer = response.read(1024 * 256)
if not buffer:
break
output.write(buffer)
received += len(buffer)
if bar:
bar.update_received(len(buffer))
    assert received == os.path.getsize(temp_filepath), '%s == %s == %s' % (received, os.path.getsize(temp_filepath), temp_filepath)
if os.access(filepath, os.W_OK):
os.remove(filepath) # on Windows rename could fail if destination filepath exists
os.rename(temp_filepath, filepath)
class SimpleProgressBar:
term_size = term.get_terminal_size()[1]
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
self.speed = ''
self.last_updated = time.time()
total_pieces_len = len(str(total_pieces))
# 38 is the size of all statically known size in self.bar
total_str = '%5s' % round(self.total_size / 1048576, 1)
total_str_width = max(len(total_str), 5)
self.bar_size = self.term_size - 27 - 2*total_pieces_len - 2*total_str_width
self.bar = '{:>4}%% ({:>%s}/%sMB) ├{:─<%s}┤[{:>%s}/{:>%s}] {}' % (
total_str_width, total_str, self.bar_size, total_pieces_len, total_pieces_len)
def update(self):
self.displayed = True
bar_size = self.bar_size
percent = round(self.received * 100 / self.total_size, 1)
if percent >= 100:
percent = 100
dots = bar_size * int(percent) // 100
plus = int(percent) - dots // bar_size * 100
if plus > 0.8:
plus = '█'
elif plus > 0.4:
plus = '>'
else:
plus = ''
bar = '█' * dots + plus
bar = self.bar.format(percent, round(self.received / 1048576, 1), bar, self.current_piece, self.total_pieces, self.speed)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
time_diff = time.time() - self.last_updated
bytes_ps = n / time_diff if time_diff else 0
if bytes_ps >= 1024 ** 3:
self.speed = '{:4.0f} GB/s'.format(bytes_ps / 1024 ** 3)
elif bytes_ps >= 1024 ** 2:
self.speed = '{:4.0f} MB/s'.format(bytes_ps / 1024 ** 2)
elif bytes_ps >= 1024:
self.speed = '{:4.0f} kB/s'.format(bytes_ps / 1024)
else:
self.speed = '{:4.0f} B/s'.format(bytes_ps)
self.last_updated = time.time()
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class PiecesProgressBar:
def __init__(self, total_size, total_pieces = 1):
self.displayed = False
self.total_size = total_size
self.total_pieces = total_pieces
self.current_piece = 1
self.received = 0
def update(self):
self.displayed = True
bar = '{0:>5}%[{1:<40}] {2}/{3}'.format('', '=' * 40, self.current_piece, self.total_pieces)
sys.stdout.write('\r' + bar)
sys.stdout.flush()
def update_received(self, n):
self.received += n
self.update()
def update_piece(self, n):
self.current_piece = n
def done(self):
if self.displayed:
print()
self.displayed = False
class DummyProgressBar:
def __init__(self, *args):
pass
def update_received(self, n):
pass
def update_piece(self, n):
pass
def done(self):
pass
def get_output_filename(urls, title, ext, output_dir, merge):
# lame hack for the --output-filename option
global output_filename
if output_filename: return output_filename
merged_ext = ext
if (len(urls) > 1) and merge:
from .processor.ffmpeg import has_ffmpeg_installed
if ext in ['flv', 'f4v']:
if has_ffmpeg_installed():
merged_ext = 'mp4'
else:
merged_ext = 'flv'
elif ext == 'mp4':
merged_ext = 'mp4'
elif ext == 'ts':
if has_ffmpeg_installed():
merged_ext = 'mkv'
else:
merged_ext = 'ts'
return '%s.%s' % (title, merged_ext)
def download_urls(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}, **kwargs):
assert urls
if json_output:
json_output_.download_urls(urls=urls, title=title, ext=ext, total_size=total_size, refer=refer)
return
if dry_run:
print('Real URLs:\n%s' % '\n'.join(urls))
return
if player:
launch_player(player, urls)
return
if not total_size:
try:
total_size = urls_size(urls, faker=faker, headers=headers)
except:
import traceback
traceback.print_exc(file=sys.stdout)
pass
title = tr(get_filename(title))
output_filename = get_output_filename(urls, title, ext, output_dir, merge)
output_filepath = os.path.join(output_dir, output_filename)
if total_size:
if not force and os.path.exists(output_filepath) and os.path.getsize(output_filepath) >= total_size * 0.9:
print('Skipping %s: file already exists' % output_filepath)
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
url = urls[0]
print('Downloading %s ...' % tr(output_filename))
bar.update()
url_save(url, output_filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
bar.update()
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if 'av' in kwargs and kwargs['av']:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_av
ret = ffmpeg_concat_av(parts, output_filepath, ext)
print('Merged into %s' % output_filename)
if ret == 0:
for part in parts: os.remove(part)
elif ext in ['flv', 'f4v']:
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_flv_to_mp4
ffmpeg_concat_flv_to_mp4(parts, output_filepath)
else:
from .processor.join_flv import concat_flv
concat_flv(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == 'mp4':
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_mp4_to_mp4
ffmpeg_concat_mp4_to_mp4(parts, output_filepath)
else:
from .processor.join_mp4 import concat_mp4
concat_mp4(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
elif ext == "ts":
try:
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
ffmpeg_concat_ts_to_mkv(parts, output_filepath)
else:
from .processor.join_ts import concat_ts
concat_ts(parts, output_filepath)
print('Merged into %s' % output_filename)
except:
raise
else:
for part in parts:
os.remove(part)
else:
print("Can't merge %s files" % ext)
print()
def download_urls_chunked(urls, title, ext, total_size, output_dir='.', refer=None, merge=True, faker=False, headers = {}):
assert urls
if dry_run:
print('Real URLs:\n%s\n' % urls)
return
if player:
launch_player(player, urls)
return
title = tr(get_filename(title))
filename = '%s.%s' % (title, ext)
filepath = os.path.join(output_dir, filename)
if total_size and ext in ('ts'):
if not force and os.path.exists(filepath[:-3] + '.mkv'):
print('Skipping %s: file already exists' % filepath[:-3] + '.mkv')
print()
return
bar = SimpleProgressBar(total_size, len(urls))
else:
bar = PiecesProgressBar(total_size, len(urls))
if len(urls) == 1:
parts = []
url = urls[0]
print('Downloading %s ...' % tr(filename))
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
url_save_chunked(url, filepath, bar, refer = refer, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_convert_ts_to_mkv
if ffmpeg_convert_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Conversion aborted.')
else:
print("Can't convert %s files" % ext)
else:
parts = []
print('Downloading %s.%s ...' % (tr(title), ext))
for i, url in enumerate(urls):
filename = '%s[%02d].%s' % (title, i, ext)
filepath = os.path.join(output_dir, filename)
parts.append(filepath)
#print 'Downloading %s [%s/%s]...' % (tr(filename), i + 1, len(urls))
bar.update_piece(i + 1)
url_save_chunked(url, filepath, bar, refer = refer, is_part = True, faker = faker, headers = headers)
bar.done()
if not merge:
print()
return
if ext == 'ts':
from .processor.ffmpeg import has_ffmpeg_installed
if has_ffmpeg_installed():
from .processor.ffmpeg import ffmpeg_concat_ts_to_mkv
if ffmpeg_concat_ts_to_mkv(parts, os.path.join(output_dir, title + '.mkv')):
for part in parts:
os.remove(part)
else:
os.remove(os.path.join(output_dir, title + '.mkv'))
else:
print('No ffmpeg is found. Merging aborted.')
else:
print("Can't merge %s files" % ext)
print()
def download_rtmp_url(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
from .processor.rtmpdump import play_rtmpdump_stream
play_rtmpdump_stream(player, url, params)
return
from .processor.rtmpdump import has_rtmpdump_installed, download_rtmpdump_stream
assert has_rtmpdump_installed(), "RTMPDump not installed."
download_rtmpdump_stream(url, title, ext,params, output_dir)
def download_url_ffmpeg(url,title, ext,params={}, total_size=0, output_dir='.', refer=None, merge=True, faker=False):
assert url
if dry_run:
print('Real URL:\n%s\n' % [url])
if params.get("-y",False): #None or unset ->False
print('Real Playpath:\n%s\n' % [params.get("-y")])
return
if player:
launch_player(player, [url])
return
from .processor.ffmpeg import has_ffmpeg_installed, ffmpeg_download_stream
assert has_ffmpeg_installed(), "FFmpeg not installed."
ffmpeg_download_stream(url, title, ext, params, output_dir)
def playlist_not_supported(name):
def f(*args, **kwargs):
raise NotImplementedError('Playlist is not supported for ' + name)
return f
def print_info(site_info, title, type, size):
if json_output:
json_output_.print_info(site_info=site_info, title=title, type=type, size=size)
return
if type:
type = type.lower()
if type in ['3gp']:
type = 'video/3gpp'
elif type in ['asf', 'wmv']:
type = 'video/x-ms-asf'
elif type in ['flv', 'f4v']:
type = 'video/x-flv'
elif type in ['mkv']:
type = 'video/x-matroska'
elif type in ['mp3']:
type = 'audio/mpeg'
elif type in ['mp4']:
type = 'video/mp4'
elif type in ['mov']:
type = 'video/quicktime'
elif type in ['ts']:
type = 'video/MP2T'
elif type in ['webm']:
type = 'video/webm'
elif type in ['jpg']:
type = 'image/jpeg'
elif type in ['png']:
type = 'image/png'
elif type in ['gif']:
type = 'image/gif'
if type in ['video/3gpp']:
type_info = "3GPP multimedia file (%s)" % type
elif type in ['video/x-flv', 'video/f4v']:
type_info = "Flash video (%s)" % type
elif type in ['video/mp4', 'video/x-m4v']:
type_info = "MPEG-4 video (%s)" % type
elif type in ['video/MP2T']:
type_info = "MPEG-2 transport stream (%s)" % type
elif type in ['video/webm']:
type_info = "WebM video (%s)" % type
#elif type in ['video/ogg']:
# type_info = "Ogg video (%s)" % type
elif type in ['video/quicktime']:
type_info = "QuickTime video (%s)" % type
elif type in ['video/x-matroska']:
type_info = "Matroska video (%s)" % type
#elif type in ['video/x-ms-wmv']:
# type_info = "Windows Media video (%s)" % type
elif type in ['video/x-ms-asf']:
type_info = "Advanced Systems Format (%s)" % type
#elif type in ['video/mpeg']:
# type_info = "MPEG video (%s)" % type
elif type in ['audio/mp4']:
type_info = "MPEG-4 audio (%s)" % type
elif type in ['audio/mpeg']:
type_info = "MP3 (%s)" % type
elif type in ['image/jpeg']:
type_info = "JPEG Image (%s)" % type
elif type in ['image/png']:
type_info = "Portable Network Graphics (%s)" % type
elif type in ['image/gif']:
type_info = "Graphics Interchange Format (%s)" % type
else:
type_info = "Unknown type (%s)" % type
maybe_print("Site: ", site_info)
maybe_print("Title: ", unescape_html(tr(title)))
print("Type: ", type_info)
print("Size: ", round(size / 1048576, 2), "MiB (" + str(size) + " Bytes)")
print()
def mime_to_container(mime):
mapping = {
'video/3gpp': '3gp',
'video/mp4': 'mp4',
'video/webm': 'webm',
'video/x-flv': 'flv',
}
if mime in mapping:
return mapping[mime]
else:
return mime.split('/')[1]
def parse_host(host):
"""Parses host name and port number from a string.
"""
if re.match(r'^(\d+)$', host) is not None:
return ("0.0.0.0", int(host))
if re.match(r'^(\w+)://', host) is None:
host = "//" + host
o = parse.urlparse(host)
hostname = o.hostname or "0.0.0.0"
port = o.port or 0
return (hostname, port)
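# Examples (hypothetical inputs):
#   parse_host('8080')           -> ('0.0.0.0', 8080)
#   parse_host('127.0.0.1:1080') -> ('127.0.0.1', 1080)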
def set_proxy(proxy):
proxy_handler = request.ProxyHandler({
'http': '%s:%s' % proxy,
'https': '%s:%s' % proxy,
})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
def unset_proxy():
proxy_handler = request.ProxyHandler({})
opener = request.build_opener(proxy_handler)
request.install_opener(opener)
# DEPRECATED in favor of set_proxy() and unset_proxy()
def set_http_proxy(proxy):
if proxy == None: # Use system default setting
proxy_support = request.ProxyHandler()
elif proxy == '': # Don't use any proxy
proxy_support = request.ProxyHandler({})
else: # Use proxy
proxy_support = request.ProxyHandler({'http': '%s' % proxy, 'https': '%s' % proxy})
opener = request.build_opener(proxy_support)
request.install_opener(opener)
def download_main(download, download_playlist, urls, playlist, **kwargs):
for url in urls:
if url.startswith('https://'):
url = url[8:]
if not url.startswith('http://'):
url = 'http://' + url
if playlist:
download_playlist(url, **kwargs)
else:
download(url, **kwargs)
def script_main(script_name, download, download_playlist, **kwargs):
def version():
log.i('version %s, a tiny downloader that scrapes the web.'
% get_version(kwargs['repo_path']
if 'repo_path' in kwargs else __version__))
logging.basicConfig(format='[%(levelname)s] %(message)s')
help = 'Usage: %s [OPTION]... [URL]...\n\n' % script_name
help += '''Startup options:
-V | --version Print version and exit.
-h | --help Print help and exit.
\n'''
help += '''Dry-run options: (no actual downloading)
-i | --info Print extracted information.
-u | --url Print extracted information with URLs.
--json Print extracted URLs in JSON format.
\n'''
help += '''Download options:
-n | --no-merge Do not merge video parts.
--no-caption Do not download captions.
(subtitles, lyrics, danmaku, ...)
-f | --force Force overwriting existed files.
-F | --format <STREAM_ID> Set video format to STREAM_ID.
-O | --output-filename <FILE> Set output filename.
-o | --output-dir <PATH> Set output directory.
-p | --player <PLAYER [OPTIONS]> Stream extracted URL to a PLAYER.
-c | --cookies <COOKIES_FILE> Load cookies.txt or cookies.sqlite.
-x | --http-proxy <HOST:PORT> Use an HTTP proxy for downloading.
-y | --extractor-proxy <HOST:PORT> Use an HTTP proxy for extracting only.
--no-proxy Never use a proxy.
-s | --socks-proxy <HOST:PORT> Use an SOCKS5 proxy for downloading.
-t | --timeout <SECONDS> Set socket timeout.
-d | --debug Show traceback and other debug info.
'''
short_opts = 'Vhfiuc:ndF:O:o:p:x:y:s:t:'
    opts = ['version', 'help', 'force', 'info', 'url', 'cookies=', 'no-caption', 'no-merge', 'no-proxy', 'debug', 'json', 'format=', 'stream=', 'itag=', 'output-filename=', 'output-dir=', 'player=', 'http-proxy=', 'socks-proxy=', 'extractor-proxy=', 'lang=', 'timeout=']
if download_playlist:
short_opts = 'l' + short_opts
opts = ['playlist'] + opts
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, opts)
except getopt.GetoptError as err:
log.e(err)
log.e("try 'you-get --help' for more options")
sys.exit(2)
global force
global dry_run
global json_output
global player
global extractor_proxy
global cookies
global output_filename
info_only = False
playlist = False
caption = True
merge = True
stream_id = None
lang = None
output_dir = '.'
proxy = None
socks_proxy = None
extractor_proxy = None
traceback = False
timeout = 600
for o, a in opts:
if o in ('-V', '--version'):
version()
sys.exit()
elif o in ('-h', '--help'):
version()
print(help)
sys.exit()
elif o in ('-f', '--force'):
force = True
elif o in ('-i', '--info'):
info_only = True
elif o in ('-u', '--url'):
dry_run = True
elif o in ('--json', ):
json_output = True
# to fix extractors not use VideoExtractor
dry_run = True
info_only = False
elif o in ('-c', '--cookies'):
try:
cookies = cookiejar.MozillaCookieJar(a)
cookies.load()
except:
import sqlite3
cookies = cookiejar.MozillaCookieJar()
con = sqlite3.connect(a)
cur = con.cursor()
try:
cur.execute("SELECT host, path, isSecure, expiry, name, value FROM moz_cookies")
for item in cur.fetchall():
c = cookiejar.Cookie(0, item[4], item[5],
None, False,
item[0],
item[0].startswith('.'),
item[0].startswith('.'),
item[1], False,
item[2],
item[3], item[3]=="",
None, None, {})
cookies.set_cookie(c)
except: pass
# TODO: Chromium Cookies
# SELECT host_key, path, secure, expires_utc, name, encrypted_value FROM cookies
# http://n8henrie.com/2013/11/use-chromes-cookies-for-easier-downloading-with-python-requests/
elif o in ('-l', '--playlist'):
playlist = True
elif o in ('--no-caption',):
caption = False
elif o in ('-n', '--no-merge'):
merge = False
elif o in ('--no-proxy',):
proxy = ''
elif o in ('-d', '--debug'):
traceback = True
# Set level of root logger to DEBUG
logging.getLogger().setLevel(logging.DEBUG)
elif o in ('-F', '--format', '--stream', '--itag'):
stream_id = a
elif o in ('-O', '--output-filename'):
output_filename = a
elif o in ('-o', '--output-dir'):
output_dir = a
elif o in ('-p', '--player'):
player = a
caption = False
elif o in ('-x', '--http-proxy'):
proxy = a
elif o in ('-s', '--socks-proxy'):
socks_proxy = a
elif o in ('-y', '--extractor-proxy'):
extractor_proxy = a
elif o in ('--lang',):
lang = a
elif o in ('-t', '--timeout'):
timeout = int(a)
else:
log.e("try 'you-get --help' for more options")
sys.exit(2)
if not args:
print(help)
sys.exit()
if (socks_proxy):
try:
import socket
import socks
socks_proxy_addrs = socks_proxy.split(':')
socks.set_default_proxy(socks.SOCKS5,
socks_proxy_addrs[0],
int(socks_proxy_addrs[1]))
socket.socket = socks.socksocket
def getaddrinfo(*args):
return [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
socket.getaddrinfo = getaddrinfo
except ImportError:
            log.w('Error importing PySocks library, socks proxy ignored. '
                'In order to use a socks proxy, please install PySocks.')
else:
import socket
set_http_proxy(proxy)
socket.setdefaulttimeout(timeout)
try:
if stream_id:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, stream_id=stream_id, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
if not extractor_proxy:
download_main(download, download_playlist, args, playlist, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
else:
download_main(download, download_playlist, args, playlist, extractor_proxy=extractor_proxy, output_dir=output_dir, merge=merge, info_only=info_only, json_output=json_output, caption=caption)
except KeyboardInterrupt:
if traceback:
raise
else:
sys.exit(1)
except UnicodeEncodeError:
log.e('[error] oops, the current environment does not seem to support Unicode.')
log.e('please set it to a UTF-8-aware locale first,')
log.e('so as to save the video (with some Unicode characters) correctly.')
log.e('you can do it like this:')
log.e(' (Windows) % chcp 65001 ')
log.e(' (Linux) $ LC_CTYPE=en_US.UTF-8')
sys.exit(1)
except Exception:
if not traceback:
log.e('[error] oops, something went wrong.')
log.e('don\'t panic, c\'est la vie. please try the following steps:')
log.e(' (1) Rule out any network problem.')
log.e(' (2) Make sure you-get is up-to-date.')
log.e(' (3) Check if the issue is already known, on')
log.e(' https://github.com/soimort/you-get/wiki/Known-Bugs')
log.e(' https://github.com/soimort/you-get/issues')
log.e(' (4) Run the command with \'--debug\' option,')
log.e(' and report this issue with the full output.')
else:
version()
log.i(args)
raise
sys.exit(1)
def google_search(url):
keywords = r1(r'https?://(.*)', url)
url = 'https://www.google.com/search?tbm=vid&q=%s' % parse.quote(keywords)
page = get_content(url, headers=fake_headers)
videos = re.findall(r'<a href="(https?://[^"]+)" onmousedown="[^"]+">([^<]+)<', page)
vdurs = re.findall(r'<span class="vdur _dwc">([^<]+)<', page)
durs = [r1(r'(\d+:\d+)', unescape_html(dur)) for dur in vdurs]
print("Google Videos search:")
for v in zip(videos, durs):
print("- video: %s [%s]" % (unescape_html(v[0][1]),
v[1] if v[1] else '?'))
print("# you-get %s" % log.sprint(v[0][0], log.UNDERLINE))
print()
print("Best matched result:")
return(videos[0][0])
def url_to_module(url):
try:
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
assert video_host and video_url
except:
url = google_search(url)
video_host = r1(r'https?://([^/]+)/', url)
video_url = r1(r'https?://[^/]+(.*)', url)
if video_host.endswith('.com.cn'):
video_host = video_host[:-3]
domain = r1(r'(\.[^.]+\.[^.]+)$', video_host) or video_host
assert domain, 'unsupported url: ' + url
k = r1(r'([^.]+)', domain)
if k in SITES:
return import_module('.'.join(['you_get', 'extractors', SITES[k]])), url
else:
import http.client
conn = http.client.HTTPConnection(video_host)
conn.request("HEAD", video_url, headers=fake_headers)
res = conn.getresponse()
location = res.getheader('location')
if location and location != url and not location.startswith('/'):
return url_to_module(location)
else:
return import_module('you_get.extractors.universal'), url
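# Illustrative helper (not part of the original you-get source): a minimal,
# hedged sketch of how url_to_module() is typically used. Which extractor a
# host resolves to depends on the SITES table of the installed version.
def _url_to_module_example(url='https://www.youtube.com/watch?v=XXXXXXXXXXX'):
    """Resolves a URL to its extractor module and prints the module name."""
    module, resolved_url = url_to_module(url)
    print('%s -> %s' % (resolved_url, module.__name__))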
def any_download(url, **kwargs):
m, url = url_to_module(url)
m.download(url, **kwargs)
def any_download_playlist(url, **kwargs):
m, url = url_to_module(url)
m.download_playlist(url, **kwargs)
def main(**kwargs):
script_main('you-get', any_download, any_download_playlist, **kwargs)
| [
"[email protected]"
] | |
67c8f6e68f42cf14fa5dda19c602fbd7976c47fc | b61efe2686feb44c5b0d2fb3094dd2ea94e6ca93 | /src/control_decision_4.py | be6dc49f088a3f399c8bf5df9b0a6c7de0b509ca | [] | no_license | idrissahil/bat_wifi_exploration | 888f0f7243cc4bedeba6fe8d702762e6e2ad5da9 | 5a1bc74c1b35360d21d01e5e2a721b38fb380ac8 | refs/heads/master | 2020-05-31T16:38:49.118742 | 2019-06-29T14:03:28 | 2019-06-29T14:03:28 | 190,386,321 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,239 | py | #! /usr/bin/env python
import rospy
import math
from sensor_msgs.msg import BatteryState
from geometry_msgs.msg import Twist, PoseArray, Pose, PoseStamped
rospy.init_node('control_decision_drone')
control_decision_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=1)
state=1
curr_pos = [0,0,0]
rrt_list=[]
index=0
def callback_gps(gps):
global curr_pos
global rrt_list
global state
global index
curr_pos[0] = gps.pose.position.x
curr_pos[1] = gps.pose.position.y
curr_pos[2] = gps.pose.position.z
if state==1:
print(state)
#curr_pos[0]=gps.pose.position.x
#curr_pos[1]=gps.pose.position.y
#curr_pos[2]=gps.pose.position.z
if len(rrt_list)>1:
state=2
print(state)
dist_point = math.sqrt(math.pow(rrt_list[index].position.x - curr_pos[0], 2)+math.pow(rrt_list[index].position.y - curr_pos[1], 2)+math.pow(rrt_list[index].position.z - curr_pos[2], 2))
if dist_point<0.3:
index=index+1
if index==len(rrt_list):
index=index-1
curr_position=PoseStamped()
#hold_position.pose.position.x= 0
#hold_position.pose.position.y = 14
#hold_position.pose.position.z= 1
curr_position.pose.position.x= rrt_list[index].position.x
curr_position.pose.position.y= rrt_list[index].position.y
curr_position.pose.position.z= rrt_list[index].position.z
curr_position.header.frame_id = "map"
control_decision_pub.publish(curr_position)
def callback_battery(rrt):
global state
global curr_pos
global rrt_list
rrt_list=rrt.poses
def callback_exploration(explore):
global state
global exploration_point_x
exploration_point_x = explore.pose.position.x
print(state)
if state ==1:
control_decision_pub.publish(explore)
def main():
exploration_sub = rospy.Subscriber('/mavros/setpoint_position/local1', PoseStamped, callback_exploration)
battery_sub = rospy.Subscriber('visual_marker_rrt', PoseArray, callback_battery)
gps_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback_gps)
rospy.spin()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c2137568a2e94f717e43fd034e129651b46804a3 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/streamanalytics/azure-mgmt-streamanalytics/azure/mgmt/streamanalytics/operations/_inputs_operations.py | 890d33f1b8b1901067d5182d5396b9ae6a0bfef4 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 28,587 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class InputsOperations(object):
"""InputsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~stream_analytics_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create_or_replace(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
if_none_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Creates an input or replaces an already existing input under an existing streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: The definition of the input that will be used to create a new input or replace
the existing one under the streaming job.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:param if_none_match: Set to '*' to allow a new input to be created, but to prevent updating an
existing input. Other values will result in a 412 Pre-condition Failed response.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_replace.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
if if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if response.status_code == 201:
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_replace.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
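    # Usage sketch (illustrative only, not part of the generated client): this
    # operation group is normally reached through a management client, e.g.
    #   client = StreamAnalyticsManagementClient(credential, subscription_id)
    #   input_resource = client.inputs.create_or_replace(
    #       resource_group_name='my-rg', job_name='my-job',
    #       input_name='my-input', input=input_definition)
    # Resource names here are placeholders and `input_definition` is a
    # models.Input built by the caller.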
def update(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input, # type: "models.Input"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Updates an existing input under an existing streaming job. This can be used to partially update
(ie. update one or two properties) an input without affecting the rest the job or input
definition.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: An Input object. The properties specified here will overwrite the corresponding
properties in the existing input (ie. Those properties will be updated). Any properties that
are set to null here will mean that the corresponding property in the existing input will
remain the same and not change as a result of this PATCH operation.
:type input: ~stream_analytics_management_client.models.Input
:param if_match: The ETag of the input. Omit this value to always overwrite the current input.
Specify the last-seen ETag value to prevent accidentally overwriting concurrent changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(input, 'Input')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes an input from the streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.Input"
"""Gets details about the specified input.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Input, or the result of cls(response)
:rtype: ~stream_analytics_management_client.models.Input
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Input"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('Input', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}'} # type: ignore
def list_by_streaming_job(
self,
resource_group_name, # type: str
job_name, # type: str
select=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.InputListResult"]
"""Lists all of the inputs under the specified streaming job.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param select: The $select OData query parameter. This is a comma-separated list of structural
properties to include in the response, or "\ *" to include all properties. By default, all
properties are returned except diagnostics. Currently only accepts '*\ ' as a valid value.
:type select: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either InputListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.InputListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.InputListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_streaming_job.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('InputListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_streaming_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs'} # type: ignore
def _test_initial(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> Optional["models.ResourceTestStatus"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.ResourceTestStatus"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._test_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'jobName': self._serialize.url("job_name", job_name, 'str'),
'inputName': self._serialize.url("input_name", input_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if input is not None:
body_content = self._serialize.body(input, 'Input')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_test_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
def begin_test(
self,
resource_group_name, # type: str
job_name, # type: str
input_name, # type: str
input=None, # type: Optional["models.Input"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ResourceTestStatus"]
"""Tests whether an input’s datasource is reachable and usable by the Azure Stream Analytics
service.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param job_name: The name of the streaming job.
:type job_name: str
:param input_name: The name of the input.
:type input_name: str
:param input: If the input specified does not already exist, this parameter must contain the
full input definition intended to be tested. If the input specified already exists, this
parameter can be left null to test the existing input as is or if specified, the properties
specified will overwrite the corresponding properties in the existing input (exactly like a
PATCH operation) and the resulting input will be tested.
:type input: ~stream_analytics_management_client.models.Input
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ResourceTestStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~stream_analytics_management_client.models.ResourceTestStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ResourceTestStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._test_initial(
resource_group_name=resource_group_name,
job_name=job_name,
input_name=input_name,
input=input,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ResourceTestStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_test.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.StreamAnalytics/streamingjobs/{jobName}/inputs/{inputName}/test'} # type: ignore
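    # Usage sketch (illustrative only): begin_test returns an LROPoller, so a
    # hypothetical caller blocks on the long-running test like this:
    #   poller = client.inputs.begin_test(
    #       resource_group_name='my-rg', job_name='my-job', input_name='my-input')
    #   test_status = poller.result()  # ResourceTestStatus
    # where `client` is assumed to be a StreamAnalyticsManagementClient and the
    # resource names are placeholders.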
| [
"[email protected]"
] | |
59944bb8fa971396a0f7e49931ba6f9bf8ed1091 | 4b29c3e3c8a2cad5071a3fb2ea674253c6f0ef21 | /pycharm/digiin/case/TestLogin.py | 70e3880684b38a0a5d5a1bb7b50cd59768931663 | [] | no_license | yz9527-1/1YZ | a0303b00fd1c7f782b7e4219c52f9589dd3b27b7 | 5f843531d413202f4f4e48ed0c3d510db21f4396 | refs/heads/master | 2022-11-30T23:50:56.682852 | 2020-08-10T02:11:13 | 2020-08-10T02:11:13 | 286,354,211 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,586 | py | #coding=utf-8
from ddt import ddt, data, unpack
from common.ExcelUtil import ExcelUtil
import time
import unittest
from selenium import webdriver
class Case(object):
def __init__(self):
pass
def get_case(self):
"""
获取数据
得到有用的数据,并且使数据以邮箱地址、密码、预期结果定位、预期结果的顺序返回
:return:
"""
        # Read the sheet data from the Excel file
        sheet = 'Login'
        file = ExcelUtil(sheet_name=sheet)
        data = file.get_data()
        # Find the column indexes in the header row (row 0), then collect the
        # values in that order
        email_index = data[0].index("邮箱地址")
        password_index = data[0].index("密码")
        expected_element_index = data[0].index("预期结果定位")
        expected_index = data[0].index("预期结果")
        data_length = len(data)
all_cass=[]
        # Skip the header row and keep only the useful columns
for i in range(1,data_length):
case=[]
case.append(data[i][email_index])
case.append(data[i][password_index])
case.append(data[i][expected_element_index])
case.append(data[i][expected_index])
all_cass.append(case)
return all_cass
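# Illustrative note (hypothetical values; the real rows come from the Excel
# sheet): each case returned by Case().get_case() is a 4-item list such as
#   ['user@example.com', 'secret123', 'login fail', 'Wrong email or password']
# i.e. [email, password, assert_type, assert_message], matching the parameters
# unpacked into test_login() below.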
class Login(object):
def __init__(self,driver):
self.driver=driver
def login(self,email,password):
"""登录步骤"""
#driver=webdriver.Chrome()
#self.driver=driver
#邮箱地址、密码、点击登录按钮操作
time.sleep(1)
if email!=None:
email_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input')
email_element.send_keys(email)
time.sleep(1)
if password!=None:
password_element=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input')
password_element.send_keys(password)
time.sleep(1)
login_btn=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input')
login_btn.click()
def login_assert(self,assert_type,assert_message):
"""登录断言"""
time.sleep(1)
if assert_type=='email error':
email_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[1]/input').text
assert email_message==assert_message
elif assert_type=='password error':
password_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[2]/input').text
assert password_message==assert_message
elif assert_type=='login sucess'or assert_type=='login fail':
login_message=self.driver.find_element_by_xpath('//*[@id="app"]/div/div[1]/div/div[3]/input').text
assert login_message==assert_message
else:
print("输入的断言类型不正确")
@ddt
class TestLogin(unittest.TestCase):
    """Test the login flow."""
def setUp(self):
self.driver=webdriver.Chrome()
url="http://192.168.0.30:18069"
self.driver.implicitly_wait(20)
self.driver.maximize_window()
self.driver.get(url=url)
def tearDown(self):
self.driver.quit()
case=Case().get_case()
@data(*case)
@unpack
    def test_login(self, email, password, assert_type, assert_message):
login=Login(driver=self.driver)
login.login(email=email,password=password)
login.login_assert(assert_type=assert_type,assert_message=assert_message)
if __name__=='__main__':
    unittest.main() | [
"[email protected]"
] | |
2d24c2b1849fbb578426985672e634ca4e13e282 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/keyword/api/__init__.py | d86d7640ef2ab230105e5b576757bc5d81a011fe | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 151 | py | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from baiduads.keyword.api.keyword_service import KeywordService
| [
"[email protected]"
] | |
6bb7357e4c3c78a71da4398592fc78ff38a7ab5c | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /gaussiana/ch3_2020_09_14_14_36_41_642784.py | 986bff292e3d397ff9a597fd31a1ee3912e49175 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py | import math
def calcula_gaussiana(x, mu, sigma):
    # Gaussian PDF: (1 / (sigma*sqrt(2*pi))) * exp(-0.5*((x - mu)/sigma)**2)
    # e.g. calcula_gaussiana(0, 0, 1) is roughly 0.3989
    f1 = 1/(sigma*math.sqrt(2*math.pi))
    f2 = math.exp(-0.5*((x - mu)/sigma)**2)
    y = f1*f2
return y | [
"[email protected]"
] | |
91b306ecb2af69f0d6d781d57251266678f159f2 | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/syntaxnet/dragnn/python/file_diff_test.py | 9e9f1daa40a64ff9595724e30dbc95591ae299c2 | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 1,631 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diff test that compares two files are identical."""
from absl import flags
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string('actual_file', None, 'File to test.')
flags.DEFINE_string('expected_file', None, 'File with expected contents.')
class DiffTest(tf.test.TestCase):
def testEqualFiles(self):
content_actual = None
content_expected = None
try:
with open(FLAGS.actual_file) as actual:
content_actual = actual.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.actual_file, e.strerror))
try:
with open(FLAGS.expected_file) as expected:
content_expected = expected.read()
except IOError as e:
self.fail("Error opening '%s': %s" % (FLAGS.expected_file, e.strerror))
self.assertTrue(content_actual == content_expected)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
22e70becf6b691016982f2b828b13d8eeaf45564 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02571/s663642129.py | 60a84cc30f58c36b037db16bb95f49473b02d187 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py |
def main():
s = input()
t = input()
min = int(1e9)
for i in range(len(s)-len(t)+1):
cnt = 0
for j in range(len(t)):
if s[i+j] != t[j]:
cnt += 1
if min > cnt:
min = cnt
print(min)
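# Worked example (illustrative): for S = "cabacc" and T = "abc", the length-3
# windows of S are "cab", "aba", "bac", "acc", which differ from T in 3, 1, 2
# and 1 positions respectively, so the minimum number of characters that must
# be changed for T to appear as a substring of S is 1.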
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
200e9917ea1a71489173315c12ac6c736aac3a7c | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/PyBox/pybox2d/library/Box2D/examples/chain.py | c1f19e55dbac3e2fa63532f8b24c48d5d1e22b19 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e79af0d06dbe1710b8ba767355096adc26f63f6435e754284e2a3caa01b35291
size 2366
| [
"[email protected]"
] | |
cf37aac9d227dfbd4c7430df7abe6facb7d78387 | 9bb01fa882e713aa59345051fec07f4e3d3478b0 | /tests/cysparse_/sparse/memory/test_copy_CSCSparseMatrix_INT32_t_COMPLEX64_t.py | 647b1079524c4d905c0e53d370b23d6cd9d3eca0 | [] | no_license | syarra/cysparse | f1169c496b54d61761fdecbde716328fd0fb131b | 7654f7267ab139d0564d3aa3b21c75b364bcfe72 | refs/heads/master | 2020-05-25T16:15:38.160443 | 2017-03-14T21:17:39 | 2017-03-14T21:17:39 | 84,944,993 | 0 | 0 | null | 2017-03-14T12:11:48 | 2017-03-14T12:11:48 | null | UTF-8 | Python | false | false | 4,646 | py | #!/usr/bin/env python
"""
This file tests ``copy()`` for all sparse-likes objects.
"""
import unittest
from cysparse.sparse.ll_mat import *
from cysparse.common_types.cysparse_types import *
########################################################################################################################
# Tests
########################################################################################################################
#######################################################################
# Case: store_symmetry == False, Store_zero==False
#######################################################################
class CySparseCopyNoSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==False
#######################################################################
class CySparseCopyWithSymmetryNoZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == False, Store_zero==True
#######################################################################
class CySparseCopyNoSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.nrow = 10
self.ncol = 14
self.A = LinearFillLLSparseMatrix(nrow=self.nrow, ncol=self.ncol, dtype=COMPLEX64_T, itype=INT32_T, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.nrow):
for j in range(self.ncol):
self.assertTrue(self.C[i, j] == C_copy[i, j])
#######################################################################
# Case: store_symmetry == True, Store_zero==True
#######################################################################
class CySparseCopyWithSymmetrySWithZero_CSCSparseMatrix_INT32_t_COMPLEX64_t_TestCase(unittest.TestCase):
def setUp(self):
self.size = 10
self.A = LinearFillLLSparseMatrix(size=self.size, dtype=COMPLEX64_T, itype=INT32_T, store_symmetry=True, store_zero=True)
self.C = self.A.to_csc()
def test_copy_not_same_reference(self):
"""
Test we have a real deep copy for matrices and views and proxies are singletons.
Warning:
If the matrix element type is real, proxies may not be returned.
"""
self.assertTrue(id(self.C) != id(self.C.copy()))
def test_copy_element_by_element(self):
C_copy = self.C.copy()
for i in range(self.size):
for j in range(self.size):
self.assertTrue(self.C[i, j] == C_copy[i, j])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
1bb7b97ff0d7ed871f4280d115fe7d2651c8300f | e2334e514d9a0321fc834d6398519fa86dc1ba93 | /cira_ml_short_course/utils/upconvnet.py | 2c80a660190e61d2e1945a456101ea1ecc85d46e | [
"MIT"
] | permissive | ChanJeunlam/cira_ml_short_course | 4fc99da5a6e051a51fe7fdc307df17eeb06516eb | 23741f7ebba9dde8e4f5985ed43bed50b4f99cc3 | refs/heads/master | 2023-04-30T20:33:37.974674 | 2021-05-10T17:14:36 | 2021-05-10T17:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,151 | py | """Helper methods for upconvnets (upconvolutional networks)."""
import copy
import numpy
import keras.models
from cira_ml_short_course.utils import cnn, utils, image_utils, \
image_normalization
KERNEL_INITIALIZER_NAME = cnn.KERNEL_INITIALIZER_NAME
BIAS_INITIALIZER_NAME = cnn.BIAS_INITIALIZER_NAME
PLATEAU_PATIENCE_EPOCHS = cnn.PLATEAU_PATIENCE_EPOCHS
PLATEAU_LEARNING_RATE_MULTIPLIER = cnn.PLATEAU_LEARNING_RATE_MULTIPLIER
PLATEAU_COOLDOWN_EPOCHS = cnn.PLATEAU_COOLDOWN_EPOCHS
EARLY_STOPPING_PATIENCE_EPOCHS = cnn.EARLY_STOPPING_PATIENCE_EPOCHS
LOSS_PATIENCE = cnn.LOSS_PATIENCE
DEFAULT_INPUT_DIMENSIONS = numpy.array([4, 4, 256], dtype=int)
DEFAULT_CONV_BLOCK_LAYER_COUNTS = numpy.array([2, 2, 2, 2], dtype=int)
DEFAULT_CONV_CHANNEL_COUNTS = numpy.array(
[256, 128, 128, 64, 64, 32, 32, 4], dtype=int
)
DEFAULT_CONV_DROPOUT_RATES = numpy.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0])
DEFAULT_CONV_FILTER_SIZES = numpy.full(8, 3, dtype=int)
DEFAULT_INNER_ACTIV_FUNCTION_NAME = copy.deepcopy(utils.RELU_FUNCTION_NAME)
DEFAULT_INNER_ACTIV_FUNCTION_ALPHA = 0.2
DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME = None
DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA = 0.
DEFAULT_L1_WEIGHT = 0.
DEFAULT_L2_WEIGHT = 0.001
def _get_transposed_conv_layer(
num_rows_in_filter, num_columns_in_filter, upsampling_factor,
num_filters, weight_regularizer=None):
"""Creates layer for 2-D transposed convolution.
:param num_rows_in_filter: Number of rows in each filter (kernel).
:param num_columns_in_filter: Number of columns in each filter (kernel).
:param upsampling_factor: Upsampling factor (integer >= 1).
:param num_filters: Number of filters (output channels).
:param weight_regularizer: Will be used to regularize weights in the new
layer. This may be instance of `keras.regularizers` or None (if you
want no regularization).
:return: layer_object: Instance of `keras.layers.Conv2DTranspose`.
"""
return keras.layers.Conv2DTranspose(
filters=num_filters,
kernel_size=(num_rows_in_filter, num_columns_in_filter),
strides=(upsampling_factor, upsampling_factor),
padding='same',
dilation_rate=(1, 1), activation=None, use_bias=True,
kernel_initializer=KERNEL_INITIALIZER_NAME,
bias_initializer=BIAS_INITIALIZER_NAME,
kernel_regularizer=weight_regularizer,
bias_regularizer=weight_regularizer
)
def _get_upsampling_layer(upsampling_factor):
"""Creates layer for 2-D upsampling.
:param upsampling_factor: Upsampling factor (integer >= 1).
:return: layer_object: Instance of `keras.layers.Upsampling2D`.
"""
try:
return keras.layers.UpSampling2D(
size=(upsampling_factor, upsampling_factor),
data_format='channels_last', interpolation='bilinear'
)
except:
return keras.layers.UpSampling2D(
size=(upsampling_factor, upsampling_factor),
data_format='channels_last'
)
def setup_upconvnet(
input_dimensions=DEFAULT_INPUT_DIMENSIONS,
conv_block_layer_counts=DEFAULT_CONV_BLOCK_LAYER_COUNTS,
conv_layer_channel_counts=DEFAULT_CONV_CHANNEL_COUNTS,
conv_layer_dropout_rates=DEFAULT_CONV_DROPOUT_RATES,
conv_layer_filter_sizes=DEFAULT_CONV_FILTER_SIZES,
inner_activ_function_name=DEFAULT_INNER_ACTIV_FUNCTION_NAME,
inner_activ_function_alpha=DEFAULT_INNER_ACTIV_FUNCTION_ALPHA,
output_activ_function_name=DEFAULT_OUTPUT_ACTIV_FUNCTION_NAME,
output_activ_function_alpha=DEFAULT_OUTPUT_ACTIV_FUNCTION_ALPHA,
l1_weight=DEFAULT_L1_WEIGHT, l2_weight=DEFAULT_L2_WEIGHT,
use_transposed_conv=True, use_batch_norm_inner=True,
use_batch_norm_output=True):
"""Sets up (but does not train) upconvnet.
This method sets up the architecture, loss function, and optimizer.
B = number of convolutional blocks
C = number of convolutional layers
D = number of dense layers
:param input_dimensions: numpy array with dimensions of input data. Entries
should be (num_grid_rows, num_grid_columns, num_channels).
:param conv_block_layer_counts: length-B numpy array with number of
convolutional layers in each block. Remember that each conv block
except the last upsamples the image by a factor of 2.
:param conv_layer_channel_counts: length-C numpy array with number of
channels (filters) produced by each convolutional layer.
:param conv_layer_dropout_rates: length-C numpy array of dropout rates. To
turn off dropout for a given layer, use NaN or a non-positive number.
:param conv_layer_filter_sizes: length-C numpy array of filter sizes. All
filters will be square (num rows = num columns).
:param inner_activ_function_name: Name of activation function for all inner
(non-output) layers.
:param inner_activ_function_alpha: Alpha (slope parameter) for
activation function for all inner layers. Applies only to ReLU and eLU.
:param output_activ_function_name: Same as `inner_activ_function_name` but
for output layer. This may be None.
:param output_activ_function_alpha: Same as `inner_activ_function_alpha` but
for output layer.
:param l1_weight: Weight for L_1 regularization.
:param l2_weight: Weight for L_2 regularization.
:param use_transposed_conv: Boolean flag. If True (False), will use
transposed convolution (upsampling followed by normal convolution).
:param use_batch_norm_inner: Boolean flag. If True, will use batch
normalization after each inner layer.
:param use_batch_norm_output: Same but for output layer.
:return: model_object: Untrained instance of `keras.models.Model`.
"""
num_conv_layers = len(conv_layer_channel_counts)
assert numpy.sum(conv_block_layer_counts) == num_conv_layers
num_input_rows = input_dimensions[0]
num_input_columns = input_dimensions[1]
num_input_channels = input_dimensions[2]
input_layer_object = keras.layers.Input(
shape=(numpy.prod(input_dimensions),)
)
regularizer_object = utils._get_weight_regularizer(
l1_weight=l1_weight, l2_weight=l2_weight
)
layer_object = keras.layers.Reshape(
target_shape=(num_input_rows, num_input_columns, num_input_channels)
)(input_layer_object)
for i in range(num_conv_layers):
if (
i + 1 in numpy.cumsum(conv_block_layer_counts)
and i != num_conv_layers - 1
):
if use_transposed_conv:
layer_object = _get_transposed_conv_layer(
num_rows_in_filter=conv_layer_filter_sizes[i],
num_columns_in_filter=conv_layer_filter_sizes[i],
upsampling_factor=2,
num_filters=conv_layer_channel_counts[i],
weight_regularizer=regularizer_object
)(layer_object)
else:
layer_object = _get_upsampling_layer(
upsampling_factor=2
)(layer_object)
layer_object = cnn._get_2d_conv_layer(
num_rows_in_filter=conv_layer_filter_sizes[i],
num_columns_in_filter=conv_layer_filter_sizes[i],
num_rows_per_stride=1, num_columns_per_stride=1,
num_filters=conv_layer_channel_counts[i],
use_edge_padding=True,
weight_regularizer=regularizer_object
)(layer_object)
else:
layer_object = cnn._get_2d_conv_layer(
num_rows_in_filter=conv_layer_filter_sizes[i],
num_columns_in_filter=conv_layer_filter_sizes[i],
num_rows_per_stride=1, num_columns_per_stride=1,
num_filters=conv_layer_channel_counts[i], use_edge_padding=True,
weight_regularizer=regularizer_object
)(layer_object)
if i == num_conv_layers - 1:
if output_activ_function_name is not None:
layer_object = utils._get_activation_layer(
function_name=output_activ_function_name,
slope_param=output_activ_function_alpha
)(layer_object)
else:
layer_object = utils._get_activation_layer(
function_name=inner_activ_function_name,
slope_param=inner_activ_function_alpha
)(layer_object)
if conv_layer_dropout_rates[i] > 0:
layer_object = utils._get_dropout_layer(
dropout_fraction=conv_layer_dropout_rates[i]
)(layer_object)
if i != num_conv_layers - 1 and use_batch_norm_inner:
layer_object = utils._get_batch_norm_layer()(layer_object)
if i == num_conv_layers - 1 and use_batch_norm_output:
layer_object = utils._get_batch_norm_layer()(layer_object)
model_object = keras.models.Model(
inputs=input_layer_object, outputs=layer_object
)
model_object.compile(
loss=keras.losses.mean_squared_error,
optimizer=keras.optimizers.Adam()
)
model_object.summary()
return model_object
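# Usage sketch (illustrative only): build an upconvnet with the module defaults.
def _setup_upconvnet_example():
    """Returns an untrained upconvnet built from the default settings above.
    This assumes the paired CNN's flattening layer sees a 4-by-4-by-256 feature
    map (DEFAULT_INPUT_DIMENSIONS); real settings should match the CNN being
    inverted.
    """
    return setup_upconvnet(
        input_dimensions=DEFAULT_INPUT_DIMENSIONS,
        conv_block_layer_counts=DEFAULT_CONV_BLOCK_LAYER_COUNTS,
        use_transposed_conv=True)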
def create_data(image_file_names, normalization_dict, cnn_model_object):
"""Creates input data for upconvnet.
E = number of examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
Z = number of features (from CNN's flattening layer)
:param image_file_names: 1-D list of paths to input files (readable by
`image_utils.read_file`).
:param normalization_dict: Dictionary with params used to normalize
predictors. See doc for `image_normalization.normalize_data`.
:param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`). Inputs for upconvnet will be outputs from
CNN's flattening layer.
:return: feature_matrix: E-by-Z numpy array of features. These are inputs
for the upconvnet.
:return: target_matrix: E-by-M-by-N-by-C numpy array of target values.
These are targets for the upconvnet but inputs for the CNN.
"""
image_dict = image_utils.read_many_files(image_file_names)
target_matrix, _ = image_normalization.normalize_data(
predictor_matrix=image_dict[image_utils.PREDICTOR_MATRIX_KEY],
predictor_names=image_dict[image_utils.PREDICTOR_NAMES_KEY],
normalization_dict=normalization_dict
)
feature_matrix = cnn.apply_model(
model_object=cnn_model_object, predictor_matrix=target_matrix,
verbose=True,
output_layer_name=cnn.get_flattening_layer(cnn_model_object)
)
return feature_matrix, target_matrix
def train_model_sans_generator(
model_object, cnn_model_object, training_file_names,
validation_file_names, num_examples_per_batch, normalization_dict,
num_epochs, output_dir_name):
"""Trains upconvnet without generator.
:param model_object: Untrained upconvnet (instance of `keras.models.Model`
or `keras.models.Sequential`).
:param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param training_file_names: 1-D list of paths to training files (readable by
`image_utils.read_file`).
:param validation_file_names: Same but for validation files.
:param num_examples_per_batch: Batch size.
:param normalization_dict: See doc for `create_data`.
:param num_epochs: Number of epochs.
:param output_dir_name: Path to output directory (model will be saved here).
"""
utils._mkdir_recursive_if_necessary(directory_name=output_dir_name)
model_file_name = (
output_dir_name + '/model_epoch={epoch:03d}_val-loss={val_loss:.6f}.h5'
)
history_object = keras.callbacks.CSVLogger(
filename='{0:s}/history.csv'.format(output_dir_name),
separator=',', append=False
)
checkpoint_object = keras.callbacks.ModelCheckpoint(
filepath=model_file_name, monitor='val_loss', verbose=1,
save_best_only=True, save_weights_only=False, mode='min', period=1
)
early_stopping_object = keras.callbacks.EarlyStopping(
monitor='val_loss', min_delta=LOSS_PATIENCE,
patience=EARLY_STOPPING_PATIENCE_EPOCHS, verbose=1, mode='min'
)
plateau_object = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', factor=PLATEAU_LEARNING_RATE_MULTIPLIER,
patience=PLATEAU_PATIENCE_EPOCHS, verbose=1, mode='min',
min_delta=LOSS_PATIENCE, cooldown=PLATEAU_COOLDOWN_EPOCHS
)
list_of_callback_objects = [
history_object, checkpoint_object, early_stopping_object, plateau_object
]
training_feature_matrix, training_target_matrix = create_data(
image_file_names=training_file_names,
normalization_dict=normalization_dict,
cnn_model_object=cnn_model_object
)
print('\n')
validation_feature_matrix, validation_target_matrix = create_data(
image_file_names=validation_file_names,
normalization_dict=normalization_dict,
cnn_model_object=cnn_model_object
)
print('\n')
model_object.fit(
x=training_feature_matrix, y=training_target_matrix,
batch_size=num_examples_per_batch, epochs=num_epochs,
steps_per_epoch=None, shuffle=True, verbose=1,
callbacks=list_of_callback_objects,
validation_data=(validation_feature_matrix, validation_target_matrix),
validation_steps=None
)
def read_model(hdf5_file_name):
"""Reads model from HDF5 file.
:param hdf5_file_name: Path to input file.
"""
return keras.models.load_model(hdf5_file_name)
def apply_model(model_object, cnn_model_object, cnn_predictor_matrix,
verbose=True):
"""Applies trained upconvnet to new data.
E = number of examples (storm objects)
M = number of rows in each storm-centered grid
N = number of columns in each storm-centered grid
C = number of channels (predictor variables)
:param model_object: Trained upconvnet (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param cnn_model_object: Trained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param cnn_predictor_matrix: E-by-M-by-N-by-C numpy array of predictor
values for CNN.
:param verbose: Boolean flag. If True, will print progress messages.
:return: reconstructed_predictor_matrix: Upconvnet reconstruction of
`cnn_predictor_matrix`.
"""
num_examples = cnn_predictor_matrix.shape[0]
num_examples_per_batch = 1000
reconstructed_predictor_matrix = numpy.full(
cnn_predictor_matrix.shape, numpy.nan
)
for i in range(0, num_examples, num_examples_per_batch):
this_first_index = i
this_last_index = min(
[i + num_examples_per_batch - 1, num_examples - 1]
)
if verbose:
print((
'Applying upconvnet to examples {0:d}-{1:d} of {2:d}...'
).format(
this_first_index, this_last_index, num_examples
))
these_indices = numpy.linspace(
this_first_index, this_last_index,
num=this_last_index - this_first_index + 1, dtype=int
)
this_feature_matrix = cnn.apply_model(
model_object=cnn_model_object,
predictor_matrix=cnn_predictor_matrix[these_indices, ...],
verbose=False,
output_layer_name=cnn.get_flattening_layer(cnn_model_object)
)
reconstructed_predictor_matrix[these_indices, ...] = (
model_object.predict(
this_feature_matrix, batch_size=len(these_indices)
)
)
if verbose:
print('Have applied upconvnet to all {0:d} examples!'.format(
num_examples
))
return reconstructed_predictor_matrix
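# Usage sketch: the HDF5 file names are hypothetical placeholders, and
# `predictor_matrix` stands for an E-by-M-by-N-by-C numpy array of CNN inputs.
#
#     cnn_model_object = read_model('pretrained_cnn.h5')
#     ucn_model_object = read_model('pretrained_upconvnet.h5')
#     reconstructed_matrix = apply_model(
#         model_object=ucn_model_object,
#         cnn_model_object=cnn_model_object,
#         cnn_predictor_matrix=predictor_matrix)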
| [
"[email protected]"
] | |
4e554d1fb9a88ed2d04b9397feb311493507f223 | 289da5146b8991942ba22eefe948289ee024d3ff | /sheng/tutorial/L3函数/8 global.py | 380ea400f5deb82c17c96c689facbc7d471efff3 | [] | no_license | a1424186319/tutorial | 263585961ab40e7a9a55405263d80057a88298d4 | 909bfc9f850118af7892a7ba4b0f7e3d0798db8a | refs/heads/master | 2022-12-09T01:05:36.063099 | 2019-02-18T12:12:52 | 2019-02-18T12:12:52 | 166,967,437 | 0 | 0 | null | 2021-06-01T23:20:20 | 2019-01-22T09:38:51 | Python | UTF-8 | Python | false | false | 500 | py | #
# (old approach: a is a global variable) sum the numbers from 1 to 100
# a = 0
# for i in range(1,101):
# a = a + i
# print(a)
## global: explicitly declare a variable as global
# total = 0
# def add1(n):
# global total
# total = total + 1
# add1()
# add1()
# add1()
# print(total)
## nonlocal (rebinds a name from the enclosing, non-global scope) https://www.cnblogs.com/saintdingspage/p/7788958.html
def outer():
num = 10
def inner():
nonlocal num
num = 100
print(num)
inner()
print(num)
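# A minimal extra sketch contrasting the two keywords; the names below are
# arbitrary illustrations.
count = 0
def bump():
    global count  # rebinds the module-level name
    count = count + 1
def make_counter():
    n = 0
    def inc():
        nonlocal n  # rebinds the name in the enclosing function scope
        n = n + 1
        return n
    return inc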
outer() | [
"[email protected]"
] | |
86dce18c7b5d76d01f32df22306412f7ca2feb73 | d7d19d6918029de88bcf060cea23d5b4a1f7efb1 | /xiab/apps/subjects/models.py | 85c54cc05e21150cfe80e2ddb9d412d7c622452e | [] | no_license | petercollingridge/xiab | 8abe2b2b7124eeb0cfa06d2f21ce858a4ffbd975 | ae84d3d228f3fe9392d0fd894652e290b219b1d2 | refs/heads/master | 2020-03-26T04:25:28.163381 | 2019-09-29T16:20:25 | 2019-09-29T16:20:25 | 144,503,055 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | from django.db import models
from wagtail.core.models import Page
from wagtail.core.fields import RichTextField
from wagtail.admin.edit_handlers import FieldPanel
class SubjectPage(Page):
summary = RichTextField(blank=True)
content_panels = Page.content_panels + [
FieldPanel('summary'),
]
def get_context(self, request):
# Update context to include only published posts
context = super().get_context(request)
context['children'] = self.get_children().live()
return context
| [
"[email protected]"
] | |
3999cde4262817329bdd68fd5ae82079cf8e5078 | 1b382fa35424074f6e93d5efa26412057507ef7e | /brax/experimental/composer/composer.py | 4a850f3ccb8b2b5020d9be7537077256b6e02021 | [
"Apache-2.0"
] | permissive | LARS12llt/brax | 91f2914f78480308930dc83435f076de8a55b470 | 8cf936d60a393f586daa145e8f378c7aa4bafce6 | refs/heads/main | 2023-07-27T22:49:59.609896 | 2021-09-17T11:16:49 | 2021-09-17T15:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,125 | py | # Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Composer for environments.
ComponentEnv composes a scene from descriptions of the form below:
composer = Composer(
components=dict(
ant1=dict(component='ant', pos=(0, 1, 0)),
ant2=dict(component='ant', pos=(0, -1, 0)),
),
edges=dict(ant1__ant2=dict(collide_type='full'),),
)
env = ComposeEnv(composer=composer)
(more examples available in experimental/composer/env_descs.py)
During loading, it:
- creates components: loads and pieces together brax.Config()
components defined in experimental/composer/components/
such as ant.py or ground.py
- support multiple instances of the same component through suffixes
- each component requires: ROOT=root body, SYS_CONFIG=config in string form,
TERM_FN=termination function of this component, COLLIDES=bodies that
are allowed to collide, DEFAULT_OBSERVERS=a list of observers (
see experimental/composer/observers.py for references)
- creates edges: automatically create necessary edge information
between components, such as collide_include's in brax.Config()
- optionally edge information can be supplied,
    e.g. `collide_type`={'full', 'root', None} specifying full collisions,
collision only between roots, or no collision between two components
- sets termination as any(termination_fn of each component)
- sets observation to concatenation of observations of each component defined
by each component's `observers` argument
"""
import collections
import copy
import functools
import itertools
from typing import Dict, Any, Callable, Tuple
import brax
from brax import envs
from brax.envs import Env
from brax.envs import State
from brax.experimental.braxlines.common import sim_utils
from brax.experimental.composer import component_editor
from brax.experimental.composer import env_descs
from brax.experimental.composer import observers
import jax
from jax import numpy as jnp
MetaData = collections.namedtuple('MetaData', [
'components', 'edges', 'global_options', 'config_str', 'config_json',
'extra_observers'
])
class Composer(object):
"""Compose a brax system."""
def __init__(self,
components: Dict[str, Dict[str, Any]],
edges: Dict[str, Dict[str, Any]] = None,
extra_observers: Tuple[observers.Observer] = (),
add_ground: bool = True,
global_options: Dict[str, Any] = None):
components = copy.deepcopy(components)
edges = copy.deepcopy(edges or {})
# load components
if add_ground:
components['ground'] = dict(component='ground')
components = {
name: component_editor.load_component(**value)
for name, value in components.items()
}
component_keys = sorted(components.keys())
components_ = collections.OrderedDict([
(k, components[k]) for k in component_keys
])
# set global
v = dict(
json=component_editor.json_global_options(**(global_options or {})))
v['message_str'] = component_editor.json2message_str(v['json'])
global_options_ = v
for k, v in components_.items():
# convert to json format for easy editing
v['json'] = component_editor.message_str2json(v['message_str'])
# add suffices
suffix = v.get('suffix', k)
if suffix:
rename_fn = functools.partial(
component_editor.json_add_suffix, suffix=suffix)
v['json'] = rename_fn(v['json'])
v['collides'] = rename_fn(v['collides'], force_add=True)
v['root'] = rename_fn(v['root'], force_add=True)
v['bodies'] = [b['name'] for b in v['json'].get('bodies', [])]
v['joints'] = [b['name'] for b in v['json'].get('joints', [])]
v['suffix'] = suffix
# convert back to str
v['message_str'] = component_editor.json2message_str(v['json'])
# set transform or not
if 'pos' in v or 'quat' in v:
v['transform'] = True
v['pos'] = jnp.array(v.get('pos', [0, 0, 0]), dtype='float')
v['quat_origin'] = jnp.array(
v.get('quat_origin', [0, 0, 0]), dtype='float')
v['quat'] = jnp.array(v.get('quat', [1., 0., 0., 0.]), dtype='float')
else:
v['transform'] = False
edges_ = {}
for k1, k2 in itertools.combinations(list(components_.keys()), 2):
if k1 == k2:
continue
k1, k2 = sorted([k1, k2]) # ensure the name is always sorted in order
edge_name = f'{k1}__{k2}'
v, new_v = edges.pop(edge_name, {}), {}
v1, v2 = [components_[k] for k in [k1, k2]]
collide_type = v.pop('collide_type', 'full')
v_json = {}
# add colliders
if collide_type == 'full':
v_json.update(
component_editor.json_collides(v1['collides'], v2['collides']))
elif collide_type == 'root':
v_json.update(
component_editor.json_collides([v1['root']], [v2['root']]))
else:
assert not collide_type, collide_type
if v_json:
# convert back to str
new_v['message_str'] = component_editor.json2message_str(v_json)
else:
new_v['message_str'] = ''
new_v['json'] = v_json
assert not v, f'unused edges[{edge_name}]: {v}'
edges_[edge_name] = new_v
assert not edges, f'unused edges: {edges}'
edge_keys = sorted(edges_.keys())
edges_ = collections.OrderedDict([(k, edges_[k]) for k in edge_keys])
# merge all message strs
message_str = ''
for _, v in sorted(components_.items()):
message_str += v.get('message_str', '')
for _, v in sorted(edges_.items()):
message_str += v.get('message_str', '')
message_str += global_options_.get('message_str', '')
config_str = message_str
config_json = component_editor.message_str2json(message_str)
metadata = MetaData(
components=components_,
edges=edges_,
global_options=global_options_,
config_str=config_str,
config_json=config_json,
extra_observers=extra_observers,
)
config = component_editor.message_str2message(message_str)
self.config, self.metadata = config, metadata
def reset_fn(self, sys, qp: brax.QP):
"""Reset state."""
# apply translations and rotations
for _, v in sorted(self.metadata.components.items()):
if v['transform']:
_, _, mask = sim_utils.names2indices(sys.config, v['bodies'], 'body')
qp = sim_utils.transform_qp(qp, mask[..., None], v['quat'],
v['quat_origin'], v['pos'])
return qp
def term_fn(self, done: jnp.ndarray, sys, qp: brax.QP, info: brax.Info):
"""Termination."""
for k, v in self.metadata.components.items():
term_fn = v['term_fn']
if term_fn:
done = term_fn(done, sys, qp, info, k)
return done
def obs_fn(self, sys, qp: brax.QP, info: brax.Info):
"""Return observation as OrderedDict."""
cached_obs_dict = {}
obs_dict = collections.OrderedDict()
for _, v in self.metadata.components.items():
for observer in v['observers']:
obs_dict_ = observers.get_obs_dict(sys, qp, info, observer,
cached_obs_dict, v)
obs_dict = collections.OrderedDict(
list(obs_dict.items()) + list(obs_dict_.items()))
for observer in self.metadata.extra_observers:
obs_dict_ = observers.get_obs_dict(sys, qp, info, observer,
cached_obs_dict, None)
obs_dict = collections.OrderedDict(
list(obs_dict.items()) + list(obs_dict_.items()))
return obs_dict
class ComponentEnv(Env):
"""Make a brax Env fromc config/metadata for training and inference."""
def __init__(self, composer: Composer, *args, **kwargs):
self.observer_shapes = None
self.composer = composer
super().__init__(
*args, config=self.composer.metadata.config_str, **kwargs)
def reset(self, rng: jnp.ndarray) -> State:
"""Resets the environment to an initial state."""
qp = self.sys.default_qp()
qp = self.composer.reset_fn(self.sys, qp)
info = self.sys.info(qp)
obs = self._get_obs(qp, info)
reward, done = jnp.zeros(2)
metrics = {}
return State(qp, obs, reward, done, metrics, info)
def step(self, state: State, action: jnp.ndarray) -> State:
"""Run one timestep of the environment's dynamics."""
qp, info = self.sys.step(state.qp, action)
obs = self._get_obs(qp, info)
reward = 0.0
done = False
done = self.composer.term_fn(done, self.sys, qp, info)
metrics = {}
return State(qp, obs, reward, done, metrics, info)
def _get_obs(
self,
qp: brax.QP,
info: brax.Info,
) -> jnp.ndarray:
"""Observe."""
obs_dict = self.composer.obs_fn(self.sys, qp, info)
if self.observer_shapes is None:
self.observer_shapes = observers.get_obs_dict_shape(obs_dict)
return jnp.concatenate(list(obs_dict.values()))
def get_env_obs_dict_shape(env: Env):
"""Gets an Env's observation shape(s)."""
if isinstance(env, ComponentEnv):
assert env.observation_size # ensure env.observer_shapes is set
return env.observer_shapes
else:
return (env.observation_size,)
def create(env_name: str = None,
components: Dict[str, Dict[str, Any]] = None,
edges: Dict[str, Dict[str, Any]] = None,
add_ground: bool = True,
global_options: Dict[str, Any] = None,
**kwargs) -> Env:
"""Creates an Env with a specified brax system."""
if env_name in env_descs.ENV_DESCS:
composer = Composer(
add_ground=add_ground,
global_options=global_options,
**env_descs.ENV_DESCS[env_name])
return ComponentEnv(composer=composer, **kwargs)
elif components:
composer = Composer(
components=components,
edges=edges,
add_ground=add_ground,
global_options=global_options)
return ComponentEnv(composer=composer, **kwargs)
else:
return envs.create(env_name, **kwargs)
def create_fn(env_name: str = None,
components: Dict[str, Dict[str, Any]] = None,
edges: Dict[str, Dict[str, Any]] = None,
add_ground: bool = True,
global_options: Dict[str, Any] = None,
**kwargs) -> Callable[..., Env]:
"""Returns a function that when called, creates an Env."""
return functools.partial(
create,
env_name=env_name,
components=components,
edges=edges,
add_ground=add_ground,
global_options=global_options,
**kwargs)
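# Usage sketch, mirroring the example in the module docstring above (the
# component names come from that example; nothing here runs at import time):
#
#     env = create(
#         components=dict(
#             ant1=dict(component='ant', pos=(0, 1, 0)),
#             ant2=dict(component='ant', pos=(0, -1, 0)),
#         ),
#         edges=dict(ant1__ant2=dict(collide_type='full')),
#     )
#     state = env.reset(rng=jax.random.PRNGKey(0))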
| [
"[email protected]"
] | |
8aa017b49485a93529f5842ebd6c1605b6019aba | e63c45db069ea20b41fb850c5940e6f99db94914 | /TranskribusDU/tasks/TablePrototypes/DU_Table_Row.py | c69734cdcc09f2b14bb86df4a56c86e3b895773d | [
"BSD-3-Clause"
] | permissive | Transkribus/TranskribusDU | 669607cc32af98efe7380831d15b087b3fc326c9 | 9f2fed81672dc222ca52ee4329eac3126b500d21 | refs/heads/master | 2021-12-29T10:14:49.153914 | 2021-12-22T10:53:10 | 2021-12-22T10:53:10 | 72,862,342 | 24 | 6 | BSD-3-Clause | 2019-07-22T08:49:02 | 2016-11-04T15:52:04 | Python | UTF-8 | Python | false | false | 5,449 | py | # -*- coding: utf-8 -*-
"""
*** Same as its parent, except that text baselines are reflected as a LineString (instead of their centroids)
DU task for ABP Table:
jointly doing row BIO and near-horizontal cut SIO labelling
block2line edges do not cross another block.
The cuts are based on the baselines of text blocks, with some positive or negative inclination.
- the labels of cuts are SIO
Copyright Naver Labs Europe(C) 2018 JL Meunier
Developed for the EU project READ. The READ project has received funding
from the European Union's Horizon 2020 research and innovation programme
under grant agreement No 674943.
"""
import sys, os
import math
try: #to ease the use without proper Python installation
import TranskribusDU_version
except ImportError:
sys.path.append( os.path.dirname(os.path.dirname( os.path.abspath(sys.argv[0]) )) )
import TranskribusDU_version
TranskribusDU_version
from common.trace import traceln
from tasks import _exit
from tasks.DU_CRF_Task import DU_CRF_Task
from tasks.DU_Table.DU_ABPTableSkewed import GraphSkewedCut, main
from tasks.DU_Table.DU_ABPTableSkewed_CutAnnotator import SkewedCutAnnotator
from tasks.DU_Table.DU_ABPTableSkewed_txtBIO_sepSIO_line import DU_ABPTableSkewedRowCutLine
from tasks.DU_Table.DU_ABPTableSkewed_txtBIOH_sepSIO_line import DU_ABPTableSkewedRowCutLine_BIOH
# ----------------------------------------------------------------------------
if __name__ == "__main__":
version = "v.01"
usage, description, parser = DU_CRF_Task.getBasicTrnTstRunOptionParser(sys.argv[0], version)
# parser.add_option("--annotate", dest='bAnnotate', action="store_true",default=False, help="Annotate the textlines with BIES labels")
#FOR GCN
# parser.add_option("--revertEdges", dest='bRevertEdges', action="store_true", help="Revert the direction of the edges")
parser.add_option("--detail", dest='bDetailedReport', action="store_true", default=False,help="Display detailed reporting (score per document)")
parser.add_option("--baseline", dest='bBaseline', action="store_true", default=False, help="report baseline method")
parser.add_option("--line_see_line", dest='iLineVisibility', action="store",
type=int, default=GraphSkewedCut.iLineVisibility,
help="seeline2line: how far in pixel can a line see another cut line?")
parser.add_option("--block_see_line", dest='iBlockVisibility', action="store",
type=int, default=GraphSkewedCut.iBlockVisibility,
help="seeblock2line: how far in pixel can a block see a cut line?")
parser.add_option("--height", dest="fCutHeight", default=GraphSkewedCut.fCutHeight
, action="store", type=float, help="Minimal height of a cut")
# parser.add_option("--cut-above", dest='bCutAbove', action="store_true", default=False
# ,help="Each object defines one or several cuts above it (instead of below as by default)")
parser.add_option("--angle", dest='lsAngle'
, action="store", type="string", default="-1,0,+1"
,help="Allowed cutting angles, in degree, comma-separated")
parser.add_option("--graph", dest='bGraph', action="store_true", help="Store the graph in the XML for displaying it")
parser.add_option("--bioh", "--BIOH", dest='bBIOH', action="store_true", help="Text are categorised along BIOH instead of BIO")
parser.add_option("--text", "--txt", dest='bTxt', action="store_true", help="Use textual features.")
# ---
#parse the command line
(options, args) = parser.parse_args()
options.bCutAbove = True # Forcing this!
if options.bBIOH:
DU_CLASS = DU_ABPTableSkewedRowCutLine_BIOH
else:
DU_CLASS = DU_ABPTableSkewedRowCutLine
if options.bGraph:
import os.path
# hack
DU_CLASS.bCutAbove = options.bCutAbove
traceln("\t%s.bCutAbove=" % DU_CLASS.__name__, DU_CLASS.bCutAbove)
DU_CLASS.lRadAngle = [math.radians(v) for v in [float(s) for s in options.lsAngle.split(",")]]
traceln("\t%s.lRadAngle=" % DU_CLASS.__name__, DU_CLASS.lRadAngle)
for sInputFilename in args:
sp, sf = os.path.split(sInputFilename)
sOutFilename = os.path.join(sp, "graph-" + sf)
doer = DU_CLASS("debug", "."
, iBlockVisibility=options.iBlockVisibility
, iLineVisibility=options.iLineVisibility
, fCutHeight=options.fCutHeight
, bCutAbove=options.bCutAbove
, lRadAngle=[math.radians(float(s)) for s in options.lsAngle.split(",")]
, bTxt=options.bTxt)
o = doer.cGraphClass()
o.parseDocFile(sInputFilename, 9)
o.addEdgeToDoc()
print('Graph edges added to %s'%sOutFilename)
o.doc.write(sOutFilename, encoding='utf-8',pretty_print=True,xml_declaration=True)
SkewedCutAnnotator.gtStatReport()
exit(0)
# ---
try:
sModelDir, sModelName = args
except Exception as e:
traceln("Specify a model folder and a model name!")
_exit(usage, 1, e)
main(DU_CLASS, sModelDir, sModelName, options)
| [
"[email protected]"
] | |
5fb152a03b97239720932a800dcb93ed2841278e | fd6fab64e64031b319b7dc88b66ad960d30fdfc7 | /assignment02_ModelQueryProcess/run_assignment.py | 12b99e32a4e8faed2c013945d46efacf258c313c | [] | no_license | mkadhirvel/DSC650 | 297fa63da3668f91d9ce17c6195522dc21d8b5f2 | 75556e3a11a3b5801cad7df124dcc19df219934d | refs/heads/master | 2023-03-17T12:19:34.332707 | 2021-02-11T00:29:11 | 2021-02-11T00:29:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 268 | py | """
Author: Alan Danque
Date: 20201205
Class: DSC 650
Exercise: Week 2 Assignment - Run all assignments
"""
import os
os.system('python ./kvdb.py')
os.system('python ./documentdb.py')
os.system('python ./objectdb.py')
os.system('python ./rdbms.py')
| [
"[email protected]"
] | |
1beeb283036f8942d827ce37f399f0e69c19519f | ad5f3ed89e0fed30fa3e2eff6a4baa12e8391504 | /tensorflow/python/keras/applications/mobilenet.py | 224e8c84496ef63c1a35e1597b4b253dc1747dab | [
"Apache-2.0"
] | permissive | DunyaELBASAN/Tensorflow-C- | aa5c66b32f7e5dcfc93092021afee1bf3c97e04b | 7a435c0946bdd900e5c0df95cad64005c8ad22f9 | refs/heads/master | 2022-11-29T23:37:53.695820 | 2020-02-21T18:16:44 | 2020-02-21T18:21:51 | 242,206,767 | 1 | 0 | Apache-2.0 | 2022-11-21T22:39:51 | 2020-02-21T18:38:41 | C++ | UTF-8 | Python | false | false | 19,201 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications](https://arxiv.org/abs/1704.04861)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend
from tensorflow.python.keras import layers
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
BASE_WEIGHT_PATH = ('https://storage.googleapis.com/tensorflow/'
'keras-applications/mobilenet/')
@keras_export('keras.applications.mobilenet.MobileNet',
'keras.applications.MobileNet')
def MobileNet(input_shape=None,
alpha=1.0,
depth_multiplier=1,
dropout=1e-3,
include_top=True,
weights='imagenet',
input_tensor=None,
pooling=None,
classes=1000,
classifier_activation='softmax',
**kwargs):
"""Instantiates the MobileNet architecture.
Reference paper:
- [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision
Applications](https://arxiv.org/abs/1704.04861)
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in the `tf.keras.backend.image_data_format()`.
Arguments:
input_shape: Optional shape tuple, only to be specified if `include_top`
      is False (otherwise the input shape has to be `(224, 224, 3)` (with
      `channels_last` data format) or `(3, 224, 224)` (with `channels_first`
      data format)). It should have exactly 3 input channels, and width and
height should be no smaller than 32. E.g. `(200, 200, 3)` would be one
valid value. Default to `None`.
`input_shape` will be ignored if the `input_tensor` is provided.
alpha: Controls the width of the network. This is known as the width
multiplier in the MobileNet paper. - If `alpha` < 1.0, proportionally
decreases the number of filters in each layer. - If `alpha` > 1.0,
proportionally increases the number of filters in each layer. - If
`alpha` = 1, default number of filters from the paper are used at each
layer. Default to 1.0.
depth_multiplier: Depth multiplier for depthwise convolution. This is
called the resolution multiplier in the MobileNet paper. Default to 1.0.
dropout: Dropout rate. Default to 0.001.
include_top: Boolean, whether to include the fully-connected layer at the
top of the network. Default to `True`.
weights: One of `None` (random initialization), 'imagenet' (pre-training
on ImageNet), or the path to the weights file to be loaded. Default to
`imagenet`.
input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`) to
use as image input for the model. `input_tensor` is useful for sharing
inputs between multiple different networks. Default to None.
pooling: Optional pooling mode for feature extraction when `include_top`
is `False`.
- `None` (default) means that the output of the model will be
the 4D tensor output of the last convolutional block.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional block, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will be applied.
classes: Optional number of classes to classify images into, only to be
specified if `include_top` is True, and if no `weights` argument is
specified. Defaults to 1000.
classifier_activation: A `str` or callable. The activation function to use
on the "top" layer. Ignored unless `include_top=True`. Set
`classifier_activation=None` to return the logits of the "top" layer.
**kwargs: For backwards compatibility only.
Returns:
A `keras.Model` instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
ValueError: if `classifier_activation` is not `softmax` or `None` when
using a pretrained top layer.
"""
if 'layers' in kwargs:
global layers
layers = kwargs.pop('layers')
if kwargs:
raise ValueError('Unknown argument(s): %s' % (kwargs,))
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if backend.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = imagenet_utils.obtain_input_shape(
input_shape,
default_size=default_size,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if backend.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.25, 0.50, 0.75, 1.0]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.25`, `0.50`, `0.75` or `1.0` only.')
if rows != cols or rows not in [128, 160, 192, 224]:
rows = 224
logging.warning('`input_shape` is undefined or non-square, '
'or `rows` is not in [128, 160, 192, 224]. '
'Weights for input shape (224, 224) will be'
' loaded as the default.')
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if not backend.is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, strides=(2, 2))
x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
x = _depthwise_conv_block(
x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
x = _depthwise_conv_block(
x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
x = _depthwise_conv_block(
x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
x = _depthwise_conv_block(
x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)
if include_top:
if backend.image_data_format() == 'channels_first':
shape = (int(1024 * alpha), 1, 1)
else:
shape = (1, 1, int(1024 * alpha))
x = layers.GlobalAveragePooling2D()(x)
x = layers.Reshape(shape, name='reshape_1')(x)
x = layers.Dropout(dropout, name='dropout')(x)
x = layers.Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
x = layers.Reshape((classes,), name='reshape_2')(x)
imagenet_utils.validate_activation(classifier_activation, weights)
x = layers.Activation(activation=classifier_activation,
name='predictions')(x)
else:
if pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = layers.GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = layer_utils.get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = training.Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))
# Load weights.
if weights == 'imagenet':
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '2_5'
if include_top:
model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
else:
model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weight_path = BASE_WEIGHT_PATH + model_name
weights_path = data_utils.get_file(
model_name, weight_path, cache_subdir='models')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
"""Adds an initial convolution layer (with batch normalization and relu6).
Arguments:
inputs: Input tensor of shape `(rows, cols, 3)` (with `channels_last`
data format) or (3, rows, cols) (with `channels_first` data format).
It should have exactly 3 inputs channels, and width and height should
be no smaller than 32. E.g. `(224, 224, 3)` would be one valid value.
filters: Integer, the dimensionality of the output space (i.e. the
number of output filters in the convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
kernel: An integer or tuple/list of 2 integers, specifying the width and
height of the 2D convolution window. Can be a single integer to
specify the same value for all spatial dimensions.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1. # Input shape
4D tensor with shape: `(samples, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(samples, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(samples, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
filters = int(filters * alpha)
x = layers.ZeroPadding2D(padding=((0, 1), (0, 1)), name='conv1_pad')(inputs)
x = layers.Conv2D(
filters,
kernel,
padding='valid',
use_bias=False,
strides=strides,
name='conv1')(
x)
x = layers.BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
return layers.ReLU(6., name='conv1_relu')(x)
def _depthwise_conv_block(inputs,
pointwise_conv_filters,
alpha,
depth_multiplier=1,
strides=(1, 1),
block_id=1):
"""Adds a depthwise convolution block.
A depthwise convolution block consists of a depthwise conv,
batch normalization, relu6, pointwise convolution,
batch normalization and relu6 activation.
Arguments:
inputs: Input tensor of shape `(rows, cols, channels)` (with
`channels_last` data format) or (channels, rows, cols) (with
`channels_first` data format).
pointwise_conv_filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the pointwise convolution).
alpha: controls the width of the network. - If `alpha` < 1.0,
proportionally decreases the number of filters in each layer. - If
`alpha` > 1.0, proportionally increases the number of filters in each
layer. - If `alpha` = 1, default number of filters from the paper are
used at each layer.
depth_multiplier: The number of depthwise convolution output channels
for each input channel. The total number of depthwise convolution
output channels will be equal to `filters_in * depth_multiplier`.
strides: An integer or tuple/list of 2 integers, specifying the strides
of the convolution along the width and height. Can be a single integer
to specify the same value for all spatial dimensions. Specifying any
stride value != 1 is incompatible with specifying any `dilation_rate`
value != 1.
block_id: Integer, a unique identification designating the block number.
# Input shape
4D tensor with shape: `(batch, channels, rows, cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, rows, cols, channels)` if
data_format='channels_last'. # Output shape
4D tensor with shape: `(batch, filters, new_rows, new_cols)` if
data_format='channels_first'
or 4D tensor with shape: `(batch, new_rows, new_cols, filters)` if
data_format='channels_last'. `rows` and `cols` values might have
changed due to stride.
Returns:
Output tensor of block.
"""
channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1
pointwise_conv_filters = int(pointwise_conv_filters * alpha)
if strides == (1, 1):
x = inputs
else:
x = layers.ZeroPadding2D(((0, 1), (0, 1)), name='conv_pad_%d' % block_id)(
inputs)
x = layers.DepthwiseConv2D((3, 3),
padding='same' if strides == (1, 1) else 'valid',
depth_multiplier=depth_multiplier,
strides=strides,
use_bias=False,
name='conv_dw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_dw_%d_bn' % block_id)(
x)
x = layers.ReLU(6., name='conv_dw_%d_relu' % block_id)(x)
x = layers.Conv2D(
pointwise_conv_filters, (1, 1),
padding='same',
use_bias=False,
strides=(1, 1),
name='conv_pw_%d' % block_id)(
x)
x = layers.BatchNormalization(
axis=channel_axis, name='conv_pw_%d_bn' % block_id)(
x)
return layers.ReLU(6., name='conv_pw_%d_relu' % block_id)(x)
@keras_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x, data_format=None):
return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.mobilenet.decode_predictions')
def decode_predictions(preds, top=5):
return imagenet_utils.decode_predictions(preds, top=top)
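# Usage sketch ('elephant.jpg' is a hypothetical local image path):
#
#     from tensorflow.keras.preprocessing import image
#     import numpy as np
#     model = MobileNet(weights='imagenet')
#     img = image.load_img('elephant.jpg', target_size=(224, 224))
#     x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
#     print(decode_predictions(model.predict(x), top=3))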
| [
"[email protected]"
] | |
a26ec63f56bad3f7991ace4eb345ea52f222d5e9 | 44032f82bcb767175cf86aeccee623eb6cfbd40e | /deploy/compose/gpu/__init__.py | 2303c0b0cf1621e03ddbbda08853f070befb4247 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | veyselkoparal/DeepVideoAnalytics | 3628d41f8e06547e177a7badd20b399bd7f9028a | 013f7e1efcc11f9ed5762192a91589aa6b4df359 | refs/heads/master | 2020-03-16T04:22:46.603989 | 2018-05-07T06:55:47 | 2018-05-07T06:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,462 | py | """
Code in this file assumes that it is being run via dvactl, with the git repo root as the current directory
"""
CONFIG = {
"deploy/gpu/docker-compose-2-gpus.yml": {"global_model_gpu_id": 0,
"global_model_memory_fraction": 0.1,
"workers":
[(0, 0.25, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(0, 0.5, "LAUNCH_BY_NAME_detector_coco", "coco"),
(1, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(1, 0.19, "LAUNCH_BY_NAME_detector_face", "face"),
(1, 0.15, "LAUNCH_BY_NAME_indexer_facenet", "facenet"),
(1, 0.15, "LAUNCH_BY_NAME_analyzer_tagger", "tagger")]
},
"deploy/gpu/docker-compose-4-gpus.yml": {"global_model_gpu_id": 2,
"global_model_memory_fraction": 0.29,
"workers":
[(0, 0.3, "LAUNCH_BY_NAME_indexer_inception", "inception"),
(0, 0.4, "LAUNCH_BY_NAME_analyzer_tagger", "tagger"),
(0, 0.2, "LAUNCH_BY_NAME_analyzer_crnn", "crnn"),
(1, 1.0, "LAUNCH_BY_NAME_detector_coco", "coco"),
(2, 0.7, "LAUNCH_BY_NAME_detector_face", "face"),
(3, 0.5, "LAUNCH_BY_NAME_detector_textbox", "textbox"),
(3, 0.45, "LAUNCH_BY_NAME_indexer_facenet", "facenet")
]
},
}
SKELETON = """ version: '3'
services:
db:
image: postgres:9.6.6
container_name: dva-pg
volumes:
- dvapgdata:/var/lib/postgresql/data
env_file:
- ../../../custom.env
rabbit:
image: rabbitmq
container_name: dva-rmq
env_file:
- ../../../custom.env
volumes:
- dvarabbit:/var/lib/rabbitmq
redis:
image: bitnami/redis:latest
container_name: dva-redis
env_file:
- ../../../custom.env
volumes:
- dvaredis:/bitnami
webserver:
image: akshayubhat/dva-auto:gpu
container_name: webserver
env_file:
- ../../../custom.env
environment:
- LAUNCH_SERVER_NGINX=1
- LAUNCH_NOTEBOOK=1
command: bash -c "git reset --hard && git pull && sleep 10 && ./start_container.py"
ports:
- "127.0.0.1:8000:80"
- "127.0.0.1:8888:8888"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
non-gpu-workers:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- LAUNCH_BY_NAME_retriever_inception=1
- LAUNCH_BY_NAME_retriever_facenet=1
- LAUNCH_Q_qextract=1
- LAUNCH_Q_qstreamer=1
- LAUNCH_SCHEDULER=1
- LAUNCH_Q_GLOBAL_RETRIEVER=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
{gpu_workers}
global-model:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={global_model_gpu_id}
- GPU_MEMORY={global_model_memory_fraction}
- LAUNCH_Q_GLOBAL_MODEL=1
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media
volumes:
dvapgdata:
dvadata:
dvarabbit:
dvaredis:
"""
BLOCK = """ {worker_name}:
image: akshayubhat/dva-auto:gpu
env_file:
- ../../../custom.env
environment:
- GPU_AVAILABLE=1
- NVIDIA_VISIBLE_DEVICES={gpu_id}
- GPU_MEMORY={memory_fraction}
- {env_key}={env_value}
command: bash -c "git reset --hard && git pull && sleep 45 && ./start_container.py"
depends_on:
- db
- redis
- rabbit
volumes:
- dvadata:/root/media"""
def generate_multi_gpu_compose():
for fname in CONFIG:
blocks = []
worker_specs = CONFIG[fname]['workers']
for gpu_id, fraction, env_key, worker_name, in worker_specs:
blocks.append(
BLOCK.format(worker_name=worker_name, gpu_id=gpu_id, memory_fraction=fraction, env_key=env_key,
env_value=1))
with open(fname, 'w') as out:
out.write(SKELETON.format(gpu_workers="\n".join(blocks),
global_model_gpu_id=CONFIG[fname]['global_model_gpu_id'],
global_model_memory_fraction=CONFIG[fname]['global_model_memory_fraction']))
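# Usage sketch (assumes the module is used from the repository root, as noted
# in the module docstring): calling the generator rewrites both compose files
# listed in CONFIG.
#
#     generate_multi_gpu_compose()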
| [
"[email protected]"
] | |
7266db340ad3c001b2476e0d9677e9d1a795cf48 | 46a5df524f1d96baf94f6eb0f6222f2b856235f3 | /src/data/image/sliced_grid.py | 7612a11c9ffd5b6b038a1658df956563308349f9 | [
"MIT"
] | permissive | PhilHarnish/forge | 5dfbb0aa2afdb91e55d85187bd86fbeb9b6b2888 | c544fb8b499e1e13793c94159f4c35bce187311e | refs/heads/master | 2023-03-11T17:23:46.569359 | 2023-02-25T15:09:01 | 2023-02-25T15:09:01 | 1,818,598 | 2 | 0 | MIT | 2023-02-25T15:09:02 | 2011-05-29T19:36:53 | Jupyter Notebook | UTF-8 | Python | false | false | 2,215 | py | import math
from typing import Iterable
import cv2
import numpy as np
from data.image import coloring, image, model
from puzzle.constraints.image import sliced_grid_constraints
from util.geometry import np2d
class SlicedGrid(model.LineSpecification):
_source: image.Image
_constraints: sliced_grid_constraints.SlicedGridConstraints
def __init__(
self,
source: image.Image,
constraints: sliced_grid_constraints) -> None:
self._source = source
self._constraints = constraints
def set_source(self, source: image.Image) -> None:
self._source = source
self._constraints.set_source(source)
def get_debug_data(self) -> np.ndarray:
data = cv2.cvtColor(self._source.get_debug_data(), cv2.COLOR_GRAY2RGB)
c = self._constraints.center
cv2.circle(data, c, 3, coloring.WHITE, thickness=3)
for (theta, distances, divisions), color in zip(
self._constraints.get_specs(),
coloring.colors(self._constraints.slices)):
for distance in distances:
x, y = np2d.move_from(c, theta, distance)
cv2.circle(data, (round(x), round(y)), 3, color, thickness=3)
return data
def __iter__(self) -> Iterable[model.Divisions]:
c = self._constraints.center
max_distance = sum(self._source.shape)
for theta, distances, divisions in self._constraints.get_specs():
endpoints = []
total_distance = 0
for distance in distances:
moved = np2d.move_from(c, theta, distance)
endpoints.append(moved)
total_distance += abs(distance)
start, end = endpoints
division_distance = math.copysign(
total_distance / divisions, -distances[0])
right_angle = theta + math.pi / 2
dx = round(math.cos(right_angle) * max_distance)
dy = round(math.sin(right_angle) * max_distance)
result = []
for i in range(0, divisions + 1): # n_divisions requires n+1 iterations.
x, y = np2d.move_from(start, theta, division_distance * i)
result.append((
theta,
(round(x - dx), round(y - dy)), (round(x + dx), round(y + dy)),
i / divisions))
yield result
def __len__(self) -> int:
return self._constraints.slices
| [
"[email protected]"
] | |
f1979087cd1398a523b893f6bdb223fc4f3c142e | 65585dce782bb50d92caa69be2431e094ac36a1f | /examples/recursive_dirtree_generator.py | 50307af4a1c3021c3703469a8d1c6028f5d8ab66 | [
"Apache-2.0"
] | permissive | vishalbelsare/treelib | 6e52f594cecb69210332b7092abcf1456be14666 | 12d7efd50829a5a18edaab01911b1e546bff2ede | refs/heads/master | 2023-08-31T07:38:06.461212 | 2022-04-13T15:07:52 | 2022-04-13T15:07:52 | 153,905,842 | 0 | 0 | NOASSERTION | 2023-03-27T15:17:00 | 2018-10-20T12:59:18 | Python | UTF-8 | Python | false | false | 1,691 | py | #!/usr/bin/env python
"""
Example of treelib usage to generate recursive tree of directories.
It could be useful to implement Directory Tree data structure
2016 samuelsh
"""
import treelib
import random
import hashlib
from string import digits, letters
MAX_FILES_PER_DIR = 10
def get_random_string(length):
return ''.join(random.choice(digits + letters) for _ in range(length))
def build_recursive_tree(tree, base, depth, width):
"""
Args:
tree: Tree
base: Node
depth: int
width: int
Returns:
"""
if depth >= 0:
depth -= 1
for i in xrange(width):
directory = Directory()
tree.create_node("{0}".format(directory.name), "{0}".format(hashlib.md5(directory.name)),
parent=base.identifier, data=directory) # node identifier is md5 hash of it's name
dirs_nodes = tree.children(base.identifier)
for dir in dirs_nodes:
newbase = tree.get_node(dir.identifier)
build_recursive_tree(tree, newbase, depth, width)
else:
return
class Directory(object):
def __init__(self):
self._name = get_random_string(64)
        self._files = [File() for _ in xrange(MAX_FILES_PER_DIR)]  # Each directory contains MAX_FILES_PER_DIR (10) files
@property
def name(self):
return self._name
@property
def files(self):
return self._files
class File(object):
def __init__(self):
self._name = get_random_string(64)
@property
def name(self):
return self._name
tree = treelib.Tree()
base = tree.create_node('Root', 'root')
build_recursive_tree(tree, base, 2, 10)
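# With depth=2 and width=10 this builds 10 + 100 + 1000 = 1110 directory nodes
# below the root (1111 tree nodes in total).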
tree.show()
| [
"[email protected]"
] | |
4425e109b0efe53b2e51a04bcddab969c531489c | d27bf22683710ff090642c05c1df2d13b18c2509 | /allauth/openid/admin.py | 0967c5c39ae1d4e1a60416bffb65e3f68ea3ecd1 | [
"MIT"
] | permissive | snswa/django-allauth | b8db554519111e5d022fb137d259e272db9998f4 | 0b58191f5d954d7f5a7c4e5bc8c33cf6fdf0c416 | refs/heads/master | 2021-01-18T10:29:31.434368 | 2010-10-21T18:24:56 | 2010-10-21T18:24:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | from django.contrib import admin
from models import OpenIDAccount
class OpenIDAccountAdmin(admin.ModelAdmin):
raw_id_fields = ('user',)
admin.site.register(OpenIDAccount, OpenIDAccountAdmin)
| [
"[email protected]"
] | |
88199abd4462b61b8c1e468a0166393a1ea355c4 | 699cad5fee497cce94463decf1bf2b811e3fd244 | /06프로그램의 흐름 제어하기/if.py | 95d092e7f3d31f5adce1aa2a57ab88f03995c7b0 | [] | no_license | Jeonghwan-Yoo/brain_python3 | 91974019a29013abe8c9f9ed132c48b404259e2f | a22e870515e760aaa497cbc99305977cf2f01a3d | refs/heads/master | 2020-07-27T00:02:29.604848 | 2019-09-16T13:16:09 | 2019-09-16T13:16:09 | 208,802,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import sys #파이썬 프로그램을 종료하는 exit()을 사용하기 위해
print('수를 입력하세요 : ')
a=int(input())
if a==0:
print('0은 나눗셈에 이용할 수 없습니다.') #경고 메시지를 출력한 뒤
sys.exit(0) #프로그램을 종료시킵니다.
print('3 /', a, '=', 3/a) | [
"[email protected]"
] | |
42e9fe3ab57bd3c1e296f665413fc82fba5070e3 | 21e6a09131ac76d734102c829260c3b8e3a0094b | /solutions/21_textfsm/task_21_4.py | 9986cf1ad1531aef03cb29f28f968dc09e18cec7 | [] | no_license | Egor-Ozhmegoff/Python-for-network-engineers | 5fbe8f3a754263ab65c28093fed667684ae76ded | 6b70f4f9df658698ea0d770a064ee0e12b4e4de2 | refs/heads/master | 2023-08-11T20:52:12.999495 | 2021-09-09T14:42:14 | 2021-09-09T14:42:14 | 306,354,285 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | # -*- coding: utf-8 -*-
"""
Task 21.4
Create a function send_and_parse_show_command.
Function parameters:
* device_dict - dictionary with the connection parameters for one device
* command - the command to execute
* templates_path - path to the directory with TextFSM templates
* index - name of the index file, default value "index"
The function should connect to one device, send the show command with netmiko,
and then parse the command output with TextFSM.
The function should return a list of dictionaries with the parsed command output (as in task 21.1a):
* keys - the names of the variables in the TextFSM template
* values - the parts of the output that correspond to the variables
Check the function on the output of the "sh ip int br" command and the devices from devices.yaml.
"""
import os
from pprint import pprint
from netmiko import ConnectHandler
import yaml
def send_and_parse_show_command(device_dict, command, templates_path):
if "NET_TEXTFSM" not in os.environ:
os.environ["NET_TEXTFSM"] = templates_path
with ConnectHandler(**device_dict) as ssh:
ssh.enable()
output = ssh.send_command(command, use_textfsm=True)
return output
if __name__ == "__main__":
full_pth = os.path.join(os.getcwd(), "templates")
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
for dev in devices:
result = send_and_parse_show_command(
dev, "sh ip int br", templates_path=full_pth
)
pprint(result, width=120)
# Second variant, without using use_textfsm in netmiko
from task_21_3 import parse_command_dynamic
def send_and_parse_show_command(device_dict, command, templates_path, index="index"):
attributes = {"Command": command, "Vendor": device_dict["device_type"]}
with ConnectHandler(**device_dict) as ssh:
ssh.enable()
output = ssh.send_command(command)
parsed_data = parse_command_dynamic(
output, attributes, templ_path=templates_path, index_file=index
)
return parsed_data
if __name__ == "__main__":
full_pth = os.path.join(os.getcwd(), "templates")
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
for dev in devices:
result = send_and_parse_show_command(
dev, "sh ip int br", templates_path=full_pth
)
pprint(result, width=120)
| [
"[email protected]"
] | |
aba8fcd3ea58d7fe66b3bbe8099f8f60d5f4097d | b64fcb9da80d12c52bd24a7a1b046ed9952b0026 | /client_sdk_python/providers/eth_tester/main.py | 68fdf1d3a68dcfcbb67e83434e4836cccf5581b6 | [
"MIT"
] | permissive | PlatONnetwork/client-sdk-python | e59f44a77690806c8763ed6db938ed8447d42417 | 94ad57bb34b5ee7bb314ac858071686382c55402 | refs/heads/master | 2022-07-09T08:49:07.312759 | 2021-12-24T08:15:46 | 2021-12-24T08:15:46 | 173,032,954 | 7 | 16 | MIT | 2022-08-31T02:19:42 | 2019-02-28T03:18:03 | Python | UTF-8 | Python | false | false | 1,773 | py | from client_sdk_python.providers import (
BaseProvider,
)
from .middleware import (
default_transaction_fields_middleware,
ethereum_tester_fixture_middleware,
ethereum_tester_middleware,
)
class EthereumTesterProvider(BaseProvider):
middlewares = [
default_transaction_fields_middleware,
ethereum_tester_fixture_middleware,
ethereum_tester_middleware,
]
ethereum_tester = None
api_endpoints = None
def __init__(self, ethereum_tester=None, api_endpoints=None):
if ethereum_tester is None:
# do not import eth_tester until runtime, it is not a default dependency
from eth_tester import EthereumTester
self.ethereum_tester = EthereumTester()
else:
self.ethereum_tester = ethereum_tester
if api_endpoints is None:
# do not import eth_tester derivatives until runtime, it is not a default dependency
from .defaults import API_ENDPOINTS
self.api_endpoints = API_ENDPOINTS
else:
self.api_endpoints = api_endpoints
def make_request(self, method, params):
namespace, _, endpoint = method.partition('_')
try:
delegator = self.api_endpoints[namespace][endpoint]
except KeyError:
return {
"error": "Unknown RPC Endpoint: {0}".format(method),
}
try:
response = delegator(self.ethereum_tester, params)
except NotImplementedError:
return {
"error": "RPC Endpoint has not been implemented: {0}".format(method),
}
else:
return {
'result': response,
}
def isConnected(self):
return True
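    # Usage sketch (assumption: this web3.py fork keeps the upstream `Web3`
    # entry point, so the import path below is illustrative only):
    #
    #     from client_sdk_python import Web3
    #     w3 = Web3(EthereumTesterProvider())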
| [
"[email protected]"
] | |
0915102cfa0343f989eef246184cd916f8cc46c4 | 4bdbec7ad33b31c392b9d1f88ddf84e4b9230467 | /cross_origin_test/cross_origin_test/wsgi.py | 5bf61a3cc71d9dc0d96e87531d460711a5070d70 | [
"BSD-2-Clause",
"MIT"
] | permissive | mohawkhq/django-cross-origin | 4aa775b15612e505404a9eb6cfe24a568561d265 | f73f5c9a49d4044c34e443153c071b6bb0acda31 | refs/heads/master | 2020-06-08T20:13:02.690261 | 2013-11-19T15:33:34 | 2013-11-19T15:33:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for cross_origin_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cross_origin_test.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
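# Usage sketch: a WSGI server loads the `application` object above, e.g.
# `gunicorn cross_origin_test.wsgi:application` (gunicorn is only one example;
# any WSGI-compliant server works).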
| [
"[email protected]"
] | |
1c07e950336bf700663363367fa33ecf43c0f407 | 0cb1ff9d0be4387e33f1003ab5cc72bab0345e7a | /wildcard/dashboards/settings/password/tests.py | 3372ec782591fc679b4e3a892d89731e3b8335cc | [
"Apache-2.0"
] | permissive | kickstandproject/wildcard | 65995fb0090c4cfcad34f8373cfc912199ecf5da | 0ef2a15d8ac6b1d37db964d0baa7e40f9f771bc9 | refs/heads/master | 2020-05-17T00:41:09.908059 | 2015-01-27T20:25:33 | 2015-01-28T03:30:22 | 14,288,349 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,365 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Centrin Data Systems Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
# TODO(mrunge): remove, when keystone v3 supports
# change_own_password, incl. password validation
kver = api.keystone.VERSIONS.active
if kver == 2:
INDEX_URL = reverse('horizon:settings:password:index')
class ChangePasswordTests(test.TestCase):
@test.create_stubs({api.keystone: ('user_update_own_password', )})
def test_change_password(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
api.keystone.user_update_own_password(IsA(http.HttpRequest),
'oldpwd',
'normalpwd',).AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'PasswordForm',
'current_password': 'oldpwd',
'new_password': 'normalpwd',
'confirm_password': 'normalpwd'}
res = self.client.post(INDEX_URL, formData)
self.assertNoFormErrors(res)
def test_change_validation_passwords_not_matching(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
formData = {'method': 'PasswordForm',
'current_password': 'currpasswd',
'new_password': 'testpassword',
'confirm_password': 'doesnotmatch'}
res = self.client.post(INDEX_URL, formData)
self.assertFormError(res, "form", None, ['Passwords do not match.'])
@test.create_stubs({api.keystone: ('user_update_own_password', )})
def test_change_password_shows_message_on_login_page(self):
if kver == 3:
self.skipTest('Password change in keystone v3 unsupported')
api.keystone.user_update_own_password(IsA(http.HttpRequest),
'oldpwd',
'normalpwd').AndReturn(None)
self.mox.ReplayAll()
formData = {'method': 'PasswordForm',
'current_password': 'oldpwd',
'new_password': 'normalpwd',
'confirm_password': 'normalpwd'}
res = self.client.post(INDEX_URL, formData, follow=True)
info_msg = "Password changed. Please log in again to continue."
self.assertContains(res, info_msg)
def test_on_keystone_v3_disabled(self):
try:
reverse('horizon:settings:password:index')
except NoReverseMatch:
pass
| [
"[email protected]"
] | |
8c4de2642d9752e64cfff1c79de8129758f696fc | f5d0be87bad113cd3ec0dabc4db0683442c794bf | /alphastarmini/core/arch/spatial_encoder.py | 96cbd701618415f6f2794855072f3791699f3169 | [
"Apache-2.0"
] | permissive | ZHQ-air/mini-AlphaStar | 8aa22242334bd397fa398f2b865d2fc20fb1cab6 | 6039fd105bd263ee1f7c3276fea7fe7b660e0701 | refs/heads/main | 2023-07-03T16:10:13.712321 | 2021-08-17T02:59:56 | 2021-08-17T02:59:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,731 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
" Spatial Encoder."
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from alphastarmini.core.arch.entity_encoder import EntityEncoder
from alphastarmini.core.arch.entity_encoder import Entity
from alphastarmini.lib import utils as L
from alphastarmini.lib.hyper_parameters import Arch_Hyper_Parameters as AHP
from alphastarmini.lib.hyper_parameters import MiniStar_Arch_Hyper_Parameters as MAHP
__author__ = "Ruo-Ze Liu"
debug = False
class SpatialEncoder(nn.Module):
'''
Inputs: map, entity_embeddings
Outputs:
embedded_spatial - A 1D tensor of the embedded map
map_skip - Tensors of the outputs of intermediate computations
'''
def __init__(self, n_resblocks=4, original_32=AHP.original_32,
original_64=AHP.original_64,
original_128=AHP.original_128,
original_256=AHP.original_256,
original_512=AHP.original_512):
super().__init__()
self.inplanes = AHP.map_channels
self.project = nn.Conv2d(self.inplanes, original_32, kernel_size=1, stride=1,
padding=0, bias=True)
# ds means downsampling
self.ds_1 = nn.Conv2d(original_32, original_64, kernel_size=4, stride=2,
padding=1, bias=True)
self.ds_2 = nn.Conv2d(original_64, original_128, kernel_size=4, stride=2,
padding=1, bias=True)
self.ds_3 = nn.Conv2d(original_128, original_128, kernel_size=4, stride=2,
padding=1, bias=True)
self.resblock_stack = nn.ModuleList([
ResBlock(inplanes=original_128, planes=original_128, stride=1, downsample=None)
for _ in range(n_resblocks)])
if AHP == MAHP:
            # note: in mini-AlphaStar (mAS) the 128x128 map is replaced by a 64x64 one, so the downsampled result is 8x8 instead of 16x16
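            # the three stride-2 downsampling convs take 64 -> 32 -> 16 -> 8, hence 8 * 8 * original_128 inputs to the fc layer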
self.fc = nn.Linear(8 * 8 * original_128, original_256)
else:
self.fc = nn.Linear(16 * 16 * original_128, original_256) # position-wise
self.conv1 = nn.Conv1d(original_256, original_32, kernel_size=1, stride=1,
padding=0, bias=False)
self.map_width = AHP.minimap_size
def preprocess(self, obs, entity_embeddings):
map_data = get_map_data(obs)
return map_data
def scatter(self, entity_embeddings, entity_x_y):
# `entity_embeddings` are embedded through a size 32 1D convolution, followed by a ReLU,
print("entity_embeddings.shape:", entity_embeddings.shape) if debug else None
reduced_entity_embeddings = F.relu(self.conv1(entity_embeddings.transpose(1, 2))).transpose(1, 2)
print("reduced_entity_embeddings.shape:", reduced_entity_embeddings.shape) if debug else None
# then scattered into a map layer so that the size 32 vector at a specific
# location corresponds to the units placed there.
def bits2value(bits):
# change from the bits to dec values.
l = len(bits)
v = 0
g = 1
for i in range(l - 1, -1, -1):
v += bits[i] * g
g *= 2
return v
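        # bits2value reads the bits MSB-first, e.g. bits2value([1, 0, 1]) == 5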
# shape [batch_size x entity_size x embedding_size]
batch_size = reduced_entity_embeddings.shape[0]
entity_size = reduced_entity_embeddings.shape[1]
device = next(self.parameters()).device
scatter_map = torch.zeros(batch_size, AHP.original_32, self.map_width, self.map_width, device=device)
print("scatter_map.shape:", scatter_map.shape) if debug else None
for i in range(batch_size):
for j in range(entity_size):
# can not be masked entity
if entity_x_y[i, j, 0] != -1e9:
x = entity_x_y[i, j, :8]
y = entity_x_y[i, j, 8:]
x = bits2value(x)
y = bits2value(y)
print('x', x) if debug else None
print('y', y) if debug else None
                    # note: the map was reduced from 128 to 64, so x and y must be scaled down as well;
                    # the raw coordinates live on a 256 grid and 256 / 64 = 4, so we divide by 4
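                    # e.g. a unit encoded at (x=100, y=60) on the 256 grid is scattered into cell (25, 15)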
x = int(x / 4)
y = int(y / 4)
scatter_map[i, :, y, x] += reduced_entity_embeddings[i, j, :]
#print("scatter_map:", scatter_map[0, :, 23, 19]) if 1 else None
return scatter_map
def forward(self, x, entity_embeddings, entity_x_y):
scatter_map = self.scatter(entity_embeddings, entity_x_y)
x = torch.cat([scatter_map, x], dim=1)
# After preprocessing, the planes are concatenated, projected to 32 channels
# by a 2D convolution with kernel size 1, passed through a ReLU
x = F.relu(self.project(x))
# then downsampled from 128x128 to 16x16 through 3 2D convolutions and ReLUs
# with channel size 64, 128, and 128 respectively.
# The kernel size for those 3 downsampling convolutions is 4, and the stride is 2.
# note: in mAS, we replace 128x128 to 64x64, and the result 16x16 also to 8x8
# note: here we should add a relu after each conv2d
x = F.relu(self.ds_1(x))
x = F.relu(self.ds_2(x))
x = F.relu(self.ds_3(x))
# 4 ResBlocks with 128 channels and kernel size 3 and applied to the downsampled map,
# with the skip connections placed into `map_skip`.
map_skip = x
for resblock in self.resblock_stack:
x = resblock(x)
            # note: if we used the following line it would raise a "cannot compute gradient" error
            # map_skip += x
            # so we use the next line instead, which avoids the in-place operation
map_skip = map_skip + x
x = x.reshape(x.shape[0], -1)
# The ResBlock output is embedded into a 1D tensor of size 256 by a linear layer
# and a ReLU, which becomes `embedded_spatial`.
x = self.fc(x)
embedded_spatial = F.relu(x)
return map_skip, embedded_spatial
def get_map_data(obs, map_width=AHP.minimap_size, verbose=False):
'''
    TODO: camera: One-hot with maximum 2 indicating whether a location is within the camera; this refers to the minimap
TODO: scattered_entities: 32 float values from entity embeddings
default map_width is 128
'''
if "feature_minimap" in obs:
feature_minimap = obs["feature_minimap"]
else:
feature_minimap = obs
save_type = np.float32
# A: height_map: Float of (height_map / 255.0)
height_map = np.expand_dims(feature_minimap["height_map"].reshape(-1, map_width, map_width) / 255.0, -1).astype(save_type)
print('height_map:', height_map) if verbose else None
print('height_map.shape:', height_map.shape) if verbose else None
# A: visibility: One-hot with maximum 4
visibility = L.np_one_hot(feature_minimap["visibility_map"].reshape(-1, map_width, map_width), 4).astype(save_type)
print('visibility:', visibility) if verbose else None
print('visibility.shape:', visibility.shape) if verbose else None
# A: creep: One-hot with maximum 2
creep = L.np_one_hot(feature_minimap["creep"].reshape(-1, map_width, map_width), 2).astype(save_type)
print('creep:', creep) if verbose else None
# A: entity_owners: One-hot with maximum 5
entity_owners = L.np_one_hot(feature_minimap["player_relative"].reshape(-1, map_width, map_width), 5).astype(save_type)
print('entity_owners:', entity_owners) if verbose else None
    # the bottom 3 maps are missing in pysc2 1.2 and 2.0
    # however, they are defined in s2clientprotocol/spatial.proto
    # and they are available in pysc2 3.0
# A: alerts: One-hot with maximum 2
alerts = L.np_one_hot(feature_minimap["alerts"].reshape(-1, map_width, map_width), 2).astype(save_type)
print('alerts:', alerts) if verbose else None
# A: pathable: One-hot with maximum 2
pathable = L.np_one_hot(feature_minimap["pathable"].reshape(-1, map_width, map_width), 2).astype(save_type)
print('pathable:', pathable) if verbose else None
# A: buildable: One-hot with maximum 2
buildable = L.np_one_hot(feature_minimap["buildable"].reshape(-1, map_width, map_width), 2).astype(save_type)
print('buildable:', buildable) if verbose else None
out_channels = 1 + 4 + 2 + 5 + 2 + 2 + 2
map_data = np.concatenate([height_map, visibility, creep, entity_owners,
alerts, pathable, buildable], axis=3)
map_data = np.transpose(map_data, [0, 3, 1, 2])
print('map_data.shape:', map_data.shape) if verbose else None
map_data = torch.tensor(map_data)
print('torch map_data.shape:', map_data.shape) if verbose else None
return map_data
class ResBlock(nn.Module):
def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.downsample = downsample
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
class GatedResBlock(nn.Module):
def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
        super(GatedResBlock, self).__init__()
self.sigmoid = nn.Sigmoid()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.conv1_mask = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.conv2_mask = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
def forward(self, x):
residual = x
x = F.relu(self.bn1(self.conv1(x) * self.sigmoid(self.conv1_mask(x))))
x = self.bn2(self.conv2(x) * self.sigmoid(self.conv2_mask(x)))
x += residual
x = F.relu(x)
return x
class ResBlockImproved(nn.Module):
    '''From paper Identity Mappings in Deep Residual Networks'''
    def __init__(self, inplanes=128, planes=128, stride=1, downsample=None):
super(ResBlockImproved, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
def forward(self, x):
residual = x
x = F.relu(self.bn1(x))
x = self.conv1(x)
x = F.relu(self.bn2(x))
x = self.conv2(x)
x = x + residual
return x
class ResBlock1D(nn.Module):
def __init__(self, inplanes, planes, seq_len, stride=1, downsample=None):
super(ResBlock1D, self).__init__()
self.conv1 = nn.Conv1d(inplanes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.ln1 = nn.LayerNorm([planes, seq_len])
self.conv2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.ln2 = nn.LayerNorm([planes, seq_len])
def forward(self, x):
residual = x
x = F.relu(self.ln1(x))
x = self.conv1(x)
x = F.relu(self.ln2(x))
x = self.conv2(x)
x = x + residual
return x
def test():
spatial_encoder = SpatialEncoder()
batch_size = 2
# dummy map list
map_list = []
map_data_1 = torch.zeros(batch_size, 1, AHP.minimap_size, AHP.minimap_size)
map_data_1_one_hot = L.to_one_hot(map_data_1, 2)
print('map_data_1_one_hot.shape:', map_data_1_one_hot.shape) if debug else None
map_list.append(map_data_1)
map_data_2 = torch.zeros(batch_size, 17, AHP.minimap_size, AHP.minimap_size)
map_list.append(map_data_2)
    map_data = torch.cat(map_list, dim=1)
    # dummy entity inputs so that forward() gets all three required arguments
    # (shapes assumed from scatter(): [batch, entity, AHP.original_256] and [batch, entity, 16])
    entity_embeddings = torch.zeros(batch_size, 4, AHP.original_256)
    entity_x_y = torch.zeros(batch_size, 4, 16)
    map_skip, embedded_spatial = spatial_encoder.forward(map_data, entity_embeddings, entity_x_y)
print('map_skip:', map_skip) if debug else None
print('embedded_spatial:', embedded_spatial) if debug else None
print('map_skip.shape:', map_skip.shape) if debug else None
print('embedded_spatial.shape:', embedded_spatial.shape) if debug else None
if debug:
print("This is a test!")
if __name__ == '__main__':
test()
| [
"[email protected]"
] | |
ba497dd3afdf87eae4b1e1d9fa84bbe788335f77 | 385ed58325dd0cc75bdb9fd3e61c5e005f7a4f28 | /source/hall/src/hall/entity/hallfree.py | 63e7e839d8986e8730bf43df1ef165e4c0acc70a | [] | no_license | csirui/hall37 | 17dfa4e4f1f8bf719d0c11ac7738fa4c14fd06db | 5c4eb4b2bf57bbbee4731470c830d8d81915d603 | refs/heads/master | 2021-09-04T03:55:12.460035 | 2018-01-15T15:12:30 | 2018-01-15T15:12:30 | 117,560,615 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 5,420 | py | # -*- coding=utf-8 -*-
from sre_compile import isstring
from datetime import datetime
import freetime.util.log as ftlog
import poker.entity.events.tyeventbus as pkeventbus
from hall.entity import hallconf, hallpopwnd, datachangenotify
from hall.entity.hallconf import HALL_GAMEID
from hall.entity.hallusercond import UserConditionRegister
from poker.entity.biz.exceptions import TYBizConfException
from poker.entity.events.tyevent import EventConfigure, ItemCountChangeEvent
class HallFree(object):
def __init__(self):
self.freeItemId = None
self.iconRes = None
        self.itemName = None  # text displayed on the item icon in the client UI
self.states = []
def decodeFromDict(self, d):
self.freeItemId = d.get('freeItemId')
self.iconRes = d.get('iconRes')
self.itemName = d.get("itemName", "")
self.states = []
for state in d.get('states', []):
self.states.append(HallFreeState().decodeFromDict(state))
return self
class HallFreeState(object):
def __init__(self):
# str
self.desc = ''
# str
self.btnText = ''
# bool
self.hasMark = False
# int
self.enable = True
# bool
self.visible = True
        # condition list
self.conditionList = None
# todotask
self.todotaskList = None
def decodeFromDict(self, d):
self.desc = d.get('desc', '')
self.btnText = d.get('btnText', '')
self.hasMark = d.get('hasMark', False)
self.enable = d.get('enable', True)
self.visible = d.get('visible', True)
self.conditionList = UserConditionRegister.decodeList(d.get('conditions', []))
self.todotaskList = []
for todotaskDict in d.get('todotasks', []):
self.todotaskList.append(hallpopwnd.decodeTodotaskFactoryByDict(todotaskDict))
return self
class HallFreeTemplate(object):
def __init__(self):
self.name = None
self.freeItems = None
def decodeFromDict(self, d, freeItemMap):
self.name = d.get('name')
if not isstring(self.name) or not self.name:
raise TYBizConfException(d, 'HallFreeTemplate.name must be not empty string')
self.freeItems = []
for itemId in d.get('freeItems', []):
if freeItemMap.has_key(itemId):
self.freeItems.append(freeItemMap[itemId])
return self
_inited = False
# key=freeItemId, value=HallFree
_freeItemMap = {}
# key=templateName, value=HallFreeTemplate
_templateMap = {}
def _reloadConf():
global _freeItemMap
global _templateMap
freeItemMap = {}
templateMap = {}
conf = hallconf.getFreeConf()
for freeDict in conf.get('freeItems', []):
freeItem = HallFree().decodeFromDict(freeDict)
if freeItem.freeItemId in freeItemMap:
raise TYBizConfException(freeDict, 'Duplicate freeId %s' % (freeItem.freeItemId))
freeItemMap[freeItem.freeItemId] = freeItem
if ftlog.is_debug():
ftlog.debug('hallfree._reloadConf freeIds=', freeItemMap.keys())
for templateDict in conf.get('templates', []):
template = HallFreeTemplate().decodeFromDict(templateDict, freeItemMap)
if template.name in templateMap:
raise TYBizConfException(templateDict, 'Duplicate templateName %s' % (template.name))
templateMap[template.name] = template
_freeItemMap = freeItemMap
_templateMap = templateMap
ftlog.debug('hallfree._reloadConf successed freeIds=', _freeItemMap.keys(),
'templateNames=', _templateMap.keys())
def _onConfChanged(event):
if _inited and event.isChanged('game:9999:free:tc'):
ftlog.debug('hallfree._onConfChanged')
_reloadConf()
def _onItemCountChanged(event):
if _inited:
ftlog.debug('hallfree._onItemCountChanged', event.userId)
datachangenotify.sendDataChangeNotify(HALL_GAMEID, event.userId, ['free', 'promotion_loc'])
def _initialize():
ftlog.debug('hallfree._initialize begin')
global _inited
if not _inited:
_inited = True
_reloadConf()
pkeventbus.globalEventBus.subscribe(EventConfigure, _onConfChanged)
pkeventbus.globalEventBus.subscribe(ItemCountChangeEvent, _onItemCountChanged)
ftlog.debug('hallfree._initialize end')
# Get the free-item list configuration for the given user
def getFree(gameId, userId, clientId, timestamp):
ret = []
templateName = hallconf.getFreeTemplateName(clientId)
template = _templateMap.get(templateName)
if ftlog.is_debug():
ftlog.debug('hallfree.getFree gameId=', gameId,
'userId=', userId,
'clientId=', clientId,
'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
'templateName=', templateName)
if not template:
template = _templateMap.get('default')
if ftlog.is_debug():
ftlog.debug('hallfree.getFree gameId=', gameId,
'userId=', userId,
'clientId=', clientId,
'timestamp=', datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S'),
'freeItems=', [fi.freeItemId for fi in template.freeItems] if template else [])
if template:
for freeItem in template.freeItems:
ret.append(freeItem)
return ret
| [
"[email protected]"
] | |
2892ca34dda7c6bac350599fac9f051e71e64ce2 | f0c6b43e325064511c4e2d7ce9c59e88a12d81d5 | /Assignment/DataTypes/problem10.py | 0565ed8531943f1e8764d0ac461c28ed26bea342 | [] | no_license | kendraregmi/Assignment1 | bda8402fa216bf54789c4d3b5092a5540d4ee68d | 83a8365e508f5b83cee71fc14155b7838103b3ba | refs/heads/main | 2023-03-26T17:42:54.255731 | 2021-03-08T07:29:04 | 2021-03-08T07:29:04 | 344,406,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | # 10. Write a Python program to remove the characters which have odd index
# values of a given string.
my_string= "Kathmandu"
result=""
for i in range(len(my_string)):
if i%2==0:
result= result+my_string[i]
print(result)
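# for "Kathmandu" the even-index characters are kept, so this prints "Ktmnu"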
| [
"[email protected]"
] | |
eb2c8258f0156a186c1b5525851bf8627d0ebad7 | d7f43ee7b91c216b1740dead4cc348f3704d2f5a | /src/beginner_tutorials/scripts/add_two_ints_server.py~ | ef69b404916f90b0f5cf43bc27b89200b6fda426 | [] | no_license | capslockqq/catkin_ws | 26f734cf45cb5fe15301f5448a6005f2b21073b5 | a0989427e42988f36ae9e4d83ba7eb871a56b64e | refs/heads/master | 2021-08-24T07:04:07.551220 | 2017-12-08T14:42:19 | 2017-12-08T14:42:19 | 113,569,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | #!/usr/bin/env python
from beginner_tutorials.srv import AddTwoInts, AddTwoIntsResponse
import rospy
def handle_add_two_ints(req):
print "Returning [%s + %s = %s]"%(req.a, req.b, (req.a + req.b))
return AddTwoIntsResponse(req.a + req.b)
def add_two_ints_server():
rospy.init_node('add_two_ints_server')
s = rospy.Service('add_two_ints', AddTwoInts, handle_add_two_ints)
print "Ready to add two ints."
rospy.spin()
if __name__ == "__main__":
add_two_ints_server()
| [
"ubuntu@ubuntu.(none)"
] | ubuntu@ubuntu.(none) |
|
58d0a8905b5a6546432140bf05e9ab8f06dfb857 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/77/usersdata/216/42255/submittedfiles/exercicio24.py | f70202f0601ef08a2d7723413c6c64658abd3963 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | # -*- coding: utf-8 -*-
import math
a=int(input('Digite um número:'))
b=int(input('Digite um número:'))
i=0
for i in range(1,a,1):
if a%i==0 and b%i==0:
print(i)
| [
"[email protected]"
] | |
f82d1bfc18cf23dccc01d4ee011811e1f567837a | 0092041336a420af59b73e2ab1bf6e7077b11f6e | /autoeq/constants.py | 9e3aa99e634a4cadadc3b973ff61a777af07f613 | [
"MIT"
] | permissive | jaakkopasanen/AutoEq | e10280a5413a406623ddbc8b87ddf7953ffd020c | ab5869c8f4996f8eea88abca50a41510263ed098 | refs/heads/master | 2023-08-22T22:43:51.969927 | 2023-08-09T11:13:24 | 2023-08-09T11:13:24 | 123,807,729 | 11,367 | 2,940 | MIT | 2023-08-11T08:23:26 | 2018-03-04T16:37:35 | Python | UTF-8 | Python | false | false | 9,711 | py | # -*- coding: utf-8 -*
import os
import math
DEFAULT_F_MIN = 20.0
DEFAULT_F_MAX = 20000.0
DEFAULT_STEP = 1.01
DEFAULT_MAX_GAIN = 6.0
DEFAULT_TREBLE_F_LOWER = 6000.0
DEFAULT_TREBLE_F_UPPER = 8000.0
DEFAULT_TREBLE_MAX_GAIN = 6.0
DEFAULT_TREBLE_GAIN_K = 1.0
DEFAULT_SMOOTHING_WINDOW_SIZE = 1 / 12
DEFAULT_SMOOTHING_ITERATIONS = 1
DEFAULT_TREBLE_SMOOTHING_F_LOWER = 6000.0
DEFAULT_TREBLE_SMOOTHING_F_UPPER = 8000.0
DEFAULT_TREBLE_SMOOTHING_WINDOW_SIZE = 2.0
DEFAULT_TREBLE_SMOOTHING_ITERATIONS = 1
DEFAULT_SOUND_SIGNATURE_SMOOTHING_WINDOW_SIZE = None
DEFAULT_FS = 44100
DEFAULT_BIT_DEPTH = 16
DEFAULT_PHASE = 'minimum'
DEFAULT_F_RES = 10.0
DEFAULT_TILT = 0.0
DEFAULT_BASS_BOOST_GAIN = 0.0
DEFAULT_BASS_BOOST_FC = 105.0
DEFAULT_BASS_BOOST_Q = 0.7
DEFAULT_TREBLE_BOOST_GAIN = 0.0
DEFAULT_TREBLE_BOOST_FC = 10000.0
DEFAULT_TREBLE_BOOST_Q = 0.7
DEFAULT_PEQ_OPTIMIZER_MIN_F = 20.0
DEFAULT_PEQ_OPTIMIZER_MAX_F = 20000.0
DEFAULT_PEQ_OPTIMIZER_MAX_TIME = None
DEFAULT_PEQ_OPTIMIZER_TARGET_LOSS = None
DEFAULT_PEQ_OPTIMIZER_MIN_CHANGE_RATE = None
DEFAULT_PEQ_OPTIMIZER_MIN_STD = 0.002
DEFAULT_FIXED_BAND_FILTER_MIN_GAIN = -12.0
DEFAULT_FIXED_BAND_FILTER_MAX_GAIN = 12.0
DEFAULT_PEAKING_FILTER_MIN_FC = 20.0
DEFAULT_PEAKING_FILTER_MAX_FC = 10000.0
DEFAULT_PEAKING_FILTER_MIN_Q = 0.18248 # AUNBandEq has maximum bandwidth of 5 octaves which is Q of 0.182479
DEFAULT_PEAKING_FILTER_MAX_Q = 6.0
DEFAULT_PEAKING_FILTER_MIN_GAIN = -20.0
DEFAULT_PEAKING_FILTER_MAX_GAIN = 20.0
DEFAULT_SHELF_FILTER_MIN_FC = 20.0
DEFAULT_SHELF_FILTER_MAX_FC = 10000.0
DEFAULT_SHELF_FILTER_MIN_Q = 0.4 # Shelf filters start to overshoot below 0.4
DEFAULT_SHELF_FILTER_MAX_Q = 0.7 # Shelf filters start to overshoot above 0.7
DEFAULT_SHELF_FILTER_MIN_GAIN = -20.0
DEFAULT_SHELF_FILTER_MAX_GAIN = 20.0
DEFAULT_BIQUAD_OPTIMIZATION_F_STEP = 1.02
DEFAULT_MAX_SLOPE = 18.0
DEFAULT_PREAMP = 0.0
DEFAULT_GRAPHIC_EQ_STEP = 1.0563 # Produces 127 samples with greatest frequency of 19871
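# (20.0 * 1.0563 ** 126 ~= 19871 Hz, i.e. indices 0..126 give the 127 sample frequencies)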
ROOT_DIR = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
MOD_REGEX = r' \((sample|serial number) [a-zA-Z0-9\-]+\)$'
DBS = ['crinacle', 'headphonecom', 'innerfidelity', 'oratory1990', 'rtings']
HARMAN_OVEREAR_PREFERENCE_FREQUENCIES = [20.0, 21.0, 22.0, 24.0, 25.0, 27.0, 28.0, 30.0, 32.0, 34.0, 36.0, 38.0, 40.0, 43.0, 45.0, 48.0, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
HARMAN_INEAR_PREFENCE_FREQUENCIES = [20.0, 21.2, 22.4, 23.6, 25.0, 26.5, 28.0, 30.0, 31.5, 33.5, 35.5, 37.5, 40.0, 42.5, 45.0, 47.5, 50.0, 53.0, 56.0, 60.0, 63.0, 67.0, 71.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 106.0, 112.0, 118.0, 125.0, 132.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 212.0, 224.0, 236.0, 250.0, 265.0, 280.0, 300.0, 315.0, 335.0, 355.0, 375.0, 400.0, 425.0, 450.0, 475.0, 500.0, 530.0, 560.0, 600.0, 630.0, 670.0, 710.0, 750.0, 800.0, 850.0, 900.0, 950.0, 1000.0, 1060.0, 1120.0, 1180.0, 1250.0, 1320.0, 1400.0, 1500.0, 1600.0, 1700.0, 1800.0, 1900.0, 2000.0, 2120.0, 2240.0, 2360.0, 2500.0, 2650.0, 2800.0, 3000.0, 3150.0, 3350.0, 3550.0, 3750.0, 4000.0, 4250.0, 4500.0, 4750.0, 5000.0, 5300.0, 5600.0, 6000.0, 6300.0, 6700.0, 7100.0, 7500.0, 8000.0, 8500.0, 9000.0, 9500.0, 10000.0, 10600.0, 11200.0, 11800.0, 12500.0, 13200.0, 14000.0, 15000.0, 16000.0, 17000.0, 18000.0, 19000.0, 20000.0]
PREAMP_HEADROOM = 0.2
PEQ_CONFIGS = {
'10_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': math.sqrt(2), 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 31.25 * 2 ** i} for i in range(10)]
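        # center frequencies: 31.25, 62.5, 125, 250, 500, 1000, 2000, 4000, 8000 and 16000 Hz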
},
'31_BAND_GRAPHIC_EQ': {
'optimizer': {'min_std': 0.01},
'filter_defaults': {'q': 4.318473, 'min_gain': -12.0, 'max_gain': 12.0, 'type': 'PEAKING'},
'filters': [{'fc': 20 * 2 ** (i / 3), 'type': 'PEAKING'} for i in range(31)]
},
'10_PEAKING': {
'filters': [{'type': 'PEAKING'}] * 10
},
'8_PEAKING_WITH_SHELVES': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 8
},
'4_PEAKING_WITH_LOW_SHELF': {
'optimizer': {
'max_f': 10000.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'4_PEAKING_WITH_HIGH_SHELF': {
'filters': [{
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{'type': 'PEAKING'}] * 4
},
'AUNBANDEQ': {
'optimizer': {
'min_std': 0.008
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_fc': 20.0, # Can go to 16 Hz
'max_fc': 10000.0, # Can go to 20 kHz
'min_q': 0.182479, # Max bw of 5.0
'max_q': 10.0 # Min bw of 0.01 = 144.27 Q
}] * 8
},
'MINIDSP_2X4HD': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'MINIDSP_IL_DSP': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -16.0,
'max_gain': 16.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.5,
'max_q': 6.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'NEUTRON_MUSIC_PLAYER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 5.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'POWERAMP_EQUALIZER': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -15.0,
'max_gain': 15.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 12.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'QUDELIX_5K': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -12.0,
'max_gain': 12.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10e3,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 7.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
'SPOTIFY': {
'optimizer': {'min_std': 0.01},
'filters': [
{'fc': 60.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 150.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 2400.0, 'q': 1.0, 'type': 'PEAKING'},
{'fc': 15000.0, 'q': 1.0, 'type': 'PEAKING'},
]
},
'USB_AUDIO_PLAYER_PRO': {
'optimizer': {
'min_std': 0.008
},
'filter_defaults': {
'min_gain': -20.0,
'max_gain': 20.0,
},
'filters': [{
'type': 'LOW_SHELF',
'fc': 105.0,
'q': 0.7
}, {
'type': 'HIGH_SHELF',
'fc': 10000.0,
'q': 0.7
}] + [{
'type': 'PEAKING',
'min_q': 0.1,
'max_q': 10.0,
'min_fc': 20.0,
'max_fc': 10000.0,
}] * 8
},
}
| [
"[email protected]"
] |