| Column | Dtype | Range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (contains nulls) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (contains nulls) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (contains nulls) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 |
| author_id | string | length 1 to 132 |
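Each row below is one preview record: its cells appear in the column order listed above, with the `content` cell expanded inline over multiple lines and author emails redacted. As a rough sketch of how such records can be read programmatically with the `datasets` library (the repo id used here is a placeholder, not the actual Hub path of this dataset):

```python
from datasets import load_dataset

# "user/dataset-name" is a placeholder repo id -- substitute the real Hub path.
# The field names used below come from the schema table above.
ds = load_dataset("user/dataset-name", split="train", streaming=True)

# Stream a few records; `content` holds the full source file as text.
for record in ds.take(3):
    print(record["repo_name"], record["path"], record["license_type"])
    print(record["content"][:200])
```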
95495b389e7e95436f048fe3e1e335059fb2bd5d | 748c4ba7058336eb2d09b413066d21582e26d71b | /course_catalog/migrations/0041_rename_course_run.py | 64d107c8459b49ba3d72ecb2957b228b3bd05eb5 | [
"BSD-3-Clause"
]
| permissive | mitodl/open-discussions | 6dbb8ae2843263889634849ddd9096f74536b78e | ba7442482da97d463302658c0aac989567ee1241 | refs/heads/master | 2023-08-10T02:54:45.706067 | 2023-08-01T17:05:36 | 2023-08-01T17:05:36 | 93,760,926 | 13 | 3 | BSD-3-Clause | 2023-08-01T17:05:40 | 2017-06-08T14:46:35 | Python | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.1.11 on 2019-10-02 17:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
("course_catalog", "0040_add_program_published"),
]
operations = [
migrations.RenameModel(old_name="CourseRun", new_name="LearningResourceRun")
]
| [
"[email protected]"
]
| |
4c6ed8f33aa431abae76a14dd95bb5c435bb4a2f | 39385e706c34202539ee8ee1089ebc4faa7e15c5 | /inference_server/inference_server_pb2_grpc.py | 9325a8429cd3ccb4e86f0e55f8a3001b70159127 | []
| no_license | paulhendricks/inference-server | f7845d8aeab13f95dd1ce069c6740fc80af6ca87 | bdf6ccc0e2559b2fef8ed8a02cb0b6cfbbfaba63 | refs/heads/master | 2020-03-17T22:22:12.580704 | 2018-05-18T20:54:00 | 2018-05-18T20:54:00 | 134,000,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,413 | py | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import inference_server_pb2 as inference__server__pb2
class InferenceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Compute = channel.unary_unary(
'/inference_server.Inference/Compute',
request_serializer=inference__server__pb2.Input.SerializeToString,
response_deserializer=inference__server__pb2.Output.FromString,
)
class InferenceServicer(object):
# missing associated documentation comment in .proto file
pass
def Compute(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InferenceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Compute': grpc.unary_unary_rpc_method_handler(
servicer.Compute,
request_deserializer=inference__server__pb2.Input.FromString,
response_serializer=inference__server__pb2.Output.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'inference_server.Inference', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| [
"[email protected]"
]
| |
9f1a9e27a25ef04ca6febee56a37699a43c8a6d8 | 51d348426c6e5fa79f2e77baf59bdbf8357d9f12 | /week10/Инфоматрикс/b.условныйоператор/b)e.py | 282e78011a30e6244539ebc1ed65784e57996e99 | []
| no_license | Zhansayaas/webdev | c01325b13abf92cef13138d7ffc123cf9bc4f81a | dd054d0bcafc498eccc5f4626ab45fd8b46b3a3f | refs/heads/main | 2023-04-10T23:33:30.469465 | 2021-04-17T10:21:53 | 2021-04-17T10:21:53 | 322,049,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | n=int(input())
k=int(input())
if(n>k):print(1)
elif(n==k):print(0)
else:print(2) | [
"[email protected]"
]
| |
d25ee342daa07ce418636d6017598ceccbc395a2 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03086/s109673720.py | 9c53994051ea917600e7062df13c6995566eab3f | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 297 | py | s=input()
n=len(s)
res=0
for i in range(n):
if s[i]=='A' or s[i]=='C' or s[i]=='G' or s[i]=='T':
ans=1
j=i+1
while j<n:
if s[j]=='A' or s[j]=='C' or s[j]=='G' or s[j]=='T':
ans+=1
j+=1
else:
break
else:
continue
res=max(res,ans)
print(res) | [
"[email protected]"
]
| |
57447872fefc81ffb2c0125733fb53f138cc9502 | d4e9fd6dd51d29ad374b460a2cfbd467502ede7d | /ros2param/ros2param/command/param.py | 1aa9026af3542f670f8f3224a40aaf22089810ad | [
"Apache-2.0"
]
| permissive | ros2/ros2cli | 3f7b93ff44d18b2292a50d3b6ff119494142328b | 351ef3c7442f49013d84084dea23fe399517690f | refs/heads/rolling | 2023-08-07T03:53:23.635067 | 2023-08-03T19:50:28 | 2023-08-03T19:50:28 | 93,568,427 | 142 | 157 | Apache-2.0 | 2023-09-14T07:36:46 | 2017-06-06T22:13:14 | Python | UTF-8 | Python | false | false | 1,366 | py | # Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ros2cli.command import add_subparsers_on_demand
from ros2cli.command import CommandExtension
class ParamCommand(CommandExtension):
"""Various param related sub-commands."""
def add_arguments(self, parser, cli_name, *, argv=None):
self._subparser = parser
# add arguments and sub-commands of verbs
add_subparsers_on_demand(
parser, cli_name, '_verb', 'ros2param.verb', required=False,
argv=argv)
def main(self, *, parser, args):
if not hasattr(args, '_verb'):
# in case no verb was passed
self._subparser.print_help()
return 0
extension = getattr(args, '_verb')
# call the verb's main method
return extension.main(args=args)
| [
"[email protected]"
]
| |
ff786bb5396ad92d10ed243592117497f6cf1e1c | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/_signal.py | 46398c329673fba8df3fa9b203f853e8f6dd22a6 | []
| no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,547 | py | # encoding: utf-8
# module _signal
# from (built-in)
# by generator 1.146
"""
This module provides mechanisms to use signal handlers in Python.
Functions:
alarm() -- cause SIGALRM after a specified time [Unix only]
setitimer() -- cause a signal (described below) after a specified
float time and the timer may restart then [Unix only]
getitimer() -- get current value of timer [Unix only]
signal() -- set the action for a given signal
getsignal() -- get the signal action for a given signal
pause() -- wait until a signal arrives [Unix only]
default_int_handler() -- default SIGINT handler
signal constants:
SIG_DFL -- used to refer to the system default handler
SIG_IGN -- used to ignore the signal
NSIG -- number of defined signals
SIGINT, SIGTERM, etc. -- signal numbers
itimer constants:
ITIMER_REAL -- decrements in real time, and delivers SIGALRM upon
expiration
ITIMER_VIRTUAL -- decrements only when the process is executing,
and delivers SIGVTALRM upon expiration
ITIMER_PROF -- decrements both when the process is executing and
when the system is executing on behalf of the process.
Coupled with ITIMER_VIRTUAL, this timer is usually
used to profile the time spent by the application
in user and kernel space. SIGPROF is delivered upon
expiration.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
"""
# no imports
# Variables with simple values
CTRL_BREAK_EVENT = 1
CTRL_C_EVENT = 0
NSIG = 23
SIGABRT = 22
SIGBREAK = 21
SIGFPE = 8
SIGILL = 4
SIGINT = 2
SIGSEGV = 11
SIGTERM = 15
SIG_DFL = 0
SIG_IGN = 1
# functions
def default_int_handler(*more): # real signature unknown; restored from __doc__
"""
default_int_handler(...)
The default handler for SIGINT installed by Python.
It raises KeyboardInterrupt.
"""
pass
def getsignal(*args, **kwargs): # real signature unknown
"""
Return the current action for the given signal.
The return value can be:
SIG_IGN -- if the signal is being ignored
SIG_DFL -- if the default action for the signal is in effect
None -- if an unknown handler is in effect
anything else -- the callable Python object used as a handler
"""
pass
def set_wakeup_fd(fd, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
"""
set_wakeup_fd(fd, *, warn_on_full_buffer=True) -> fd
Sets the fd to be written to (with the signal number) when a signal
comes in. A library can use this to wakeup select or poll.
The previous fd or -1 is returned.
The fd must be non-blocking.
"""
pass
def signal(): # real signature unknown; restored from __doc__
"""
Set the action for the given signal.
The action can be SIG_DFL, SIG_IGN, or a callable Python object.
The previous action is returned. See getsignal() for possible return values.
*** IMPORTANT NOTICE ***
A signal handler function is called with two arguments:
the first is the signal number, the second is the interrupted stack frame.
"""
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def create_module(cls, *args, **kwargs): # real signature unknown
""" Create a built-in module """
pass
@classmethod
def exec_module(cls, *args, **kwargs): # real signature unknown
""" Exec a built-in module """
pass
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
"""
Load the specified module into sys.modules and return it.
This method is deprecated. Use loader.exec_module instead.
"""
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
__spec__ = None # (!) real value is ''
| [
"[email protected]"
]
| |
37a1db073986ca0971ba4944632aafaec7a0d5ff | f57529f95a0fd10676f46063fdcd273fb5a81427 | /boj/05001-06000/5063.py | 6a2433d4269d1553b50fb5ef00f2d4236113033c | []
| no_license | hoyasmh/PS | a9b83b0044e483586590c9b7c6bf8a77236b67e7 | 6bbaa0ce77b2726f6af782af049d73720820f761 | refs/heads/master | 2023-04-23T10:43:27.349785 | 2021-05-17T13:43:53 | 2021-05-17T13:43:53 | 311,239,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | n=int(input())
for i in range(n):
r,e,c=map(int, input().split())
a=e-c
if a>r:
print('advertise')
elif a==r:
print('does not matter')
else:
print('do not advertise')
| [
"[email protected]"
]
| |
600a02249547f25a443efccb2f227a4daf743e72 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03626/s691421903.py | 12b73d2e8e8b33454998cc794473feec516fdf85 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # https://atcoder.jp/contests/abc071/tasks/arc081_b
# 横ドミノと縦ドミノの場合わけを考える
# 横ドミノ→横ドミノの場合 : いままでの通り*3通り
# 横ドミノ→縦ドミノ : いままで*1通り ok
# 縦ドミノ→横ドミノ : いままで*2通り ok
# 縦ドミノ→縦ドミノ : いままで*2通り ok
MOD = 10**9+7
N = int(input())
S1 = input()
S2 = input()
pre1 = S1[0]
pre2 = S2[0]
if pre1 != pre2:
ans = 6
else:
ans = 3
for s1, s2 in zip(S1[1:], S2[1:]):
if pre1 == s1 and pre2 == s2:
pass
elif pre1 != pre2 and s1 != s2:
# 横→横
ans *= 3
elif pre1 != pre2 and s1 == s2:
# 横→縦
pass
elif pre1 == pre2 and s1 != s2:
# 縦→横
ans *= 2
elif pre1 == pre2 and s1 == s2:
# 縦→縦
ans *= 2
if ans >= MOD:
ans %= MOD
pre1, pre2 = s1, s2
print(ans % MOD)
| [
"[email protected]"
]
| |
511cd08228a358eb20555f4016aa0f102b17db57 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2/HazelParr/pancake_revenge.py | 0d9c90b6f19e36f7eff6120670502de3018d5d1b | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 381 | py | T = int(input())
for case in range(T):
x = input()
count = 0
while "-" in x:
new_string = ""
unchanged = ""
y = x.rfind("-")
fragment = x[:y+1]
unchanged = x[y+1:]
for i in fragment:
if i == "-":
new_string += "+"
else:
new_string += "-"
count += 1
x = new_string + unchanged
print("Case #{}: {}".format(case+1, count))
| [
"[[email protected]]"
]
| |
8cbdf7ab2ec66f8f1e82cb4824f60c32e935e855 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /120_design_patterns/001_SOLID_design_principles/_exercises/_exercises/isp/ISP/Book/IBook.py | 01dad5411393ea75f180ee3e25c50e409867ecae | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 220 | py | # ___ ISP.IL.. ______ IL..
#
#
# c_ IBookIL..
#
# ?p..
# ___ pages -> ?
# r_ N...
#
# ??.?
# ___ pages num_pages ?
# r_ N...
#
# ?p..
# ___ title __ ?
# r_ N...
#
# ??.?
# ___ title titleName ?
# r_ N...
| [
"[email protected]"
]
| |
f6b1a79848c4fc34a82d807b82b44e98bececaf5 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2320/60716/267286.py | b7837b56224bdc4c298a9aada04e063f0c2c204d | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | strs = input()
k = int(input())
if k==1:
lists = list()
lists.append(strs)
for i in range(len(strs)):
strlist = list(strs)
k = strlist.pop(0)
strlist.append(k)
strs = ''.join(strlist)
lists.append(strs)
lists.sort()
print(lists[0])
else:
strlist = list(strs)
strlist.sort()
strs = ''.join(strlist)
print(strs) | [
"[email protected]"
]
| |
391f37b3390c8466064f8f93b5a3f7eba2852de0 | cdd79cef15bdf6a0b9098e27028bbe38607bc288 | /AOJ/Vol02/0241_Quaternion Multiplication.py | a94a35629d177665c61f8dcc76a162305205fbfc | []
| no_license | nord2sudjp/atcoder | ee35a3eb35717485dc62627172de24c9dac102fb | 6b1cc5102a615492cc7ff8a33813bbb954641782 | refs/heads/master | 2023-08-25T11:27:14.205593 | 2021-09-27T05:43:04 | 2021-09-27T05:43:04 | 302,855,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | # Volume2-0241 Quaternion Multiplication
#l1=[1,2,3,4]
#l2=[7,6,7,8]
while True:
N=int(input())
if N==0:break
for _ in range(N):
*t,=list(map(int,input().split()))
l1=t[0:4]
l2=t[4:]
s=[x*y for x in l1 for y in l2]
ans=[0]*4 #1,i,j,k
ans[0]+=s[0]
ans[1]+=s[1]+s[4]
ans[2]+=s[2]+s[8]
ans[3]+=s[3]+s[12]
ans[0]+=-1*s[5]-1*s[10]-1*s[15]
ans[1]+=s[11]-1*s[14]
ans[2]+=-1*s[7]+s[13]
ans[3]+=s[6]-1*s[9]
print(' '.join(map(str,ans))) | [
"[email protected]"
]
| |
e6c627876e58af236e4193f1e7495a258b610ed7 | 2f38331b8a0bc8867859d853d352f9f5cc9cd1b5 | /day07/code/value.py | ae8c35d75226626552aef586887bad597c2a649a | []
| no_license | Chenhuaqi6/python_net | aaf66a15a2f438e1f3fc67f338abd15e2bbfd6a3 | 56efd53bbaa1212a86c65e9cd3b29d2f5f30b752 | refs/heads/master | 2020-04-09T08:30:20.817513 | 2019-01-19T06:51:41 | 2019-01-19T06:51:41 | 160,196,516 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from multiprocessing import Process,Value
import time
import random
#创建共享内存
money = Value("i",5000)
#挣钱
def boy():
for i in range(30):
time.sleep(0.2)
money.value += random.randint(1,1500)
def girl():
for i in range(30):
time.sleep(0.15)
money.value -= random.randint(100,1200)
b = Process(target = boy)
g = Process(target = girl)
b.start()
g.start()
b.join()
g.join()
print("一个月余额:",money.value)
| [
"[email protected]"
]
| |
4621879f40c0fc877a782f9a6e4748cf4c1db4cc | 077a17b286bdd6c427c325f196eb6e16b30c257e | /00_BofVar-unit-tests/05_32/remenissions-work/exploit-BofFunc-28.py | a140ada92f16b731fc6c68505ca91b55c8fc744a | []
| no_license | KurSh/remenissions_test | 626daf6e923459b44b82521aa4cb944aad0dbced | 9dec8085b62a446f7562adfeccf70f8bfcdbb738 | refs/heads/master | 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | from pwn import *
import time
import sys
import signal
import sf
target = process("./chall-test_BofVar-05-x86")
gdb.attach(target, execute="verify_exploit")
bof_payload = sf.BufferOverflow(arch=32)
bof_payload.set_input_start(0x66)
bof_payload.add_int32(0x34, 0xdeab)
bof_payload.add_int32(0x30, 0xbeef)
bof_payload.add_int32(0x2c, 0xfacadf)
bof_payload.add_int32(0x28, 0xbef0)
bof_payload.add_int32(0x24, 0xfacade)
bof_payload.add_int32(0x20, 0xdeac)
bof_payload.set_ret(0x8048456)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
def handler(signum, frame):
raise Exception("Timed out")
def check_verification_done():
while True:
if os.path.exists("pwned") or os.path.exists("rip"):
sys.exit(0)
signal.signal(signal.SIGALRM, handler)
signal.alarm(2)
try:
while True:
check_verification_done()
except Exception:
print("Exploit timed out")
| [
"[email protected]"
]
| |
ac37fdfdef9ec6fcb843f8501be6c32e260623bf | 1cf0346a84062d63dd4e05bd858bb437ea0b0d07 | /tensorflow_tts/models/melgan.py | 49e434afe36a0d48711436f13e743a35e68b7799 | [
"Apache-2.0"
]
| permissive | Joyseyousa/TensorflowTTS | 668b149cf6b3ff774aca5197f6033aacf2802eba | f0614c03b677562a27f6a80f623f4ce0018b21eb | refs/heads/master | 2022-09-29T13:42:19.077946 | 2020-06-04T08:29:30 | 2020-06-04T08:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,434 | py | # -*- coding: utf-8 -*-
# Copyright 2020 The MelGAN Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MelGAN Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.utils import WeightNormalization
from tensorflow_tts.utils import GroupConv1D
def get_initializer(initializer_seed=42):
"""Creates a `tf.initializers.glorot_normal` with the given seed.
Args:
initializer_seed: int, initializer seed.
Returns:
GlorotNormal initializer with seed = `initializer_seed`.
"""
return tf.keras.initializers.GlorotNormal(seed=initializer_seed)
class TFReflectionPad1d(tf.keras.layers.Layer):
"""Tensorflow ReflectionPad1d module."""
def __init__(self, padding_size, padding_type="REFLECT", **kwargs):
"""Initialize TFReflectionPad1d module.
Args:
padding_size (int)
padding_type (str) ("CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT")
"""
super().__init__(**kwargs)
self.padding_size = padding_size
self.padding_type = padding_type
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Padded tensor (B, T + 2 * padding_size, C).
"""
return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0]], self.padding_type)
class TFConvTranspose1d(tf.keras.layers.Layer):
"""Tensorflow ConvTranspose1d module."""
def __init__(self,
filters,
kernel_size,
strides,
padding,
is_weight_norm,
initializer_seed,
**kwargs):
"""Initialize TFConvTranspose1d( module.
Args:
filters (int): Number of filters.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid").
"""
super().__init__(**kwargs)
self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
filters=filters,
kernel_size=(kernel_size, 1),
strides=(strides, 1),
padding="same",
kernel_initializer=get_initializer(initializer_seed)
)
if is_weight_norm:
self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T', C').
"""
x = tf.expand_dims(x, 2)
x = self.conv1d_transpose(x)
x = tf.squeeze(x, 2)
return x
class TFResidualStack(tf.keras.layers.Layer):
"""Tensorflow ResidualStack module."""
def __init__(self,
kernel_size,
filters,
dilation_rate,
use_bias,
nonlinear_activation,
nonlinear_activation_params,
is_weight_norm,
initializer_seed,
**kwargs):
"""Initialize TFResidualStack module.
Args:
kernel_size (int): Kernel size.
filters (int): Number of filters.
dilation_rate (int): Dilation rate.
use_bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
"""
super().__init__(**kwargs)
self.blocks = [
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params),
TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate),
tf.keras.layers.Conv1D(
filters=filters,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed)
),
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params),
tf.keras.layers.Conv1D(filters=filters,
kernel_size=1,
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed))
]
self.shortcut = tf.keras.layers.Conv1D(filters=filters,
kernel_size=1,
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed),
name='shortcut')
# apply weightnorm
if is_weight_norm:
self._apply_weightnorm(self.blocks)
self.shortcut = WeightNormalization(self.shortcut)
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, C).
Returns:
Tensor: Output tensor (B, T, C).
"""
_x = tf.identity(x)
for layer in self.blocks:
_x = layer(_x)
shortcut = self.shortcut(x)
return shortcut + _x
def _apply_weightnorm(self, list_layers):
"""Try apply weightnorm for all layer in list_layers."""
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if "conv1d" in layer_name or "dense" in layer_name:
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass
class TFMelGANGenerator(tf.keras.Model):
"""Tensorflow MelGAN generator module."""
def __init__(self, config, **kwargs):
"""Initialize TFMelGANGenerator module.
Args:
config: config object of Melgan generator.
"""
super().__init__(**kwargs)
# check hyper parameter is valid or not
assert config.filters >= np.prod(config.upsample_scales)
assert config.filters % (2 ** len(config.upsample_scales)) == 0
# add initial layer
layers = []
layers += [
TFReflectionPad1d((config.kernel_size - 1) // 2,
padding_type=config.padding_type,
name='first_reflect_padding'),
tf.keras.layers.Conv1D(filters=config.filters,
kernel_size=config.kernel_size,
use_bias=config.use_bias,
kernel_initializer=get_initializer(config.initializer_seed))
]
for i, upsample_scale in enumerate(config.upsample_scales):
# add upsampling layer
layers += [
getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params),
TFConvTranspose1d(
filters=config.filters // (2 ** (i + 1)),
kernel_size=upsample_scale * 2,
strides=upsample_scale,
padding='same',
is_weight_norm=config.is_weight_norm,
initializer_seed=config.initializer_seed,
name='conv_transpose_._{}'.format(i)
)
]
# ad residual stack layer
for j in range(config.stacks):
layers += [
TFResidualStack(
kernel_size=config.stack_kernel_size,
filters=config.filters // (2 ** (i + 1)),
dilation_rate=config.stack_kernel_size ** j,
use_bias=config.use_bias,
nonlinear_activation=config.nonlinear_activation,
nonlinear_activation_params=config.nonlinear_activation_params,
is_weight_norm=config.is_weight_norm,
initializer_seed=config.initializer_seed,
name='residual_stack_._{}._._{}'.format(i, j)
)
]
# add final layer
layers += [
getattr(tf.keras.layers, config.nonlinear_activation)(**config.nonlinear_activation_params),
TFReflectionPad1d((config.kernel_size - 1) // 2,
padding_type=config.padding_type,
name='last_reflect_padding'),
tf.keras.layers.Conv1D(filters=config.out_channels,
kernel_size=config.kernel_size,
use_bias=config.use_bias,
kernel_initializer=get_initializer(config.initializer_seed))
]
if config.use_final_nolinear_activation:
layers += [tf.keras.layers.Activation("tanh")]
if config.is_weight_norm is True:
self._apply_weightnorm(layers)
self.melgan = tf.keras.models.Sequential(layers)
@tf.function(input_signature=[tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32)])
def call(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, T, channels)
Returns:
Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)
"""
return self.melgan(c)
def _apply_weightnorm(self, list_layers):
"""Try apply weightnorm for all layer in list_layers."""
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if "conv1d" in layer_name or "dense" in layer_name:
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass
def _build(self):
"""Build model by passing fake input."""
fake_mels = tf.random.uniform(shape=[1, 20, 80], dtype=tf.float32)
self(fake_mels)
class TFMelGANDiscriminator(tf.keras.layers.Layer):
"""Tensorflow MelGAN generator module."""
def __init__(self,
out_channels=1,
kernel_sizes=[5, 3],
filters=16,
max_downsample_filters=1024,
use_bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"alpha": 0.2},
padding_type="REFLECT",
is_weight_norm=True,
initializer_seed=0.02,
**kwargs):
"""Initilize MelGAN discriminator module.
Args:
out_channels (int): Number of output channels.
kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
and the first and the second kernel sizes will be used for the last two layers.
For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.
the last two layers' kernel size will be 5 and 3, respectively.
filters (int): Initial number of filters for conv layer.
max_downsample_filters (int): Maximum number of filters for downsampling layers.
use_bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")
"""
super().__init__(**kwargs)
discriminator = []
# check kernel_size is valid
assert len(kernel_sizes) == 2
assert kernel_sizes[0] % 2 == 1
assert kernel_sizes[1] % 2 == 1
# add first layer
discriminator = [
TFReflectionPad1d((np.prod(kernel_sizes) - 1) // 2, padding_type=padding_type),
tf.keras.layers.Conv1D(
filters=filters,
kernel_size=int(np.prod(kernel_sizes)),
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed)
),
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)
]
# add downsample layers
in_chs = filters
with tf.keras.utils.CustomObjectScope({"GroupConv1D": GroupConv1D}):
for downsample_scale in downsample_scales:
out_chs = min(in_chs * downsample_scale, max_downsample_filters)
discriminator += [
GroupConv1D(
filters=out_chs,
kernel_size=downsample_scale * 10 + 1,
strides=downsample_scale,
padding='same',
use_bias=use_bias,
groups=in_chs // 4,
kernel_initializer=get_initializer(initializer_seed)
)
]
discriminator += [
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)
]
in_chs = out_chs
# add final layers
out_chs = min(in_chs * 2, max_downsample_filters)
discriminator += [
tf.keras.layers.Conv1D(
filters=out_chs,
kernel_size=kernel_sizes[0],
padding='same',
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed)
)
]
discriminator += [
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params)
]
discriminator += [
tf.keras.layers.Conv1D(
filters=out_channels,
kernel_size=kernel_sizes[1],
padding='same',
use_bias=use_bias,
kernel_initializer=get_initializer(initializer_seed)
)
]
if is_weight_norm is True:
self._apply_weightnorm(discriminator)
self.disciminator = discriminator
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of output tensors of each layer.
"""
outs = []
for f in self.disciminator:
x = f(x)
outs += [x]
return outs
def _apply_weightnorm(self, list_layers):
"""Try apply weightnorm for all layer in list_layers."""
for i in range(len(list_layers)):
try:
layer_name = list_layers[i].name.lower()
if "conv1d" in layer_name or "dense" in layer_name:
list_layers[i] = WeightNormalization(list_layers[i])
except Exception:
pass
class TFMelGANMultiScaleDiscriminator(tf.keras.Model):
"""MelGAN multi-scale discriminator module."""
def __init__(self, config, **kwargs):
"""Initilize MelGAN multi-scale discriminator module.
Args:
config: config object for melgan discriminator
"""
super().__init__(**kwargs)
self.discriminator = []
# add discriminator
for i in range(config.scales):
self.discriminator += [
TFMelGANDiscriminator(
out_channels=config.out_channels,
kernel_sizes=config.kernel_sizes,
filters=config.filters,
max_downsample_filters=config.max_downsample_filters,
use_bias=config.use_bias,
downsample_scales=config.downsample_scales,
nonlinear_activation=config.nonlinear_activation,
nonlinear_activation_params=config.nonlinear_activation_params,
padding_type=config.padding_type,
is_weight_norm=config.is_weight_norm,
initializer_seed=config.initializer_seed,
name='melgan_discriminator_scale_._{}'.format(i)
)
]
self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(**config.downsample_pooling_params)
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, T, 1).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.
"""
outs = []
for f in self.discriminator:
outs += [f(x)]
x = self.pooling(x)
return outs
| [
"[email protected]"
]
| |
4ce830497d5d62e49c120012bdfb1463222c1714 | 0b406d2c041c76d9ef8789539e0e3af9a50e3613 | /Extract_refactor/Ocr/lib/BusinessImagen.py | 1efc3e6dc179167f58ae293994b0d864ef57c812 | []
| no_license | aise17/ExtractPdf | 221b47c5f0e75a823284b4f52981917962042592 | 7e1bfbc759cb7473d727574e5df78eaaac9fa8a4 | refs/heads/master | 2022-02-26T06:39:14.265795 | 2019-06-04T15:01:39 | 2019-06-04T15:01:39 | 184,154,301 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,186 | py | import glob
# de aki cojeremos la calse Image de PIL que es necesaria para pasarsela a tresseract
from PIL import Image
# con treseract extaeremos eltexto de la imagen png pasada por escala de grises
import pytesseract
# se usea OpenCv para aplicar escala de grises sobre imagen
import cv2
# usamos sistema para crear una imagen/archivo temporal y eliminarla por su PID
import os
from Ocr.Conf.Config import configuracion
from Extract_refactor.settings import MEDIA_URL , IMAGENES_PATH, JPG_PATH
class BusinessImagen():
def configurarEscalaDeGrisesDefecto(self,image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
return gray
def configurarEscalaDeGrisesBlur(self, image):
gray = cv2.medianBlur(self.configurarEscalaDeGrisesDefecto(image), 3)
return gray
def configurarEscalaDeGrisesThresh(self, image):
gray = cv2.threshold(self.configurarEscalaDeGrisesDefecto(image), 0, 255,
cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
return gray
def configuracionEscalaDeColoresThresBinary(self, image):
gray = cv2.adaptiveThreshold(self.configurarEscalaDeGrisesDefecto(image), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, 11, 2)
return gray
def contarImagenesGeneradas(self):
"""
cuenta el numero de archivos acabados en jpg que existen en la carpeta output
:return: numero de imagenes
"""
number_of_images = len(glob.glob( JPG_PATH + configuracion['rutas_de_todas_imagenes']))
print(" [+] NUMERO DE PAGINAS -> " + number_of_images.__str__())
return number_of_images
def cargaImagen (self, count):
"""
carga las imagenes generadas aprtir del pdf
:param count: para iterar entreimagenessi son mas de una
:return: debulve una imagen tipo OpenCV
"""
if os.path.exists(JPG_PATH + 'image_name.jpg'):
image = cv2.imread(JPG_PATH + 'image_name.jpg')
else:
image = cv2.imread(JPG_PATH + 'image_name-' + count.__str__() + '.jpg')
return image
def aplicarEcalaDeGrises(self, gray):
"""
escribimos la imagen procesada(en escala de grises) en el disco como una imagen/fichero temporal,
y sobre este aplicamos openCV
:param gray:
:return: ruta filename temporal creado
"""
filename = "{}.jpg".format(os.getpid())
cv2.imwrite(filename, gray)
#cv2.imshow('output', gray)
#cv2.waitKey(0)
return filename
def aplicarORC(self, filename):
"""
cargamos la imagen con la variable tipo Image de PIL/Pillow,
y se aplica el ORC
:param filename: ruta de imagen temporal
:return: str texto extraido de imagen con tresseract-orc
"""
text = pytesseract.image_to_string(Image.open(filename))
# y eliminamos la imagen temporal
os.remove(filename)
return text
def borrarImagenesCreadas(self):
for imagen in glob.glob(JPG_PATH +configuracion['rutas_de_todas_imagenes']):
os.remove(imagen)
| [
"[email protected]"
]
| |
bbefdb7f7b0c7af6bc87b4dd1079b37651970d18 | 8fc653ed827dc185cc92196826c94e01c0a532c4 | /setup.py | 90e957663dd75dd44cbb3931a9637eb2385ae74f | [
"MIT"
]
| permissive | Deepakdubey90/prometheus_flask_exporter | 825dd1d1b8f4d81c7c78d9b9d2107b17f673748d | ae613e94573a28c72ef19dcd1372cc52fa07b5a7 | refs/heads/master | 2020-04-08T19:36:43.364482 | 2018-11-13T11:21:35 | 2018-11-13T11:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | from setuptools import setup
with open('README.md') as readme:
long_description = readme.read()
setup(
name='prometheus_flask_exporter',
packages=['prometheus_flask_exporter'],
version='0.4.0',
description='Prometheus metrics exporter for Flask',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
author='Viktor Adam',
author_email='[email protected]',
url='https://github.com/rycus86/prometheus_flask_exporter',
download_url='https://github.com/rycus86/prometheus_flask_exporter/archive/0.4.0.tar.gz',
keywords=['prometheus', 'flask', 'monitoring', 'exporter'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=['prometheus_client', 'flask'],
)
| [
"[email protected]"
]
| |
e26e6b3fb181566bc0a2c8b7ecbf18f53dc42fef | e27333261b8e579564016c71d2061cc33972a8b8 | /development_codes/Backend/.history/BERTimplementation_20210810215300.py | e2cbc16e68d4af0fc027546c697ad4f250821dcd | []
| no_license | Dustyik/NewsTweet_InformationRetrieval | 882e63dd20bc9101cbf48afa6c3302febf1989b1 | d9a6d92b51c288f5bcd21ea1cc54772910fa58f7 | refs/heads/master | 2023-07-01T09:12:53.215563 | 2021-08-12T08:28:33 | 2021-08-12T08:28:33 | 382,780,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from tfidfImplementation import *
from utils import QueryParsers
#First segment involves retrieving K documents with tf-idf
#Second segment involves reranking them with a BERT encoder
K = 100
#BERT_MODEL = 'bert-base-nli-mean-tokens'
BERT_MODEL = "paraphrase-multilingual-mpnet-base-v2"
class BERTmodel:
def __init__(self, tweets_data):
self.tweets_data = tweets_data
self.cosineSimilarity = CosineSimilarity(tweets_data, return_size=K)
self.BERT_model = SentenceTransformer(BERT_MODEL)
def tfidf_retrieve_K_tweets(self, article_id, article_title):
topKResults = self.cosineSimilarity.query(query_id=article_id, query_text=article_title)
return topKResults
def return_BERT_query(self, article_id, article_title):
topKResults = self.tfidf_retrieve_K_tweets(article_id, article_title)
queryStemmed = " ".join(QueryParsers(article_title).query)
query_vector_embedding = self.BERT_model.encode(queryStemmed)
topKResults['vector_embedding'] = topKResults.apply(lambda row: self.BERT_model.encode(row.clean_text), axis = 1)
topKResults["BERT_similarity"] = topKResults.apply(lambda row: cosine_similarity(np.array(query_vector_embedding).reshape(1, -1), np.array(row.vector_embedding).reshape(1, -1)).item(), axis = 1)
topKResults.sort_values(by='BERT_similarity',ascending=False,inplace=True)
return topKResults
| [
"[email protected]"
]
| |
b6da48ecd1d049b9d4232c17283cd2ca3c4afe13 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/actrl/rulehitag15min.py | 36d1af8cc9f903d8698add797b360b3c6b85fd41 | []
| no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,591 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RuleHitAg15min(Mo):
"""
A class that represents the most current aggregated statistics for rule hits in a 15 minute sampling interval. This class updates every 5 minutes.
"""
meta = StatsClassMeta("cobra.model.actrl.RuleHitAg15min", "rule hits")
counter = CounterMeta("revPkts", CounterCategory.COUNTER, "packets", "reverse hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "revPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "revPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "revPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "revPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "revPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "revPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "revPktsRate"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
counter = CounterMeta("egrPkts", CounterCategory.COUNTER, "packets", "egress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "egrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "egrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "egrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "egrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "egrPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "egrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "egrPktsRate"
meta._counters.append(counter)
counter = CounterMeta("ingrPkts", CounterCategory.COUNTER, "packets", "ingress hit packets")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "ingrPktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "ingrPktsPer"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "ingrPktsSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "ingrPktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "ingrPktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "ingrPktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "ingrPktsRate"
meta._counters.append(counter)
meta.moClassName = "actrlRuleHitAg15min"
meta.rnFormat = "CDactrlRuleHitAg15min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current aggregated rule hits stats in 15 minute"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.fv.RInfoHolder")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.actrl.RuleHitAg")
meta.rnPrefixes = [
('CDactrlRuleHitAg15min', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "egrPktsCum", "egrPktsCum", 7476, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "egress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsCum", prop)
prop = PropMeta("str", "egrPktsPer", "egrPktsPer", 7477, PropCategory.IMPLICIT_PERIODIC)
prop.label = "egress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsPer", prop)
prop = PropMeta("str", "egrPktsRate", "egrPktsRate", 7482, PropCategory.IMPLICIT_RATE)
prop.label = "egress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsRate", prop)
prop = PropMeta("str", "egrPktsSpct", "egrPktsSpct", 7478, PropCategory.IMPLICIT_SUSPECT)
prop.label = "egress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsSpct", prop)
prop = PropMeta("str", "egrPktsThr", "egrPktsThr", 7479, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "egress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("egrPktsThr", prop)
prop = PropMeta("str", "egrPktsTr", "egrPktsTr", 7481, PropCategory.IMPLICIT_TREND)
prop.label = "egress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsTr", prop)
prop = PropMeta("str", "egrPktsTrBase", "egrPktsTrBase", 7480, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "egress hit packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("egrPktsTrBase", prop)
prop = PropMeta("str", "ingrPktsCum", "ingrPktsCum", 7537, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "ingress hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsCum", prop)
prop = PropMeta("str", "ingrPktsPer", "ingrPktsPer", 7538, PropCategory.IMPLICIT_PERIODIC)
prop.label = "ingress hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsPer", prop)
prop = PropMeta("str", "ingrPktsRate", "ingrPktsRate", 7543, PropCategory.IMPLICIT_RATE)
prop.label = "ingress hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsRate", prop)
prop = PropMeta("str", "ingrPktsSpct", "ingrPktsSpct", 7539, PropCategory.IMPLICIT_SUSPECT)
prop.label = "ingress hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsSpct", prop)
prop = PropMeta("str", "ingrPktsThr", "ingrPktsThr", 7540, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "ingress hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("ingrPktsThr", prop)
prop = PropMeta("str", "ingrPktsTr", "ingrPktsTr", 7542, PropCategory.IMPLICIT_TREND)
prop.label = "ingress hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsTr", prop)
prop = PropMeta("str", "ingrPktsTrBase", "ingrPktsTrBase", 7541, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "ingress hit packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("ingrPktsTrBase", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 24181, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 24182, PropCategory.IMPLICIT_PERIODIC)
prop.label = "hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 24187, PropCategory.IMPLICIT_RATE)
prop.label = "hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 24183, PropCategory.IMPLICIT_SUSPECT)
prop.label = "hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 24184, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 24186, PropCategory.IMPLICIT_TREND)
prop.label = "hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "pktsTrBase", "pktsTrBase", 24185, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "hit packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTrBase", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "revPktsCum", "revPktsCum", 24236, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "reverse hit packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsCum", prop)
prop = PropMeta("str", "revPktsPer", "revPktsPer", 24237, PropCategory.IMPLICIT_PERIODIC)
prop.label = "reverse hit packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsPer", prop)
prop = PropMeta("str", "revPktsRate", "revPktsRate", 24242, PropCategory.IMPLICIT_RATE)
prop.label = "reverse hit packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsRate", prop)
prop = PropMeta("str", "revPktsSpct", "revPktsSpct", 24238, PropCategory.IMPLICIT_SUSPECT)
prop.label = "reverse hit packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsSpct", prop)
prop = PropMeta("str", "revPktsThr", "revPktsThr", 24239, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "reverse hit packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("revPktsThr", prop)
prop = PropMeta("str", "revPktsTr", "revPktsTr", 24241, PropCategory.IMPLICIT_TREND)
prop.label = "reverse hit packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsTr", prop)
prop = PropMeta("str", "revPktsTrBase", "revPktsTrBase", 24240, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "reverse hit packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("revPktsTrBase", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudIgw", "From fv:Ctx to hcloud:Igw", "cobra.model.hcloud.Igw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudVgw", "From fv:Ctx to hcloud:Vgw", "cobra.model.hcloud.Vgw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToConsVzBrCP", "From cloud ExtEPg to Consumer Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToProvVzBrCP", "From cloud ExtEPg to Provider Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToConsVzBrCP", "From EPg to Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToProvVzBrCP", "From EPg to Contract", "cobra.model.vz.BrCP"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudExtEPg", "From fvCtx (VRF) to cloudExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToFvCtx", "cloud:ExtEPg to fv:Ctx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHCloudEndPoint", "cloud:ExtEPg to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHcloudSecurityGroup", "cloud:ExtEPg to hcloud:SecurityGroup", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudExtEPgToHCloudCtx", "From cloud ExtEPg to VPCs hCloudCtx", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudRegion", "From fvCtx (VRF) to CloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudCsr", "From fvCtx (VRF) to hcloudCsr (CSR)", "cobra.model.hcloud.Csr"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToHcloudSecurityGroup", "cloud:EPg to hcloud:SecurityGroup", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToFvCtx", "cloud:EPg to fv:Ctx", "cobra.model.fv.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudEndPoint", "From fvCtx (VRF) to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEpgToHCloudEndPoint", "cloud:EPg to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudCtx", "From fvCtx (VRF) to hcloudCtx (VPC)", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudCtxProfile", "From fvCtx (VRF) to cloudCtxProfile", "cobra.model.cloud.CtxProfile"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudEPg", "From fvCtx (VRF) to cloud EPg", "cobra.model.cloud.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudEPgToHCloudCtx", "From cloud EPg to VPCs hCloudCtx", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("cloudEpgToApp", "cloudEpgToApp", "cobra.model.cloud.App"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToRegion", "Vrf to cloud Region", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ATgToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("AEPgToVirtualMachines", "Virtual Machines", "cobra.model.comp.Vm"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("MgmtInstPToNode", "External Management Network EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("OoBToNode", "Out-of-band Management EPG to Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("InBToNode", "Node", "cobra.model.fv.Locale"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("EPgToNwIf", "Interface", "cobra.model.nw.If"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
fc2adf8051fdd9f97147f4dc02075974b54ab7ac | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03265/s414771268.py | b56306c65e375123788a4b9e6db496b3f1d196d4 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | x1, y1, x2, y2 = map(int, input().split())
a = x2 - x1
b = y2 - y1
x3 = x2 - b
y3 = y2 + a
x4 = x3 - a
y4 = y3 - b
print(f'{x3} {y3} {x4} {y4}') | [
"[email protected]"
]
| |
a22223b26b8737255728236d842d299cecf4eb12 | d91a0186cec0452a8eb54fd6fabe0ef9e75cd738 | /chapter_8/exercise_8.16/build_prof_module.py | 301a4e69f18d34e27417fae0a4179c61e1060dee | []
| no_license | MaximZolotukhin/erik_metiz | 31a6f5146b8bb58b8f04a6b9635b36a67830e52a | 8afde60aa2bddd6858a5f7a7189169a82bde4322 | refs/heads/main | 2023-05-03T07:39:06.731413 | 2021-05-30T19:04:31 | 2021-05-30T19:04:31 | 361,544,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 350 | py | # Передача неограниченного числа именованных аргументов
def build_profile(firs_name, last_name, **user_info):
    """Builds a dictionary with information about the user."""
user_info['firs_name'] = firs_name
user_info['last_name'] = last_name
return user_info
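

# Illustrative usage (added sketch; the argument values below are made up, not from the
# original exercise): any extra keyword arguments are collected into the returned
# dictionary alongside the two required name fields.
if __name__ == '__main__':
    profile = build_profile('albert', 'einstein', location='princeton', field='physics')
    print(profile)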
| [
"[email protected]"
]
| |
ec6d47be6ba51fe99fb151c6c4965a64bf57d256 | d68aedd8fdd83669d0f91b0191846a67a10fecf1 | /mwtfidfe.py | 4ce2b6cd0749bbf37d107eb352d588504afa966e | []
| no_license | frankness/MPyWE | d63c2b35071d48c49626c79910e6f182445a6ff0 | af0fb7bd170080e2a496b95841ad9443a65c226d | refs/heads/master | 2020-04-13T00:54:00.697708 | 2018-06-23T13:55:35 | 2018-06-30T01:37:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,555 | py | # encoding: utf-8
from __future__ import unicode_literals
from __future__ import division
import argparse
import math
import struct
import sys
import warnings
import os
import codecs
import numpy as np
from multiprocessing import Pool, Value, Array
import time
from pybloom import BloomFilter
import pickle
u'''
chinese morpheme enhanced word embedding.
'''
MIN_CHINESE = 0x4E00
MAX_CHINESE = 0x9FA5
morpheme_size = 200000
idf_words_dict = pickle.load(open("./data/people's_daily_idf_words_dict.pkl", "rb"))
tf_article_word_counter_dict = pickle.load(open("./data/people's_daily_tf_article_word_counter_dict.pkl", "rb"))
# load monosyllable, disyllable, and multisyllable morpheme
monosyllable_bf = BloomFilter(capacity=100000, error_rate=0.001)
disyllable_bf = BloomFilter(capacity=100000, error_rate=0.001)
multisyllable_bf = BloomFilter(capacity=100000, error_rate=0.001)
def load_morphemes():
folder_path = './dict'
for file_name in os.listdir(folder_path):
file_path = os.path.join(folder_path, file_name)
with codecs.open(file_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.rstrip()
if file_name.startswith('all_disyllable'):
if line not in disyllable_bf:
disyllable_bf.add(line)
elif file_name.startswith('all_monosyllable'):
if line not in monosyllable_bf:
monosyllable_bf.add(line)
else:
if line not in multisyllable_bf:
multisyllable_bf.add(line)
load_morphemes()
def hash_morpheme(morpheme):
return abs(hash(morpheme)) % morpheme_size
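# Note (added comment): this is the hashing trick — every morpheme string is mapped to
# one of `morpheme_size` buckets, so the morpheme embedding matrix has a fixed number of
# rows and rare morphemes may share a vector when their hashes collide.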
# Vocabulary item: a word together with its morpheme decomposition
class VocabItem:
def __init__(self, word):
self.word = word
        self.morpheme = []  # morphemes
is_all_chinese = True
for morpheme in word:
morpheme_ord = ord(morpheme)
if morpheme_ord < MIN_CHINESE or morpheme_ord > MAX_CHINESE:
is_all_chinese = False
break
if is_all_chinese:
if len(word) == 1:
if word in monosyllable_bf:
self.morpheme.append(word)
elif len(word) == 2:
if word in disyllable_bf:
self.morpheme.append(word)
else:
for m in word:
if m in monosyllable_bf:
self.morpheme.append(m)
else:
if word in multisyllable_bf:
self.morpheme.append(word)
else:
pass
        # Assign a hash bucket to each morpheme
morpheme_hash = []
for m in self.morpheme:
morpheme_hash.append(hash_morpheme(m))
self.morpheme = morpheme_hash
self.count = 0
self.path = None # Path (list of indices) from the root to the word (leaf)
self.code = None # Huffman encoding
# Vocabulary
class Vocab:
def __init__(self, fi, min_count):
vocab_items = []
vocab_hash = {}
word_count = 0
fi = codecs.open(fi, 'r', encoding='utf-8')
# Add special tokens <bol> (beginning of line) and <eol> (end of line)
for token in [u'<bol>', u'<eol>']:
vocab_hash[token] = len(vocab_items) # vocab index
vocab_items.append(VocabItem(token)) #
for line in fi:
line = line.rstrip()
if line == '' or line.startswith(u'#######'):
continue
tokens = line.split()
for token in tokens:
if token not in vocab_hash:
vocab_hash[token] = len(vocab_items) # vocab index
vocab_items.append(VocabItem(token))
# assert vocab_items[vocab_hash[token]].word == token, 'Wrong vocab_hash index'
                vocab_items[vocab_hash[token]].count += 1  # increment the occurrence count
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write(u"\rReading word %d" % word_count)
sys.stdout.flush()
# Add special tokens <bol> (beginning of line) and <eol> (end of line)
vocab_items[vocab_hash[u'<bol>']].count += 1
vocab_items[vocab_hash[u'<eol>']].count += 1
word_count += 2
self.bytes = fi.tell()
self.vocab_items = vocab_items # List of VocabItem objects
self.vocab_hash = vocab_hash # Mapping from each token to its index in vocab
self.word_count = word_count # Total number of words in train file
# Add special token <unk> (unknown),
# merge words occurring less than min_count into <unk>, and
# sort vocab in descending order by frequency in train file
self.__sort(min_count)
# assert self.word_count == sum([t.count for t in self.vocab_items]), 'word_count and sum of t.count do not agree'
print(u'Total words in training file: %d' % self.word_count)
print(u'Total bytes in training file: %d' % self.bytes)
print(u'Vocab size: %d' % len(self))
def __getitem__(self, i):
return self.vocab_items[i]
def __len__(self):
return len(self.vocab_items)
def __iter__(self):
return iter(self.vocab_items)
def __contains__(self, key):
return key in self.vocab_hash
def __sort(self, min_count):
tmp = []
tmp.append(VocabItem(u'<unk>'))
unk_hash = 0
count_unk = 0
for token in self.vocab_items:
if token.count < min_count:
count_unk += 1
tmp[unk_hash].count += token.count
else:
tmp.append(token)
tmp.sort(key=lambda token: token.count, reverse=True)
# Update vocab_hash
vocab_hash = {}
for i, token in enumerate(tmp):
vocab_hash[token.word] = i
self.vocab_items = tmp
self.vocab_hash = vocab_hash
print()
print(u'Unknown vocab size:', count_unk)
def indices(self, tokens):
return [self.vocab_hash[token] if token in self else self.vocab_hash[u'<unk>'] for token in tokens]
def index(self, token):
return self.vocab_hash.get(token)
u'''
    Build the Huffman tree: https://www.wikiwand.com/zh-hans/%E9%9C%8D%E5%A4%AB%E6%9B%BC%E7%BC%96%E7%A0%81
'''
def encode_huffman(self):
# Build a Huffman tree
vocab_size = len(self) # len 635
count = [t.count for t in self] + [1e15] * (vocab_size - 1) # len 1269
parent = [0] * (2 * vocab_size - 2) # len 1268
binary = [0] * (2 * vocab_size - 2) # len 1268
# vocab 是按从大到小排序的
pos1 = vocab_size - 1 # 634
pos2 = vocab_size # 635
for i in range(vocab_size - 1):
            # Find min1 (the node with the smallest frequency)
if pos1 >= 0:
if count[pos1] < count[pos2]:
min1 = pos1
pos1 -= 1
else:
min1 = pos2
pos2 += 1
else:
min1 = pos2
pos2 += 1
# Find min2
if pos1 >= 0:
if count[pos1] < count[pos2]:
min2 = pos1
pos1 -= 1
else:
min2 = pos2
pos2 += 1
else:
min2 = pos2
pos2 += 1
            count[vocab_size + i] = count[min1] + count[min2]  # merge the two nodes with the smallest counts
            parent[min1] = vocab_size + i
            parent[min2] = vocab_size + i
            binary[min2] = 1  # mark the branch of the second-smallest node (bucket-style marking)
# Assign binary code and path pointers to each vocab word
root_idx = 2 * vocab_size - 2
for i, token in enumerate(self):
path = [] # List of indices from the leaf to the root
code = [] # Binary Huffman encoding from the leaf to the root
node_idx = i
while node_idx < root_idx:
if node_idx >= vocab_size:
path.append(node_idx)
code.append(binary[node_idx])
node_idx = parent[node_idx]
path.append(root_idx)
# These are path and code from the root to the leaf
token.path = [j - vocab_size for j in path[::-1]]
token.code = code[::-1]
class UnigramTable:
"""
A list of indices of tokens in the vocab following a power law distribution,
used to draw negative samples.
"""
def __init__(self, vocab):
vocab_size = len(vocab)
power = 0.75
        norm = sum([math.pow(t.count, power) for t in vocab])  # Normalizing constant (used as the denominator)
table_size = 1e8 # Length of the unigram table 100000000.0
table = np.zeros(int(table_size), dtype=np.uint32)
print(u'Filling unigram table')
        p = 0  # Cumulative probability
i = 0
for j, token in enumerate(vocab):
            p += float(math.pow(token.count, power)) / norm  # p reaches at most 1
while i < table_size and (float(i) / table_size) < p:
table[i] = j
i += 1
self.table = table
def sample(self, count):
indices = np.random.randint(low=0, high=len(self.table), size=count)
return [self.table[i] for i in indices]
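        # Note (added comment): because the table was filled in proportion to
        # count**0.75, sample(count) draws negative-sample word indices following
        # the same smoothed unigram distribution used in the original word2vec.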
# Clipped approximation of the sigmoid function
def sigmoid(z):
if z > 6:
return 1.0
elif z < -6:
return 0.0
else:
return 1 / (1 + math.exp(-z))
'''
Initialize the matrices syn0, syn0_m and syn1.
'''
def init_net(dim, vocab_size, morpheme_size):
    # Init syn0 with random numbers from a uniform distribution on the interval [-0.5, 0.5]/dim
tmp = np.random.uniform(low=-0.5 / dim, high=0.5 / dim, size=(vocab_size, dim))
syn0_m = np.random.uniform(low=-0.5 / dim, high=0.5 / dim, size=(morpheme_size, dim))
# Create and return a ctypes object from a numpy array
syn0 = np.ctypeslib.as_ctypes(tmp)
syn0 = Array(syn0._type_, syn0, lock=False)
syn0_m = np.ctypeslib.as_ctypes(syn0_m)
syn0_m = Array(syn0_m._type_, syn0_m, lock=False)
# Init syn1 with zeros
tmp = np.zeros(shape=(vocab_size, dim))
syn1 = np.ctypeslib.as_ctypes(tmp)
syn1 = Array(syn1._type_, syn1, lock=False)
return (syn0, syn0_m, syn1)
'''
Split the training file fi into per-worker chunks according to pid.
'''
def train_process(pid):
# Set fi to point to the right chunk of training file
start = vocab.bytes / num_processes * pid
end = vocab.bytes if pid == num_processes - 1 else vocab.bytes / num_processes * (pid + 1)
fi.seek(start)
print(u'Worker %d beginning training at %d, ending at %d \n' % (pid, start, end))
alpha = starting_alpha
word_count = 0
last_word_count = 0
article_words_tf_idf = None
while fi.tell() < end: #
line = fi.readline().strip()
# Skip blank lines
if not line:
continue
if line.startswith(u'######'):
file_name = line.split(' ')[-1]
print(u'file_name: ', file_name)
# word
article_words_tf_idf = {}
tf_article_word_counter = tf_article_word_counter_dict.get(file_name)
if tf_article_word_counter is None or len(tf_article_word_counter) == 0:
continue
tf_article_word_counter = sorted(tf_article_word_counter.items(), key=lambda x: x[1], reverse=True)
max_count = tf_article_word_counter[0][1]
for word, count in tf_article_word_counter:
print(u'word: {}, count: {}'.format(word, count))
tf = count / max_count
idf = idf_words_dict.get(word)
if idf is None:
print(u'word: {} idf is None'.format(word))
idf = 0.0
tf_idf = tf * idf
                # Map the word to its vocabulary index
word_index = vocab.index(word)
if word_index is None:
print(u'vocab.index({}) is None'.format(word))
continue
article_words_tf_idf[word_index] = tf_idf
continue
# Init sent, a list of indices of words in line
        sent = vocab.indices([u'<bol>'] + line.split() + [u'<eol>'])  # build the sentence, adding <bol> and <eol> markers
for sent_pos, token in enumerate(sent):
if word_count % 10000 == 0:
global_word_count.value += (word_count - last_word_count)
last_word_count = word_count
# Recalculate alpha
alpha = starting_alpha * (1 - float(global_word_count.value) / vocab.word_count)
if alpha < starting_alpha * 0.0001:
alpha = starting_alpha * 0.0001
# Print progress info
sys.stdout.write(u"\rAlpha: %f Progress: %d of %d (%.2f%%)" %
(alpha, global_word_count.value, vocab.word_count,
float(global_word_count.value) / vocab.word_count * 100))
sys.stdout.flush()
                # Randomize window size, where win is the max window size
current_win = np.random.randint(low=1, high=win + 1)
context_start = max(sent_pos - current_win, 0)
context_end = min(sent_pos + current_win + 1, len(sent))
                # Context words before and after the target position
context = sent[context_start: sent_pos] + sent[sent_pos + 1: context_end] # Turn into an iterator?
words_weight = []
for c in context:
word_tf_idf = article_words_tf_idf.get(c)
if word_tf_idf is None:
word_tf_idf = 0
words_weight.append(word_tf_idf)
print('words_weight: ', words_weight)
if np.sum(words_weight) == 0:
norm_words_weight = np.zeros(len(context))
else:
norm_words_weight = words_weight / np.sum(words_weight)
                # Incorporate morpheme (sub-word) information into the CBOW / skip-gram models
# CBOW
if cbow:
neu1 = np.zeros(dim)
neu1e = np.zeros(dim)
morpheme_index_list = []
for c, word_weight in zip(context, norm_words_weight):
neu1m = np.zeros(dim)
neu1m += syn0[c] * word_weight
                        # add the morpheme vectors
for morpheme_index in vocab[c].morpheme:
neu1m = np.add(neu1m, np.multiply(syn0_m[morpheme_index, :], 1.0 / len(vocab[c].morpheme)))
morpheme_index_list.append(morpheme_index)
neu1m = neu1m * 0.5
neu1 += neu1m
assert len(neu1) == dim, u'neu1pinyin and dim do not agree'
# // the CBOW model takes the average of the vectors of the input context, and use the product of the input -> hidden weight matrix and the average vector as the output .
# neu1 = np.multiply(neu1, 1.0 / len(context))
# Compute neu1e and update syn1
if neg > 0:
# negative sampling
classifiers = [(token, 1)] + [(target, 0) for target in table.sample(neg)]
else:
# hierarchical softmax
classifiers = zip(vocab[token].path, vocab[token].code) # 通过Huffman tree获取
for target, label in classifiers:
z = np.dot(neu1, syn1[target])
p = sigmoid(z)
g = alpha * (label - p)
neu1e += g * syn1[target] # Error to backpropagate to syn0
syn1[target] += g * neu1 # Update syn1
                    # Update syn0 for each context word
for c, word_weight in zip(context, norm_words_weight):
syn0[c] += neu1e # * word_weight
                    # morpheme_rate: the factor <float> of learning rate for morphemes, default is 1.0
for morpheme_index in morpheme_index_list:
syn0_m[morpheme_index] += neu1e * morpheme_rate
# Skip-gram
else:
for c in context:
# Error to backpropagate to syn0
neu1e = np.zeros(dim)
# Compute neu1e and update syn1
if neg > 0:
# negative sampling
classifiers = [(token, 1)] + [(target, 0) for target in table.sample(neg)]
else:
# hierarchical softmax
classifiers = zip(vocab[token].path, vocab[token].code)
for target, label in classifiers:
# z = np.dot(syn0[context_word], syn1[target])
neu1 = np.zeros(dim)
neu1m = np.zeros(dim)
neu1m += syn0[c]
                            if len(vocab[c].morpheme) > 0:
for morpheme_index in vocab[c].morpheme:
neu1m += np.multiply(syn0_m[morpheme_index], 1.0 / len(vocab[c].morpheme))
neu1m = np.multiply(neu1m, 1.0 / 2)
neu1 += neu1m
z = np.dot(neu1, syn1[target])
p = sigmoid(z)
g = alpha * (label - p)
neu1e += g * syn1[target] # Error to backpropagate to syn0
syn1[target] += g * syn0[c] # Update syn1
# Update syn0
syn0[c] += neu1e
# Update syn0_m
if len(vocab[c].morpheme) > 0:
for morpheme_index in vocab[c].morpheme:
syn0_m[morpheme_index] += neu1e * morpheme_rate
word_count += 1
# Print progress info
global_word_count.value += (word_count - last_word_count)
sys.stdout.write(u"\rAlpha: %f Progress: %d of %d (%.2f%%)" %
(alpha, global_word_count.value, vocab.word_count,
float(global_word_count.value) / vocab.word_count * 100))
sys.stdout.flush()
fi.close()
u'''
Save the learned word vectors
'''
def save(vocab, syn0, syn0_m, fo, binary):
print(u'Saving model to', fo)
dim = len(syn0[0])
if binary:
        # Binary output: header as UTF-8 text, then each word followed by packed float32 values.
        fo = open(fo, 'wb')
        fo.write(('%d %d\n' % (len(syn0), dim)).encode('utf-8'))
        for token, vector in zip(vocab, syn0):
            for morpheme_index in token.morpheme:
                vector = np.add(vector, np.multiply(syn0_m[morpheme_index, :], 1.0 / len(token.morpheme)))
            fo.write(('%s ' % token.word).encode('utf-8'))
            for s in vector:
                fo.write(struct.pack('f', s))
            fo.write(b'\n')
    else:  # save the vectors as plain text
fo = codecs.open(fo, 'w', encoding='utf-8')
fo.write('%d %d\n' % (len(syn0), dim)) # syn0, dim (635, 100)
for token, vector in zip(vocab, syn0):
word = token.word
tmp_vector = np.zeros(dim)
tmp_vector = np.add(tmp_vector, vector)
for morpheme_index in token.morpheme:
tmp_vector = np.add(tmp_vector, np.multiply(syn0_m[morpheme_index], 1.0 / len(token.morpheme)))
vector_str = ' '.join([str(s) for s in tmp_vector])
fo.write('%s %s\n' % (word, vector_str))
fo.close()
def __init_process(*args):
global vocab, syn0, syn0_m, syn1, table, cbow, neg, dim, starting_alpha
global win, num_processes, morpheme_rate, global_word_count, fi
# initargs = (vocab, syn0, syn1, table, cbow, neg, dim, alpha, win, num_processes, global_word_count, fi)
vocab, syn0_tmp, syn0_m_tmp, syn1_tmp, table, cbow, neg, dim, starting_alpha, win, num_processes, morpheme_rate, global_word_count = args[
:-1]
fi = codecs.open(args[-1], 'r', encoding='utf-8')
with warnings.catch_warnings():
warnings.simplefilter('ignore', RuntimeWarning)
syn0 = np.ctypeslib.as_array(syn0_tmp)
syn1 = np.ctypeslib.as_array(syn1_tmp)
syn0_m = np.ctypeslib.as_array(syn0_m_tmp)
def train(fi, fo, cbow, neg, dim, alpha, win, min_count, num_processes, binary, morpheme_rate):
# Read train file to init vocab (词汇表)
vocab = Vocab(fi, min_count)
# Init net
syn0, syn0_m, syn1 = init_net(dim, len(vocab), morpheme_size)
global_word_count = Value('i', 0)
table = None
#
if neg > 0:
print(u'Initializing unigram table')
table = UnigramTable(vocab)
else:
print(u'Initializing Huffman tree')
vocab.encode_huffman()
# Begin training using num_processes workers
t0 = time.time()
pool = Pool(processes=num_processes, initializer=__init_process,
initargs=(vocab, syn0, syn0_m, syn1, table, cbow, neg, dim, alpha,
win, num_processes, morpheme_rate, global_word_count, fi))
# Apply `func` to each element in `iterable`, collecting the results in a list that is returned.
pool.map(train_process, range(num_processes))
t1 = time.time()
print()
print(u'Completed training. Training took', (t1 - t0) / 60, u'minutes')
# Save model to file
save(vocab, syn0, syn0_m, fo, binary)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# train_file = "/Users/LeonTao/PycharmProjects/deborausujono/word2vecpy/data/input-chinese"
# output_file = "/Users/LeonTao/PycharmProjects/deborausujono/word2vecpy/data/output-chinese"
# /Users/LeonTao/NLP/Corpos/wiki/zhwiki-latest-simplified_tokened.txt
train_file = "/Users/LeonTao/PycharmProjects/deborausujono/word2vecpy/data/people's_daily"
output_file = "/Users/LeonTao/PycharmProjects/deborausujono/word2vecpy/data/people's_daily_morpheme_word_tfidf_cbow_100d"
t0 = time.time()
u'''
    Modified settings:
negative: 5
min-count for pinyin:
'''
parser.add_argument('-train', help='Training file', dest='fi', default=train_file) # , required=True
parser.add_argument('-model', help='Output model file', dest='fo', default=output_file) # , required=True
parser.add_argument('-cbow', help='1 for CBOW, 0 for skip-gram', dest='cbow', default=1, type=int)
parser.add_argument('-negative',
help='Number of negative examples (>0) for negative sampling, 0 for hierarchical softmax',
dest='neg', default=5, type=int)
parser.add_argument('-dim', help='Dimensionality of word embeddings', dest='dim', default=100, type=int)
parser.add_argument('-alpha', help='Starting alpha', dest='alpha', default=0.025, type=float)
parser.add_argument('-window', help='Max window length', dest='win', default=5, type=int)
parser.add_argument('-min-count', help='Min count for words used to learn <unk>', dest='min_count', default=5,
type=int)
parser.add_argument('-processes', help='Number of processes', dest='num_processes', default=1, type=int)
parser.add_argument('-binary', help='1 for output model in binary format, 0 otherwise', dest='binary', default=0,
type=int)
parser.add_argument('-morpheme-rate', help='the factor <float> of learning rate for morpheme, default is 1.0',
dest='morpheme_rate', default=1.0, type=float)
# TO DO: parser.add_argument('-epoch', help='Number of training epochs', dest='epoch', default=1, type=int)
print(u'os.getcwd: {}'.format(os.getcwd()))
# -train data/input -model data/output -cbow 1 -negative 5 -dim 100 -window 5
args = parser.parse_args()
print(u'args: {} \n'.format(args))
train(args.fi, args.fo, bool(args.cbow), args.neg, args.dim, args.alpha, args.win,
args.min_count, args.num_processes, bool(args.binary), args.morpheme_rate)
t1 = time.time()
print(u"cost time: {}".format(t1 - t0))
| [
"[email protected]"
]
| |
e09aaeb743a979e5cabac0be657af51374b5dbfc | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/yaml-3.10/yaml/__init__.py | 13b9f8b58fe063c683cad508788cf06d6e71c097 | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/yaml-3.10/yaml/__init__.py | [
"[email protected]"
]
| |
407d88f8bd05212b374fa5dd67bf6d0bcceb9d30 | 4c5d113b19bf8d55d2d94fe7dc08fd90e0152174 | /thor/constants.py | 1787df00d88af014488203ee7972eb248d049a43 | [
"BSD-3-Clause"
]
| permissive | swipswaps/thor | f4b2b956fbd71c3fa4a84d457ff67f158d9e9c21 | d3d1dcbe86f67a62c90b4cde3fc577e414825cf2 | refs/heads/master | 2023-04-05T11:48:31.884619 | 2021-02-12T19:38:23 | 2021-02-12T19:38:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | import numpy as np
class Constants:
# km to au
KM_P_AU = 149597870.700
# seconds to days
S_P_DAY = 86400.0
# Speed of light: AU per day (173.14463267424034) (299792.458 km/s -- DE431/DE430)
C = 299792.458 / KM_P_AU * S_P_DAY
# Gravitational constant: AU**3 / M_sun / d**2 (0.295912208285591100E-3 -- DE431/DE430)
G = 0.295912208285591100E-3
# Solar Mass: M_sun (1.0)
M_SUN = 1.0
# Earth Equatorial Radius: km (6378.1363 km -- DE431/DE430)
R_EARTH = 6378.1363 / KM_P_AU
# Mean Obliquity at J2000: radians (84381.448 arcseconds -- DE431/DE430)
OBLIQUITY = 84381.448 * np.pi / (180.0 * 3600.0)
# Transformation matrix from Equatorial J2000 to Ecliptic J2000
TRANSFORM_EQ2EC = np.array([
[1, 0, 0],
[0, np.cos(OBLIQUITY), np.sin(OBLIQUITY)],
        [0, -np.sin(OBLIQUITY), np.cos(OBLIQUITY)]
    ])
# Transformation matrix from Ecliptic J2000 to Equatorial J2000
TRANSFORM_EC2EQ = TRANSFORM_EQ2EC.T | [
"[email protected]"
]
| |
2dd3b4102b27ed2302263574898acc47681bba6c | 43eb7f8581a8dbfa1298b4e6d84fc7b7a552e335 | /python/kserve/test/test_v1beta1_explainer_config.py | 462733a4fe7179cd230f7ac00ba92e086e952e08 | [
"Apache-2.0"
]
| permissive | Suresh-Nakkeran/kserve | c2d114f7258a70b4c8ddeb8ee8c584d4eee0f81b | d3910e0fc6af4bf73156a53bd912d6e4acc87533 | refs/heads/master | 2023-07-29T00:17:28.900100 | 2021-09-11T08:04:54 | 2021-09-11T08:04:54 | 406,243,335 | 0 | 0 | Apache-2.0 | 2021-09-14T05:59:05 | 2021-09-14T05:59:04 | null | UTF-8 | Python | false | false | 2,073 | py | # Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import kserve
from kserve.models.v1beta1_explainer_config import V1beta1ExplainerConfig # noqa: E501
from kserve.rest import ApiException
class TestV1beta1ExplainerConfig(unittest.TestCase):
"""V1beta1ExplainerConfig unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test V1beta1ExplainerConfig
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = kserve.models.v1beta1_explainer_config.V1beta1ExplainerConfig() # noqa: E501
if include_optional :
return V1beta1ExplainerConfig(
default_image_version = '0',
image = '0'
)
else :
return V1beta1ExplainerConfig(
default_image_version = '0',
image = '0',
)
def testV1beta1ExplainerConfig(self):
"""Test V1beta1ExplainerConfig"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
6331b3fa3fff6a07d5467b20340d9d1d30e4fe9b | ad13583673551857615498b9605d9dcab63bb2c3 | /output/instances/nistData/atomic/unsignedByte/Schema+Instance/NISTXML-SV-IV-atomic-unsignedByte-minExclusive-4-2.py | ad0db34b2b85a1754a789abff5f0a8024ff7d358 | [
"MIT"
]
| permissive | tefra/xsdata-w3c-tests | 397180205a735b06170aa188f1f39451d2089815 | 081d0908382a0e0b29c8ee9caca6f1c0e36dd6db | refs/heads/main | 2023-08-03T04:25:37.841917 | 2023-07-29T17:10:13 | 2023-07-30T12:11:13 | 239,622,251 | 2 | 0 | MIT | 2023-07-25T14:19:04 | 2020-02-10T21:59:47 | Python | UTF-8 | Python | false | false | 302 | py | from output.models.nist_data.atomic.unsigned_byte.schema_instance.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_4_xsd.nistschema_sv_iv_atomic_unsigned_byte_min_exclusive_4 import NistschemaSvIvAtomicUnsignedByteMinExclusive4
obj = NistschemaSvIvAtomicUnsignedByteMinExclusive4(
value=218
)
| [
"[email protected]"
]
| |
7a92b6df84eaf024ce5f084a8fbb89c734db180b | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_229/ch74_2020_04_13_03_09_30_010178.py | 593012b482da142f910dee4af2f450b410ca8283 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | def conta_bigramas(string):
    # Count every pair of adjacent characters (bigram) in the string.
    dic = dict()
    for i in range(len(string) - 1):
        bigrama = string[i] + string[i + 1]
        if bigrama in dic:
            dic[bigrama] += 1
        else:
            dic[bigrama] = 1
    return dic
print(conta_bigramas("banana nanica")) | [
"[email protected]"
]
| |
424a7ef7d48763ef6c952dd34adea36b3238cc13 | 23f3349e8b50f0cb3e461bbd65c1ea8dec792d0b | /2_semestr/encryption_1.py | beb9439133bbf54535c99776ead5297b83c186b0 | []
| no_license | JacobLutin/Python_bmstu | d17866dbab0e74f0f9d600c4dbd9d53eb5c5b7be | 66fd8679de7556978b9cd1e9fd8646a8d7d6daa8 | refs/heads/master | 2020-05-29T14:40:09.310602 | 2017-03-27T05:18:58 | 2017-03-27T05:18:58 | 64,742,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | def rotate(Tb,N):
for i in range(0,N//2):
for j in range(i,N-1-i):
tmp = Tb[i][j]
            Tb[i][j] = Tb[N-j-1][i]
            Tb[N-j-1][i] = Tb[N-i-1][N-j-1]
            Tb[N-i-1][N-j-1] = Tb[j][N-i-1]
            Tb[j][N-i-1] = tmp
return Tb
def PrintTable(Tb,N):
for i in range(N):
for j in range(N):
print(Tb[i][j], end="\t")
print()
print()
def shifravanie(mask,out,sent,k):
for i in range(n):
for j in range(n):
if (mask[i][j] == 1):
out[i][j] = sent[k]
k += 1
return out,k
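# Note (added comment): shifravanie() writes successive plaintext characters into the
# cells of `out` that the mask currently exposes; the main script below calls it four
# times, rotating the grille by 90 degrees in between (a turning-grille cipher).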
"""
def decimalToBinary(num):
while num > 1:
num = num // 2
def decode(arr, n):
matrix = []
for i in arr:
"""
import random
n = int(input("Enter the size of the cipher table: "))
Table = []
for i in range(n):
Table.append([])
for j in range(n):
Table[i].append(0)
c = 1
for i in range(n // 2):
for j in range(n // 2):
Table[i][j] = c
Table[j][n - i - 1] = c
Table[n - i - 1][n - j - 1] = c
Table[n - j - 1][i] = c
c = c + 1
c = c - 1
block = []
while (len(block) != (n // 2) ** 2):
i = random.randint(0, n - 1)
j = random.randint(0, n - 1)
flag = True
for k in range(len(block)):
if (Table[i][j] == -1) or (Table[i][j] == block[k]):
flag = False
break
if (flag == True):
block.append(Table[i][j])
Table[i][j] = -1
for i in range(n):
for j in range(n):
if (Table[i][j] != -1):
Table[i][j] = 0
else:
Table[i][j] = 1
PrintTable(Table,n)
key = []
for i in range(n):
m = 0
for j in range(n):
m += Table[i][j] * 2 ** (n - j - 1)
key.append(m)
print()
sentense = "серегаяобьяснютебекакэтоработает"
print(len(sentense))
k = 0
Shifr = []
for i in range(n):
Shifr.append([])
for j in range(n):
Shifr[i].append(0)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
Shifr,k=shifravanie(Table,Shifr,sentense,k)
Table=rotate(Table,n)
PrintTable(Table,n)
PrintTable(Shifr,n)
print()
print(key)
| [
"[email protected]"
]
| |
0fbe0e50be80a501c819d446dd212e8cea341c8d | 8862d671654ed336f1de2895323e4cf76f0855d7 | /syft/mpc/utils.py | 3a4a7d9b250c7487e661dab920df36ddfed39296 | [
"Apache-2.0"
]
| permissive | Vinohith/PySyft | f0f29b000af586faca88756533079a4bfea17ff1 | 1921efeeda2c7b0bf93f17a33ddf59f8020fa653 | refs/heads/master | 2020-03-31T14:24:27.596173 | 2018-10-09T16:03:43 | 2018-10-09T16:03:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | cache = {}
def egcd(a, b):
"""
    Extended Euclidean algorithm (greatest common divisor).
    :param a: first integer
    :param b: second integer
    :return: a tuple (g, x, y) such that a*x + b*y == g == gcd(a, b)
"""
if a == 0:
return (b, 0, 1)
else:
g, y, x = egcd(b % a, a)
return (g, x - (b // a) * y, y)
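# Illustrative example (added note): egcd returns (g, x, y) with a*x + b*y == g, e.g.
# egcd(240, 46) == (2, -9, 47) because 240*(-9) + 46*47 == 2. modinv() below builds on
# this: modinv(7, 11) == 8 since (7 * 8) % 11 == 1.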
def modinv(a, m):
"""
calculate the multiplicative inverse of a modulus m such that
(x * result) % m == (x / a)
for any integer between 0 and m
:param a: the number we wish to divide by
:param m: the size of the modular field
:return: the number we can multiply by to actually divide by a
"""
if(a in cache):
sub_cache = cache[a]
if m in sub_cache:
return sub_cache[m]
else:
cache[a] = {}
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
result = x % m
cache[a][m] = result
return result | [
"[email protected]"
]
| |
9e77404a90a4c116a9c72e7dd494b94705ede353 | ffc02736617d5bb4308427b3df5e43811601cea0 | /examples/run_curl.py | b1dfee4e9ef691aaa9a75caf82946d6d611dde11 | [
"MIT"
]
| permissive | weihancool/tf2rl | 4315dd94f8f924f15085f26a9434f6824aa3736c | 0ef45d4a32a177f14fb579c9c2332f71404a9595 | refs/heads/master | 2023-05-02T17:00:05.775610 | 2021-05-29T03:32:52 | 2021-05-29T03:32:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | import dmc2gym
from tf2rl.algos.curl_sac import CURLSAC
from tf2rl.envs.dmc_wrapper import DMCWrapper
from tf2rl.experiments.trainer import Trainer
def main():
dm_envs = {
'finger': ['finger', 'spin', 2],
'cartpole': ['cartpole', 'balance', 4],
'reacher': ['reacher', 'easy', 4],
'cheetah': ['cheetah', 'run', 4],
'walker': ['walker', 'walk', 2],
'ball': ['ball_in_cup', 'catch', 4],
'humanoid': ['humanoid', 'stand', 4],
'bring_ball': ['manipulator', 'bring_ball', 4],
'bring_peg': ['manipulator', 'bring_peg', 4],
'insert_ball': ['manipulator', 'insert_ball', 4],
'insert_peg': ['manipulator', 'insert_peg', 4]}
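    # Note (added comment): each entry is [domain_name, task_name, action_repeat],
    # unpacked below to configure the dm_control environment wrapper.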
parser = Trainer.get_argument()
parser = CURLSAC.get_argument(parser)
parser.add_argument('--env-name', type=str, default="cartpole", choices=dm_envs.keys())
parser.add_argument('--seed', type=int, default=1)
parser.set_defaults(batch_size=256)
parser.set_defaults(n_warmup=10000)
parser.set_defaults(max_steps=3e6)
parser.set_defaults(save_summary_interval=100)
args = parser.parse_args()
domain_name, task_name, action_repeat = dm_envs[args.env_name]
original_obs_shape = (100, 100, 9)
input_obs_shape = (84, 84, 9)
def make_env():
return DMCWrapper(
dmc2gym.make(
domain_name=domain_name,
task_name=task_name,
seed=args.seed,
visualize_reward=False,
from_pixels=True,
height=100,
width=100,
frame_skip=action_repeat,
channels_first=False),
obs_shape=original_obs_shape,
k=3,
channel_first=False)
env = make_env()
test_env = make_env()
# see Table 3 of CURL paper
lr_sac = lr_curl = 2e-4 if args.env_name == "cheetah" else 1e-3
policy = CURLSAC(
obs_shape=input_obs_shape,
action_dim=env.action_space.high.size,
gpu=args.gpu,
memory_capacity=int(1e5),
n_warmup=int(1e3),
max_action=env.action_space.high[0],
batch_size=512,
actor_units=(1024, 1024),
critic_units=(1024, 1024),
lr_sac=lr_sac,
lr_curl=lr_curl,
lr_alpha=1e-4,
tau=0.01,
init_temperature=0.1,
auto_alpha=True,
stop_q_grad=args.stop_q_grad)
trainer = Trainer(policy, env, args, test_env=test_env)
if args.evaluate:
trainer.evaluate_policy_continuously()
else:
trainer()
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ea79b41539481557905fbd47f8df65758118b68d | 96ec8ea87fb2cfdd2d850a0471c9820f92152847 | /九章算法/递归/Fibonacci easy.py | ed586d5e4c2c29806c786838bc6a997aa82f867e | []
| no_license | bitterengsci/algorithm | ae0b9159fd21cc30c9865f981f9c18cf9c6898d7 | bf70d038b70c51edc6ddd6bfef1720fb5f9f2567 | refs/heads/master | 2023-08-10T10:22:18.774232 | 2023-07-31T21:04:11 | 2023-07-31T21:04:11 | 186,261,880 | 95 | 46 | null | 2023-07-31T21:04:12 | 2019-05-12T13:57:27 | Python | UTF-8 | Python | false | false | 323 | py | from functools import lru_cache
class Solution:
"""
@param n: an integer
@return: an integer f(n)
"""
@lru_cache(maxsize=10000)
def fibonacci(self, n):
if n == 1:
return 0
elif n in [2, 3]:
return 1
return self.fibonacci(n - 1) + self.fibonacci(n - 2) | [
"[email protected]"
]
| |
d7366dbf8bbfbc57e036cf38cc8c864998245935 | 97a192ac8a3feca408bb3f0ad746a8004e6bfcb7 | /to_do_list/venv/bin/pip3.6 | e29e4a64c8ccf31bea1aab4c20a66f68917835e0 | [
"MIT",
"Python-2.0"
]
| permissive | DitooAZ/Python-Games | e46aed297c2e2ab2a5ca9869241a711d2e15f6e2 | 587cb499c57437acbe052d9eb5fb8d48272735e9 | refs/heads/master | 2023-03-21T00:34:28.405176 | 2021-01-30T15:25:22 | 2021-01-30T15:25:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | 6 | #!/home/afroz/PycharmProjects/to_do_list/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
]
| |
a547055c3a6c7346b003d025b49edfef8cd3e2b8 | 329d8c2e0a259ad4880774c84ca4f6b28bbb641c | /bangali_19/configs/augs/v1.py | 2ccdd35a4f447281decfd6fdc7264984bca77b04 | []
| no_license | dodler/kgl | 1d3eeb6032b74afb761abe5fa8620325594d5a75 | b17525299e98d41da6f6631bd796084097e8a94e | refs/heads/master | 2021-06-24T17:26:38.683242 | 2021-06-20T08:14:23 | 2021-06-20T08:14:23 | 192,477,506 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | config = {
"arch": "multi-head",
"backbone": "se_resnext50_32x4d",
"pretrained": True,
"in-bn": True,
'opt': 'sgd',
'loss_aggregate_fn': 'mean',
'schedule': 'cosine_annealing_warm_restarts',
'T_0': 6,
'lr': 0.1,
'train_aug': 'augmentations.geom.v1',
'valid_aug': 'augmentations.geom.v0',
}
| [
"[email protected]"
]
| |
f26abbd82e77317f1c957bdf7e3267f5c65f2d83 | 833e69b32f9bf9f9ac746ac46851cedae6366e63 | /hta_expense_management/models/expense_request.py | 415674635f4aa5001ca3aadb44016a0887ff5dc5 | []
| no_license | halltech-ci/odoo_15 | 2984d3ac5dbd446f2fb8ef49dd37ea53e71a0f71 | 8e587e38535ccf8fa10fd42be1bc75d957e63311 | refs/heads/main | 2023-08-23T14:48:10.712042 | 2021-11-06T20:14:46 | 2021-11-06T20:14:46 | 414,481,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
class ExpenseRequest(models.Model):
_name = 'expense.request'
_description = 'Custom expense request'
_inherit = ['mail.thread', 'mail.activity.mixin']
_order = 'date desc'
@api.model
def _default_employee_id(self):
return self.env.user.employee_id
@api.model
def _get_default_requested_by(self):
return self.env['res.users'].browse(self.env.uid)
def _get_default_name(self):
return self.env["ir.sequence"].next_by_code("expense.request.code")
name = fields.Char(default=_get_default_name, readonly=True)
description = fields.Char('Description', required=True)
state = fields.Selection(selection=[
('draft', 'Draft'),
('submit', 'Submitted'),
('validate', 'Validate'),
('to_approve', 'To Approve'),
('approve', 'Approved'),
('post', 'Paid'),
('refuse', 'Refused'),
('cancel', 'Cancelled')
], string='Status', index=True, readonly=True, tracking=True, copy=False, default='draft', required=True, help='Expense Report State')
| [
"[email protected]"
]
| |
e97f08ac73a48b46ed2e88f1aa02f9e54c37b37e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03549/s657113138.py | 7bc42c0eb4301b72eea8570f2ec52dbe884e2fce | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | n,m = map(int,input().split())
# Time taken for one full submission (all test cases run once)
total = m*1900 + (n-m)*100
# Denominator of the probability that every case is answered correctly in a single run (i.e. 2**m)
prob_all = 2**m
print(total * prob_all) | [
"[email protected]"
]
| |
aec3ae45025c14557e08868f995361392ecd97e0 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /tools/binary_size/print_trybot_sizes.py | 3dfbbc52800a48e7e37c53d5d46648cf12e7eaa3 | [
"BSD-3-Clause"
]
| permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 5,713 | py | #!/usr/bin/env python3
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints android-binary-size result for a given commit or commit range."""
import argparse
import collections
import concurrent.futures
import csv
import json
import os
import posixpath
import re
import subprocess
import sys
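
# Example invocations (added note; the flags and the numeric-range rewrite are defined in main()):
#   print_trybot_sizes.py HEAD~20..HEAD
#   print_trybot_sizes.py --csv 1045000..1045100   # a pure-number range is treated as crrev positions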
# Max number of commits to show when given a range and no -n parameter.
_COMMIT_LIMIT = 200
# Commit ranges where size bot was giving invalid results.
_BAD_COMMIT_RANGES = [
range(1045024, 1045552), # https://crbug.com/1361952
]
_COMMIT_RE = re.compile(r'^commit (?:(?!^commit).)*', re.DOTALL | re.MULTILINE)
_MAIN_FIELDS_RE = re.compile(
r'^commit (\S+).*?'
r'^Date:\s+(.*?)$.*?'
r'^ (\S.*?)$', re.DOTALL | re.MULTILINE)
_REVIEW_RE = re.compile(r'^ Reviewed-on: (\S+)', re.MULTILINE)
_CRREV_RE = re.compile(r'^ Cr-Commit-Position:.*?(\d+)', re.MULTILINE)
_GERRIT_RE = re.compile(r'https://([^/]+)/c/(.*?)/\+/(\d+)')
_CommitInfo = collections.namedtuple(
'_CommitInfo', 'git_hash date subject review_url cr_position')
def _parse_commit(text):
git_hash, date, subject = _MAIN_FIELDS_RE.match(text).groups()
review_url = ([''] + _REVIEW_RE.findall(text))[-1]
cr_position = int((['0'] + _CRREV_RE.findall(text))[-1])
return _CommitInfo(git_hash, date, subject, review_url, cr_position)
def _git_log(git_log_args):
cmd = ['git', 'log']
if len(git_log_args) == 1 and '..' not in git_log_args[0]:
# Single commit rather than commit range.
cmd += ['-n1']
elif not any(x.startswith('-n') for x in git_log_args):
# Ensure there's a limit on number of commits.
cmd += [f'-n{_COMMIT_LIMIT}']
cmd += git_log_args
log_output = subprocess.check_output(cmd, encoding='utf8')
ret = [_parse_commit(x) for x in _COMMIT_RE.findall(log_output)]
if len(ret) == _COMMIT_LIMIT:
sys.stderr.write(
f'Limiting to {_COMMIT_LIMIT} commits. Use -n## to override\n')
return ret
def _query_size(review_url, internal):
if not review_url:
return '<missing>'
m = _GERRIT_RE.match(review_url)
if not m:
return '<bad URL>'
host, project, change_num = m.groups()
if internal:
project = 'chrome'
builder = 'android-internal-binary-size'
else:
project = 'chromium'
builder = 'android-binary-size'
cmd = ['bb', 'ls', '-json', '-p']
# Request results for all patchsets, assuming fewer than 30.
for patchset in range(1, 30):
cmd += [
'-predicate',
"""{
"builder":{"project":"%s","bucket":"try","builder":"%s"},
"gerrit_changes":[{
"host":"%s","project":"%s",
"change":"%s","patchset":"%d"}
]}""" % (project, builder, host, project, change_num, patchset)
]
result = subprocess.run(cmd,
check=False,
stdout=subprocess.PIPE,
encoding='utf8')
if result.returncode:
return '<missing>'
# Take the last one that has a size set (output is in reverse order already).
for json_str in result.stdout.splitlines():
try:
obj = json.loads(json_str)
except json.JSONDecodeError:
sys.stderr.write(f'Problem JSON:\n{json_str}\n')
sys.exit(1)
properties = obj.get('output', {}).get('properties', {})
listings = properties.get('binary_size_plugin', {}).get('listings', [])
for listing in listings:
if listing['name'] == 'Android Binary Size':
return listing['delta']
return '<unknown>'
def _maybe_rewrite_crrev(git_log_args):
if len(git_log_args) != 1:
return
values = git_log_args[0].split('..')
if len(values) != 2 or not values[0].isdigit() or not values[1].isdigit():
return
values = [
subprocess.check_output(['git-crrev-parse', v], text=True).rstrip()
for v in values
]
git_log_args[0] = '..'.join(values)
print(f'Converted crrev to commits: {git_log_args[0]}')
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--csv', action='store_true', help='Print as CSV')
parser.add_argument('--internal',
action='store_true',
help='Query android-internal-binary-size (Googlers only)')
args, git_log_args = parser.parse_known_args()
# Ensure user has authenticated.
result = subprocess.run(['bb', 'auth-info'],
check=False,
stdout=subprocess.DEVNULL)
if result.returncode:
sys.stderr.write('First run: bb auth-login\n')
sys.exit(1)
_maybe_rewrite_crrev(git_log_args)
commit_infos = _git_log(git_log_args)
if not commit_infos:
sys.stderr.write('Did not find any commits.\n')
sys.exit(1)
print(f'Fetching bot results for {len(commit_infos)} commits...')
if args.csv:
print_func = csv.writer(sys.stdout).writerow
else:
print_func = lambda v: print('{:<12}{:14}{:12}{:32}{}'.format(*v))
print_func(('Commit #', 'Git Hash', 'Size', 'Date', 'Subject'))
num_bad_commits = 0
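  # Query buildbucket for every commit in parallel; each future resolves to the
  # size string shown in the table below.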
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as pool:
sizes = [
pool.submit(_query_size, info.review_url, args.internal)
for info in commit_infos
]
for info, size in zip(commit_infos, sizes):
if any(info.cr_position in r for r in _BAD_COMMIT_RANGES):
num_bad_commits += 1
size_str = size.result().replace(' bytes', '').lstrip('+')
crrev_str = info.cr_position or ''
print_func(
(crrev_str, info.git_hash[:12], size_str, info.date, info.subject))
if num_bad_commits:
print(f'Includes {num_bad_commits} commits from known bad revision range.')
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
006bb7b5a2c12acfab42ef1e5bce124fd466f121 | c9669c314c74b5d7f3098f75fe359b036f241eea | /tests/test_generation_utils.py | de986b696d8aa0aeddfe880993354754726411e1 | [
"Apache-2.0"
]
| permissive | stanford-crfm/transformers | c5439f53bc777a43a9adc02ecedb561facdb4b79 | 8575081fa36a0ff7c7f90a779f1e5ae7633dfbd8 | refs/heads/mistral-v1 | 2023-08-23T22:15:32.268675 | 2021-09-28T19:44:38 | 2021-09-28T19:44:38 | 337,241,186 | 5 | 1 | Apache-2.0 | 2021-09-15T04:18:36 | 2021-02-08T23:45:32 | Python | UTF-8 | Python | false | false | 70,282 | py | # coding=utf-8
# Copyright 2020 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
if is_torch_available():
import torch
from transformers import BartForConditionalGeneration, BartTokenizer, top_k_top_p_filtering
from transformers.generation_beam_search import BeamSearchScorer
from transformers.generation_logits_process import (
ForcedBOSTokenLogitsProcessor,
ForcedEOSTokenLogitsProcessor,
HammingDiversityLogitsProcessor,
InfNanRemoveLogitsProcessor,
LogitsProcessorList,
MinLengthLogitsProcessor,
NoBadWordsLogitsProcessor,
NoRepeatNGramLogitsProcessor,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
)
from transformers.generation_stopping_criteria import MaxLengthCriteria, StoppingCriteriaList
from transformers.generation_utils import (
BeamSampleDecoderOnlyOutput,
BeamSampleEncoderDecoderOutput,
BeamSearchDecoderOnlyOutput,
BeamSearchEncoderDecoderOutput,
GreedySearchDecoderOnlyOutput,
GreedySearchEncoderDecoderOutput,
SampleDecoderOnlyOutput,
SampleEncoderDecoderOutput,
)
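# Mixin shared by the per-model test classes: it runs `generate()` and the matching
# low-level methods (greedy_search, sample, beam_search, beam_sample, group_beam_search)
# with the same settings and checks that their outputs agree.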
class GenerationTesterMixin:
model_tester = None
all_generative_model_classes = ()
input_name = "input_ids"
def _get_input_ids_and_config(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict[self.input_name]
attention_mask = torch.ones_like(input_ids, dtype=torch.long)
        # cut to half length & take max batch_size 2
max_batch_size = 2
sequence_length = input_ids.shape[-1] // 2
input_ids = input_ids[:max_batch_size, :sequence_length]
attention_mask = attention_mask[:max_batch_size, :sequence_length]
# generate max 3 tokens
max_length = input_ids.shape[-1] + 3
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
config.pad_token_id = config.eos_token_id
return config, input_ids, attention_mask, max_length
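    # Builds the keyword arguments passed to `generate()` together with the
    # equivalent explicit LogitsProcessorList so that both code paths apply the
    # same constraints.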
@staticmethod
def _get_logits_processor_and_kwargs(
input_length,
eos_token_id,
forced_bos_token_id=None,
forced_eos_token_id=None,
max_length=None,
diversity_penalty=None,
):
process_kwargs = {
"min_length": input_length + 1,
"bad_words_ids": [[1, 0]],
"no_repeat_ngram_size": 2,
"repetition_penalty": 1.2,
}
logits_processor = LogitsProcessorList(
(
[
HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2),
]
if diversity_penalty is not None
else []
)
+ (
[
MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id),
]
if eos_token_id is not None
else []
)
+ (
[
ForcedBOSTokenLogitsProcessor(forced_bos_token_id),
]
if forced_bos_token_id is not None
else []
)
+ (
[ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)]
if forced_eos_token_id is not None
else []
)
+ [
NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id),
NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]),
RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]),
]
)
return process_kwargs, logits_processor
@staticmethod
def _get_warper_and_kwargs(num_beams):
warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7}
logits_warper = LogitsProcessorList(
[
TemperatureLogitsWarper(warp_kwargs["temperature"]),
TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)),
]
)
return warp_kwargs, logits_warper
@staticmethod
def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1):
beam_kwargs = {
"early_stopping": False,
"length_penalty": 2.0,
"num_beams": 2,
"num_return_sequences": num_return_sequences,
"num_beam_groups": 2, # one beam per group
"diversity_penalty": 2.0,
}
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=beam_kwargs["num_beams"],
device=torch_device,
length_penalty=beam_kwargs["length_penalty"],
do_early_stopping=beam_kwargs["early_stopping"],
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=beam_kwargs["num_beam_groups"],
)
return beam_kwargs, beam_scorer
@staticmethod
def _get_encoder_outputs(
model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1
):
encoder = model.get_encoder()
encoder_outputs = encoder(
input_ids,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave(
num_interleave, dim=0
)
input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id()
attention_mask = None
return encoder_outputs, input_ids, attention_mask
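    # Each `_*_generate` helper below runs the public `generate()` API and the
    # matching low-level method on the same inputs and returns both outputs for
    # comparison.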
def _greedy_generate(
self,
model,
input_ids,
attention_mask,
max_length,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
eos_token_id=model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
kwargs = {}
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
num_beams=1,
max_length=max_length,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_process_kwargs,
)
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
with torch.no_grad():
output_greedy = model.greedy_search(
input_ids,
max_length=max_length,
attention_mask=attention_mask,
logits_processor=logits_processor,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_scores=output_scores,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_greedy, output_generate
def _sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
logits_processor,
logits_warper,
logits_warper_kwargs,
process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
do_sample=True,
num_beams=1,
max_length=max_length,
num_return_sequences=num_return_sequences,
attention_mask=attention_mask,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**logits_warper_kwargs,
**process_kwargs,
)
torch.manual_seed(0)
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0)
input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor.append(InfNanRemoveLogitsProcessor())
with torch.no_grad():
output_sample = model.sample(
input_ids_clone,
attention_mask=attention_mask_clone,
max_length=max_length,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_sample, output_generate
def _beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_beam_search = model.beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_search
def _beam_sample_generate(
self,
model,
input_ids,
attention_mask,
max_length,
num_return_sequences,
beam_scorer,
beam_kwargs,
logits_warper,
logits_warper_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
torch.manual_seed(0)
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=True,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_warper_kwargs,
)
# beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams * num_return_sequences,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
else:
attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0)
# prevent flaky generation test failures
logits_processor = LogitsProcessorList()
logits_processor.append(InfNanRemoveLogitsProcessor())
torch.manual_seed(0)
with torch.no_grad():
output_beam_sample = model.beam_sample(
input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0),
beam_scorer,
max_length=max_length,
attention_mask=attention_mask,
logits_warper=logits_warper,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_beam_sample
def _group_beam_search_generate(
self,
model,
input_ids,
attention_mask,
max_length,
beam_scorer,
beam_kwargs,
logits_processor,
logits_process_kwargs,
output_scores=False,
output_attentions=False,
output_hidden_states=False,
return_dict_in_generate=False,
):
output_generate = model.generate(
input_ids,
attention_mask=attention_mask,
do_sample=False,
max_length=max_length,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
remove_invalid_values=True,
**beam_kwargs,
**logits_process_kwargs,
)
# group_beam_search does not automatically interleave `batch_size` dim for `num_beams`
kwargs = {}
if model.config.is_encoder_decoder:
encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs(
model,
input_ids,
attention_mask,
num_interleave=beam_scorer.num_beams,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
kwargs["encoder_outputs"] = encoder_outputs
input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0)
else:
attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0)
input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0)
with torch.no_grad():
output_group_beam_search = model.group_beam_search(
input_ids_clone,
beam_scorer,
max_length=max_length,
attention_mask=attention_mask_clone,
logits_processor=logits_processor,
output_scores=output_scores,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict_in_generate=return_dict_in_generate,
**kwargs,
)
return output_generate, output_group_beam_search
def test_greedy_generate(self):
# check `generate()` and `greedy_search()` are equal
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# test old generation output for backwards compatibility
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length
)
self.assertListEqual(output_greedy.tolist(), output_generate.tolist())
def test_greedy_generate_dict_outputs(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config)
def test_greedy_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_greedy, output_generate = self._greedy_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist())
for output in (output_greedy, output_generate):
self._check_outputs(output, input_ids, model.config, use_cache=True)
def test_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
# check `generate()` and `sample()` are equal
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=1,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
# check `generate()` and `sample()` yield equal results for `num_return_sequences`
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=3,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
)
self.assertListEqual(output_sample.tolist(), output_generate.tolist())
def test_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
# disable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
model.config.eos_token_id,
forced_bos_token_id=model.config.forced_bos_token_id,
forced_eos_token_id=model.config.forced_eos_token_id,
max_length=max_length,
)
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
output_sample, output_generate = self._sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=2,
logits_processor=logits_processor,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
process_kwargs=process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_sample, SampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, SampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_sample, SampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, SampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist())
for output in (output_sample, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=2)
def test_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
# check `generate()` and `beam_search()` are equal
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
# check `generate()` and `beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
)
self.assertListEqual(output_generate.tolist(), output_beam_search.tolist())
def test_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_beam_search = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_search, output_generate):
self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams)
def test_beam_search_generate_dict_outputs_use_cache(self):
for model_class in self.all_generative_model_classes:
# enable cache
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
if not hasattr(config, "use_cache"):
# only relevant if model has "use_cache"
return
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
)
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
config.use_cache = True
config.is_decoder = True
model = model_class(config).to(torch_device).eval()
output_beam, output_generate = self._beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_process_kwargs=logits_process_kwargs,
logits_processor=logits_processor,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist())
for output in (output_beam, output_generate):
self._check_outputs(
output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams
)
def test_beam_sample_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
model = model_class(config).to(torch_device).eval()
            # check `generate()` and `beam_sample()` are equal
# change `num_return_sequences = 2` but not for `beam_scorer`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_generate, output_beam_sample = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist())
def test_beam_sample_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
# disable cache
config.use_cache = False
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1)
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(
input_ids.shape[0] * num_return_sequences, max_length
)
beam_kwargs["num_return_sequences"] = num_return_sequences
output_beam_sample, output_generate = self._beam_sample_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
num_return_sequences=num_return_sequences,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_warper=logits_warper,
logits_warper_kwargs=logits_warper_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput)
else:
self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist())
self.assertTrue(
torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_beam_sample, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_without_input_ids(self):
config, _, _, max_length = self._get_input_ids_and_config()
# if no bos token id => cannot generate from None
if config.bos_token_id is None:
return
for model_class in self.all_generative_model_classes:
model = model_class(config).to(torch_device)
model.eval()
output_ids_generate = model.generate(
do_sample=False,
max_length=max_length,
remove_invalid_values=True,
)
self.assertIsNotNone(output_ids_generate)
def test_group_beam_search_generate(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
# check `generate()` and `group_beam_search()` are equal
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
# check `generate()` and `group_beam_search()` are equal for `num_return_sequences`
num_return_sequences = 2
if model.config.is_encoder_decoder:
max_length = 4
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
)
self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist())
def test_group_beam_search_generate_dict_output(self):
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
config.use_cache = False
            # It is important to set the eos_token_id to None to ensure that no sequences
# shorter than `max_length` can be generated which could lead to flaky circle ci
# failures if the top `num_return_sequences` beams are all shorter than the longest beam
config.eos_token_id = None
config.forced_eos_token_id = None
model = model_class(config).to(torch_device).eval()
if model.config.is_encoder_decoder:
max_length = 4
logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs(
input_ids.shape[-1],
config.eos_token_id,
config.forced_bos_token_id,
config.forced_eos_token_id,
max_length,
diversity_penalty=2.0,
)
num_return_sequences = 1
beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(
input_ids.shape[0], max_length, num_return_sequences=num_return_sequences
)
output_generate, output_group_beam_search = self._group_beam_search_generate(
model=model,
input_ids=input_ids,
attention_mask=attention_mask,
max_length=max_length,
beam_scorer=beam_scorer,
beam_kwargs=beam_kwargs,
logits_processor=logits_processor,
logits_process_kwargs=logits_process_kwargs,
output_scores=True,
output_hidden_states=True,
output_attentions=True,
return_dict_in_generate=True,
)
if model.config.is_encoder_decoder:
self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput)
self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput)
else:
self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput)
self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput)
self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist())
self.assertTrue(
torch.allclose(
output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3
)
)
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],))
self.assertTrue((output_generate["sequences_scores"] < 0).all().item())
for output in (output_group_beam_search, output_generate):
self._check_outputs(
output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams
)
def test_generate_with_head_masking(self):
"""Test designed for encoder-decoder models to ensure the attention head masking is used."""
attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
for model_class in self.all_generative_model_classes:
config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
model = model_class(config).to(torch_device)
# We want to test only encoder-decoder models
if not config.is_encoder_decoder:
continue
head_masking = {
"head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device),
"decoder_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
"cross_attn_head_mask": torch.zeros(
config.decoder_layers, config.decoder_attention_heads, device=torch_device
),
}
signature = inspect.signature(model.forward)
# We want to test only models where encoder/decoder head masking is implemented
if not set(head_masking.keys()) < set([*signature.parameters.keys()]):
continue
for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
out = model.generate(
input_ids,
attention_mask=attention_mask,
num_beams=1,
output_attentions=True,
return_dict_in_generate=True,
remove_invalid_values=True,
**{name: mask},
)
# We check the state of decoder_attentions and cross_attentions just from the last step
attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
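    # Shape checks for the dict outputs returned with `return_dict_in_generate=True`:
    # scores plus (encoder/decoder) attentions and hidden states.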
def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1):
batch_size, seq_length = input_ids.shape
num_sequences_in_output = batch_size * num_return_sequences
gen_len = (
output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length
)
# scores
self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config)
# Attentions
if config.is_encoder_decoder:
# encoder
self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length)
# decoder
self._check_attentions_for_generate(
num_sequences_in_output,
output.decoder_attentions,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
            # with use_cache, the first step is identical to the no-cache case, so skip it here
attentions = output.attentions if not use_cache else output.attentions[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_attentions_for_generate(
num_sequences_in_output,
attentions=attentions,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
# Hidden States
if config.is_encoder_decoder:
# encoder
self._check_encoder_hidden_states_for_generate(
output.encoder_hidden_states, batch_size, config, seq_length
)
# decoder
self._check_hidden_states_for_generate(
num_sequences_in_output,
output.decoder_hidden_states,
min_length=1,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
else:
            # with use_cache, the first step is identical to the no-cache case, so skip it here
hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:]
min_length = seq_length if not use_cache else seq_length + 1
self._check_hidden_states_for_generate(
num_sequences_in_output,
hidden_states,
min_length=min_length,
max_length=output.sequences.shape[-1],
config=config,
use_cache=use_cache,
)
def _check_scores(self, batch_size, scores, length, config):
expected_shape = (batch_size, config.vocab_size)
self.assertIsInstance(scores, tuple)
self.assertEqual(len(scores), length)
self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores))
def _check_attentions_for_generate(
self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
)
self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(attentions):
tgt_len = min_length + idx if not use_cache else 1
src_len = min_length + idx
expected_shape = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions)
)
def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length)
self.assertIsInstance(attentions, tuple)
self.assertListEqual(
[layer_attentions.shape for layer_attentions in attentions],
[encoder_expected_shape] * len(attentions),
)
def _check_hidden_states_for_generate(
self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
):
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
[True] * len(hidden_states),
)
self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(hidden_states):
seq_len = min_length + idx if not use_cache else 1
expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
[expected_shape] * len(iter_hidden_states),
)
def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length):
encoder_expected_shape = (batch_size, seq_length, config.hidden_size)
self.assertIsInstance(hidden_states, tuple)
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in hidden_states],
[encoder_expected_shape] * len(hidden_states),
)
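# Tests for standalone generation utilities that do not require instantiating a model.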
@require_torch
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p function behaves as expected
def test_top_k_top_p_filtering(self):
logits = torch.tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276,
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
                ], # cumulative prob of 4 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958,
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
                ], # cumulative prob of 4 highest values <= 0.6
],
dtype=torch.float,
device=torch_device,
)
non_inf_expected_idx = torch.tensor(
[[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]],
dtype=torch.long,
device=torch_device,
) # expected non filtered idx as noted above
non_inf_expected_output = torch.tensor(
[
8.2221,
8.4321,
7.4402,
9.3845,
6.2712,
8.8275,
7.3858,
9.6770,
], # expected non filtered values as noted above
dtype=torch.float,
device=torch_device,
)
output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")].to(device=torch_device)
non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device)
self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12))
self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx)))
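# End-to-end `generate()` tests on BART checkpoints: tiny random models for the fast
# tests and facebook/bart-large-cnn for the slow diverse beam search test.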
@require_torch
class GenerationIntegrationTests(unittest.TestCase):
@slow
def test_diverse_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood.
The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People.
"Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports.
The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both."""
bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
outputs = bart_model.generate(
input_ids,
num_beams=4,
num_return_sequences=2,
num_beam_groups=4,
diversity_penalty=2.0,
remove_invalid_values=True,
)
generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True)
self.assertListEqual(
generated_text,
[
"The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle name, as well as his father's first. It is the first baby for both of them.",
"Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the first child for both. The couple announced the pregnancy in January. The name Silas is the middle name of Timberlake's maternal grandfather. It's also his own middle name.",
],
)
def test_max_length_backward_compat_greedy(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_sample(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
max_length = 20
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with torch.no_grad():
with self.assertWarns(UserWarning):
bart_model.sample(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
def test_max_length_backward_compat_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 2
input_ids = input_ids.expand(2, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
_ = bart_model.beam_search(
input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs
)
def test_max_length_backward_compat_group_beam_search(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs
)
def test_max_length_warning_if_different(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
batch_size = 1
max_length = 20
num_beams = 6
num_beam_groups = 3
num_return_sequences = num_beams * batch_size
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
# Greedy
input_ids = input_ids.expand(6, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
input_ids = bart_model._prepare_decoder_input_ids_for_generation(
input_ids,
decoder_start_token_id=bart_model.config.decoder_start_token_id,
bos_token_id=bart_model.config.bos_token_id,
)
with self.assertWarns(UserWarning):
bart_model.greedy_search(
input_ids,
max_length=max_length,
pad_token_id=bart_model.config.pad_token_id,
stopping_criteria=stopping_criteria,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Sample
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.sample(
input_ids,
max_length=max_length,
stopping_criteria=stopping_criteria,
pad_token_id=bart_model.config.pad_token_id,
eos_token_id=bart_model.config.eos_token_id,
**model_kwargs,
)
# Beam
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
with self.assertWarns(UserWarning):
with torch.no_grad():
bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
max_length=max_length,
beam_scorer=beam_scorer,
**model_kwargs,
)
# Grouped beam search
diverse_beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
num_beam_hyps_to_keep=num_return_sequences,
num_beam_groups=num_beam_groups,
)
with self.assertWarns(UserWarning):
bart_model.group_beam_search(
input_ids,
diverse_beam_scorer,
stopping_criteria=stopping_criteria,
num_beams=num_beams,
max_length=max_length,
**model_kwargs,
)
def test_beam_search_warning_if_max_length_is_passed(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
batch_size = 1
num_beams = 3
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
input_ids = input_ids.expand(num_beams, -1)
model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {})
stopping_criteria_max_length = 18
stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)])
with self.assertWarns(UserWarning):
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
max_length=10,
)
generated_ids = bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer,
**model_kwargs,
)
beam_scorer_no_max_len = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=torch_device,
)
generated_ids_no_max_len = bart_model.beam_search(
input_ids,
num_beams=num_beams,
stopping_criteria=stopping_criteria,
beam_scorer=beam_scorer_no_max_len,
**model_kwargs,
)
# BeamSearchScorer max_length should not influence "real" max_length
self.assertEqual(generated_ids.tolist(), generated_ids_no_max_len.tolist())
def test_max_new_tokens(self):
article = """Justin Timberlake and Jessica Biel, welcome to parenthood."""
bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random")
bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device)
input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device)
self.assertEqual(list(input_ids.shape), [1, 15])
# Encoder decoder call
max_new_tokens = 3
outputs = bart_model.generate(input_ids, max_new_tokens=max_new_tokens)
# 1 BOS + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 4])
# Decoder only call
outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=max_new_tokens)
# 15 + 3 new tokens
self.assertEqual(list(outputs.shape), [1, 18])
# max_new_tokens and max_length serve the same purpose and should not be used together.
with self.assertWarns(UserWarning):
outputs = bart_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20)
| [
"[email protected]"
]
| |
9b22cfd35b76e0c6d5d9df02c069057c37a0a9c7 | 51d8f003828d6ee6e6611f0e133b1e35cf400601 | /dnekcab-eciovni/invoice_api/core/migrations/0005_auto_20180831_1728.py | 6c7e489ba33b303a972e1f95773abfe4a6348581 | []
| no_license | tatubola/xpto | 23b5f7a42c13c7d39eb321e52b9b4b2d1ef76c4c | 6ed8cec23b06bccb1edf57e6b67af017f9a162d3 | refs/heads/master | 2020-04-02T11:05:24.560009 | 2018-10-23T17:41:10 | 2018-10-23T17:41:10 | 154,370,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | # Generated by Django 2.0.8 on 2018-08-31 17:28
import django.core.validators
from django.db import migrations, models
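# Auto-generated migration: redefines Fatura.boleto_url as an optional CharField with
# URL validation and Servico.data_expiracao as a plain DateField.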
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20180827_1752'),
]
operations = [
migrations.AlterField(
model_name='fatura',
name='boleto_url',
field=models.CharField(blank=True, error_messages={'invalid': 'Insira uma url válida.'}, max_length=255, null=True, validators=[django.core.validators.URLValidator()]),
),
migrations.AlterField(
model_name='servico',
name='data_expiracao',
field=models.DateField(),
),
]
| [
"[email protected]"
]
| |
210e2ef67d9dbcad596f1621a8653073ca4e2646 | 8168caa4ae066940dfedd788eeb107c5f65532ef | /node_modules/jest-haste-map/node_modules/fsevents/build/config.gypi | 77c829ebd83516aa0e33f98d86df08a9e4e5fdef | [
"MIT"
]
| permissive | muzamilnazir/keeper | 099a922068e028ca51b14c9bf85135fc2a509cf1 | ade15bd80b95f31e640378db8a3ed9a1a2a4ea86 | refs/heads/main | 2023-06-18T18:28:01.293667 | 2021-07-07T07:17:52 | 2021-07-07T07:17:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,702 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-tmp/icudt68l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "68",
"is_debug": 0,
"llvm_version": "11.0",
"napi_build_version": "8",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_node_code_cache": "true",
"node_use_node_snapshot": "true",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "83.dylib",
"target_arch": "x64",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 0,
"xcode_version": "11.0",
"nodedir": "/Users/mac/Library/Caches/node-gyp/14.17.0",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"commit_hooks": "true",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/zsh",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"timing": "",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"preid": "",
"fetch_retries": "2",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"logs_max": "10",
"prefer_online": "",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"audit_level": "low",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"offline": "",
"read_only": "",
"searchlimit": "20",
"access": "",
"json": "",
"allow_same_version": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/mac/.npm-init.js",
"userconfig": "/Users/mac/.npmrc",
"cidr": "",
"node_version": "14.17.0",
"user": "",
"auth_type": "legacy",
"editor": "vi",
"ignore_prepublish": "",
"save": "true",
"script_shell": "",
"tag": "latest",
"before": "",
"global": "",
"progress": "true",
"ham_it_up": "",
"optional": "true",
"searchstaleness": "900",
"bin_links": "true",
"force": "",
"save_prod": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"rebuild_bundle": "true",
"sso_poll_frequency": "500",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"prefer_offline": "",
"version": "",
"cache_min": "10",
"otp": "",
"cache": "/Users/mac/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"fund": "true",
"package_lock_only": "",
"save_optional": "",
"user_agent": "npm/6.14.13 node/v14.17.0 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"init_version": "1.0.0",
"node_options": "",
"umask": "0022",
"scope": "",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/var/folders/cn/tv09tq6n7r10g948dc0phgyh0000gn/T",
"unsafe_perm": "true",
"format_package_lock": "true",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
]
| |
955597662f9fa120479a85f48ef72fc1c51a6ba7 | 6189f34eff2831e3e727cd7c5e43bc5b591adffc | /WebMirror/management/rss_parser_funcs/feed_parse_extractSadhoovysinhumantranslationsWordpressCom.py | 76bd4a048313ef6e663c631631bd11116a32297d | [
"BSD-3-Clause"
]
| permissive | fake-name/ReadableWebProxy | 24603660b204a9e7965cfdd4a942ff62d7711e27 | ca2e086818433abc08c014dd06bfd22d4985ea2a | refs/heads/master | 2023-09-04T03:54:50.043051 | 2023-08-26T16:08:46 | 2023-08-26T16:08:46 | 39,611,770 | 207 | 20 | BSD-3-Clause | 2023-09-11T15:48:15 | 2015-07-24T04:30:43 | Python | UTF-8 | Python | false | false | 596 | py |
def extractSadhoovysinhumantranslationsWordpressCom(item):
'''
Parser for 'sadhoovysinhumantranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
| [
"[email protected]"
]
| |
e6b1a7a5d7f6c2ea08dcd31845e29efc0e8c606f | 020fed990dcab7417f82bde82f19d6beae58b06f | /ethiostockdemo/ethiostockdemo/urls.py | af60814eaf5fbc6f4ca37c6961e8330427d55c92 | []
| no_license | TsiyonW/pracdjango | 2cb27522bf201543eb262e060f70a765d59236e3 | ef6d319fda2cde3d3c07b9e0162e30a6153cce5e | refs/heads/master | 2021-01-16T09:06:21.008123 | 2020-02-25T20:37:16 | 2020-02-25T20:37:16 | 243,053,765 | 0 | 0 | null | 2020-02-25T20:37:17 | 2020-02-25T17:07:47 | Python | UTF-8 | Python | false | false | 925 | py | """ethiostockdemo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
urlpatterns = [
path('admin/', admin.site.urls),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
] | [
"="
]
| = |
ff1d767a12e4fd97828963a44224b8e3926cfc52 | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/tests/test_toy_rosenbrock.py | b22f4651ddea441cae2d604c240719ec522216c8 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 2,807 | py | #!/usr/bin/env python3
#
# Tests the Rosenbrock toy problems.
#
# This file is part of PINTS.
# Copyright (c) 2017-2018, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import pints
import pints.toy
import unittest
import numpy as np
class TestRosenbrock(unittest.TestCase):
"""
Tests the Rosenbrock toy problems.
"""
def test_error(self):
f = pints.toy.RosenbrockError()
self.assertEqual(f.n_parameters(), 2)
fx = f([10, 10])
self.assertTrue(np.isscalar(fx))
self.assertEqual(fx, 810081)
xopt = f.optimum()
fopt = f(xopt)
self.assertEqual(fopt, 0)
np.random.seed(1)
for x in np.random.uniform(-5, 5, size=(10, 2)):
self.assertTrue(f(x) > fopt)
def test_log_pdf(self):
f = pints.toy.RosenbrockLogPDF()
self.assertEqual(f.n_parameters(), 2)
fx = f([0.5, 6.0])
self.assertTrue(np.isscalar(fx))
self.assertAlmostEqual(fx, np.log(1.0 / 3307.5))
xopt = f.optimum()
fopt = f(xopt)
self.assertEqual(fopt, 0)
# sensitivity test
l, dl = f.evaluateS1([3, 4])
self.assertEqual(l, -np.log(2505))
self.assertEqual(len(dl), 2)
self.assertEqual(dl[0], float(-6004.0 / 2505.0))
self.assertEqual(dl[1], float(200.0 / 501.0))
# suggested bounds and distance measure
bounds = f.suggested_bounds()
bounds = [[-2, 4], [-1, 12]]
bounds = np.transpose(bounds).tolist()
self.assertTrue(np.array_equal(bounds, f.suggested_bounds()))
x = np.ones((100, 3))
self.assertRaises(ValueError, f.distance, x)
x = np.ones((100, 3, 2))
self.assertRaises(ValueError, f.distance, x)
# there is no simple way to generate samples from Rosenbrock
nsamples = 10000
g = pints.toy.GaussianLogPDF([1, 1], [1, 1])
samples = g.sample(nsamples)
self.assertTrue(f.distance(samples) > 0)
x = np.ones((100, 3))
self.assertRaises(ValueError, f.distance, x)
x = np.ones((100, 2, 2))
self.assertRaises(ValueError, f.distance, x)
# generate samples with mean and variance closer to true values
g1 = pints.toy.GaussianLogPDF([0.86935785, 2.59978086],
[[1.80537968, 2.70257559],
[2.70257559, 8.52658308]])
samples1 = g1.sample(nsamples)
self.assertTrue(f.distance(samples1) > 0)
self.assertTrue(f.distance(samples) > f.distance(samples1))
if __name__ == '__main__':
print('Add -v for more debug output')
import sys
if '-v' in sys.argv:
debug = True
unittest.main()
| [
"[email protected]"
]
| |
465623bff38a425e2f71d5f8761761b68aabd562 | 51a37b7108f2f69a1377d98f714711af3c32d0df | /src/leetcode/P968.py | adec6da448f1dd1be7449761ba15ed897b6927e1 | []
| no_license | stupidchen/leetcode | 1dd2683ba4b1c0382e9263547d6c623e4979a806 | 72d172ea25777980a49439042dbc39448fcad73d | refs/heads/master | 2022-03-14T21:15:47.263954 | 2022-02-27T15:33:15 | 2022-02-27T15:33:15 | 55,680,865 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,925 | py | # Definition for a binary tree node.
from functools import lru_cache
INF = 0xffffffff
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def minCameraCover(self, root):
"""
:type root: TreeNode
:rtype: int
"""
@lru_cache(maxsize=None)
def solve(node, t):
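            # Apparent state encoding, read off from the leaf base cases below:
            #   t == -1 : node may be left uncovered (to be covered later by its parent's camera)
            #   t ==  0 : a camera is placed at this node (cost of 1)
            #   t ==  1 : node must be covered by one of its children, with no camera here
            # solve(node, t) returns the minimum number of cameras for the subtree rooted
            # at node under that constraint; INF marks an infeasible state.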
if node is None:
return 0
if node.left is None and node.right is None:
if t == 1:
return INF
if t == 0:
return 1
if t == -1:
return 0
l = []
for i in range(3):
l.append(solve(node.left, i - 1))
r = []
for i in range(3):
r.append(solve(node.right, i - 1))
if t == -1:
if node.left is not None and node.right is not None:
return min(min(l[2], l[1]) + min(r[2], r[1]), min(r[2], r[1]) + min(l[2], l[1]))
if node.left is not None:
return min(l[2], l[1])
else:
return min(r[2], r[1])
if t == 0:
return 1 + min(l) + min(r)
if t == 1:
if node.left is not None and node.right is not None:
return min(l[1] + min(r[2], r[1]), r[1] + min(l[2], l[1]))
if node.left is not None:
return l[1]
else:
return r[1]
return min(solve(root, 0), solve(root, 1))
if __name__ == '__main__':
node = TreeNode(1)
node.left = TreeNode(2)
node.left.left = TreeNode(3)
node.left.left.left = TreeNode(4)
node.left.left.left.left = TreeNode(5)
node.left.left.left.left.left = TreeNode(6)
print(Solution().minCameraCover(node))
| [
"[email protected]"
]
| |
e645f221824750ae1df6e085b30a4d11d74f99d1 | 5ed21f38903512ff931cb0527fc0a651a1572127 | /dag1/live_koding/gjettelek.py | b593a9791de1a0e0394b71192b55604fdfbfa7a4 | []
| no_license | kodeskolen/tekna_agder_h20_2 | 6eb52a21fa2425b82cb88108686cce0079ac71ab | 16e869ad48b2411b3f2b133f3adbb382863a744d | refs/heads/main | 2023-01-13T16:35:06.870744 | 2020-11-20T13:24:49 | 2020-11-20T13:24:49 | 310,009,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 19:28:47 2020
@author: Marie
"""
# Vi "tenker" på et tall mellom 0 og 100
# Spilleren gjetter et tall
# Dersom gjettet er rett så er spillet over
# Dersom gjettet er for lavt eller høyt, får spilleren beskjed
# Spilleren får gjette på nytt
# Spilleren har begrenset med gjett
from random import randint
riktig_tall = randint(0, 100)
maks_forsøk = 3
gjett = int(input("Gjett et tall mellom 0 og 100: "))
forsøk = 1
while gjett != riktig_tall and forsøk < maks_forsøk:
if gjett < riktig_tall:
print("For lavt!")
else:
print("For høyt!")
gjett = int(input("Gjett igjen: "))
forsøk += 1
if riktig_tall == gjett:
print("Det var riktig! :D")
else:
print(f"Du har brukt opp dine {maks_forsøk} forsøk!")
print("GAME OVER!!")
| [
"[email protected]"
]
| |
cc44faed3d75937afcbd58a5d3896e2eafd0e6c5 | f4b694982027ac362de1e9d6755f2943d0355a06 | /DECSKS-29_--_Finite_difference_approximations_to_derivatives_using_centered_stencils_only_with_ghost_point_extrapolating_from_ghost_cells/DECSKS-versions/1D1V-VP ghost points on both sides_plotting_cleaned_up_FD_stencil_size_corrected_and_ndarrays_to_data_instead_of_plots/DECSKS/lib/read.py | dddf6324f5d24524d809138d4e1417aef2eb83c1 | []
| no_license | dsirajud/IPython-notebooks | 55275e44191c16f5393571522787993f931cfd98 | 6ad9d978c611558525fc9d716af101dc841a393b | refs/heads/master | 2021-01-15T15:33:57.119172 | 2016-07-13T20:08:29 | 2016-07-13T20:08:29 | 35,054,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116,324 | py | import numpy as np
import linecache
import scipy.misc
# lib.read for DECSKS-2.2 input decks
class InputError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def safe_eval(s):
try:
return eval(s)
except NameError:
return s.lower()
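# Note on safe_eval's behavior (both cases follow directly from the code above):
# entries that parse evaluate to Python objects, e.g. safe_eval("10") -> 10 and
# safe_eval("None") -> None, while bare keywords that raise NameError fall back to
# the lowercased raw string, e.g. safe_eval("PERIODIC") -> 'periodic'.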
def inputfile(sim_name):
"""Reads the input file and returns a dictionary containing
simulation parameters, sim_params. Function called from
directory ./DECSKS, input files located in ./DECSKS/etc
inputs:
    sim_name -- (str) simulation name; parameters are read from ./etc/params_<sim_name>.dat
outputs:
sim_params -- (dict) dictionary containing simulation parameters
as well as a dictionary of splitting coeffs
needed for chosen split scheme
"""
version = 2.4
params_filename = './etc/params_' + sim_name + '.dat'
infile = open(params_filename, 'r')
lines = infile.readlines()
# --------------------------------------------------------------------------
# Domain specifications
Nx = eval(lines[15][lines[15].find('=')+1:].strip())
ax = eval(lines[16][lines[16].find('=')+1:].strip())
bx = eval(lines[17][lines[17].find('=')+1:].strip())
Ny = eval(lines[19][lines[19].find('=')+1:].strip())
ay = eval(lines[20][lines[20].find('=')+1:].strip())
by = eval(lines[21][lines[21].find('=')+1:].strip())
Nz = eval(lines[23][lines[23].find('=')+1:].strip())
az = eval(lines[24][lines[24].find('=')+1:].strip())
bz = eval(lines[25][lines[25].find('=')+1:].strip())
Nvx = eval(lines[27][lines[27].find('=')+1:].strip())
avx = eval(lines[28][lines[28].find('=')+1:].strip())
bvx = eval(lines[29][lines[29].find('=')+1:].strip())
Nvy = eval(lines[31][lines[31].find('=')+1:].strip())
avy = eval(lines[32][lines[32].find('=')+1:].strip())
bvy = eval(lines[33][lines[33].find('=')+1:].strip())
Nvz = eval(lines[35][lines[35].find('=')+1:].strip())
avz = eval(lines[36][lines[36].find('=')+1:].strip())
bvz = eval(lines[37][lines[37].find('=')+1:].strip())
Nt = eval(lines[39][lines[39].find('=')+1:].strip())
T = eval(lines[40][lines[40].find('=')+1:].strip())
N = eval(lines[46][lines[46].find('=')+1:].strip())
# --------------------------------------------------------------------------
# list of phase space variables used, in etc/params.dat must set unused
# vars to have Nz as None, z = x, vx, y, ...
# e.g. in 1D1V, phasespace_vars = ['x', 'vx']
phasespace_vars = []
if Nx is not None:
phasespace_vars.append('x')
if Ny is not None:
phasespace_vars.append('y')
if Nz is not None:
phasespace_vars.append('z')
if Nvx is not None:
phasespace_vars.append('vx')
if Nvy is not None:
phasespace_vars.append('vy')
if Nvz is not None:
phasespace_vars.append('vz')
# ==========================================================================
# Boundary conditions dictionary -- contains dist. function BCs as well as phi
BC = {}
BC['f'] = {}
BC['phi'] = {}
# BC['f'] = BC dict on distribution function f
# BC['f']['x'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['f']['y'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['f']['z'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['f']['vx'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['f']['vy'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['f']['vz'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi'] = BC dict on electric potential phi
# BC['phi']['x'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi']['y'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi']['z'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi']['vx'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi']['vy'] = {'lower' : lower_value, 'upper' : upper_value}
# BC['phi']['vz'] = {'lower' : lower_value, 'upper' : upper_value}
#
# subdict objects that give keyword descriptions that match method names in lib.boundaryconditions and lib.fieldsolvers
# include, for var in phasespace_vars:
#
# BC['f'][var]['type'] and BC['phi'][var]['type']
#
# these are used to assemble function handle strings that select the corresponding routine needed for the specified BCs
BC_infilename = './etc/' + lines[106][lines[106].find(':')+1:].strip()
BC_infile = open(BC_infilename, 'r')
BC_infile_lines = BC_infile.readlines()
# DECSKS will throw an error if numbers are inputted as BCs in etc/params.dat
# strings are stored as lowercase as they are used in an eval statement to access
# the relevant method in lib.boundaryconditions. e.g. 'absorbing' is accessed as
# either eval('lib.boundaryconditions.absorbing_lower_boundary') or
# eval('lib.boundaryconditions.absorbing_upper_boundary') in lib.convect.remap_step
BC['f']['x'] = {}
BC['f']['x']['lower'] = safe_eval(BC_infile_lines[53][BC_infile_lines[53].find('=')+1:].strip())
BC['f']['x']['upper'] = safe_eval(BC_infile_lines[54][BC_infile_lines[54].find('=')+1:].strip())
BC['f']['y'] = {}
BC['f']['y']['lower'] = safe_eval(BC_infile_lines[56][BC_infile_lines[56].find('=')+1:].strip())
BC['f']['y']['upper'] = safe_eval(BC_infile_lines[57][BC_infile_lines[57].find('=')+1:].strip())
BC['f']['z'] = {}
BC['f']['z']['lower'] = safe_eval(BC_infile_lines[59][BC_infile_lines[59].find('=')+1:].strip())
BC['f']['z']['upper'] = safe_eval(BC_infile_lines[60][BC_infile_lines[60].find('=')+1:].strip())
BC['f']['vx'] = {}
BC['f']['vx']['lower'] = safe_eval(BC_infile_lines[68][BC_infile_lines[68].find('=')+1:].strip())
BC['f']['vx']['upper'] = safe_eval(BC_infile_lines[69][BC_infile_lines[69].find('=')+1:].strip())
BC['f']['vy'] = {}
BC['f']['vy']['lower'] = safe_eval(BC_infile_lines[71][BC_infile_lines[71].find('=')+1:].strip())
BC['f']['vy']['upper'] = safe_eval(BC_infile_lines[72][BC_infile_lines[72].find('=')+1:].strip())
BC['f']['vz'] = {}
BC['f']['vz']['lower'] = safe_eval(BC_infile_lines[74][BC_infile_lines[74].find('=')+1:].strip())
BC['f']['vz']['upper'] = safe_eval(BC_infile_lines[75][BC_infile_lines[75].find('=')+1:].strip())
# make all BCs lowercase strings so they can be used to construct the function strings in lib.boundaryconditions module
# whose names are all lowercase
# if an accepted boundary condition synonym as been used, change value to the name it goes by in lib.boundaryconditions
# check that all inputs for evolved phase space variables are recognized keywords and are compatible with the
# boundary at which they are indicated
for var in phasespace_vars:
for boundary in ['lower', 'upper']:
BC['f'][var][boundary] = BC['f'][var][boundary].lower()
if BC['f'][var][boundary] == 'cutoff':
if var in ['x', 'y', 'z']:
print "the following boundary condition was not accepted:\n"
print "distribution function %s boundary condition on %s: %s" % (boundary, var, BC['f'][var][boundary].upper())
print "\na cutoff condition on a configuration grid makes it unclear how to calculate or specify boundary conditions"
print "on the electric potential phi. The interpretation of a cutoff condition is that the numerical grid is a control"
print "volume inside an otherwise open system. Since we do not simulate what happens outside the control volume, "
print "the nature of a cutoff boundary condition on a configuration variable is inconsistent with the objective of"
print "specifying boundary conditions on phi and is not acceptable. The system must be closed (e.g. 'absorbing', 'collector')"
print "or periodic on a configuration variable. Please reconsider the context of the intended simulation (e.g. if boundary is desired,"
print " the corresponding boundary condition on the distribution function should be set to ABSORBING).\n"
raise InputError('a CUTOFF boundary condition on the distribution for a a configuration variable is inconsistent (can only be used on velocity variables).')
else:
pass
elif BC['f'][var][boundary] == 'collector':
pass
elif BC['f'][var][boundary] == 'absorbing':
pass
elif BC['f'][var][boundary] == 'source':
if boundary == 'lower':
raise NotImplementedError('a lower boundary condition on the distribution function for the configuration variable x has been selected as SOURCE; however, a source distribution has only been implemented as an upper boundary so far.')
else:
pass
elif BC['f'][var][boundary] == 'symmetry':
if boundary == 'upper':
print "the following boundary condition was not accepted:\n"
print "distribution function %s boundary condition on %s: %s" % (boundary, var, BC['f'][var][boundary].upper())
print "\nDECSKS only supports LOWER symmetry boundaries."
raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')
elif boundary == 'lower':
print "\nCOURTESY NOTICE TO USER: the boundary condition %s was selected for the distribution function on %s at the %s boundary in params_boundaryconditions.dat; " % (BC['f'][var][boundary].upper(), var, boundary)
print "this is a recognized input synonym for a '%s' condition. Changing value stored to BC['f']['%s']['%s'] = '%s'\n" % ('SYMMETRIC', var, boundary, 'SYMMETRIC')
print "Please regard any warnings/error messages that cite the keyword '%s' with this change in mind\n" % ('SYMMETRIC')
BC['f'][var][boundary] = 'symmetric'
elif BC['f'][var][boundary] == 'symmetric':
if boundary == 'lower':
pass
elif boundary == 'upper':
print "the following boundary condition was not accepted:\n"
print "distribution function %s boundary condition on %s: %s" % (boundary, var, BC['f'][var][boundary].upper())
print "\nDECSKS only supports LOWER symmetry boundaries."
raise NotImplementedError('a symmetric UPPER boundary condition on the distribution function was specified in params_boundaryconditions.dat; however, DECSKS only has functionality to permit lower boundary symmetry.')
elif BC['f'][var][boundary] == 'periodic':
pass
else: # inputs do not match any options
print '\nThe invalid keyword %s was specified in params_boundaryconditions.dat on the variable %s at the %s boundary\n' % (BC['f'][var][boundary].upper(), var, boundary)
raise InputError('inputs are restricted to those listed as options in params_boundaryconditions.dat')
# above we have checked for valid input. Next, check for compatible inputs (if 'periodic' is selected, it must be selected for both
# upper and lower bounds) and store a descriptor that toggles the correct orchestrator
# function in lib.boundaryconditions module ('periodic' vs. 'nonperiodic')
for var in phasespace_vars:
if BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] == 'periodic':
BC['f'][var]['type'] = 'periodic'
# check for invalid inputs (if 'periodic' was set at one boundary, it would need to be set at the opposite boundary as 'periodic' as well since
# a periodic boundary condition effectively involves both boundaries)
elif BC['f'][var]['lower'] == 'periodic' and BC['f'][var]['upper'] != 'periodic':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat:"
print "\nlower boundary condition on f for the variable %s: %s" % (var, BC['f'][var]['lower'].upper())
print "upper boundary condition on f for the variable %s: %s" % (var, BC['f'][var]['upper'].upper())
print "\nare inconsistent. Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')"
raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')
elif BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] == 'periodic':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat:"
print "\nlower boundary condition on f for the variable %s: %s" % (var, BC['f'][var]['lower'].upper())
print "upper boundary condition on f for the variable %s: %s" % (var, BC['f'][var]['upper'].upper())
print "\nare inconsistent. Cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')"
raise InputError('cannot combine periodic and non-periodic boundary conditions on same variable for distribution function, check inputs in params_boundaryconditions.dat')
else:
# by this point, we have already checked for consistency among the nonperiodic conditions
            # both boundaries are non-periodic, and are a combination of: symmetric (lower), collector (lower or upper), absorbing (lower or upper), cutoff (lower or upper)
BC['f'][var]['type'] = 'nonperiodic'
distribution_function_boundarycondition_orchestrator_prefix = 'DECSKS.lib.boundaryconditions'
# create a dictionary of function handles that call either
# the 'periodic', 'nonperiodic', or 'symmetric' orchestrator in lib.boundaryconditions
#
# i.e. we form the string handle for each active variable var:
#
# distribution_function_boundarycondition_orchestrator_handle[var] =
#
# DECSKS.lib.boundaryconditions.periodic
# DECSKS.lib.boundaryconditions.nonperiodic
# DECSKS.lib.boundaryconditions.symmetric
distribution_function_boundarycondition_orchestrator_handle = {}
for var in phasespace_vars:
distribution_function_boundarycondition_orchestrator_handle[var] = ".".join(
(distribution_function_boundarycondition_orchestrator_prefix, BC['f'][var]['type']))
# --------------------------------------------------------------------------
# High order correction (HOC) method applied to each phase space variable
# store as uppercase
HOC = {}
HOC['x'] = safe_eval(lines[56][lines[56].find(':')+1:].strip())
HOC['y'] = safe_eval(lines[57][lines[57].find(':')+1:].strip())
HOC['z'] = safe_eval(lines[58][lines[58].find(':')+1:].strip())
HOC['vx'] = safe_eval(lines[60][lines[60].find(':')+1:].strip())
HOC['vy'] = safe_eval(lines[61][lines[61].find(':')+1:].strip())
HOC['vz'] = safe_eval(lines[62][lines[62].find(':')+1:].strip())
# make all non-None inputs capitalized
for key in HOC.keys():
if HOC[key] is not None:
HOC[key] = HOC[key].upper()
else:
pass
# check for valid inputs
for key in HOC.keys():
if HOC[key] is not None:
if type(HOC[key]) != str:
raise InputError('A non-string entry was found as a high order correction specification. Only FD or FOURIER are accepted')
elif HOC[key] != 'FD' and HOC[key] != 'FOURIER':
print "\nThe following high order correction was specified in params.dat, but is not recognized:"
print "\nHigh order correction on %s: %s\n" % (key, HOC[key].upper())
print "only FD and FOURIER are accepted keywords\n"
raise InputError('An unrecognized high order correction was specified. Only FD or FOURIER are accepted')
elif HOC[key] == 'FOURIER' and BC['f'][key]['type'] != 'periodic': # Fourier corrections use trigonometric derivatives, which rely on periodicity of the underlying functions
print "\nThe following boundary conditions specified in params_boundaryconditions.dat:"
print "\nlower boundary condition on f for the variable %s: %s" % (key, BC['f'][key]['lower'].upper())
print "upper boundary condition on f fore the variable %s: %s\n\n" % (key, BC['f'][key]['upper'].upper())
print "are inconsistent with the high order correction specified in params.dat:"
print "\nhigh order correction on %s: %s\n\n" % (key, HOC[var].upper())
print "FOURIER high order corrections only make sense for periodic systems (if this is the intention, the BCs on f and phi must be set to PERIODIC in params_boundaryconditions.dat)\n"
raise InputError('Fourier corrections on a variable only make sense for periodic systems. The boundary conditions on the distribution function were read-in as not periodic for this variable.')
elif eval('N' + key) is None:
raise InputError('a variable not involved in the simulation (its number of grid points was specified as None) must also have its high order correction method specified as None. While reading in the input deck, the aforementioned expectation was not met. Please revisit the entries (number of grid points) and high order correction specification.')
# store lists containing number of total and active gridpoints
# this is acknowledged as redundant given the above storing as Nx_active, Ny_active,
# etc., but these objects are used in legacy methods inside DECSKS
# initialize lists
total_dims = [] # e.g. in 1D1V this could contain [Nx, Nvx]
for var in phasespace_vars:
total_dims.append(eval('N' + var))
numdims = len(phasespace_vars)
# --------------------------------------------------------------------------
# Initial density specification (2 species)
mu = safe_eval(lines[68][lines[68].find(':')+1:].strip())
densities_list = lines[69][lines[69].find(':')+1:].strip().split(', ')
for i in range(len(densities_list)):
densities_list[i] = densities_list[i].lower()
if len(densities_list) == 2: # if two species return dictionary of strings
density = {}
density['electrons'] = densities_list[0]
density['electrons'] = density['electrons'].lower()
density['ions'] = densities_list[1]
density['ions'] = density['ions'].lower()
# --------------------------------------------------------------------------
# split scheme specification
split_scheme = lines[81][lines[81].find('=')+1:].strip()
split_scheme = split_scheme.upper()
# filepath to splitting coefficient tables
filename = lines[82][lines[82].find(':')+1:].strip()
filepath = './etc/' + filename
# get splitting coefficients for chosen scheme
if split_scheme is not None:
splitting = splitting_coefficients(filepath, split_scheme)
else:
splitting = None
# --------------------------------------------------------------------------
# check for validity on split scheme vs. boundary conditions
#
# i.e. check that if the problem is bounded, the user cannot use a split scheme that has negative time substeps
#
# Schemes with only positive time substeps: LF2
# Schemes that contain negative time substeps: Y4, O6-4, O11-6, O14-6
#
for var in phasespace_vars:
if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':
if split_scheme in ['LF2']:
pass
else: # a split scheme that involves negative time substeps has been selected
print "\nThe following set of user specified information is not accepted by DECSKS:\n"
print "\nin params.dat, the following was specified:"
print "split scheme = %s:" % split_scheme
print "\nand the boundary data was specified in params_boundaryconditions.dat:\n"
print "distribution function lower boundary condition on %s: %s" % (BC['f'][var]['lower'],var)
print "distribution function upper boundary condition on %s: %s" % (BC['f'][var]['upper'], var)
print "\nThe split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent\n"
raise InputError('The split scheme involves negative time substeps, while the boundary conditions are non-periodic. The BOUNDED Vlasov-Poisson problem is irreversible. A split scheme with negative time substeps can only be used in periodic systems, which correspond to systems of infinite extent. To rectify this, the user may wish to select periodic boundary conditions on the distribution function (hence phi).')
# --------------------------------------------------------------------------
# Plot window specification (used in lib.plots.Setup)
xmin = eval(lines[96][lines[96].find('=')+1:].strip())
xmax = eval(lines[97][lines[97].find('=')+1:].strip())
ymin = eval(lines[99][lines[99].find('=')+1:].strip())
ymax = eval(lines[100][lines[100].find('=')+1:].strip())
plot_params = dict(xmin = xmin, xmax = xmax,
ymin = ymin, ymax = ymax)
# --------------------------------------------------------------------------
# DICTIONARIES AND MATRICES RELEVANT FOR HIGH ORDER CORRECTION APPLICATIONS
#
# Constructing the finite different weight matrices, W.
#-------------------------------------------------------
# requires: (dict) FD_schemes
#
# Note: FD_schemes is only needed to construct W. W is what is used in
# the simulation. Hence, the building routine for FD_schemes
# is not optimized, since it happens before the simulation starts
# and is not a source of repeated computational cost.
#
# FD_schemes is a dictionary containing the families of every order derivative
# needed for the indicated global error N in etc/params.dat, i.e. all schemes
# of various degrees of asymmetry and handedness. For large N, this can be a
# large dictionary, cf. the function routine read_FD_schemes to see all
# that gets stored inside. It is used to construct the difference coefficient
# matrices W (for applying high order corrections). The other scheme
# FD_scheme_dn1 is used to construct the matrix W_dn1 which is a difference
# coefficient matrix for the first derivative (dn = 1) at LTE = 6, and used
# to compute the electric field E = "-dphi" = W_dn1.dot(phi),
    # where dphi is the first derivative of the electric potential, as calculated by
# the methods in lib.fieldsolvers package
#---------------------------------------------------------------------------
#
# initialize all dictionaries whose keys correspond to phase space vars
# and whose values contain the relevant ndarrays
Xi = {}
xi = {}
W = {}
# top level check: if any var has FD corrections, store FD_schemes and init FD weight matrix W
# for 6th order first derivative
if 'FD' in HOC.values():
# store finite difference schemes
FD_schemes = read_FD_schemes(N)
# if FD on a configuration variable, need to differentiate phi to obtain the acceleration a ~ E = -dphi
if HOC['x'] == 'FD' or HOC['y'] == 'FD' or HOC['z'] == 'FD':
# first derivative with LTE = 6, used to find dphi = -E after phi is
# found from a 6th order Poisson solve
# EXPECTS 'x' at the moment
FD_scheme_dn1 = read_FD_scheme(1,6)
W_dn1_LTE6 = assemble_finite_difference_weight_matrix_const_dn_const_LTE(BC,
'x',
Nx,
FD_scheme_dn1,
dn = 1,
LTE = 6
)
else:
# else, Fourier Gauss solver is used, no need for this matrix
W_dn1_LTE6 = None
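        # Sketch of how W_dn1_LTE6 is consumed downstream (cf. the field solver
        # call-flow notes further below): with phi known on the x-grid,
        #
        #     dphi = W_dn1_LTE6.dot(phi)               # 6th order first derivative (unscaled)
        #     E    = -1. / config_var.width * dphi     # E = -dphi/dx
        #
        # config_var.width denotes the grid spacing attribute of the configuration
        # variable object used elsewhere in DECSKS.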
# variable-by-variable checks: assemble consistent objects needed
# for the specified means of HOC from etc/params.dat
# Note: the following is organized with the expectation that
# higher dimensional implementations would be stepped through
# as sets of 2D advection problems, always paired as z and vz
# i.e. not as mixed stepthroughs with x paired with vy for example
for var in phasespace_vars:
if HOC[var] == 'FD':
W[var] = assemble_finite_difference_weight_matrix(BC,
'x',
eval('N' + var),
N,
FD_schemes
)
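            # W[var] gathers the difference coefficient (weight) matrices for every
            # derivative order needed to reach global error order N on this variable;
            # the precise array layout is whatever assemble_finite_difference_weight_matrix
            # returns and is assumed here to be consumed by the high order corrector
            # routines in lib.HOC referenced below.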
elif HOC[var] == 'FOURIER':
# ensure the correct number of grid points
# is passed for the generalized velocity Nvz_active
# for x,y,z, 'vz' = vx, vy, vz
# for vx, vy, vz, 'vz' = ax, ay, az, which have
# the same number of dims as x, y, z, respectively
# this is needed in the routine assemble_spectral_derivative_operator
# so that the correctly dimensioned 2D arrays are returned
if var[0] == 'v':
# if a velocity variable, the velocity of this velocity is an acceleration
# which has the same dimensions as the corresponding configuration variable
# e.g. vx has velocity(vx) = ax which has the same dimensions as x
Nvv = eval('N' + var[1])
else:
# if a configuration variable, the velocity is the physical velocity, which
                # must be a corresponding active variable
# e.g. x has a velocity vx
Nvv = eval('Nv' + var)
# The 3D tensor Xi is used to compute trigonometric derivatives
# by operating on a 2D array of Fourier wave components (transformed
# row-wise for each column, where as usual the objects have been
            # transposed if needed so that the variation (x or vx) is along
# rows, not columns)
#
# Fourier transform (derivatives) = Xi * Fourier transform (f)
# derivatives = inverse transform (Xi * Fourier(f))
#
#
# the object xi is used in legacy methods in DECSKS (pre-DECSKSv2.0)
Xi, xi = assemble_spectral_derivative_operator(Xi, xi,
var,
eval('a' + var),
eval('b' + var),
eval('N' + var),
Nvv,
N)
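            # Minimal sketch of how Xi is meant to act (per the transform relation noted
            # above); the broadcasting layout of Xi[var] against the 2D array of Fourier
            # components is assumed here for illustration only:
            #
            #     F_hat  = np.fft.fft(f_old, axis=0)                 # transform along rows
            #     dF_hat = Xi[var] * F_hat                           # apply wave-number factors
            #     df     = np.real(np.fft.ifft(dF_hat, axis=0))      # trigonometric derivatives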
# ---------------------------------------------------------------------
# "Alternating" identity matrix
# in lib.HOC.correctors, require an diagonal matrix with shape = (Nz, Nz)
# with entries as (-1)^i, where i is the row number, for details see on github
#
# dsirajud/IPython-notebooks/
# DECSKS-09 -- array-based implementation recast -- part 1.ipynb
#
# section "2D casting of correction coefficients c (vector) -> c (tensor)"
I_alternating = np.diag( (-np.ones(N)) ** np.arange(N) )
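    # e.g. for N = 4, I_alternating is
    #
    #     [[ 1,  0,  0,  0],
    #      [ 0, -1,  0,  0],
    #      [ 0,  0,  1,  0],
    #      [ 0,  0,  0, -1]]
    #
    # i.e. diag(+1, -1, +1, -1), which supplies the (-1)**i prefactors mentioned above.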
# ---------------------------------------------------------------------
# Bernoulli number storage, and forming the matrices A_pos, A_neg
# obtain Bernoulli numbers (note: only 23 numbers are entered into the dat file ->
# max global error is 23 - 1 = 22) for a correction up to global error order
# N, N-1 Bernoulli numbers are needed. If higher than global error order 22 is
    # desired, additional Bernoulli numbers need to be entered in
#
# etc/Table_of_Bernoulli_numbers.dat
#
# Store Bernoulli numbers from dat file etc/Table_of_Bernoulli_numbers.dat
filename = 'Table_of_Bernoulli_numbers.dat'
filepath = './etc/' + filename
Bernoulli_numbers = Bernoulli(filepath)
# "A" matrices for Bernoulli number storage and matrix HOC application
# in lib.HOC.Beta_matrix, see notebook on github at
# dsirajud/IPython-notebooks/
# DECSKS-09 -- array-based implementation recast -- part 1.ipynb
#
# the A matrices are matrices containing scaled Bernoulli numbers (normalized by factorials)
# that also factor in the sign (direction) information of the advecting density packets
# (the different amounts to all odd coefficients having opposite sign)
# The A matrices are used in the method lib.HOC.Beta_matrix (used to construct the array of the *magnitudes*
# of the Nvz sets of N beta coefficients; note that the high order flux is further computed as a sum of
# products that alternating with sign according to the parity of the derivative number, i.e. alternates signs
# among odds and evens. These prefactors are applied at the end of the method lib.HOC.correctors by matrix
# pre-multiplication of the matrix B with the alternating (in sight) identity matrix I formed above)
# the method lib.HOC.Beta_matrix is called from inside lib.HOC.correctors (used to assemble the 2D array c of correctors)
A_pos, A_neg = np.zeros([N,N]), np.zeros([N,N])
for i in range(N):
for j in range(i+1):
A_pos[i,j] = Bernoulli_numbers[i-j] / scipy.misc.factorial(i-j)
if (i - j) == 1:
A_neg[i,j] = -A_pos[i,j]
else:
A_neg[i,j] = A_pos[i,j]
A_matrix = {}
# dictionary container
# allow dictionary access to relevant matrix of Bernoulli numbers
# by operating with str(int(np.sign(CFL.frac)))
A_matrix['1'] = A_pos
A_matrix['0'] = A_pos
A_matrix['-1'] = A_neg
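    # e.g. lib.HOC.Beta_matrix can pick the direction-consistent table with
    #
    #     A = A_matrix[str(int(np.sign(CFL.frac)))]
    #
    # (CFL.frac being the fractional CFL number referenced above); both tables are
    # lower triangular with entries B_{i-j} / (i-j)!, A_neg only flipping the sign
    # of the entries with i - j == 1.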
#--------------------------------------------------------------------------------------------#
# ELECTRIC POTENTIAL PHI
#--------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------#
# Boundary conditions BC['phi'] dictionary and dictionary of boundary values, phi_BC
#
# BC['phi']['x', 'y', or 'z']['lower' or 'upper'] = string keyword that describes the BC
# phi_BC['x', 'y', or 'z'] = boundary value vector phi_BC that appears in a Poisson solver
#--------------------------------------------------------------------------------------------#
phi_BC = {}
# keys: 'x', 'y', 'z'
    # values: ndarrays of size eval('N' + var)
BC['phi'] = {}
# keys: 'x', 'y', 'z'
# values / keys for subdict: 'lower', 'upper'
# values for subdict: string keyword that describes the BC at the key specification
# --------------------------------------------------------------------------
# PHI BOUNDARY CONDITIONS AND PHI BOUNDARY VALUES VECTORS FOR SOLVER Phi_BC['x', 'y', or 'z']
# lines read in from boundaryconditions dat file were stored above in BC_infile_lines
if HOC['x'] == 'FD':
BC['phi']['x'] = {}
BC['phi']['x']['lower'] = safe_eval(BC_infile_lines[209][BC_infile_lines[209].find('=')+1:].strip())
BC['phi']['x']['upper'] = safe_eval(BC_infile_lines[210][BC_infile_lines[210].find('=')+1:].strip())
phi_BC['x'] = np.zeros(Nx)
elif HOC['x'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed
phi_BC['x'] = None
if HOC['y'] == 'FD':
BC['phi']['y'] = {}
BC['phi']['y']['lower'] = safe_eval(BC_infile_lines[212][BC_infile_lines[212].find('=')+1:].strip())
BC['phi']['y']['upper'] = safe_eval(BC_infile_lines[213][BC_infile_lines[213].find('=')+1:].strip())
phi_BC['y'] = np.zeros(Ny)
elif HOC['y'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed
phi_BC['y'] = None
if HOC['z'] == 'FD':
BC['phi']['z'] = {}
BC['phi']['z']['lower'] = safe_eval(BC_infile_lines[215][BC_infile_lines[215].find('=')+1:].strip())
BC['phi']['z']['upper'] = safe_eval(BC_infile_lines[216][BC_infile_lines[216].find('=')+1:].strip())
phi_BC['z'] = np.zeros(Nz)
elif HOC['z'] == 'FOURIER': # periodic fourier solver is used, a BC vector is not needed
phi_BC['z'] = None
# ensure all inputs stored above in BC['phi'] dict objects are uppercase and recognized
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
if HOC[var] == 'FOURIER':
pass
else: # HOC is FD which computes the Lorentz term through a potential phi (Fourier uses the electric field E)
# LOWER BOUNDARY CHECKS
if BC['phi'][var]['lower'] is None:
raise InputError('a NoneType was specified as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')
elif type(BC['phi'][var]['lower']) != str:
                    raise InputError('a non-string type was specified as a LOWER boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. Otherwise, a recognized string keyword must be specified as the boundary condition on phi for this variable.')
else:
BC['phi'][var]['lower'] = BC['phi'][var]['lower'].upper()
if BC['phi'][var]['lower'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat is not a recognized keyword:\n\n"
print "lower boundary condition on phi for variable %s: %s" % (var, BC['phi'][var]['lower'].upper())
raise InputError('boundary condition indicated on phi is not an accepted keyword option')
elif (BC['phi'][var]['lower'] == 'SYMMETRIC' or BC['phi'][var]['lower'] == 'SYMMETRY') and BC['f'][var]['lower'] != 'symmetric':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat is:\n\n"
print "lower boundary condition on phi for variable %s: %s\n" % (var, BC['phi'][var]['lower'].upper())
print "lower boundary condition on f for variable %s: %s" % (var, BC['f'][var]['lower'].upper())
print "upper boundary condition on f for variable %s: %s\n" % (var, BC['f'][var]['upper'].upper())
print "a SYMMETRIC boundary condition must be specified on both phi and f"
# by this point all synonyms have been normalized on BC['f'][var], 'symmetric' corresponds to the symmetry condition
raise InputError('a SYMMETRY boundary condition on phi was specified, but a symmetry boundary was not specified on the distribution function f at this same (lower) boundary. A symmetric domain requires a lower boundary condition to be SYMMETRIC on both phi and f.')
else:
pass
# UPPER BOUNDARY CHECKS
if BC['phi'][var]['upper'] is None:
raise InputError('a NoneType was specified as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not meant to be evolved, set its number of grid points to None')
elif type(BC['phi'][var]['upper']) != str:
                    raise InputError('a non-string type was specified as an upper boundary condition on the electric potential phi for an active variable (a non-NoneType was specified for the number of grid points on this variable). If the variable is not intended to be active, set its number of grid points to None. Otherwise, a recognized string keyword must be specified as the boundary condition on phi for this variable.')
else:
BC['phi'][var]['upper'] = BC['phi'][var]['upper'].upper()
if BC['phi'][var]['upper'] not in ['PERIODIC', 'SELF-CONSISTENT', 'SYMMETRIC', 'SYMMETRY', 'BIAS']:
print "\nThe following boundary condition specified in params_boundaryconditions.dat is not a recognized boundary condition keyword:\n\n"
print "upper boundary condition on phi for variable %s: %s\n" % (var, BC['phi'][var]['upper'].upper())
raise InputError('boundary condition indicated on phi is not an accepted keyword option')
elif BC['phi'][var]['upper'] == 'SYMMETRIC' or BC['phi'][var]['upper'] == 'SYMMETRY':
print "\nThe following boundary condition specified in params_boundaryconditions.dat is not available:\n\n"
print "upper boundary condition on phi: %s\n" % BC['phi'][var]['upper'].upper()
raise NotImplementedError('a SYMMETRY boundary condition on phi as an UPPER boundary is specified in params_boundaryconditions.dat; only lower boundaries can support a symmetry boundary condition.')
# CHECK FOR CONSISTENCY IN BOUNDARY CONDITIONS BETWEEN BOTH LOWER AND UPPER SPECIFICATIONS
if BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "lower boundary condition on phi for variable %s: %s" % (var, BC['phi'][var]['lower'].upper())
print "upper boundary condition on phi for variable %s: %s\n\n" % (var, BC['phi'][var]['upper'].upper())
raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the lower boundary condition as PERIODIC but the upper boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')
elif BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "lower boundary condition on phi for variable %s: %s" % (var, BC['phi'][var]['lower'].upper())
print "upper boundary condition on phi for variable %s: %s\n\n" % (var, BC['phi'][var]['upper'].upper())
raise InputError('PERIODIC boundary conditions on phi involve both lower and upper boundaries. The read-in of params_boundaryconditions.dat has the upper boundary condition as PERIODIC but the lower boundary condition is NOT. Both boundary conditions on phi must be set to PERIODIC if a periodic plasma is to be simulated.')
elif BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC':
if BC['f'][var]['type'] != 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "lower boundary condition on phi for variable %s: %s" % (var, BC['phi'][var]['lower'].upper())
print "upper boundary condition on phi for variable %s: %s\n" % (var, BC['phi'][var]['upper'].upper())
print "lower boundary condition on phi for variable %s: %s" % (var, BC['f'][var]['lower'].upper())
print "upper boundary condition on phi for variable %s: %s\n" % (var, BC['f'][var]['upper'].upper())
print "e.g. periodic boundaries on phi require periodic boundaries on f for the same variable\n"
raise InputError('PERIODIC boundary conditions on were specifed consistently for phi in params_boundaryconditions.dat; however, periodic boundary conditions must also be consistently specified on the distribution function. Revisit params_boundaryconditions.dat and ensure that both lower and upper boundaries on the distribution function f and the potential phi are set to PERIODIC if a periodic plasma is intended to be simulated.')
elif BC['f'][var]['type'] == 'periodic': # note that validity and consistency checks on inputs for the distribution function have already been done above
pass
# CHECK FOR CONSISTENCY ON PHI BCS WITH HIGH ORDER CORRECTION METHOD SPECIFIED (note we have already checked this against the distribution function BCs)
                # here, we are only checking that if the BCs on phi are not periodic, HOC must NOT be set to FOURIER (which relies on periodicity)
# the following conditional check asks: "if (BCs on phi are not periodic) AND (HOC is FOURIER)"
                if ((BC['phi'][var]['lower'] == 'PERIODIC' and BC['phi'][var]['upper'] != 'PERIODIC') or (BC['phi'][var]['lower'] != 'PERIODIC' and BC['phi'][var]['upper'] == 'PERIODIC')) and HOC[var] == 'FOURIER':
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent with the specified high order correction method in params.dat: \n\n"
print "lower boundary condition on phi for variable %s: %s" % (var, BC['phi'][var]['lower'].upper())
print "upper boundary condition on phi for variable %s: %s\n\n" % (var, BC['phi'][var]['upper'].upper())
print "upper boundary condition on phi for variable %s: %s\n\n" % (var, HOC[var].upper())
print "\n\nFourier high order corrections require periodic boundary conditions on both phi and the distribution function f\n"
raise InputError('the high order correction is specified as FOURIER; however, the BCs on the electric potential phi are not periodic. FOURIER corrections require PERIODIC BCs on phi and the distribution function as the methods rely on periodicity')
#--------------------------------------------------------------------------------------------#
# BIAS values
#--------------------------------------------------------------------------------------------#
Bias = {} # this dictionary is created for reading in the bias values, it is not returned
# in sim_params dict. If a bias condition is set on any boundary, this dictionary
# assigns its value at that boundary in the vector phi_BC[var], phi_BC[var] is
# returned (as usual, var = ['x', 'y', 'z'])
Bias['x'] = {}
Bias['y'] = {}
Bias['z'] = {}
Bias['x']['lower'] = safe_eval(BC_infile_lines[227][BC_infile_lines[227].find('=')+1:].strip())
Bias['x']['upper'] = safe_eval(BC_infile_lines[228][BC_infile_lines[228].find('=')+1:].strip())
Bias['y']['lower'] = safe_eval(BC_infile_lines[230][BC_infile_lines[230].find('=')+1:].strip())
Bias['y']['upper'] = safe_eval(BC_infile_lines[231][BC_infile_lines[231].find('=')+1:].strip())
Bias['z']['lower'] = safe_eval(BC_infile_lines[233][BC_infile_lines[233].find('=')+1:].strip())
Bias['z']['upper'] = safe_eval(BC_infile_lines[234][BC_infile_lines[234].find('=')+1:].strip())
# check for valid inputs on active variables for any boundary that is specified as BIAS
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
if HOC[var] == 'FOURIER':
pass
else:
for boundary in ['lower', 'upper']:
if var in phasespace_vars:
if BC['phi'][var][boundary] == 'BIAS':
if Bias[var][boundary] is None: # if the BC is BIAS but the value input for the BIAS value is None
print "\nThe following specifications in params_boundaryconditions.dat are inconsistent:\n"
print "%s boundary condition on phi for variable %s: %s" % (boundary, var, BC['phi'][var][boundary].upper())
print "%s BIAS value on phi for variable %s: %s\n" % (boundary, var, Bias[var][boundary])
print "e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\n"
raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is NoneType. Must be a number.')
elif type(Bias[var][boundary]) == str:
print "\nThe following specifications in params_boundaryconditions.dat are inconsistent:\n"
print "%s boundary condition on phi for variable %s: %s" % (boundary, var, BC['phi'][var][boundary].upper())
print "%s BIAS value on phi for variable %s: %s\n" % (boundary, var, Bias[var][boundary])
print "e.g. if a boundary condition on phi is set to BIAS for a variable, a number must be specifed under BIAS value\n"
raise InputError('A phi boundary condition on an active variable (number of grid points on this variable has been set as non-None) has been specified as BIAS; however, the corresponding BIAS value is str type. Must be a number.')
else:
pass
# E is calculated by the following call flow, first an ORCHESTRATOR is called:
#
# E = lib.fieldsolvers.compute_electric_field_fourier <--- solves with a Gauss' law solver directly
#
# or
#
# E = lib.fieldsolvers.compute_electric_field_fd <--- solves a Poisson solver for phi, then differentiate to get E
#
# which can generally be called by eval operating on string handles that are themselves constructed
# per 'lib.fieldsolvers.compute_electric_field_' + HOC[var].lower()
#
# If a finite difference routine is specified, a Poisson solve must be performed to obtain phi.
# We call the relevant Poisson solver among the following options (L = lower boundary, U = upper boundary, DBC = Dirichlet BC, NBC = Neumann BC):
#
# Poisson_6th_PBC
# Poisson_6th_LDBC_UDBC
# Poisson_6th_LDBC_UNBC
# Poisson_6th_LNBC_UDBC
# Poisson_6th_LDBC_LDBC
# Poisson_6th_UDBC_UNBC
#
# which are selected based on the boundary conditions the user has supplied in params_boundaryconditions.dat.
#
# finally, we compute and return:
#
# E = - 1 / config_var.width * W_dn1_LTE6.dot(phi)
#
# --------------------------------------------------------------------------
    # fieldsolver orchestrator handle string for electric field (periodic or non-periodic)
#
# currently only 1D1V, only one handle needed. When this will be generalized, can make a dict object with keys corresponding
# to each active configuration variable
compute_electric_field_orchestrator_handle = {}
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
# dictionary key labels the component of the electric field: 'x', 'y', 'z'
compute_electric_field_orchestrator_handle[var] = "DECSKS.lib.fieldsolvers.compute_electric_field_" + HOC[var].lower()
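    # Illustrative dispatch (argument list assumed; it mirrors how the boundary
    # condition orchestrator handles above are consumed):
    #
    #     Ex = eval(compute_electric_field_orchestrator_handle['x'])(...)
    #
    # i.e. the stored string is eval'd at solve time to call either the Fourier
    # Gauss solver or the FD route (6th order Poisson solve for phi, then
    # E = -W_dn1_LTE6.dot(phi) / config_var.width) described below.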
# ---------------------------------------------------------------------
# initialize dictionaries for wall charge objects
sigma = {}
sigma_n = {}
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
sigma_n[var] = {}
sigma[var] = {}
# --------------------------------------------------------------------------
# Dictionary for the specific electric potential phi function solver needed
# according to the specified boundary conditions on phi
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
if HOC[var] == 'FOURIER':
pass # uses electric field E, periodic boundary conditions only
else: # is FD corrections, and electric potential phi in a Poisson solver, can be periodic or other BCs
BC['phi'][var]['type'] = BC['phi'][var]['lower'] + '_' + BC['phi'][var]['upper']
if BC['phi'][var]['type'] == 'PERIODIC_PERIODIC':
BC['phi'][var]['type'] = 'PBC'
if BC['f'][var]['lower'] != 'periodic' and BC['f'][var]['upper'] != 'periodic':
                        raise InputError('Boundary conditions on phi were specified as PERIODIC; however, the corresponding boundary conditions on the distribution function f are not periodic. Periodic BCs on phi require periodic BCs on f (check inputs in params_boundaryconditions.dat)')
if BC['phi'][var]['type'] == 'BIAS_BIAS':
BC['phi'][var]['type'] = 'LDBC_UDBC'
# Dirichlet condition, phi = BIAS value, see notebook s24 for reason for factor of -2.0 in the derivation (from interpolation to the half-integer index)
# if grounded (phi = 0 at wall), the factor of -2.0 is of no consequence so this is general.
phi_BC[var][0] = -2.0 * float(Bias[var]['lower'])
# Dirichlet condition, phi = BIAS value
phi_BC[var][-1] = -2.0 * float(Bias[var]['upper'])
if BC['f'][var]['lower'] != 'absorbing' or BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
raise InputError('A boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')
elif BC['phi'][var]['type'] == 'BIAS_SELF-CONSISTENT':
BC['phi'][var]['type'] = 'LDBC_UNBC'
# Dirichlet condition, phi = BIAS value
phi_BC[var][0] = float(Bias[var]['lower'])
# Neumann condition, dphi = sigma_upper, translates to phi_BC[-1] = -6 var.width * sigma_upper (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)
# phi_BC[-1] = - 6 * var.width * sim_params['sigma'][var]['upper'], changes with time step
if BC['f'][var]['lower'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
raise InputError('A lower boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')
if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
# initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions
sigma[var]['upper'] = 0 # initialize to zero charge at time zero
sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "upper boundary condition on phi for variable %s: %s\n" % (var, BC['phi'][var]['upper'].upper())
print "upper boundary condition on f for variable %s: %s\n" % (var, BC['f'][var]['upper'].upper())
print "\ne.g. an upper boundary condition on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR"
print "\ne.g. an upper boundary condition on f as ASBORBING must have the upper boundary condition on phi as BIAS\n"
raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')
elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_BIAS':
BC['phi'][var]['type'] = 'LNBC_UDBC'
# Neumann condition, dphi = -sigma_lower, translates to phi_BC[0] = -6 var.width * sigma_lower (see https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb for details)
#phi_BC[var][0] = var.width * LNBC, changes with time step
# Dirichlet condition, phi = BIAS value
phi_BC[var][-1] = -2.0 * float(Bias[var]['upper'])
# check upper boundary
if BC['f'][var]['upper'] == 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
pass
elif BC['f'][var]['upper'] == 'source':
pass
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "upper boundary condition on phi for variable %s: %s\n" % (var, BC['phi'][var]['upper'].upper())
print "upper boundary condition on f for variable %s: %s\n\n" % (var, BC['f'][var]['upper'].upper())
print "\ne.g. an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\n"
raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')
# check lower boundary
if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
# initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions
sigma[var]['lower'] = 0 # initialize to zero charge at time zero
sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "lower boundary condition on phi: %s" % BC['phi'][var]['lower'].upper()
print "lower boundary condition on f: %s\n" % BC['f'][var]['lower'].upper()
print "\ne.g. an lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR"
print "e.g. an lower boundary condition set on f as ABSORBING must have the lower boundary condition on phi as BIAS"
print "e.g. an lower boundary condition set on f as PERIODIC requires the upper boundary on f to be PERIODIC as well as both lower and upper boundary conditions on phi to be set to PERIODIC\n"
raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector if self-consistent boundary potentials are desired). Equivalently, phi is not compatible with f (e.g. if periodic boundaries on f were desired, the potential must also be periodic)')
elif BC['phi'][var]['type'] == 'SYMMETRIC_BIAS' or BC['phi'][var]['type'] == 'SYMMETRY_BIAS':
BC['phi'][var]['type'] = 'LNBC_UDBC'
# Neumann condition, dphi = 0 for symmetry
phi_BC[var][0] = 0.
# Dirichlet condition, phi = BIAS value
phi_BC[var][-1] = float(Bias[var]['upper'])
if BC['f'][var]['upper'] != 'absorbing': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "upper boundary condition on phi: %s" % BC['phi'][var]['upper'].upper()
print "upper boundary condition on f: %s\n\n" % BC['f'][var]['upper'].upper()
print "\ne.g. an upper boundary condition set on phi as BIAS must have the upper boundary condition on f as ABSORBING\n "
raise InputError('An upper boundary condition on phi was specified as BIAS; however, the corresponding boundary condition on f is not compatible (must be set to absorbing or equivalent synonym)')
elif BC['phi'][var]['type'] == 'SYMMETRIC_SELF-CONSISTENT' or BC['phi'][var]['type'] == 'SYMMETRY_SELF-CONSISTENT':
BC['phi'][var]['type'] = 'LDBC_LNBC'
# We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)
# cf. DECSKS-04 notebook for more details:
#
# https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb
#
# Dirichlet condition, set reference potential phi = 0
phi_BC[var][0] = 0. # reference potential set to zero
# Neumann condition, dphi = 0 for symmetry
phi_BC[var][1] = 0.
if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
# initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions
# By virtue of the setup, the above enforcements on the lower boundary ensure this unenforced upper Neumann BC is
# satisfied automatically, given the constraint the Poisson equation places on the Neumann boundary values
#
# see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)
#
# Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,
# we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish
sigma[var]['upper'] = 0 # initialize to zero charge at time zero
sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "upper boundary condition on phi: %s" % BC['phi'][var]['upper'].upper()
print "upper boundary condition on f: %s\n\n" % BC['f'][var]['upper'].upper()
print "\ne.g. an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\n "
raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')
elif BC['phi'][var]['type'] == 'SELF-CONSISTENT_SELF-CONSISTENT':
BC['phi'][var]['type'] = 'LDBC_LNBC'
# We default to a LDBC_LNBC solver, both boundary conditions on left edge, entries 0 (Dirichlet) and 1 (Neumann)
# cf. DECSKS-04 notebook for more details:
#
# https://github.com/dsirajud/IPython-notebooks/DECSKS-04...ipynb
#
# Dirichlet condition, set reference potential phi = 0
phi_BC[var][0] = 0. # reference potential set to zero
# Neumann condition, set by the accumulated wall charge sigma (updated each time step):
#phi_BC[var][1] = - 6 * var.width * sim_params['sigma'][var]['lower'], changes with time step
if BC['f'][var]['lower'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
# initialize wall charge densities
sigma[var]['lower'] = 0 # initialize to zero charge at time zero
sigma_n[var]['lower'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "lower boundary condition on phi on variable %s: SELF-CONSISTENT" % var
print "lower boundary condition on f on variable %s: %s\n\n" % (var, BC['f'][var]['lower'].upper())
print "\ne.g. a lower boundary condition set on phi as SELF-CONSISTENT must have the lower boundary condition on f as COLLECTOR\n "
raise InputError('A lower boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')
if BC['f'][var]['upper'] == 'collector': # all synonyms for 'absorbing' (except 'collector') have been seen by this point, and if encountered changed to 'absorbing'
# initialize wall charge densities, sigma for the collector (f) /self-consistent (phi) conditions
# By virtue of the setup, the above enforcements on the lower boundary ensure this unenforced upper Neumann BC is
# satisfied automatically, given the constraint the Poisson equation places on the Neumann boundary values
#
# see github.com/dsirajud/IPython-Notebooks/DECSKS-04 for more information (final few sections of the notebook)
#
# Thus, we do not need to actually enforce the wall potential directly in terms of the charge accumulated for this boundary; however,
# we initialize and track the objects here so that the data can be accessed, analyzed or otherwise plotted, should the user wish
sigma[var]['upper'] = 0 # initialize to zero charge at time zero
sigma_n[var]['upper'] = np.zeros(Nt + 1) # this was put in at one point for plotting wall charge vs. time
else:
print "\nThe following boundary conditions specified in params_boundaryconditions.dat are inconsistent together:\n\n"
print "upper boundary condition on phi: SELF-CONSISTENT"
print "upper boundary condition on f: %s\n\n" % BC['f'][var]['upper'].upper()
print "\ne.g an upper boundary condition set on phi as SELF-CONSISTENT must have the upper boundary condition on f as COLLECTOR\n "
raise InputError('An upper boundary condition on phi was specified as SELF-CONSISTENT; however, the corresponding boundary condition on f is not compatible (must be set to collector)')
# else: boundary conditions have already been checked for valid inputs, no invalid input will be encountered
# --------------------------------------------------------------------------
# ELECTRIC POTENTIAL PHI FUNCTION HANDLE STRING and BOUNDARY CONDITION TYPE FUNCTION HANDLE STRING
#
# currently only 1D1V, only one handle needed. When this is generalized, dict objects can be made with keys corresponding
# to each active configuration variable
#
# Each string handle calls its associated method per the boundary conditions specified by the user in params_boundaryconditions.dat;
# based on those boundary conditions, one of the following will be created:
#
# compute_electric_potential_phi_handle[var] =
#
# DECSKS.lib.fieldsolvers.Poisson_6th_PBC
# DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UDBC
# DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UNBC
# DECSKS.lib.fieldsolvers.Poisson_6th_LNBC_UDBC
# DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_LNBC
# DECSKS.lib.fieldsolvers.Poisson_6th_UDBC_UNBC (<-- available, but not used in any current combination of BCs)
#
#
# and, one of the following
#
# distribution_function_boundarycondition_handle[var]['lower'] =
#
# DECSKS.lib.boundaryconditions.absorbing_lower_boundary
# DECSKS.lib.boundaryconditions.collector_lower_boundary
# DECSKS.lib.boundaryconditions.symmetric_lower_boundary
#
# NOTE: if 'periodic' has been specified, everything is
# handled in the orchestrator, distribution_function_boundarycondition_orchestrator,
# which would take on the string value 'DECSKS.lib.boundaryconditions.periodic'
distribution_function_boundarycondition_prefix = 'DECSKS.lib.boundaryconditions'
distribution_function_boundarycondition_handle = {}
for var in phasespace_vars:
if BC['f'][var]['type'] == 'periodic':
pass
else:
distribution_function_boundarycondition_handle[var] = {}
distribution_function_boundarycondition_handle[var]['lower'] = ".".join((distribution_function_boundarycondition_prefix, BC['f'][var]['lower']))
distribution_function_boundarycondition_handle[var]['lower'] = "_".join((distribution_function_boundarycondition_handle[var]['lower'], 'lower_boundary'))
distribution_function_boundarycondition_handle[var]['upper'] = ".".join((distribution_function_boundarycondition_prefix, BC['f'][var]['upper']))
distribution_function_boundarycondition_handle[var]['upper'] = "_".join((distribution_function_boundarycondition_handle[var]['upper'], 'upper_boundary'))
compute_electric_potential_phi_handle = {}
compute_electric_potential_phi_prefix = "DECSKS.lib.fieldsolvers.Poisson_6th_"
for var in ['x', 'y', 'z']:
if var in phasespace_vars:
if HOC[var] == 'FOURIER': # uses a Gauss law solver to find E directly, which is called by the orchestrator on the fieldsolver
pass
else: # computes the electric field E by differentiating phi in an orchestrator fieldsolver function (string handle constructed above)
# inside the orchestrator, a particular Poisson solver is called according with the boundary conditions indicated in params_boundaryconditions.dat
compute_electric_potential_phi_handle[var] = compute_electric_potential_phi_prefix + BC['phi'][var]['type']
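# For example, a BIAS_BIAS specification was mapped to BC['phi']['x']['type'] == 'LDBC_UDBC' above,
# so the handle constructed here reads
#
# compute_electric_potential_phi_handle['x'] == 'DECSKS.lib.fieldsolvers.Poisson_6th_LDBC_UDBC'
#
# i.e. one of the Poisson solver variants enumerated in the comment block above.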
else:
pass
# in the future, can generalize this to multiple dimensions by making this a dict with keys ['x', 'y', 'z']
# currently just on 1D1V and expecting an 'x' variable to be evolved in configuration
if 'x' not in phasespace_vars:
raise NotImplementedError('Current 1D1V version of DECSKS is expecting x to be the active configuration variable. Please revise the intended simulation so that x is the symbol chosen in params.dat.')
else:
if HOC['x'] == 'FOURIER': # uses a Gauss solver to find E directly
Poisson_6th_order_FD_solver_matrices = None
else: # uses a Poisson solver to find phi, then differentiates to obtain E
Poisson_6th_order_FD_solver_matrices = assemble_Poisson_6th_order_FD_solver_matrices(Nx, BC)
derivative_method = {}
derivative_method_prefix = 'DECSKS.lib.derivatives'
for var in phasespace_vars:
derivative_method[var] = ".".join((derivative_method_prefix, HOC[var].lower()))
sim_params = dict(
sim_name = sim_name,
N = N, HOC = HOC,
derivative_method = derivative_method,
Nx = Nx, ax = ax, bx = bx,
Ny = Ny, ay = ay, by = by,
Nz = Nz, az = az, bz = bz,
Nvx = Nvx, avx = avx, bvx = bvx,
Nvy = Nvy, avy = avy, bvy = bvy,
Nvz = Nvz, avz = avz, bvz = bvz,
Nt = Nt, T = T,
phasespace_vars = phasespace_vars,
numdims = numdims,
total_dims = total_dims,
density = density,
mu = mu,
split_scheme = split_scheme,
splitting = splitting,
plot_params = plot_params,
BC = BC, # boundary condition types on all phase space variables on distribution function f and phi
phi_BC = phi_BC, # dictionary containing boundary value vector for electric potential used in Poisson solve, e.g. phi_BC['x']
sigma = sigma,
sigma_n = sigma_n, # this was put in for charge history plots
distribution_function_boundarycondition_handle = distribution_function_boundarycondition_handle, # dictionary with keys (var in phasespace_vars), which are keys to a subdict with keys 'lower', 'upper'
distribution_function_boundarycondition_orchestrator_handle = distribution_function_boundarycondition_orchestrator_handle, # dictionary with keys (var in phasespace_vars)
compute_electric_potential_phi_handle = compute_electric_potential_phi_handle,
compute_electric_field_orchestrator_handle = compute_electric_field_orchestrator_handle,
I_alternating = I_alternating, # identity matrix with alternating signs according to row, used in computing correctors c
A_matrix = A_matrix, # Matrices of Bernoulli numbers for HOC
W = W,
W_dn1_LTE6 = W_dn1_LTE6,
Xi = Xi, # spectral differentiation operator matrix (1j*xi[i,j]) ** q
xi = xi, # wave number vector
Poisson_6th_order_FD_solver_matrices = Poisson_6th_order_FD_solver_matrices
)
infile.close()
# --------------------------------------------------------------------------
# Before return, broadcast notification
# regarding start of simulation and order of solver
print "\n"
print "****************************************************************************************************"
print " Vlasov-Poisson DECSKS v%1.1f simulation " % version
print "****************************************************************************************************"
print "simulation label: %s" % sim_name
print "(output data will be stored in directory etc/outputs/%s)" % sim_name
print "\nwill step through %d-dimensional solution in variables: %s using %s time splitting scheme" % (len(phasespace_vars), phasespace_vars, split_scheme)
print "\n"
for var in phasespace_vars:
print "high order corrections method on %s: %s, LTE = %d" % (var, HOC[var], N+1)
if len(densities_list) == 2: # then two species are evolved
print "\ninitialized two species initial distribution functions:\n"
print "electrons: %s" % density['electrons']
print "ions: %s\n" % density['ions']
# create list of phase space strings
grid_str = []
for var in phasespace_vars:
grid_str.append('N' + var)
print "will evolve solution on the following grid:\n"
print "(%s, %s) = %d x %d grid:\n" % (grid_str[0], grid_str[1], eval(grid_str[0]), eval(grid_str[1]))
for var in phasespace_vars:
print " %g <= %s <= %g " % (eval('a' + var), var, eval('b' + var))
print "\n"
print "over a simulation time 0 <= t <= %g with Nt = %d time steps" % (T, Nt)
print "\n"
return sim_params
def splitting_coefficients(filepath, split_scheme):
"""Reads in the splitting coefficient for the specified
scheme in input file (e.g. params.dat)
inputs:
split_scheme -- (str) designates which split scheme
filepath -- (file) input file with splitting coeffs rel. path
output:
split_coeffs -- (dict) splitting coefficients for specified scheme
usage:
in split_schemes module, store and call as
splitting = sim_params['splitting'] # grab from sim_params dict
coeff = splitting['order']['coeffs'] = a, b, a, b, ...
stage = splitting['order']['stages'] = 1, 1, 2, 2, ...
access ith coefficient by
splitting[ coeff[i] ][ int(stage[i]) ]
"""
infile = open(filepath, 'r')
lines = infile.readlines()
infile.close()
if split_scheme == 'LF2':
coeffs = lines[6].strip().split(', ')
stages = lines[9].strip().split(', ')
a1 = eval(lines[14][lines[14].find('=')+1:].strip())
a2 = eval(lines[15][lines[15].find('=')+1:].strip())
b1 = eval(lines[16][lines[16].find('=')+1:].strip())
b2 = eval(lines[17][lines[17].find('=')+1:].strip())
number_of_stages = dict(a = 2, b = 2)
order = dict(coeffs = coeffs, stages = stages)
splitting = dict(order = order, number_of_stages = number_of_stages,
a = [None, a1, a2],
b = [None, b1, b2])
elif split_scheme == 'Y4':
coeffs = lines[23].strip().split(', ')
stages = lines[26].strip().split(', ')
a1 = eval(lines[31][lines[31].find('=')+1:].strip())
a2 = eval(lines[32][lines[32].find('=')+1:].strip())
a3 = eval(lines[33][lines[33].find('=')+1:].strip())
a4 = eval(lines[34][lines[34].find('=')+1:].strip())
b1 = eval(lines[36][lines[36].find('=')+1:].strip())
b2 = eval(lines[37][lines[37].find('=')+1:].strip())
b3 = eval(lines[38][lines[38].find('=')+1:].strip())
b4 = eval(lines[39][lines[39].find('=')+1:].strip())
number_of_stages = dict(a = 4, b = 4)
order = dict(coeffs = coeffs, stages = stages)
splitting = dict(order = order, number_of_stages = number_of_stages,
a = [None, a1, a2, a3, a4],
b = [None, b1, b2, b3, b4])
elif split_scheme == 'O6-4':
coeffs = lines[45].strip().split(', ')
stages = lines[48].strip().split(', ')
a1 = eval(lines[53][lines[53].find('=')+1:].strip())
a2 = eval(lines[54][lines[54].find('=')+1:].strip())
a3 = eval(lines[55][lines[55].find('=')+1:].strip())
a4 = eval(lines[56][lines[56].find('=')+1:].strip())
b1 = eval(lines[58][lines[58].find('=')+1:].strip())
b2 = eval(lines[59][lines[59].find('=')+1:].strip())
b3 = eval(lines[60][lines[60].find('=')+1:].strip())
b4 = eval(lines[61][lines[61].find('=')+1:].strip())
number_of_stages = dict(a = 4, b = 4)
order = dict(coeffs = coeffs, stages = stages)
splitting = dict(order = order, number_of_stages = number_of_stages,
a = [None, a1, a2, a3, a4],
b = [None, b1, b2, b3, b4])
elif split_scheme == 'O11-6':
coeffs = lines[67].strip().split(', ')
coeffs += lines[68].strip().split(', ')
stages = lines[71].strip().split(', ')
stages += lines[72].strip().split(', ')
a1 = eval(lines[78][lines[78].find('=')+1:].strip())
a2 = eval(lines[79][lines[79].find('=')+1:].strip())
a3 = eval(lines[80][lines[80].find('=')+1:].strip())
a4 = eval(lines[81][lines[81].find('=')+1:].strip())
a5 = eval(lines[82][lines[82].find('=')+1:].strip())
a6 = eval(lines[83][lines[83].find('=')+1:].strip())
b1 = eval(lines[85][lines[85].find('=')+1:].strip())
b2 = eval(lines[86][lines[86].find('=')+1:].strip())
b3 = eval(lines[87][lines[87].find('=')+1:].strip())
b4 = eval(lines[88][lines[88].find('=')+1:].strip())
b5 = eval(lines[89][lines[89].find('=')+1:].strip())
b6 = eval(lines[90][lines[90].find('=')+1:].strip())
number_of_stages = dict(a = 6, b = 6)
order = dict(coeffs = coeffs, stages = stages)
splitting = dict(order = order, number_of_stages = number_of_stages,
a = [None, a1, a2, a3, a4, a5, a6],
b = [None, b1, b2, b3, b4, b5, b6])
elif split_scheme == 'O14-6':
coeffs = lines[96].strip().split(', ')
coeffs += lines[97].strip().split(', ')
coeffs += lines[98].strip().split(', ')
stages = lines[101].strip().split(', ')
stages += lines[102].strip().split(', ')
stages += lines[103].strip().split(', ')
a1 = eval(lines[110][lines[110].find('=')+1:].strip())
a2 = eval(lines[111][lines[111].find('=')+1:].strip())
a3 = eval(lines[112][lines[112].find('=')+1:].strip())
a4 = eval(lines[113][lines[113].find('=')+1:].strip())
a5 = eval(lines[114][lines[114].find('=')+1:].strip())
a6 = eval(lines[115][lines[115].find('=')+1:].strip())
a7 = eval(lines[116][lines[116].find('=')+1:].strip())
a8 = eval(lines[117][lines[117].find('=')+1:].strip())
b1 = eval(lines[119][lines[119].find('=')+1:].strip())
b2 = eval(lines[120][lines[120].find('=')+1:].strip())
b3 = eval(lines[121][lines[121].find('=')+1:].strip())
b4 = eval(lines[122][lines[122].find('=')+1:].strip())
b5 = eval(lines[123][lines[123].find('=')+1:].strip())
b6 = eval(lines[124][lines[124].find('=')+1:].strip())
b7 = eval(lines[125][lines[125].find('=')+1:].strip())
number_of_stages = dict(a = 8, b = 7)
order = dict(coeffs = coeffs, stages = stages)
splitting = dict(order = order, number_of_stages = number_of_stages,
a = [None, a1, a2, a3, a4, a5, a6, a7, a8],
b = [None, b1, b2, b3, b4, b5, b6, b7])
return splitting
def Bernoulli(filepath):
"""Reads in Bernoulli numbers from data file
inputs:
filepath -- (str) relative path to file containing
'Table_of_Bernoulli_numbers.dat'
output:
B -- (ndarray, ndim=1), numpy 1D array of all Bernoulli
numbers contained within 'Table_of_Bernoulli_numbers.dat'
"""
infile = open(filepath, 'r')
numbers = infile.readlines()
infile.close()
B = np.array([eval(number) for number in numbers])
return B
def output_files(filepath):
"""Reads in output filenames from input file (e.g. params.dat), opens
all files and returns a dictionary containing all files ready for writing
inputs:
filepath -- (str) relative path to file containing
'params_output.dat', which holds all output filenames
to be used and relative path information
output:
outfiles -- (dict) opened output files ready for writing
"""
infile = open(filepath, 'r')
lines = infile.readlines()
rel_path = './'
rel_path += lines[6][lines[6].find(':')+1:].strip()
rel_path += lines[7][lines[7].find(':')+1:].strip()
filename_I1 = lines[9][lines[9].find(':')+1:].strip()
filename_I2 = lines[10][lines[10].find(':')+1:].strip()
filename_IW = lines[12][lines[12].find(':')+1:].strip()
filename_WE = lines[13][lines[13].find(':')+1:].strip()
filename_S = lines[15][lines[15].find(':')+1:].strip()
filepath_I1 = rel_path + filename_I1
filepath_I2 = rel_path + filename_I2
filepath_IW = rel_path + filename_IW
filepath_WE = rel_path + filename_WE
filepath_S = rel_path + filename_S
outfile_I1 = open(filepath_I1, 'w')
outfile_I2 = open(filepath_I2, 'w')
outfile_IW = open(filepath_IW, 'w')
outfile_WE = open(filepath_WE, 'w')
outfile_S = open(filepath_S, 'w')
outfiles = dict(I1 = outfile_I1,
I2 = outfile_I2,
IW = outfile_IW,
WE = outfile_WE,
S = outfile_S)
return outfiles
def store_FD_schemes(infilename,
FD_schemes,
dn):
"""reads infile, creates empty subdictionaries
inside FD_schemes, and stores all data inside
the corresponding dictionary objects
inputs:
infilename -- (str) file name for a single derivative
table, e.g. f1_FD_coefficients.dat,
f2_FD_coefficients.dat, ...
FD_schemes -- (dict) empty dictionary
dn -- (str) single derivative whose schemes are to be stored
outputs:
FD_schemes -- (dict) same dictionary loaded with
all schemes for the dn'th derivative
"""
infile = open(infilename, 'r')
lines = infile.readlines()
dn = 'dn' + dn # dn key (str)
# create empty subdictionaries for each handedness
FD_schemes[dn]['forward'] = {}
FD_schemes[dn]['central'] = {}
FD_schemes[dn]['backward'] = {}
for i in range(len(lines)):
if lines[i][0] == 'f':
handedness = 'forward'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_schemes[dn][handedness][asymmetry] = {}
elif lines[i][0] == 'c':
handedness = 'central'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_schemes[dn][handedness][asymmetry] = {}
elif lines[i][0] == 'b':
handedness = 'backward'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_schemes[dn][handedness][asymmetry] = {}
elif lines[i].split(' ')[0] == 'number':
numlines = eval(lines[i][lines[i].find(':')+1:].strip())
# set switch on for storage and initialize empties
store_data_switch = 1
w = []
stencil = []
elif store_data_switch == 1:
for j in range(i+1, i + numlines+1):
# note: first line in linecache is [1], not [0]
line = linecache.getline(infilename,j)
pairs = line.split(', ')
w.append(eval(pairs[0]))
stencil.append(eval(pairs[1]))
# store as values in subdictionary keys 'w', 'stencil'
FD_schemes[dn][handedness][asymmetry]['w'] = w
FD_schemes[dn][handedness][asymmetry]['stencil'] = stencil
# reset switch for next scheme dict key
store_data_switch = 0
else:
# let i increment until it reaches i + numlines and repeat
pass
return FD_schemes
def read_FD_schemes(N):
"""store all finite difference schemes from
tables generated in dat files located in
./etc/finite_difference_schemes
in a consolidated dictionary called FD_schemes
inputs:
N -- (int) global error order of the advection scheme;
schemes are read for derivatives dn = 1, 2, ..., N-1,
each with LTE = N + 1 - dn
outputs:
FD_schemes -- (dict) all FD schemes equipped with
list of weights w and stencil
"""
FD_schemes = {}
rel_path = './etc/finite_difference_schemes/'
infile_suffix = '_FD_coefficients.dat'
for dn in range(1,N):
LTE = '_LTE' + str(N+1 - dn)
infilename = 'f' + str(dn) + LTE + infile_suffix
infilepath = rel_path + infilename
# create empty subdictionary for given dn
FD_schemes['dn' + str(dn)] = {}
# store schemes in subdictionaries of dn
# which are the keys: 'handedness', 'asymmetry'
FD_schemes = store_FD_schemes(infilepath,
FD_schemes,
str(dn))
return FD_schemes
#-----------------------------------------------------------#
# the following two routines (read and store) are for one
# derivative of order dn at given LTE, the original purpose
# was to have dn = 1, LTE = 6 for a 6th order Poisson solver
def read_FD_scheme(dn, LTE):
"""store finite difference scheme for dn'th derivative
from tables generated in dat files located in
./etc/finite_difference_schemes
in a consolidated dictionary called FD_schemes_dn
inputs:
dn -- (int) derivative number in .dat file containing
difference coefficients
LTE -- (int) local truncation error order
**Requires the generated dat file for dn = 1, LTE = 6
etc/finite_difference_schemes/
f1_LTE6_FD_coefficients.dat
outputs:
FD_scheme -- (dict) FD scheme equipped with
list of weights w and stencil for
the specified order dn at specified LTE
"""
FD_scheme = {}
rel_path = './etc/finite_difference_schemes/'
infile_suffix = '_FD_coefficients.dat'
infilename = 'f' + str(dn) + '_LTE' + str(LTE) + infile_suffix
infilepath = rel_path + infilename
# create empty subdictionary for given key 'dn#'
FD_scheme['dn' + str(dn)] = {}
# create empty subdictionary for given key 'LTE#'
FD_scheme['dn' + str(dn)]['LTE' + str(LTE)] = {}
# store schemes in subdictionaries of dn
# which are the keys: 'handedness', 'asymmetry'
FD_scheme = store_FD_scheme(infilepath,
FD_scheme,
str(dn),
str(LTE))
return FD_scheme
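# Minimal access sketch for the returned dictionary (keys follow store_FD_scheme below):
#
# FD_scheme = read_FD_scheme(1, 6)
# w = FD_scheme['dn1']['LTE6']['central']['0']['w'] # list of difference weights
# stencil = FD_scheme['dn1']['LTE6']['central']['0']['stencil'] # list of index offsets
#
# the 'central'/'0' keys assume the usual centered scheme is present in the dat file; sided
# schemes are stored under 'forward'/'backward' with their asymmetry label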
def store_FD_scheme(infilename,
FD_scheme,
dn,
LTE):
"""reads infile, creates empty subdictionaries
inside FD_schemes, and stores all data inside
the corresponding dictionary objects
inputs:
infilename -- (str) file name for a single derivative
table, e.g. f1_LTE6_FD_coefficients.dat,
f2_LTE7_FD_coefficients.dat, ...
FD_scheme -- (dict) empty dictionary
dn -- (str) single derivative whose schemes are to be stored
outputs:
FD_scheme -- (dict) same dictionary loaded with
all schemes for the dn'th derivative
"""
infile = open(infilename, 'r')
lines = infile.readlines()
dn = 'dn' + dn # dn key (str)
LTE = 'LTE' + LTE # LTE key (str)
# create empty subdictionaries for each handedness
FD_scheme[dn][LTE]['forward'] = {}
FD_scheme[dn][LTE]['central'] = {}
FD_scheme[dn][LTE]['backward'] = {}
for i in range(len(lines)):
if lines[i][0] == 'f':
handedness = 'forward'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_scheme[dn][LTE][handedness][asymmetry] = {}
elif lines[i][0] == 'c':
handedness = 'central'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_scheme[dn][LTE][handedness][asymmetry] = {}
elif lines[i][0] == 'b':
handedness = 'backward'
asymmetry = lines[i].split(' ')[1].strip()
# create empty subdictionary for given asymmetry
FD_scheme[dn][LTE][handedness][asymmetry] = {}
elif lines[i].split(' ')[0] == 'number':
numlines = eval(lines[i][lines[i].find(':')+1:].strip())
# set switch on for storage and initialize empties
store_data_switch = 1
w = []
stencil = []
elif store_data_switch == 1:
for j in range(i+1, i + numlines+1):
# note: first line in linecache is [1], not [0]
line = linecache.getline(infilename,j)
pairs = line.split(', ')
w.append(eval(pairs[0]))
stencil.append(eval(pairs[1]))
# store as values in subdictionary keys 'w', 'stencil'
FD_scheme[dn][LTE][handedness][asymmetry]['w'] = w
FD_scheme[dn][LTE][handedness][asymmetry]['stencil'] = stencil
# reset switch for next scheme dict key
store_data_switch = 0
else:
# let i increment until it reaches i + numlines and repeat
pass
return FD_scheme
#-----------------------------------------------------------#
def assemble_finite_difference_weight_matrix(BC,
z_str,
zN,
N,
FD_schemes
):
"""Assembles a matrix corresponding to the weights of in
the finite difference computation of derivatives, i.e.
assembles the weight matrix W, giving the difference matrix d
for the q-th derivative:
1 / x.width ** q W[q,:,:].dot(f) = d[q,:,:]
i.e. W are the difference
coefficients, which do not
contain the width of the
abscissa value, e.g. x.width
where f and df are vectors of length z.N in the 1D case.
inputs:
zN -- (int) number of active grid points for the phase space variable z
N -- (int) global error on the advection algorithm, specified in etc/params.dat
FD_schemes -- (dict) dictionary containing all schemes for all dn, handedness,
and asymmetry
outputs:
Wz -- (ndarray, ndim=3) Wz[dn, zN, zN] where each 2D matrix Wz[dn,:,:]
is the weight matrix W corresponding to the dn-th derivative in
the context of the above equation.
"""
if BC['f'][z_str]['type'] == 'periodic':
imax = zN - 1
Wz = np.zeros([N, zN, zN]) # includes zeroeth order derivative container (not used)
# i.e. Wz[q,:,:] is for the q-th derivative with this dummy zero index created
for dn in range(1,N): # need derivatives dn = 1, 2, ... , N-1
W_dn = np.zeros([zN, zN])
p = N + 1 - dn # LTE of scheme on dn-th derivative decreases with dn
# given what is needed is LTE[z.width ** dn * dnf] = O(N)
# rather than LTE on just the derivative
# local copy of all schemes pertaining to derivative order dn
FD_schemes_dn = FD_schemes['dn' + str(dn)]
stencil_size = p + dn
stencil_center = stencil_size // 2
# choose as centered a scheme as possible given the stencil size
if np.mod(stencil_size,2) == 1: # stencil size is odd
handedness = 'central'
asymmetry = str(0)
else: # stencil size is even
handedness = 'forward'
asymmetry = str(stencil_center - 1)
FD_scheme = FD_schemes_dn[handedness][asymmetry]
w = FD_scheme['w']
stencil = FD_scheme['stencil']
# CONSTRUCT WEIGHT MATRIX -- off-grid points will sample "other side" per PBC
for i in range(zN):
if zN - i <= stencil_center: # then gridpoint is close enough to right-edge that
# it samples off-grid on the right side
N_offgrid = zN - 1 - i # number of off-grid points the
# stencil references when stencil is odd, otherwise
# the meaning is N_ongrid for even stencil but
# the following still works given negative indexing
W_dn[i, i + np.array(stencil)[:N_offgrid] ] = w[:N_offgrid]
W_dn[i, i + np.array(stencil)[N_offgrid:] - zN] = w[N_offgrid:]
else:
W_dn[i, i + np.array(stencil) ] = w
Wz[dn,:,:] = W_dn
else:
imax = zN - 1
Wz = np.zeros([N, zN, zN]) # includes zeroeth order derivative container (not used)
# i.e. Wz[q,:,:] is for the q-th derivative with this dummy zero index created
for dn in range(1,N):
W_dn = np.zeros([zN, zN])
p = N+1 - dn # LTE of scheme on dn-th derivative, dnf, decreases with dn
# given what is needed is LTE[z.width ** dn * dnf] = O(N+1)
# rather than LTE[dnf] = O(N+1)
# local copy of all schemes pertaining to derivative order dn
FD_schemes_dn = FD_schemes['dn' + str(dn)]
stencil_size = p + dn
stencil_center = stencil_size // 2
for i in range(zN):
if i < stencil_center:
handedness = 'forward'
asymmetry = str(i)
elif imax - i < stencil_center:
handedness = 'backward'
asymmetry = str(imax - i)
else:
if np.mod(stencil_size,2) == 1:
handedness = 'central'
asymmetry = str(0)
else:
handedness = 'forward'
asymmetry = str(stencil_center - 1)
FD_scheme = FD_schemes_dn[handedness][asymmetry]
w = FD_scheme['w']
stencil = FD_scheme['stencil']
W_dn[i, i + np.array(stencil)] = w # load all weights at once into W_dn
Wz[dn,:,:] = W_dn
return Wz
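# Usage sketch, per the relation quoted in the docstring above: for an advected variable z
# with grid spacing z.width, the dn-th derivative coefficients of a vector f follow from
#
# d_dn = Wz[dn, :, :].dot(f) / z.width ** dn
#
# (illustrative only; the actual application happens inside DECSKS.lib.derivatives)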
def assemble_finite_difference_weight_matrix_const_dn_const_LTE(BC,
z_str,
zN,
FD_scheme_const_dn,
dn = 1,
LTE = 6
):
"""Assembles a matrix corresponding to the weights of in
the finite difference computation of derivatives, i.e.
assembles the weight matrix W, giving the difference matrix d
for the q-th derivative:
1 / x.width ** q W[q,:,:].dot(f) = d[q,:,:]
i.e. W are the difference
coefficients, which do not
contain the width of the
abscissa value, e.g. x.width
where f and df are vectors of length z.N in the 1D case.
inputs:
BC -- (dict) boundary condition dictionary
z_str -- (str) z.str attribute, e.g. x.str = 'x', vx.str = 'vx', ...
zN -- (int) number of active grid points for the phase space variable z
N -- (int) global error on the advection algorithm, specified in etc/params.dat
FD_schemes -- (dict) dictionary containing all schemes for all dn, handedness,
and asymmetry
outputs:
Wz -- (ndarray, ndim=3) Wz[dn, zN, zN] where each 2D matrix Wz[dn,:,:]
is the weight matrix W corresponding to the dn-th derivative in
the context of the above equation.
"""
imax = zN - 1
if BC['f'][z_str]['type'] == 'periodic':
# set up matrix that uses PBC in sampling; off-grid referencing
# samples from the opposite side of the domain per periodic BC
W = np.zeros([zN, zN])
# local copy of all schemes pertaining to derivative order dn
FD_scheme = FD_scheme_const_dn['dn' + str(dn)]['LTE' + str(LTE)]
stencil_size = LTE + dn
stencil_center = stencil_size // 2
# CHOOSE SCHEME, if odd choose central, elif even choose most centered
if np.mod(stencil_size,2) == 1: # only schemes with an odd number
# stencils have a central scheme
handedness = 'central'
asymmetry = str(0)
else:
handedness = 'forward' # or equivalently could call it 'backward'
# and take asymmetry = stencil_center+1, same thing
asymmetry = str(stencil_center - 1)
w = FD_scheme[handedness][asymmetry]['w']
stencil = FD_scheme[handedness][asymmetry]['stencil']
# CONSTRUCT WEIGHT MATRIX -- off-grid points will sample "other side" per PBC
for i in range(zN): # loop through rows whose row label corresponds to gridpoint of interest
if zN - i <= stencil_center: # then gridpoint is close enough to right-edge that
# it samples off-grid on the right side
N_offgrid = zN - 1 - i # number of off-grid points the
# stencil references when stencil is odd, otherwise
# the meaning is N_ongrid for even stencil but
# the following still works given negative indexing
W[i, i + np.array(stencil)[:N_offgrid] ] = w[:N_offgrid]
W[i, i + np.array(stencil)[N_offgrid:] - zN] = w[N_offgrid:]
else:
W[i, i + np.array(stencil) ] = w
else: # explicit finite differencing whose stencil shifts near edges to evade
# off-grid indexing (using more sided schemes)
imax = zN - 1
W = np.zeros([zN, zN])
# local copy of all schemes pertaining to derivative order dn
FD_scheme = FD_scheme_const_dn['dn' + str(dn)]['LTE' + str(LTE)]
stencil_size = LTE + dn
stencil_center = stencil_size // 2
for i in range(zN):
if i < stencil_center:
handedness = 'forward'
asymmetry = str(i)
elif imax - i < stencil_center:
handedness = 'backward'
asymmetry = str(imax - i)
else:
if np.mod(stencil_size,2) == 1: # only schemes with an odd number
# stencils have a central scheme
handedness = 'central'
asymmetry = str(0)
else:
handedness = 'forward' # or equivalently could call it 'backward'
# and take asymmetry = stencil_center+1, same thing
asymmetry = str(stencil_center - 1)
w = FD_scheme[handedness][asymmetry]['w']
stencil = FD_scheme[handedness][asymmetry]['stencil']
W[i, i + np.array(stencil)] = w # load all weights at once into W_dn
return W
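# Usage sketch: with the defaults dn = 1, LTE = 6 this routine presumably supplies the
# W_dn1_LTE6 matrix stored in sim_params, which enters the field computation as noted in
# the call-flow comments earlier, e.g.
#
# E = - 1. / x.width * W_dn1_LTE6.dot(phi)
#
# (sketch only; x.width and phi are provided by the caller in lib.fieldsolvers)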
def assemble_spectral_derivative_operator(Xi, xi,
z_str,
az,
bz,
Nz,
Nvz,
N):
"""For 2D constructions, e.g. (x, vx). For higher dimensions,
e.g. 4D (x,y, vx, v_perp) can reuse this with some minor
changes. For 1D or 3D, a different (but, similar) routine
needs to be coded. For 3D, the overall stepthrough will need
to be deconstructed to be a split problem among 2D problems
and a 1D problem.
inputs:
Xi -- (dict) to contain key/values:
Xi['z'] -- (ndarray, ndim=3, dtype = complex), z = x, vx, ...
this routine adds the key 'z' to the dictionary. Hence,
the dictionary passed is at minimum an empty dictionary, but
in general contains previous keys assigned by previuos calls
to this same function
xi -- (dict) contains key/values
xi['z'] -- (ndarray, ndim=1, dtype = float64), z = x, vx, ...
this routine adds the key 'z' to the dictionary. Hence,
the dictionary passed is at minimum an empty dictionary, but
in general contains previous keys assigned by previous calls
to this same function
z_str -- (str) corresponding to phase space variable z affiliated
with the objects Xi and xi.
Nz -- (int) total number of gridpoints for z
Nvz -- (int) total number of gridpoints for vz
az -- (num) lower domain bound on z, used to compute width Lz
bz -- (num) upper domain bound on z
N -- (int) global error of scheme
outputs: updates the dictionaries Xi, xi to have
the key/value pair:
Xi['z'] -- (ndarray, ndim=3, dtype=complex)
xi['z'] -- (ndarray, ndim=1, dtype = float64)
which corresponds to a matrix with entries
$$Xi = ((Xi)_{q,i,j}) = 1j * (Delta z xi_{i,j})^q$$
USAGE NOTE: computing Xi * Ff, where Ff is numpy.fft.fft(f)
and f.shape = (x.N, vx.N) produces the Fourier transform
of the derivative coefficients $F[d] equiv D$, where
D[q,i,j] corresponds to the qth order derivative coefficient
at a phase space location [i,j]. The method
lib.derivatives.trigonometric3D takes the row-wise inverse
transform so that the tensor d[q,i,j] is generated.
"""
# catch any nonsense passes to this function, i.e. z
# does not have a velocity, hence is not being advected
if Nvz is None:
return None
# domain widths
Lz = float(bz - az) # used in wave number vector, xi
zwidth = Lz / Nz # needed for 3D object, Xi = "(j zwidth xi)**q"
# build wave vector xi for given z
wave_index = np.arange(Nz)
xi_z = np.where(wave_index <= Nz / 2,
2*np.pi*wave_index / Lz,
2*np.pi*(wave_index - Nz) / Lz)
xi[z_str] = xi_z
# Set up compound matrix Xi.
# First, copy column vector xi along Nvz columns
xi_2D = np.outer(xi_z, np.ones(Nvz))
# set up vector extending in depth dimension so
# broadcasting per ** operator produces the expected dims on Xi
# i.e. Xi.shape = (N, z.N, vz.N)
dn = np.arange(1,N).reshape(N-1,1,1)
# with the previously formed objects with carefully chosen dims
# we generate the required Xi object
Xi[z_str] = (1j * zwidth * xi_2D) ** dn
return Xi, xi
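# Usage sketch following the USAGE NOTE in the docstring above: with Ff = numpy.fft.fft(f, axis=0)
# for f of shape (x.N, vx.N), the broadcasted product
#
# D = Xi['x'] * Ff
#
# holds the Fourier transforms of the derivative coefficients for each order q, which
# lib.derivatives.trigonometric3D inverse-transforms row-wise to recover d[q,i,j]
# (the axis choice shown here is an assumption; see lib.derivatives for the actual convention)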
def assemble_Poisson_6th_order_FD_solver_matrices(Nx, BC):
"""
forms the matrices D and B required for the 6th order finite
difference based Poisson solver. The solvers come in several
varieties (DBC = Dirichlet BC, NBC = Neumann BC, L = lower boundary
U = upper boundary where L and U refer to lower and higher values
of a configuration variable):
Poisson_6th_PBC
Poisson_6th_LDBC_UDBC
Poisson_6th_LNBC_UDBC
Poisson_6th_LDBC_UNBC
For Neumann/Neumann conditions (not a well-posed problem on its own) we
require recasting the NBC/NBC problem into an equivalent problem
that is represented by one of the following Cauchy boundary condition
setups:
Poisson_6th_LDBC_LNBC
Poisson_6th_UDBC_UNBC
The matrices D and B for each method are slightly different. This routine
determines the variety of solver needed as chosen from the list above,
and assembles these matrices
Inputs:
Nx -- (int) this is Nx_active, the number of active grid points in a
configuration variable x
BC -- (dict) contains boundary condition information that has been
determined based on user inputs in params_boundaryconditions.dat
Outputs:
D -- (ndarray, ndim = 2, shape = (Nx, Nx)) matrix of finite difference
coefficients on phi
B -- (ndarray, ndim = 2, shape = (Nx, Nx)) matrix of finite difference
coefficients on the total density n
"""
Poisson_6th_order_FD_solver_matrices = {}
# Nx is the number of active nodes in configuration
if BC['phi']['x']['type'] == 'PBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros([Nx,Nx])
for i in range(Nx):
if i == 0:
D[i,-1] = 1
D[i,i] = -2
D[i,i+1] = 1
elif i == Nx-1:
D[i,0] = 1
D[i,i] = -2
D[i,i-1] = 1
else:
D[i,i-1] = 1
D[i,i] = -2
D[i,i+1] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros([Nx,Nx])
for i in range(Nx):
if i == 0:
B[i,-2] = -1/240.
B[i,-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
if i == 1:
B[i,-1] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
elif 1 < i < Nx-2:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
elif i == Nx-2:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,0] = -1/240.
elif i == Nx-1:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,0] = 1/10.
B[i,1] = -1/240.
elif BC['phi']['x']['type'] == 'LDBC_UDBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros([Nx,Nx])
for i in range(Nx):
if i == 0:
D[i,i] = -3
D[i,i+1] = 1
elif i == Nx-1:
D[i,i] = -3
D[i,i-1] = 1
else:
D[i,i-1] = 1
D[i,i] = -2
D[i,i+1] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros([Nx, Nx])
for i in range(Nx):
if i == 0:
B[i,i] = 317/240.
B[i,i+1] = -133/120.
B[i,i+2] = 187/120.
B[i,i+3] = -23/20.
B[i,i+4] = 109/240.
B[i,i+5] = -3/40.
if i == 1:
B[i,i-1] = 3/40.
B[i,i] = 209/240.
B[i,i+1] = 1/60.
B[i,i+2] = 7/120.
B[i,i+3] = -1/40.
B[i,i+4] = 1/240.
elif 1 < i < Nx-2:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
elif i == Nx-2:
B[i,i-4] = 1/240.
B[i,i-3] = -1/40.
B[i,i-2] = 7/120.
B[i,i-1] = 1/60.
B[i,i] = 209/240.
B[i,i+1] = 3/40.
elif i == Nx-1:
B[i,i] = 317/240.
B[i,i-1] = -133/120.
B[i,i-2] = 187/120.
B[i,i-3] = -23/20.
B[i,i-4] = 109/240.
B[i,i-5] = -3/40.
elif BC['phi']['x']['type'] == 'LNBC_UDBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros([Nx,Nx])
for i in range(Nx):
if i == 0:
D[i,i] = -1
D[i,i+1] = 1
elif i == Nx-1:
D[i,i] = -3
D[i,i-1] = 1
else:
D[i,i-1] = 1
D[i,i] = -2
D[i,i+1] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros([Nx,Nx])
for i in range(Nx):
if i == 0:
B[i,i] = 317/240.
B[i,i+1] = -133/120.
B[i,i+2] = 187/120.
B[i,i+3] = -23/20.
B[i,i+4] = 109/240.
B[i,i+5] = -3/40.
if i == 1:
B[i,i-1] = 3/40.
B[i,i] = 209/240.
B[i,i+1] = 1/60.
B[i,i+2] = 7/120.
B[i,i+3] = -1/40.
B[i,i+4] = 1/240.
elif 1 < i < Nx-2:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
elif i == Nx-2:
B[i,i-4] = 1/240.
B[i,i-3] = -1/40.
B[i,i-2] = 7/120.
B[i,i-1] = 1/60.
B[i,i] = 209/240.
B[i,i+1] = 3/40.
elif i == Nx-1:
B[i,i] = 317/240.
B[i,i-1] = -133/120.
B[i,i-2] = 187/120.
B[i,i-3] = -23/20.
B[i,i-4] = 109/240.
B[i,i-5] = -3/40.
elif BC['phi']['x']['type'] == 'LDBC_UNBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros((Nx,Nx))
# UDBC row
D[0,0] = 1.
# LNBC row
D[-1,-1] = -97/10.
D[-1,-2] = 16.
D[-1,-3] = -10
D[-1,-4] = 5.
D[-1,-5] = -3/2.
D[-1,-6] = 1/5.
# Poisson's equation rows
for i in range(1,Nx-1):
D[i,i-1] = 1
D[i,i] = -2
D[i,i+1] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros((Nx,Nx))
for i in range(B.shape[0]):
# i == 0 row contains all zeros
if i == 1:
B[i, i-1] = 3 / 40.
B[i, i] = 209 / 240.
B[i,i+1] = 1 / 60.
B[i,i+2] = 7 / 120.
B[i,i+3] = -1 / 40.
B[i,i+4] = 1 / 240.
elif 2 <= i <= Nx-3:
B[i,i-2] = -1/240.
B[i,i-1] = 1/10.
B[i,i] = 97/120.
B[i,i+1] = 1/10.
B[i,i+2] = -1/240.
elif i == Nx-2:
B[i,i+1] = 3 / 40.
B[i,i] = 209 / 240.
B[i,i-1] = 1 / 60.
B[i,i-2] = 7 / 120.
B[i,i-3] = -1 / 40.
B[i,i-4] = 1 / 240.
if i == Nx-1:
B[i,i-5] = -3/40.
B[i,i-4] = 109 / 240.
B[i,i-3] = -23 / 20.
B[i,i-2] = 187 / 120.
B[i,i-1] = -133/120.
B[i,i] = 317 / 240.
elif BC['phi']['x']['type'] == 'LDBC_LNBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros((Nx,Nx))
# LDBC row, (row 0)
D[0,0] = 1.
# LNBC row, (row 1)
D[1,0] = -97/10.
D[1,1] = 16.
D[1,2] = -10
D[1,3] = 5.
D[1,4] = -3/2.
D[1,5] = 1/5.
# Poisson's equation rows
for i in range(2,Nx):
D[i,i-2] = 1
D[i,i-1] = -2
D[i,i] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros((Nx,Nx))
for i in range(1,B.shape[0]):
# if i == 0: row of zeros, density is not involved (corresponds to DBC)
if i == 1:
B[i,i-1] = 317 / 240.
B[i,i] = -133/120.
B[i,i+1] = 187 / 120.
B[i,i+2] = -23 / 20.
B[i,i+3] = 109 / 240.
B[i,i+4] = -3/40.
if i == 2:
B[i, i-2] = 3 / 40.
B[i, i-1] = 209 / 240.
B[i,i] = 1 / 60.
B[i,i+1] = 7 / 120.
B[i,i+2] = -1 / 40.
B[i,i+3] = 1 / 240.
elif 3 <= i <= Nx-2:
B[i,i-3] = -1/240.
B[i,i-2] = 1/10.
B[i,i-1] = 97/120.
B[i,i] = 1/10.
B[i,i+1] = -1/240.
elif i == Nx-1:
B[i,i-5] = 1/240.
B[i,i-4] = -1/40.
B[i,i-3] = 7/120.
B[i,i-2] = 1/60.
B[i,i-1] = 209/240.
B[i,i] = 3/40.
elif BC['phi']['x']['type'] == 'UDBC_UNBC':
# assemble D, a matrix of difference coefficients on phi
D = np.zeros((Nx,Nx))
# LDBC row, (row Nx-1)
D[-1,-1] = 1.
# LNBC row, (row Nx-2)
D[-2,-1] = -97/10.
D[-2,-2] = 16.
D[-2,-3] = -10
D[-2,-4] = 5.
D[-2,-5] = -3/2.
D[-2,-6] = 1/5.
# Poisson's equation rows
for i in range(Nx-2):
D[i,i] = 1
D[i,i+1] = -2
D[i,i+2] = 1
# assemble B, a matrix of difference coefficients on the total density
B = np.zeros((Nx,Nx))
for i in range(B.shape[0]):
if i == 0:
B[i,i] = 3/40.
B[i,i+1] = 209/240.
B[i,i+2] = 1/60.
B[i,i+3] = 7/120.
B[i,i+4] = -1/40.
B[i,i+5] = 1/240.
if 1 <= i < Nx-3:
B[i,i-1] = -1/240.
B[i,i] = 1/10.
B[i,i+1] = 97/120.
B[i,i+2] = 1/10.
B[i,i+3] = -1/240.
elif i == Nx-3:
B[i,i-3] = 1/240.
B[i,i-2] = -1/40.
B[i,i-1] = 7/120.
B[i,i] = 1/60.
B[i,i+1] = 209/240.
B[i,i+2] = 3/40.
elif i == Nx-2:
B[i,i+1] = 317 / 240.
B[i,i] = -133/120.
B[i,i-1] = 187 / 120.
B[i,i-2] = -23 / 20.
B[i,i-3] = 109 / 240.
B[i,i-4] = -3/40.
# else i == Nx - 1: row of zeros, density is not involved (corresponds to DBC)
Poisson_6th_order_FD_solver_matrices['D'] = D
Poisson_6th_order_FD_solver_matrices['B'] = B
return Poisson_6th_order_FD_solver_matrices
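# Consumption sketch (assumed form; the exact sign and scaling conventions live in
# DECSKS.lib.fieldsolvers): the Poisson_6th_* solvers listed in the docstring combine these
# matrices with the boundary vector phi_BC assembled earlier, schematically
#
# D.dot(phi) = x.width**2 * B.dot(n) + phi_BC
#
# and solve for phi, which is then differenced (e.g. with W_dn1_LTE6) to obtain E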
| [
"[email protected]"
]
| |
b8e423e679443fc6f005a3baffb6c63992fc9384 | 98420fdd66b8dce46ef88cd34fcace36777fa232 | /py3/torch_motion_retarget_autoencoder/main_train.py | 63456fb7a475f29d83de2bf2e04dd5e88bb0e5e9 | []
| no_license | Daiver/jff | f972fe7464f78ba6008a036b697ea3f04b7010a4 | 33d6a781af8d7f6ae60c25e10051977af2fef1b9 | refs/heads/master | 2023-04-07T06:33:41.487938 | 2022-05-03T10:07:32 | 2022-05-03T10:07:32 | 12,180,634 | 1 | 1 | null | 2023-04-03T19:25:00 | 2013-08-17T15:03:14 | C++ | UTF-8 | Python | false | false | 3,558 | py | import numpy as np
import cv2
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
import np_draw_tools
from data_generation import make_rectangle_dataset, make_circle_dataset
from models import Encoder, Decoder
from utils import numpy_images_to_torch
def main():
n_samples_to_generate = 1500
epochs = 50
device = "cuda"
batch_size = 8
circle_set = make_circle_dataset(n_samples_to_generate)
rect_set = make_rectangle_dataset(n_samples_to_generate)
train_rect_set = numpy_images_to_torch(rect_set)
train_circle_set = numpy_images_to_torch(circle_set)
print(f"N rect samples {len(train_rect_set)} N circle samples {len(train_circle_set)}")
train_rect_loader = DataLoader(train_rect_set, batch_size=batch_size, shuffle=True, drop_last=True)
val_rect_loader = DataLoader(train_rect_set, batch_size=batch_size * 16, shuffle=False)
train_circle_loader = DataLoader(train_circle_set, batch_size=batch_size, shuffle=True, drop_last=True)
encoder = Encoder().to(device)
decoder_rect = Decoder().to(device)
decoder_circle = Decoder().to(device)
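# Shared encoder with one decoder per domain: both datasets are embedded by the same
# encoder and each decoder reconstructs its own domain; at validation time the rect
# codes are decoded with decoder_circle to inspect cross-domain transfer (see below)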
optimizer = optim.Adam(list(encoder.parameters()) + list(decoder_rect.parameters()) + list(decoder_circle.parameters()), lr=1e-4)
# criterion = nn.MSELoss()
# criterion = nn.L1Loss()
criterion = nn.BCELoss()
for epoch_ind in range(epochs):
losses = []
losses_rect = []
losses_circle = []
encoder.train()
decoder_rect.train()
decoder_circle.train()
for sample_rect, sample_circle in zip(train_rect_loader, train_circle_loader):
sample_rect = sample_rect.to(device)
pred_rect = encoder(sample_rect)
pred_rect = decoder_rect(pred_rect)
loss_rect = criterion(pred_rect, sample_rect)
sample_circle = sample_circle.to(device)
pred_circle = encoder(sample_circle)
pred_circle = decoder_circle(pred_circle)
loss_circle = criterion(pred_circle, sample_circle)
loss = loss_rect + loss_circle
losses.append(loss.item())
losses_rect.append(loss_rect.item())
losses_circle.append(loss_circle.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
encoder.eval()
decoder_rect.eval()
decoder_circle.eval()
print(f"{epoch_ind + 1} / {epochs} loss {np.mean(losses)} loss rect {np.mean(losses_rect)} loss circle {np.mean(losses_circle)}")
for sample_rect in val_rect_loader:
sample_rect = sample_rect.to(device)
pred_rect = encoder(sample_rect)
# pred_rect = decoder_rect(pred_rect)
pred_rect = decoder_circle(pred_rect)
pred_rect = (pred_rect.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
sample_rect = (sample_rect.detach().permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
to_show = []
for p, s in zip(pred_rect, sample_rect):
to_show.append(p)
to_show.append(s)
to_show = np_draw_tools.make_grid(to_show[:64], 8)
cv2.imshow("", to_show)
cv2.waitKey(100)
break
torch.save(encoder.state_dict(), "encoder.pt")
torch.save(decoder_circle.state_dict(), "decoder_circle.pt")
torch.save(decoder_rect.state_dict(), "decoder_rect.pt")
cv2.waitKey()
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
db035bcd80d85a97d93d283c7d8154a62e1f3161 | 9d530dc15db600c0630bf7f5141a1277e11d7da6 | /wagtail_shell/test/urls.py | e816acb3672e0c9c3b22078e2ca91f8db34363bd | [
"BSD-3-Clause"
]
| permissive | kaedroho/wagtail-shell | a7b549800a6302d2338d79c5472457662b0d01d3 | cddab026bc3d647c77eac7e31236b662276698af | refs/heads/main | 2023-04-13T10:55:56.916119 | 2021-04-26T17:12:41 | 2021-04-26T17:13:06 | 312,389,122 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | from django.conf.urls import include, url
from django.contrib import admin
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.documents import urls as wagtaildocs_urls
from wagtail.core import urls as wagtail_urls
urlpatterns = [
url(r"^django-admin/", admin.site.urls),
url(r"^admin/", include(wagtailadmin_urls)),
url(r"^documents/", include(wagtaildocs_urls)),
url(r"", include(wagtail_urls)),
]
| [
"[email protected]"
]
| |
caea99874c479e8fff1f0d8d70f1c26b8bf9f39e | 344e2956b4e2a30a8ef7532d951f96d995d1dd1e | /18_mmaction/lib/mmcv/mmcv/ops/__init__.py | b38aff92534ddc32fe7d6ee4eb59383b38c688f7 | [
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
]
| permissive | karndeepsingh/Monk_Object_Detection | e64199705326e4cd65e4b29946cae210a4ef9649 | 425fa50a3236cb9097389646275da06bf9185f6b | refs/heads/master | 2022-12-22T18:26:53.933397 | 2020-09-28T12:49:50 | 2020-09-28T12:49:50 | 299,307,843 | 1 | 1 | Apache-2.0 | 2020-09-28T12:52:18 | 2020-09-28T12:52:17 | null | UTF-8 | Python | false | false | 2,136 | py | from .bbox import bbox_overlaps
from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
from .cc_attention import CrissCrossAttention
from .corner_pool import CornerPool
from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
ModulatedDeformRoIPoolPack, deform_roi_pool)
from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
sigmoid_focal_loss, softmax_focal_loss)
from .info import get_compiler_version, get_compiling_cuda_version
from .masked_conv import MaskedConv2d, masked_conv2d
from .modulated_deform_conv import (ModulatedDeformConv2d,
ModulatedDeformConv2dPack,
modulated_deform_conv2d)
from .nms import batched_nms, nms, nms_match, soft_nms
from .point_sample import (SimpleRoIAlign, point_sample,
rel_roi_point_to_rel_img_point)
from .psa_mask import PSAMask
from .roi_align import RoIAlign, roi_align
from .roi_pool import RoIPool, roi_pool
from .saconv import SAConv2d
from .sync_bn import SyncBatchNorm
from .tin_shift import TINShift, tin_shift
from .wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d
__all__ = [
'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
'get_compiler_version', 'get_compiling_cuda_version', 'MaskedConv2d',
'masked_conv2d', 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
'SAConv2d', 'TINShift', 'tin_shift'
]
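
# --- Illustrative usage sketch (editorial addition, not part of mmcv) ---
# A minimal example of one of the re-exported ops. It assumes the conventional
# `nms(boxes, scores, iou_threshold) -> (dets, keep_inds)` signature and a
# mmcv build with compiled ops; treat it as a sketch, not a test.
if __name__ == '__main__':
    import torch
    boxes = torch.tensor([[0., 0., 10., 10.],
                          [1., 1., 11., 11.],
                          [50., 50., 60., 60.]])
    scores = torch.tensor([0.9, 0.8, 0.7])
    dets, keep = nms(boxes, scores, iou_threshold=0.5)  # assumed signature
    print(dets.shape, keep)  # expect one of the two overlapping boxes to be suppressed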
| [
"[email protected]"
]
| |
62923615d10f8b267c51040f2482bd55da9e58cf | 7615badcbd9cc22a263c5f206e951c8c1e6b3e70 | /setup.py | 81eed71015da29a0162829431b4559150308db73 | []
| no_license | winkidney/PyMonitor | 216c88140ea942d23e8f3a634e63c5e3052f46c8 | f772153af217d89b74e5fca2427f3d92ca919f34 | refs/heads/master | 2021-01-23T15:29:45.711809 | 2014-09-18T06:06:14 | 2014-09-18T06:06:14 | 24,089,355 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = [
'psutil',
]
setup(name='PyMonitor',
version='0.1',
description='Monitor system status',
classifiers=[
"Programming Language :: Python",
],
author='',
author_email='',
url='',
keywords='system tools',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
tests_require=requires,
test_suite="none",
)
| [
"[email protected]"
]
| |
d3480cfe285646810d63a5bcafcf319224170244 | d4679b63ff98399a2e2c90a70196ca61be12d5ed | /Part_1/Алгоритмы и структуры данных/Homeworks/Sort1/quick_sort_new.py | 96af6e92a923fca83c9b9828b92291d6e436ae13 | []
| no_license | akoshel/MADE | 9a702064f9dd5f89664efed4e76f9a2fb0a94743 | e0c3aceaf190bb86bae9f8239ae181d5529bc044 | refs/heads/main | 2023-04-28T14:25:53.210880 | 2021-05-19T16:23:52 | 2021-05-19T16:23:52 | 328,123,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | from random import randint
list_length = int(input())
raw_list = list(map(int, input().split(' ')))
assert len(raw_list) == list_length
def quick_sort_step(input_list, l, r):
ll = l
i = l
hh = r
x = input_list[l]
while i <= hh:
if input_list[i] < x:
input_list[ll], input_list[i] = input_list[i], input_list[ll]
ll += 1
i += 1
elif input_list[i] > x:
input_list[i], input_list[hh] = input_list[hh], input_list[i]
hh -= 1
else:
i += 1
return ll, hh
def quick_sort(init_list, l, r):
if l < r:
m = randint(l, r)
init_list[m], init_list[l] = init_list[l], init_list[m]
ll, hh = quick_sort_step(init_list, l, r)
quick_sort(init_list, l, ll - 1)
quick_sort(init_list, hh + 1, r)
quick_sort(raw_list, 0, len(raw_list)-1)
print(' '.join(list(map(str, raw_list))))
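
# --- Illustrative usage sketch (editorial addition) ---
# The randomized three-way quicksort above can also be exercised on a fixed
# list instead of stdin; the helper below is hypothetical and only shows the
# in-place call pattern.
def _demo_quick_sort():
    data = [5, 2, 9, 2, 7, 1, 5]
    quick_sort(data, 0, len(data) - 1)
    assert data == sorted([5, 2, 9, 2, 7, 1, 5])
    return data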
| [
"[email protected]"
]
| |
38713d85ffd79716879376e48de891c2aaa7b329 | 13f4a06cd439f579e34bf38406a9d5647fe7a0f3 | /nn_ns/data_structure/TreeNodeOps/UnbalancedMultiWayTreeNodeOps/IUnbalancedMultiWayTreeNodeOps.py | 628ddf2735bb46b14ed81b58dd3dae4abfa781b8 | []
| no_license | edt-yxz-zzd/python3_src | 43d6c2a8ef2a618f750b59e207a2806132076526 | 41f3a506feffb5f33d4559e5b69717d9bb6303c9 | refs/heads/master | 2023-05-12T01:46:28.198286 | 2023-05-01T13:46:32 | 2023-05-01T13:46:32 | 143,530,977 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,174 | py | __all__ = '''
IUnbalancedMultiWayTreeNodeOps
'''.split()
from .abc import not_implemented, override
from ..TreeNodeOps.IOrientedTreeNodeOps__inorder import \
IOrientedTreeNodeOps__inorder
class IUnbalancedMultiWayTreeNodeOps(IOrientedTreeNodeOps__inorder):
'''
assume nonleaf.num_children > 0 since num_entities = num_children-1
assume nonleaf.num_children >= 2 since inorder
entity_position == innode_position
except:
entity_position != innode_last == entity_end
new methods:
`get_entity_at
`_iter_entities_of_nonleaf_
`_iter_reversed_entities_of_nonleaf_
iter_entities_of_nonleaf
iter_reversed_entities_of_nonleaf
get_num_entities_of_nonleaf
calc_num_entities_of_subtree
iter_innode_position_entity_pairs_of_nonleaf
iter_reversed_innode_position_entity_pairs_of_nonleaf
get_innode_entity_begin
get_innode_entity_end
get_innode_entity_begin_or_end
iter_entities_of_subtree
iter_reversed_entities_of_subtree
leaf_to_iter_entities_of_subtree
leaf_to_iter_reversed_entities_of_subtree
'''
__slots__ = ()
@override
def why_not_subtree_ok(ops, self, **kwargs):
# kwargs readonly, should not remove key from it
# i.e. donot override: def is_subtree_ok(ops, self, *, as_root=..., **kwargs)
return (ops.why_not_num_entities_of_nonleaf_ok(self)
+ super().why_not_subtree_ok(self, **kwargs)
)
def why_not_num_entities_of_nonleaf_ok(ops, self):
# num_entities_of_nonleaf = num_children-1
#
is_leaf = ops.is_leaf
unstable_iter_nodes_of_subtree = ops.unstable_iter_nodes_of_subtree
get_num_children = ops.get_num_children
get_num_entities_of_nonleaf = ops.get_num_entities_of_nonleaf
#if ops.is_leaf(self): return ()
for node in unstable_iter_nodes_of_subtree(self):
if is_leaf(node): continue
nonleaf = node; del node
num_children = get_num_children(nonleaf)
num_entities_of_nonleaf = get_num_entities_of_nonleaf(nonleaf)
if num_children != 1+num_entities_of_nonleaf:
return ('num_children != 1+num_entities_of_nonleaf',)
return ()
def get_num_entities_of_nonleaf(ops, self):
assert not ops.is_leaf(self)
return ops.get_num_children(self) - 1
########## require num_children > 0 ################
#def get_num_entities_of_subtree(ops, self):
def calc_num_entities_of_subtree(ops, self):
on_leaf = lambda _: 0
on_nonleaf = ops.get_num_entities_of_nonleaf
combine = lambda a, bs: sum(bs, a)
return ops.bottomup_eval_unoriented_subtree(
self, on_leaf, on_nonleaf, combine)
if ops.is_leaf(self):
return 0
return sum(map(ops.calc_num_entities_of_subtree
, ops.unstable_iter_children(self))
, ops.get_num_entities_of_nonleaf(self))
# nonleaf
@not_implemented
def _iter_entities_of_nonleaf_(ops, self):
# self as node, not as subtree
assert not ops.is_leaf(self)
...
@not_implemented
def _iter_reversed_entities_of_nonleaf_(ops, self):
# self as node, not as subtree
assert not ops.is_leaf(self)
...
def iter_entities_of_nonleaf(ops, self, *, reverse=False):
if not reverse:
f = ops._iter_entities_of_nonleaf_
else:
f = ops._iter_reversed_entities_of_nonleaf_
return f(self)
def iter_reversed_entities_of_nonleaf(ops, self, *, reverse=False):
return ops.iter_entities_of_nonleaf(self, reverse=not reverse)
def iter_innode_position_entity_pairs_of_nonleaf(ops, self, *, reverse=False):
# NOTE: not output innode_last (== child_last == entity_end)
return zip(ops.iter_innode_positions(self, reverse=reverse)
, ops.iter_entities_of_nonleaf(self, reverse=reverse))
def iter_reversed_innode_position_entity_pairs_of_nonleaf(ops, self, *, reverse=False):
# NOTE: not output innode_last (== child_last == entity_end)
return ops.iter_innode_position_entity_pairs_of_nonleaf(self, reverse=not reverse)
# entity_begin == child_begin
# entity_end == child_last
def get_innode_entity_begin_or_end(ops, self, end:bool):
return ops.get_innode_first_or_last_position(self)
def get_innode_entity_begin(ops, self):
return ops.get_innode_first_position(self)
def get_innode_entity_end(ops, self):
return ops.get_innode_last_position(self)
@not_implemented
def get_entity_at(ops, self, entity_position):
# like get_child_at
assert entity_position != ops.get_innode_entity_end(self)
...
def iter_reversed_entities_of_subtree(ops, self, *, reverse=False):
return ops.iter_entities_of_subtree(self, reverse=not reverse)
def iter_entities_of_subtree(ops, self, *, reverse=False):
# reverse ==>> last leaf
last = reverse = bool(reverse)
leaf, depth = ops.get_first_or_last_leaf_ex(self, 0, last)
return ops.leaf_to_iter_entities_of_subtree(leaf, depth, reverse=reverse)
@staticmethod
def __nonleaf_triples2entities(get_entity_at, triples):
# triple = (nonleaf, entity_position, depth)
# get_entity_at = ops.get_entity_at
for nonleaf, entity_position, depth in triples:
yield get_entity_at(nonleaf, entity_position)
def leaf_to_iter_reversed_entities_of_subtree(ops, self, depth, *, reverse=False):
f = ops.leaf_to_iter_entities_of_subtree
return f(self, depth, reverse=not reverse)
def leaf_to_iter_entities_of_subtree(ops, self, depth, *, reverse=False):
assert ops.is_leaf(self)
f = ops.leaf_to_inorder_iter_nonleaf_entity_position_triples
it = f(self, depth, reverse=reverse)
return __class__.__nonleaf_triples2entities(ops.get_entity_at, it)
if __name__ == '__main__':
XXX = IUnbalancedMultiWayTreeNodeOps
from seed.helper.print_methods import print_methods
print_methods(XXX)
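
# --- Illustrative sketch (editorial addition; independent of the ops API) ---
# The invariant stated in the class docstring is that a non-leaf node with k
# children stores k-1 entities, as in a B-tree node. The toy check below
# restates that relation on plain lists, purely for illustration.
def _demo_nonleaf_invariant():
    children = ['left', 'middle', 'right']   # k = 3 child subtrees
    entities = ['key_1', 'key_2']            # k - 1 = 2 separating entities
    assert len(children) == 1 + len(entities)
    return len(entities)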
| [
"[email protected]"
]
| |
f2c89a5b91a1fc71833013689a89d7bf15352771 | beea74a2a1f2445b107af411197e8b6300e715e6 | /supervised_learning/0x12-transformer_apps/1-dataset.py | 486a2ffdb9d99fe7221d1babf782fa0a793b1816 | []
| no_license | 95ktsmith/holbertonschool-machine_learning | 0240d8fa8523b06d3353c2bffa74205b84253be8 | 2757c8526290197d45a4de33cda71e686ddcbf1c | refs/heads/master | 2023-07-26T16:02:26.399758 | 2021-09-09T15:57:57 | 2021-09-09T15:57:57 | 310,087,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py | #!/usr/bin/env python3
""" Dataset """
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class Dataset:
""" Class to load an prepare a dataset for machine translation """
def __init__(self):
""" Init """
data = tfds.load(
"ted_hrlr_translate/pt_to_en",
as_supervised=True
)
self.data_train = data['train']
self.data_valid = data['validation']
tokenizer_pt, tokenizer_en = self.tokenize_dataset(self.data_train)
self.tokenizer_pt = tokenizer_pt
self.tokenizer_en = tokenizer_en
def tokenize_dataset(self, data):
"""
Creates sub-word tokenizers for a dataset
        data: tf.data.Dataset whose examples are formatted as a tuple (pt, en)
pt: tf.Tensor containing the Portuguese sentence
en: tf.Tensor containing the English sentence
Returns: tokenizer_pt, tokenizer_en
tokenizer_pt: Portuguese tokenizer
tokenizer_en: English tokenizer
"""
encoder = tfds.deprecated.text.SubwordTextEncoder
tokenizer_pt = encoder.build_from_corpus(
(pt.numpy() for pt, en in data),
target_vocab_size=2 ** 15
)
tokenizer_en = encoder.build_from_corpus(
(en.numpy() for pt, en in data),
target_vocab_size=2 ** 15
)
return tokenizer_pt, tokenizer_en
def encode(self, pt, en):
"""
Encodes a translation into tokens
pt: tf.Tensor containing the Portuguese sentence
en: tf.Tensor containing the corresponding English sentence
Returns: pt_tokens, en_tokens
pt_tokens: np.ndarray containing Portuguese tokens
en_tokens: np.ndarray containing the English tokens
"""
pt_tokens = [self.tokenizer_pt.vocab_size]
pt_tokens += self.tokenizer_pt.encode(pt.numpy())
pt_tokens += [pt_tokens[0] + 1]
en_tokens = [self.tokenizer_en.vocab_size]
en_tokens += self.tokenizer_en.encode(en.numpy())
en_tokens += [en_tokens[0] + 1]
return pt_tokens, en_tokens
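
# --- Illustrative usage sketch (editorial addition; hedged) ---
# Shows the intended call pattern for `encode` on one raw (pt, en) pair from
# the training split. Running it triggers the TED dataset download performed
# by `tfds.load`, so it is only a sketch of usage, not a test.
if __name__ == '__main__':
    ds = Dataset()
    for pt, en in ds.data_train.take(1):
        pt_tokens, en_tokens = ds.encode(pt, en)
        print(pt_tokens[:5], en_tokens[:5])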
| [
"[email protected]"
]
| |
6bb4155195ddc4b87cc695213e8d01711e32e57a | 115d568228ea4dd48bc567fac1afbe90a67e9a8c | /LSTM/SegWords/BI-LSTM/Demo4/seqlib.py | 30ee4fca743daeff75bb34637089947b506e7f4c | []
| no_license | sunshinelu/NLPLearnNote | 6eb6b016ed18602be3a2fe8ce2f1bdb770efb226 | 76cfd64438e8acbf0aadc727675d7b17b63549e3 | refs/heads/master | 2020-03-08T07:03:25.652478 | 2018-05-06T14:13:02 | 2018-05-06T14:13:02 | 127,985,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,052 | py | #!/usr/bin/python
#-*-coding:utf-8-*-
'''
Created on 2018-04-25 15:41
@author:wangrs
'''
#1. Import packages and load the corpus files
import codecs
from gensim.models.word2vec import Word2Vec
import numpy as np
import nltk
from nltk.probability import FreqDist
import pandas as pd
from pickle import dump,load
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM,GRU,SimpleRNN
from keras.layers.core import Reshape,Flatten,Dropout,Dense,Activation
from keras.regularizers import l1,l2
from keras.layers.convolutional import Convolution2D,MaxPooling2D,MaxPooling1D
from sklearn.cross_validation import train_test_split
from keras.optimizers import SGD,RMSprop,Adagrad
from keras.utils import np_utils
#2. Use the word-segmented corpus to train word vectors (the word2vec model)
def load_file(input_file): # read a single text file
input_data = codecs.open(input_file,'r',encoding='utf-8')
input_text = input_data.read()
return input_text
# use gensim's word2vec implementation
def trainW2V(corpus,epochs = 10,num_features=100,sg=1,min_word_count=1,num_works=4,context=4,sample=1e-5,negative=5):
w2v = Word2Vec(workers=num_works,sample= sample,size=num_features,min_count=min_word_count,window=context)
    np.random.shuffle(corpus) # shuffle the corpus order
w2v.build_vocab(corpus)
w2v.train(corpus,total_examples=w2v.corpus_count,epochs=epochs)
print("word2vec DONE.")
return w2v
#3. Corpus preprocessing
def freq_func(input_text): # nltk: take text as input, return word frequencies
corpus = nltk.Text(input_text)
fdist = FreqDist(corpus)
w = list(fdist.keys())
v = list(fdist.values())
freqpd = pd.DataFrame({'word':w,'freq':v})
freqpd.sort_values(by='freq',ascending=False,inplace=True)
freqpd['idx'] = np.arange(len(v))
return freqpd
# initialize the embedding weights
def init_weightlist(w2v,idx2word,word2idx):
init_weight_wv = []
for i in range(len(idx2word)):
init_weight_wv.append(w2v[idx2word[i]])
    # define 'U' for unknown (out-of-vocabulary) characters and 'P' for padding at both ends, and append two corresponding vectors
char_num = len(init_weight_wv)
idx2word[char_num] = 'U'
word2idx['U'] = char_num
idx2word[char_num+1] = 'P'
word2idx['P'] = char_num+1
init_weight_wv.append(np.random.randn(100))
init_weight_wv.append(np.zeros(100))
return init_weight_wv,idx2word,word2idx
def character_tagging(input_file,output_file): # add BMES tags (B = word begin, M = word middle, E = word end, S = single-character word)
    # A UTF-8 text file saved with a BOM starts with an extra '\ufeff' character after decoding;
    # opening it with encoding='utf-8_sig' (or 'utf_8_sig') strips the BOM.
input_data = codecs.open(input_file,'r',encoding='utf-8_sig')
output_data = codecs.open(output_file,'w',encoding='utf-8')
for line in input_data.readlines():
word_list = line.strip().split()
for word in word_list:
if len(word) == 1:
output_data.write(word+"/S ")
else:
output_data.write(word[0]+'/B ')
for w in word[1:len(word)-1]:
output_data.write(w+"/M ")
output_data.write(word[len(word)-1]+'/E ')
output_data.write('\n')
output_data.close()
input_data.close()
def featContext(sentence,word2idx='',context = 7):
predict_word_num = []
    for w in sentence: # map each character to its index if it is in the vocabulary, otherwise to 'U'
if w in word2idx:
predict_word_num.append(word2idx[w])
else:
predict_word_num.append(word2idx['U'])
    num = len(predict_word_num) # pad both ends
pad = int((context-1)*0.5)
for i in range(pad):
predict_word_num.insert(0,word2idx['P'])
predict_word_num.append(word2idx['P'])
train_x = []
for i in range(num):
train_x.append(predict_word_num[i:i+context])
return train_x
#4. Train on the corpus
class Lstm_Net(object):
def __init__(self):
self.init_weight=[]
self.batch_size = 128
self.word_dim = 100
self.maxlen = 7
self.hidden_units = 100
self.nb_classes = 0
def buildnet(self):
self.maxfeatures = self.init_weight[0].shape[0] #词典大小
self.model = Sequential()
print('stacking LSTM .....')#使用了堆叠的LSTM架构
self.model.add(Embedding(self.maxfeatures,self.word_dim,input_length=self.maxlen))
self.model.add(LSTM(self.hidden_units,return_sequences=True))
self.model.add(LSTM(self.hidden_units,return_sequences=False))
self.model.add(Dropout(0.5))
self.model.add(Dense(self.nb_classes))
self.model.add(Activation('softmax'))
self.model.compile(loss='categorical_crossentropy',optimizer='adam')
def train(self,modelname):
result= self.model.fit(self.train_X,self.Y_train,batch_size=self.batch_size,epochs=20,validation_data=(self.test_X,self.Y_test))
self.model.save_weights(modelname)
def splitset(self,train_word_num,train_label,train_size=0.9,random_state=1):
self.train_X,self.test_X,train_y,test_y = train_test_split(train_word_num,train_label,train_size=0.9,random_state=1)
print(np.shape(self.train_X))
self.Y_train = np_utils.to_categorical(train_y,self.nb_classes)
print(np.shape(self.Y_train))
self.Y_test = np_utils.to_categorical(test_y,self.nb_classes)
def predict_num(self,input_num,input_txt,label_dict='',num_dict=''):
        # infer the tag sequence from the input
input_num = np.array(input_num)
predict_prob = self.model.predict_proba(input_num,verbose=False)
predict_label = self.model.predict_classes(input_num,verbose=False)
for i,label in enumerate(predict_label[:-1]):
            if i==0: # the first character cannot be tagged E or M
                predict_prob[i,label_dict['E']] = 0
                predict_prob[i,label_dict['M']] = 0
            if label == label_dict['B']: # after B, the next tag cannot be B or S
                predict_prob[i+1, label_dict['B']] = 0
                predict_prob[i+1, label_dict['S']] = 0
            if label == label_dict['E']: # after E, the next tag cannot be M or E
                predict_prob[i+1, label_dict['M']] = 0
                predict_prob[i+1, label_dict['E']] = 0
            if label == label_dict['M']: # after M, the next tag cannot be B or S
                predict_prob[i+1, label_dict['B']] = 0
                predict_prob[i+1, label_dict['S']] = 0
            if label == label_dict['S']: # after S, the next tag cannot be M or E
predict_prob[i + 1, label_dict['M']] = 0
predict_prob[i + 1, label_dict['E']] = 0
predict_label[i+1] = predict_prob[i+1].argmax()
predict_label_new = [num_dict[x] for x in predict_label]
result = [w+'/'+label for w,label in zip(input_txt,predict_label_new)]
return ' '.join(result)+'\n'
def getweights(self,wfname):
return self.model.load_weights(wfname)
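
# --- Illustrative usage sketch (editorial addition; toy vocabulary) ---
# Demonstrates `featContext` with a made-up two-character vocabulary plus the
# 'U'/'P' special symbols; it builds one 7-character context window per input
# character, padded with 'P' on both sides.
def _demo_featContext():
    word2idx = {u'我': 0, u'们': 1, 'U': 2, 'P': 3}
    windows = featContext(u'我们', word2idx=word2idx, context=7)
    assert len(windows) == 2 and all(len(w) == 7 for w in windows)
    return windows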
| [
"[email protected]"
]
| |
d010259a00c5e7ee1dffe89abbf7d4da12ce6f86 | 716f3250efd6fe51cf9e44b5cc57a45e04c5113e | /computeFeatures/seqStep/seqToolManagers/conservationTools/corrMutGeneric.py | 6e41aaec075005ec4928c67486daae55a463c9c9 | [
"Apache-2.0"
]
| permissive | kacst2/BIPSPI | 6e878ca7fe6a3f4175efd26ba209c489f4429db6 | 35bbf88ac7f4c4f09773101f44cebdb828dd64ef | refs/heads/master | 2020-03-29T18:47:13.017429 | 2018-09-04T17:03:07 | 2018-09-04T17:03:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,179 | py | from __future__ import absolute_import, print_function
import sys, os
from subprocess import Popen, PIPE, check_output
from Config import Configuration
from utils import myMakeDir, tryToRemove #utils is at the root of the package
import re
import time
from ..seqToolManager import seqToolManager, FeatureComputerException
from utils import myMakeDir, tryToRemove #utils is at the root of the package
class CorrMutGeneric(seqToolManager):
'''
  Computes corrMut scores and processes their outputs. Extends class seqToolManager. This is an
  abstract class
'''
BAD_SCORE_CONSERVATION = -1048576 #Something went wrong tag
SEQUENCES_SEPARATOR="-"*5
MIN_N_SEQS_MSA= 10
def __init__(self, seqsManager, outPath):
'''
@param seqsManager: ..manageSeqs.seqsManager.SeqsManager
@param outPath: str. path where corrMut scores will be saved
'''
seqToolManager.__init__(self,seqsManager, outPath, None)
self.seqsManager= seqsManager
self.corrMutOutPath= myMakeDir(outPath,"corrMut")
def getFinalPath(self):
'''
returns path where results are saved
@return corrMutOutPath: str
'''
return self.corrMutOutPath
def checkAlreayComputed(self, corrMutOutName):
'''
checks if output files have been computed
@param corrMutOutName. Output file
@return boolean. True if Output file sequences have been computed, False otherwise
'''
if not os.path.isfile(corrMutOutName):
return False
else:
nlines=int( check_output('wc -l {}'.format(corrMutOutName), shell=True).split()[0])
if nlines<12: #Too small file to be correct
return False
return True
def loadOneAligFile(self, aligFile):
taxaRegex1= re.compile(".* OS=(([]'[\w]+.?\s){1,2})")
taxaRegex2= re.compile(".* \[((\w+\s){1,2})")
seqsByLine=set([])
seqsByTaxa={}
if not os.path.isfile(aligFile):
return seqsByTaxa
with open(aligFile) as f:
f.readline()
seq1= f.readline().strip()
taxa= "targetProtein"
seqsByLine.add( (seq1, taxa) )
for line in f:
if line.startswith(">"):
matchObj= re.match(taxaRegex1, line)
if matchObj:
taxa= re.sub("[\[\]']","",matchObj.group(1).strip())
else:
matchObj= re.match(taxaRegex2, line)
if matchObj:
taxa= re.sub("[\[\]]","",matchObj.group(1).strip())
else:
print(line)
print("taxa not found in aligFile %s"%aligFile)
taxa=None
# raw_input("continue?")
# raise ValueError("taxa not found in aligFile %s"%aligFile)
# print(taxa)
# raw_input("press enter to continue")
else:
line= line.strip()
if not line in seqsByLine:
seqsByLine.add( (line,taxa) )
for seq, taxa in seqsByLine:
aligQuality= len(seq)- seq.count("-")- sum( (aa.islower() for aa in seq))
seq= re.sub(r"[a-z]", "", seq)
if taxa not in seqsByTaxa:
seqsByTaxa[taxa]= (seq, aligQuality)
else:
prevSeq, prevQuality= seqsByTaxa[taxa]
if aligQuality> prevQuality:
seqsByTaxa[taxa]= (seq, aligQuality)
return seqsByTaxa
def createPairedAlignmet(self, aligsDictL, aligsDictR, aligFormatedName):
pairedAlig=""
nPairs= 0
# print(sorted(aligsDictL.keys()))
# raw_input("press enter")
if "targetProtein" not in aligsDictL or "targetProtein" not in aligsDictR:
return None
seqL_initial, scoreL = aligsDictL["targetProtein"]
seqR_initial, scoreR = aligsDictR["targetProtein"]
pairedAlig+=("%s%s%s\n"%(seqL_initial, CorrMutGeneric.SEQUENCES_SEPARATOR, seqR_initial))
for taxaL in aligsDictL:
if taxaL!="targetProtein" and taxaL in aligsDictR:
seqL, scoreL = aligsDictL[taxaL]
seqR, scoreR = aligsDictR[taxaL]
pairedAlig+=("%s%s%s\n"%(seqL, CorrMutGeneric.SEQUENCES_SEPARATOR, seqR))
nPairs+=1
with open(aligFormatedName,"w") as f:
f.write(pairedAlig)
return aligFormatedName, pairedAlig, nPairs, seqL_initial, seqR_initial
def compute(self, HHBlitsFnamesDict, prefix):
'''
Computes corrMut for the Multiple Sequence aligment hhBlitsOut after pairing it by taxa. If more than 2 sequences
are found for one taxa, just best match is choosen
@param HHBlitsFnamesDict: {"l":{"A":"1A2K_l_A_u.a3m"}, "r":{"B":"1A2K_r_B_u.a3m", "C":"1A2K_r_C_u.a3m"}}
@param prefix: str. The prefix of the complex, p.e. 1A2K
'''
aligsDict= {chainType:{ chainId: self.loadOneAligFile(HHBlitsFnamesDict[chainType][chainId])
for chainId in HHBlitsFnamesDict[chainType]} for chainType in HHBlitsFnamesDict}
for chainIdL in aligsDict["l"]:
for chainIdR in aligsDict["r"]:
print("launching corrMut over chains %s - %s"%(chainIdL, chainIdR))
# raw_input("press enter to procced")
aligFormatedName= os.path.join(self.corrMutOutPath, "tmp_"+prefix+"_l-"+chainIdL+"-r-"+chainIdR+"_"+"u.ali")
try:
corrMutOutName= os.path.join(self.corrMutOutPath, prefix+"_l-"+chainIdL+"_r-"+chainIdR+"_"+"u.corrMut")
if self.checkAlreayComputed(corrMutOutName):
print("%s already computed"%corrMutOutName)
continue
aligOut= self.createPairedAlignmet(aligsDict["l"][chainIdL], aligsDict["r"][chainIdR],
aligFormatedName)
if aligOut:
__, __, nAlig, seqL, seqR= aligOut
else:
nAlig=0
seqL, __= self.seqsManager.getSeq("l", chainIdL)
seqR, __= self.seqsManager.getSeq("r", chainIdR)
if nAlig> CorrMutGeneric.MIN_N_SEQS_MSA:
startTime= time.time()
iterOfCorrelatedRows= self.lauchCorrMutProgram(aligFormatedName)
print("Time CorrMut", time.time()- startTime)
else:
iterOfCorrelatedRows= None #( "*** Sorry", "Error, not enough sequences in MSA")
# if len(processOut[1])>0:
# print("Error computing corrMut. Caught stdin/stderr:\n",processOut[0],processOut[1])
self.saveProcResults(seqL, seqR, corrMutOutName, iterOfCorrelatedRows, chainIdL, chainIdR, nAlig)
except (KeyboardInterrupt, Exception):
print("Exception happend computing corrMut for %s over chains %s - %s"%(prefix, chainIdL, chainIdR))
tryToRemove(corrMutOutName)
raise
finally:
tryToRemove(aligFormatedName)
pass
def lauchCorrMutProgram(self, aligFormatedName):
#abstract Method
return None
def writeHeader(self, fHandler):
fHandler.write("chainIdL structResIdL resNameL chainIdR structResIdR resNameR %s %s\n"%(self.featName,
self.featName+"Quality"))
def saveProcResults(self, seqL, seqR, corrMutOutName, iterOfCorrelatedRows, chainIdL, chainIdR, nAlig):
'''
Reads corrMut output file and writes another one with tabulated format, headers and
some error checking.
@param: seqL: str. Sequence of the ligand chain
@param: seqR: str. Sequence of the receptor chain
@param corrMutOutName: str. Fname where formated results will be saved.
@param iterOfCorrelatedRows: iterator of elements as [res_i, res_j, corrMuScore] ] res_i and res_j are 0 based
@param chainIdL:str. The chain Id for the ligand
@param chainIdR:str. The chain Id for the receptor
@param nAlig: int. The number of rows of MSA
'''
corrMutQuality= float(nAlig)/ (len(seqL)+len(seqR))
if iterOfCorrelatedRows==None:
self.makeFakeFile( seqL, seqR, corrMutOutName, corrMutQuality, chainIdL, chainIdR)
return 1
else:
try:
with open(corrMutOutName,"w") as outFile:
self.writeHeader(outFile)
scoresDict={}
lenSeqL= len(seqL)
lenSeparator= len(CorrMutGeneric.SEQUENCES_SEPARATOR)
addedI_J= set([])
# for line in corrMutOut.split("\n")[1:]:
for line in iterOfCorrelatedRows:
i, j, score= line
# i, j=int(i)-1, int(j)-1
if i>=lenSeqL or j <(lenSeqL+lenSeparator): continue
j= j-lenSeqL-lenSeparator
assert j>=0
addedI_J.add((i,j))
letterL= seqL[i]
letterR= seqR[j]
score= float(score)
structIndexL= self.seqsManager.seqToStructIndex("l", chainIdL, i, asString= True)
structIndexR= self.seqsManager.seqToStructIndex("r", chainIdR, j, asString= True)
if structIndexR is None or (self.filterOutLabels and structIndexR[-1].isalpha()): continue
if structIndexL is None or (self.filterOutLabels and structIndexL[-1].isalpha()): continue
outFile.write("%s %s %s %s %s %s %f %f\n"%(chainIdL, structIndexL, letterL, chainIdR, structIndexR, letterR,
score, corrMutQuality))
for i in range(len(seqL)):
letterL= seqL[i]
for j in range(len(seqR)):
if not (i,j) in addedI_J:
letterR= seqR[j]
structIndexL= self.seqsManager.seqToStructIndex("l", chainIdL, i, asString= True)
structIndexR= self.seqsManager.seqToStructIndex("r", chainIdR, j, asString= True)
if structIndexR is None or (self.filterOutLabels and structIndexR[-1].isalpha()): continue
if structIndexL is None or (self.filterOutLabels and structIndexL[-1].isalpha()): continue
outFile.write("%s %s %s %s %s %s %f %f\n"%(chainIdL, structIndexL, letterL, chainIdR, structIndexR, letterR,
0.0, corrMutQuality))
return 0
except (KeyboardInterrupt, Exception) as e:
print(e)
print("Exception happend computing %s"%corrMutOutName)
tryToRemove(corrMutOutName)
raise
def makeFakeFile(self, seqL, seqR, corrMutOutName, corrMutQuality, chainIdL, chainIdR):
try:
with open(corrMutOutName,"w") as outFile:
self.writeHeader(outFile)
for i,letterL in enumerate(seqL):
structIndexL= self.seqsManager.seqToStructIndex("l", chainIdL, i, asString= True)
if structIndexL is None or (self.filterOutLabels and structIndexL[-1].isalpha()): continue
for j,letterR in enumerate(seqR):
structIndexR= self.seqsManager.seqToStructIndex("r", chainIdR, j, asString= True)
if structIndexR is None or (self.filterOutLabels and structIndexR[-1].isalpha()): continue
outFile.write("%s %s %s %s %s %s %f %f\n"%(chainIdL, structIndexL, letterL, chainIdR, structIndexR, letterR,
CorrMutGeneric.BAD_SCORE_CONSERVATION, corrMutQuality))
except (KeyboardInterrupt, Exception):
print("Exception happend computing %s"%corrMutOutName)
tryToRemove(corrMutOutName)
raise
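
# --- Illustrative sketch (editorial addition; independent of seqsManager) ---
# The pairing step in `createPairedAlignmet` joins two MSAs on shared taxa and
# concatenates the aligned sequences with the 5-dash separator. The toy dicts
# below mimic the {taxa: (sequence, quality)} mapping returned by
# `loadOneAligFile`, purely to show the join logic in isolation.
def _demo_taxa_pairing():
    aligs_l = {'targetProtein': ('MKV', 3), 'Homo sapiens': ('MKA', 2)}
    aligs_r = {'targetProtein': ('GTT', 3), 'Homo sapiens': ('GTA', 2)}
    sep = CorrMutGeneric.SEQUENCES_SEPARATOR
    paired = [aligs_l[t][0] + sep + aligs_r[t][0]
              for t in aligs_l if t in aligs_r]
    assert all(p.count('-') == len(sep) for p in paired)
    return paired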
| [
"[email protected]"
]
| |
9a8f98740162c5d7d7746170ae5aac8824d90bb8 | 930c207e245c320b108e9699bbbb036260a36d6a | /BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Average_Supply_Air_Flow.py | 9859e698689ef384cb9feebcee0e03310d17392d | []
| no_license | InnovationSE/BRICK-Generated-By-OLGA | 24d278f543471e1ce622f5f45d9e305790181fff | 7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2 | refs/heads/master | 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Supply_Air_Flow import Supply_Air_Flow
class Average_Supply_Air_Flow(Supply_Air_Flow):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Average_Supply_Air_Flow
| [
"[email protected]"
]
| |
c2b0992bf436362075dcb44aa8a37adeff6d0bbc | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/avatarinputhandler/aimingsystemssniperaimingsystem.py | 3880835227faf1be789c070fc425cf175557dbee | []
| no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,483 | py | # Embedded file name: scripts/client/AvatarInputHandler/AimingSystems/SniperAimingSystem.py
import BigWorld
import Math
import GUI
from Math import Vector3, Matrix
import math
from AvatarInputHandler import mathUtils
from AvatarInputHandler import AimingSystems
from AvatarInputHandler.AimingSystems import IAimingSystem
from gun_rotation_shared import calcPitchLimitsFromDesc
from projectile_trajectory import getShotAngles
from AvatarInputHandler.cameras import readFloat, readVec3, readVec2
_MAX_DEVIATION_DEG = 90.0
_MAX_DEVIATION = math.radians(_MAX_DEVIATION_DEG)
class InputFilter:
def __init__(self):
self.__T1 = 0.5
self.__T2 = 2.2
self.__T3 = 1.5
self.__F1 = 0.0
self.__F2 = 0.0
self.__delay = 0.0
def reloadConfig(self, filterSec):
if filterSec is not None:
T_Vector = readVec3(filterSec, 'T', (0.0, 0.01, 0.01), (10.0, 10.0, 10.0), (0.0, 2.0, 1.0))
self.__T1 = T_Vector[0]
self.__T2 = T_Vector[1]
self.__T3 = T_Vector[2]
return
def resetTimer(self):
self.__delay = 0.0
def reset(self, value):
self.__F1 = self.__F2 = value
self.__delay = 0.0
def active(self):
return self.__delay > 0.0 or math.fabs(self.__F1 - self.__F2) > 0.0001
def value(self):
return self.__F2
def adjust(self, delta):
self.__F1 += delta
self.__F2 += delta
def update(self, input, dt):
self.__delay += dt
if self.__delay >= self.__T1:
self.__F1 += (input - self.__F1) * dt / self.__T2
self.__F2 += (self.__F1 - self.__F2) * dt / self.__T3
return self.__F2
else:
return self.__F2
class SniperAimingSystem(IAimingSystem):
turretYaw = property(lambda self: self.__idealYaw + self.__oscillator.deviation.x)
gunPitch = property(lambda self: self.__idealPitch + self.__oscillator.deviation.y)
__CONSTRAINTS_MULTIPLIERS = Vector3(1.0, 1.0, 1.0)
__activeSystem = None
__FILTER_ENABLED = True
@staticmethod
def setStabilizerSettings(useHorizontalStabilizer, useVerticalStabilizer):
SniperAimingSystem.__CONSTRAINTS_MULTIPLIERS.x = 1.0 if useHorizontalStabilizer else 0.0
SniperAimingSystem.__CONSTRAINTS_MULTIPLIERS.y = 1.0 if useVerticalStabilizer else 0.0
if SniperAimingSystem.__activeSystem is not None:
SniperAimingSystem.__activeSystem.resetIdealDirection()
return
@staticmethod
def getStabilizerSettings():
return (SniperAimingSystem.__CONSTRAINTS_MULTIPLIERS.x > 0.0, SniperAimingSystem.__CONSTRAINTS_MULTIPLIERS.y > 0.0)
@staticmethod
def enableFilter(enable):
SniperAimingSystem.__FILTER_ENABLED = enable
def __init__(self, dataSec):
IAimingSystem.__init__(self)
self.__idealYaw = 0.0
self.__idealPitch = 0.0
self.__g_curPitch = 0.0
self.__g_curYaw = 0.0
self.__stailizationLimit = math.radians(60.0)
self.__vehicleTypeDescriptor = None
self.__vehicleMProv = None
self.__vehiclePrevMat = None
self.__yprDeviationConstraints = Vector3(math.pi * 2.1, math.pi / 2.0 * 0.95, 0.0)
self.__oscillator = Math.PyOscillator(1.0, Vector3(0.0, 0.0, 15.0), Vector3(0.0, 0.0, 3.5), self.__yprDeviationConstraints)
self.__pitchfilter = InputFilter()
self.reloadConfig(dataSec)
self.__pitchCompensating = 0.0
return
def reloadConfig(self, dataSec):
filterSec = dataSec['aimingSystem']
self.__pitchDeviation = readVec2(filterSec, 'deviation', (-_MAX_DEVIATION_DEG, -_MAX_DEVIATION_DEG), (_MAX_DEVIATION_DEG, _MAX_DEVIATION_DEG), (0.0, 0.0))
self.__pitchDeviation = (-math.radians(self.__pitchDeviation[1]), -math.radians(self.__pitchDeviation[0]))
self.__pitchfilter.reloadConfig(filterSec)
def destroy(self):
IAimingSystem.destroy(self)
SniperAimingSystem.__activeSystem = None
return
def enableHorizontalStabilizerRuntime(self, enable):
yawConstraint = math.pi * 2.1 if enable else 0.0
self.__yprDeviationConstraints.x = yawConstraint
def enable(self, targetPos):
player = BigWorld.player()
self.__vehicleTypeDescriptor = player.vehicleTypeDescriptor
self.__vehicleMProv = player.getOwnVehicleStabilisedMatrix()
self.__vehiclePrevMat = Matrix(self.__vehicleMProv)
IAimingSystem.enable(self, targetPos)
self.__yawLimits = self.__vehicleTypeDescriptor.gun['turretYawLimits']
self.__pitchLimits = self.__vehicleTypeDescriptor.gun['pitchLimits']
self.__idealYaw, self.__idealPitch = AimingSystems.getTurretYawGunPitch(self.__vehicleTypeDescriptor, self.__vehicleMProv, targetPos, True)
self.__idealYaw, self.__idealPitch = self.__clampToLimits(self.__idealYaw, self.__idealPitch)
currentGunMat = AimingSystems.getPlayerGunMat(self.__idealYaw, self.__idealPitch)
self.__g_curYaw = currentGunMat.yaw
self.__g_curPitch = (targetPos - currentGunMat.translation).pitch
self._matrix.set(currentGunMat)
self.__idealYaw, self.__idealPitch = self.__worldYawPitchToTurret(self.__g_curYaw, self.__g_curPitch)
self.__idealYaw, self.__idealPitch = self.__clampToLimits(self.__idealYaw, self.__idealPitch)
self.__oscillator.reset()
self.__pitchfilter.reset(currentGunMat.pitch)
SniperAimingSystem.__activeSystem = self
vehicle = player.getVehicleAttached()
if vehicle is not None:
if hasattr(vehicle.filter, 'placingOnGround') and not vehicle.filter.placingOnGround:
vehicle.filter.calcPlacedMatrix(True)
self.__baseMatrix = vehicle.filter.placingMatrix
else:
self.__baseMatrix = vehicle.matrix
return
def disable(self):
SniperAimingSystem.__activeSystem = None
player = BigWorld.player()
vehicle = player.getVehicleAttached()
if vehicle is not None:
if not vehicle.filter.placingOnGround:
vehicle.filter.calcPlacedMatrix(False)
return
def getDesiredShotPoint(self):
start = self._matrix.translation
scanDir = self._matrix.applyVector(Vector3(0.0, 0.0, 1.0))
return AimingSystems.getDesiredShotPoint(start, scanDir)
def resetIdealDirection(self):
self.__idealYaw, self.__idealPitch = self.__worldYawPitchToTurret(self.__g_curYaw, self.__g_curPitch)
self.__idealYaw, self.__idealPitch = self.__clampToLimits(self.__idealYaw, self.__idealPitch)
self.__pitchfilter.reset(self.__g_curPitch)
def handleMovement(self, dx, dy):
self.__idealYaw, self.__idealPitch = self.__worldYawPitchToTurret(self.__g_curYaw, self.__pitchfilter.value())
newPitch = self.__idealPitch + dy
newYaw = self.__idealYaw + dx
self.__idealYaw, idealPitch, inLimit, pitchMin, dp = self.__inLimit(self.__idealYaw, newYaw, newPitch)
newPitch += dp
if not inLimit:
d1 = pitchMin - self.__idealPitch
d2 = pitchMin - newPitch
if math.fabs(d1) >= math.fabs(d2):
self.__idealPitch = idealPitch
currentGunMat = AimingSystems.getPlayerGunMat(self.__idealYaw, self.__idealPitch)
self.__pitchfilter.adjust(currentGunMat.pitch - self.__pitchfilter.value())
else:
currentGunMat = AimingSystems.getPlayerGunMat(self.__idealYaw, idealPitch)
self.__pitchfilter.reset(currentGunMat.pitch)
self.__g_curYaw = currentGunMat.yaw
self.__g_curPitch = currentGunMat.pitch
self._matrix.set(currentGunMat)
self.__oscillator.velocity = Vector3(0.0, 0.0, 0.0)
_, uncompensatedPitch = AimingSystems.getTurretYawGunPitch(self.__vehicleTypeDescriptor, BigWorld.player().getOwnVehicleStabilisedMatrix(), self.getDesiredShotPoint())
if inLimit:
self.__pitchCompensating = mathUtils.clamp(math.radians(-2.0), math.radians(2.0), idealPitch - uncompensatedPitch)
else:
self.__pitchCompensating = 0.0
def __clampToLimits(self, turretYaw, gunPitch):
if self.__yawLimits is not None:
turretYaw = mathUtils.clamp(self.__yawLimits[0], self.__yawLimits[1], turretYaw)
pitchLimits = calcPitchLimitsFromDesc(turretYaw, self.__pitchLimits)
if SniperAimingSystem.__FILTER_ENABLED:
pitchLimitsMin = min(pitchLimits[0] + self.__pitchDeviation[0], _MAX_DEVIATION)
pitchLimitsMax = max(pitchLimits[1] + self.__pitchDeviation[1], -_MAX_DEVIATION)
else:
pitchLimitsMin = pitchLimits[0]
pitchLimitsMax = pitchLimits[1]
gunPitch = mathUtils.clamp(pitchLimitsMin, pitchLimitsMax + self.__pitchCompensating, gunPitch)
return (turretYaw, gunPitch)
def __inLimit(self, prevYaw, newYaw, newPitch):
if self.__yawLimits is not None:
prevYaw = mathUtils.clamp(self.__yawLimits[0], self.__yawLimits[1], prevYaw)
newYaw = mathUtils.clamp(self.__yawLimits[0], self.__yawLimits[1], newYaw)
prevPitchLimits = calcPitchLimitsFromDesc(prevYaw, self.__pitchLimits)
pitchLimits = calcPitchLimitsFromDesc(newYaw, self.__pitchLimits)
if SniperAimingSystem.__FILTER_ENABLED:
pitchLimitsMin = pitchLimits[0] + self.__pitchDeviation[0]
pitchLimitsMax = pitchLimits[1] + self.__pitchDeviation[1]
prevLimMin = prevPitchLimits[0] + self.__pitchDeviation[0]
prevLimMax = prevPitchLimits[1] + self.__pitchDeviation[1]
else:
pitchLimitsMin = pitchLimits[0]
pitchLimitsMax = pitchLimits[1]
prevLimMin = prevPitchLimits[0]
prevLimMax = prevPitchLimits[1]
prevLimitedPitch = mathUtils.clamp(prevLimMin, prevLimMax, newPitch)
limitedPitch = mathUtils.clamp(pitchLimitsMin, pitchLimitsMax, newPitch)
dp = limitedPitch - prevLimitedPitch
return (newYaw,
limitedPitch,
pitchLimitsMin <= newPitch <= pitchLimitsMax,
pitchLimitsMin,
dp)
def __worldYawPitchToTurret(self, worldYaw, worldPitch):
worldToTurret = Matrix(self.__vehicleMProv)
worldToTurret.invert()
worldToTurret.preMultiply(mathUtils.createRotationMatrix((worldYaw, worldPitch, 0.0)))
return (worldToTurret.yaw, worldToTurret.pitch)
def update(self, deltaTime):
self.__oscillator.constraints = mathUtils.matrixScale(self.__yprDeviationConstraints, SniperAimingSystem.__CONSTRAINTS_MULTIPLIERS)
l_curYaw, l_curPitch = self.__worldYawPitchToTurret(self.__g_curYaw, self.__g_curPitch)
stabilizationOn = math.fabs(self._matrix.roll) < self.__stailizationLimit and SniperAimingSystem.__FILTER_ENABLED
if stabilizationOn:
l_curYaw, l_curNewPitch = self.__clampToLimits(l_curYaw, l_curPitch)
else:
l_curNewPitch = l_curPitch
if stabilizationOn:
newLocal = l_curPitch + (l_curNewPitch - l_curPitch)
newGunMat = AimingSystems.getPlayerGunMat(l_curYaw, newLocal)
new__g_curPitch = newGunMat.pitch
new__g_curPitch = self.__pitchfilter.update(new__g_curPitch, deltaTime)
globalDelta = new__g_curPitch - self.__g_curPitch
else:
globalDelta = l_curNewPitch - self.__idealPitch
yprDelta = Vector3(l_curYaw - self.__idealYaw, globalDelta, 0.0)
self.__oscillator.deviation = yprDelta
self.__oscillator.update(deltaTime)
l_curYaw = self.__idealYaw + self.__oscillator.deviation.x
if stabilizationOn:
l_curPitch = l_curPitch + self.__oscillator.deviation.y
else:
l_curPitch = self.__idealPitch + self.__oscillator.deviation.y
l_curYaw, l_newCurPitch = self.__clampToLimits(l_curYaw, l_curPitch)
if not stabilizationOn:
globalDelta = l_newCurPitch - self.__idealPitch
l_curPitch = l_newCurPitch
yprDelta = Vector3(l_curYaw - self.__idealYaw, globalDelta, 0.0)
self.__oscillator.deviation = yprDelta
currentGunMat = AimingSystems.getPlayerGunMat(l_curYaw, l_curPitch)
self.__g_curYaw = currentGunMat.yaw
self.__g_curPitch = currentGunMat.pitch
self._matrix.set(currentGunMat)
        return 0.0
| [
"[email protected]"
]
| |
9f22af2e0e7505b1cc7333dc94157e766abb8b25 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit959.py | fdc98c144976bd8c7ca16f4120e60feff1cf388a | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,967 | py | # qubit number=5
# total number=41
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.x(input_qubit[2]) # number=26
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(1):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.y(input_qubit[3]) # number=25
prog.x(input_qubit[0]) # number=9
prog.h(input_qubit[1]) # number=32
prog.cz(input_qubit[0],input_qubit[1]) # number=33
prog.h(input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=38
prog.cz(input_qubit[0],input_qubit[1]) # number=39
prog.h(input_qubit[1]) # number=40
prog.x(input_qubit[1]) # number=36
prog.cx(input_qubit[0],input_qubit[1]) # number=37
prog.cx(input_qubit[0],input_qubit[1]) # number=30
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.x(input_qubit[2]) # number=23
prog.y(input_qubit[3]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=24
prog.x(input_qubit[3]) # number=12
prog.cx(input_qubit[1],input_qubit[2]) # number=31
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.x(input_qubit[1]) # number=14
prog.x(input_qubit[2]) # number=15
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit959.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
9ac8fbd2093b050ddc4cfc599c8ec66a84de265d | 35fdd5b42b47a1dbe6a25f6fc1865f4e48b842a5 | /evalml/data_checks/class_imbalance_data_check.py | 9eedc3f25b4b5d9b027a72bcabf93ac8fc2b90fa | [
"BSD-3-Clause"
]
| permissive | skvorekn/evalml | 41e5426f9f7d5ad625c21b74336009894c79c7de | 2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8 | refs/heads/main | 2023-03-27T01:42:07.691406 | 2021-03-19T18:53:43 | 2021-03-19T18:53:43 | 349,555,689 | 0 | 0 | BSD-3-Clause | 2021-03-21T14:57:01 | 2021-03-19T21:08:12 | null | UTF-8 | Python | false | false | 7,631 | py |
from evalml.data_checks import (
DataCheck,
DataCheckError,
DataCheckMessageCode,
DataCheckWarning
)
from evalml.utils import _convert_woodwork_types_wrapper, infer_feature_types
class ClassImbalanceDataCheck(DataCheck):
"""Checks if any target labels are imbalanced beyond a threshold. Use for classification problems"""
def __init__(self, threshold=0.1, min_samples=100, num_cv_folds=3):
"""Check if any of the target labels are imbalanced, or if the number of values for each target
are below 2 times the number of cv folds
Arguments:
threshold (float): The minimum threshold allowed for class imbalance before a warning is raised.
This threshold is calculated by comparing the number of samples in each class to the sum of samples in that class and the majority class.
For example, a multiclass case with [900, 900, 100] samples per classes 0, 1, and 2, respectively,
would have a 0.10 threshold for class 2 (100 / (900 + 100)). Defaults to 0.10.
min_samples (int): The minimum number of samples per accepted class. If the minority class is both below the threshold and min_samples,
then we consider this severely imbalanced. Must be greater than 0. Defaults to 100.
num_cv_folds (int): The number of cross-validation folds. Must be positive. Choose 0 to ignore this warning.
"""
if threshold <= 0 or threshold > 0.5:
raise ValueError("Provided threshold {} is not within the range (0, 0.5]".format(threshold))
self.threshold = threshold
if min_samples <= 0:
raise ValueError("Provided value min_samples {} is not greater than 0".format(min_samples))
self.min_samples = min_samples
if num_cv_folds < 0:
raise ValueError("Provided number of CV folds {} is less than 0".format(num_cv_folds))
self.cv_folds = num_cv_folds * 2
def validate(self, X, y):
"""Checks if any target labels are imbalanced beyond a threshold for binary and multiclass problems
Ignores NaN values in target labels if they appear.
Arguments:
X (ww.DataTable, pd.DataFrame, np.ndarray): Features. Ignored.
y (ww.DataColumn, pd.Series, np.ndarray): Target labels to check for imbalanced data.
Returns:
dict: Dictionary with DataCheckWarnings if imbalance in classes is less than the threshold,
and DataCheckErrors if the number of values for each target is below 2 * num_cv_folds.
Example:
>>> import pandas as pd
>>> X = pd.DataFrame()
>>> y = pd.Series([0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> target_check = ClassImbalanceDataCheck(threshold=0.10)
>>> assert target_check.validate(X, y) == {"errors": [{"message": "The number of instances of these targets is less than 2 * the number of cross folds = 6 instances: [0]",\
"data_check_name": "ClassImbalanceDataCheck",\
"level": "error",\
"code": "CLASS_IMBALANCE_BELOW_FOLDS",\
"details": {"target_values": [0]}}],\
"warnings": [{"message": "The following labels fall below 10% of the target: [0]",\
"data_check_name": "ClassImbalanceDataCheck",\
"level": "warning",\
"code": "CLASS_IMBALANCE_BELOW_THRESHOLD",\
"details": {"target_values": [0]}},\
{"message": "The following labels in the target have severe class imbalance because they fall under 10% of the target and have less than 100 samples: [0]",\
"data_check_name": "ClassImbalanceDataCheck",\
"level": "warning",\
"code": "CLASS_IMBALANCE_SEVERE",\
"details": {"target_values": [0]}}],\
"actions": []}
"""
results = {
"warnings": [],
"errors": [],
"actions": []
}
y = infer_feature_types(y)
y = _convert_woodwork_types_wrapper(y.to_series())
fold_counts = y.value_counts(normalize=False, sort=True)
if len(fold_counts) == 0:
return results
# search for targets that occur less than twice the number of cv folds first
below_threshold_folds = fold_counts.where(fold_counts < self.cv_folds).dropna()
if len(below_threshold_folds):
below_threshold_values = below_threshold_folds.index.tolist()
error_msg = "The number of instances of these targets is less than 2 * the number of cross folds = {} instances: {}"
DataCheck._add_message(DataCheckError(message=error_msg.format(self.cv_folds, below_threshold_values),
data_check_name=self.name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_FOLDS,
details={"target_values": below_threshold_values}), results)
counts = fold_counts / (fold_counts + fold_counts.values[0])
below_threshold = counts.where(counts < self.threshold).dropna()
# if there are items that occur less than the threshold, add them to the list of results
if len(below_threshold):
below_threshold_values = below_threshold.index.tolist()
warning_msg = "The following labels fall below {:.0f}% of the target: {}"
DataCheck._add_message(DataCheckWarning(message=warning_msg.format(self.threshold * 100, below_threshold_values),
data_check_name=self.name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_BELOW_THRESHOLD,
details={"target_values": below_threshold_values}), results)
sample_counts = fold_counts.where(fold_counts < self.min_samples).dropna()
if len(below_threshold) and len(sample_counts):
sample_count_values = sample_counts.index.tolist()
severe_imbalance = [v for v in sample_count_values if v in below_threshold]
warning_msg = "The following labels in the target have severe class imbalance because they fall under {:.0f}% of the target and have less than {} samples: {}"
DataCheck._add_message(DataCheckWarning(message=warning_msg.format(self.threshold * 100, self.min_samples, severe_imbalance),
data_check_name=self.name,
message_code=DataCheckMessageCode.CLASS_IMBALANCE_SEVERE,
details={"target_values": severe_imbalance}), results)
return results
| [
"[email protected]"
]
| |
339989adcfa17b04173240ab9b0f9595798d6350 | e5f8d24525a211750900c3c8e7a631b344aa4443 | /src/mpi4py/run.py | 3c279bf1277d7bedc09cd800f4d76233ab803077 | []
| permissive | mpi4py/mpi4py | 569ce3f4707e54fa2c1e041cc9b96147337a1f10 | 8bdd0c30f98797deefa4e4f129898fefb2b1e171 | refs/heads/master | 2023-08-31T21:39:18.799184 | 2023-08-28T09:41:32 | 2023-08-28T13:24:37 | 12,620,272 | 720 | 125 | BSD-2-Clause | 2023-09-14T21:16:17 | 2013-09-05T14:44:25 | Python | UTF-8 | Python | false | false | 8,358 | py | # Author: Lisandro Dalcin
# Contact: [email protected]
"""Run Python code using ``mpi4py``.
Run Python code (scripts, modules, zip files) using the ``runpy``
module. In case of an unhandled exception, abort execution of the MPI
program by calling ``MPI.COMM_WORLD.Abort()``.
"""
def run_command_line(args=None):
"""Run command line ``[pyfile | -m mod | -c cmd | -] [arg] ...``.
* ``pyfile`` : program read from script file
* ``-m mod`` : run library module as a script
* ``-c cmd`` : program passed in as a command string
* ``-`` : program read from standard input (``sys.stdin``)
* ``arg ...``: arguments passed to program in ``sys.argv[1:]``
"""
# pylint: disable=import-outside-toplevel
import sys
from runpy import run_module, run_path
def run_string(string, init_globals=None, run_name=None,
filename='<string>', argv0='-c'):
from runpy import _run_module_code
code = compile(string, filename, 'exec', 0, 1)
kwargs = {'script_name': argv0}
return _run_module_code(code, init_globals, run_name, **kwargs)
sys.argv[:] = args if args is not None else sys.argv[1:]
if sys.argv[0] == '-':
cmd = sys.stdin.read()
run_string(cmd, run_name='__main__', filename='<stdin>', argv0='-')
elif sys.argv[0] == '-c':
cmd = sys.argv.pop(1) # Remove "cmd" from argument list
run_string(cmd, run_name='__main__', filename='<string>', argv0='-c')
elif sys.argv[0] == '-m':
del sys.argv[0] # Remove "-m" from argument list
run_module(sys.argv[0], run_name='__main__', alter_sys=True)
else:
from os.path import realpath, dirname
if not getattr(sys.flags, 'isolated', 0): # pragma: no branch
sys.path[0] = realpath(dirname(sys.argv[0])) # Fix sys.path
run_path(sys.argv[0], run_name='__main__')
def set_abort_status(status):
"""Terminate MPI execution environment at Python exit.
Terminate MPI execution environment at Python exit by calling
``MPI.COMM_WORLD.Abort(status)``. This function should be called
within an ``except`` block. Afterwards, exceptions should be
re-raised.
"""
# pylint: disable=import-outside-toplevel
import sys
if isinstance(status, SystemExit):
status = status.code
elif isinstance(status, KeyboardInterrupt):
from _signal import SIGINT
status = SIGINT + 128
if not isinstance(status, int):
status = 0 if status is None else 1
pkg = __spec__.parent
mpi = sys.modules.get(f'{pkg}.MPI')
if mpi is not None and status:
# pylint: disable=protected-access
mpi._set_abort_status(status)
def main():
"""Entry-point for ``python -m mpi4py.run ...``."""
# pylint: disable=too-many-statements
# pylint: disable=import-outside-toplevel
import os
import sys
def prefix():
prefix = os.path.dirname(__spec__.origin)
print(prefix, file=sys.stdout)
sys.exit(0)
def version():
from . import __version__
package = __spec__.parent
print(f"{package} {__version__}", file=sys.stdout)
sys.exit(0)
def mpi_std_version():
from . import rc
rc.initialize = rc.finalize = False
from . import MPI
version, subversion = MPI.Get_version()
print(f"MPI {version}.{subversion}", file=sys.stdout)
sys.exit(0)
def mpi_lib_version():
from . import rc
rc.initialize = rc.finalize = False
from . import MPI
library_version = MPI.Get_library_version()
print(library_version, file=sys.stdout)
sys.exit(0)
def usage(errmess=None):
from textwrap import dedent
python = os.path.basename(sys.executable)
program = __spec__.name
cmdline = dedent(f"""
usage: {python} -m {program} [options] <pyfile> [arg] ...
or: {python} -m {program} [options] -m <mod> [arg] ...
or: {python} -m {program} [options] -c <cmd> [arg] ...
or: {python} -m {program} [options] - [arg] ...
""").strip()
helptip = dedent(f"""
Try `{python} -m {program} -h` for more information.
""").strip()
options = dedent("""
options:
--prefix show install path and exit
--version show version number and exit
--mpi-std-version show MPI standard version and exit
--mpi-lib-version show MPI library version and exit
-h|--help show this help message and exit
-rc <key=value,...> set 'mpi4py.rc.key=value'
""").strip()
if errmess:
print(errmess, file=sys.stderr)
print(cmdline, file=sys.stderr)
print(helptip, file=sys.stderr)
sys.exit(1)
else:
print(cmdline, file=sys.stdout)
print(options, file=sys.stdout)
sys.exit(0)
def parse_command_line(args=None):
# pylint: disable=too-many-branches
class Options:
# pylint: disable=too-few-public-methods
# pylint: disable=missing-class-docstring
rc_args = {}
def poparg(args):
if len(args) < 2 or args[1].startswith('-'):
usage('Argument expected for option: ' + args[0])
return args.pop(1)
options = Options()
args = sys.argv[1:] if args is None else args[:]
while args and args[0].startswith('-'):
if args[0] in ('-m', '-c', '-'):
break # Stop processing options
if args[0] in ('-h', '-help', '--help'):
usage() # Print help and exit
if args[0] in ('-prefix', '--prefix'):
prefix() # Print install path and exit
if args[0] in ('-version', '--version'):
version() # Print version number and exit
if args[0] in ('-mpi-std-version', '--mpi-std-version'):
mpi_std_version() # Print MPI standard version and exit
if args[0] in ('-mpi-lib-version', '--mpi-lib-version'):
mpi_lib_version() # Print MPI library version and exit
try:
arg0 = args[0]
if arg0.startswith('--'):
if '=' in arg0:
opt, _, arg = arg0[1:].partition('=')
if opt in ('-rc',):
arg0, args[1:1] = opt, [arg]
else:
arg0 = arg0[1:]
if arg0 == '-rc':
from ast import literal_eval
for entry in poparg(args).split(','):
key, _, val = entry.partition('=')
if not key or not val:
raise ValueError(entry)
try:
val = literal_eval(val)
except ValueError:
pass
options.rc_args[key] = val
else:
usage('Unknown option: ' + args[0])
del args[0]
except Exception: # pylint: disable=broad-except
# Bad option, print usage and exit with error
usage('Cannot parse option: ' + args[0])
# Check remaining args and return to caller
if not args:
usage("No path specified for execution")
elif args[0] in ('-m', '-c') and len(args) < 2:
usage("Argument expected for option: " + args[0])
return options, args
def bootstrap(options):
if options.rc_args: # Set mpi4py.rc parameters
from . import rc
rc(**options.rc_args)
# Parse and process command line options
options, args = parse_command_line()
bootstrap(options)
# Run user code. In case of an unhandled exception, abort
# execution of the MPI program by calling 'MPI_Abort()'.
try:
run_command_line(args)
except SystemExit as exc:
set_abort_status(exc)
raise
except KeyboardInterrupt as exc:
set_abort_status(exc)
raise
except BaseException:
set_abort_status(1)
raise
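
# --- Illustrative sketch (editorial addition; hedged) ---
# `run_command_line` can also be driven programmatically with an argv-style
# list; the command string below is hypothetical and only shows the call
# shape used by the '-c' branch documented above.
def _demo_run_command_line():
    run_command_line(['-c', 'print("hello from mpi4py.run")'])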
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
5bcd1a408337e34fde01241f6fa33a12fd231a0c | da497ddf926b8791f3812c79543120215822216b | /icsbep/pu-sol-therm-007/openmc/case-6/generate_materials.py | 9bf69b784be27e86c3ab8ef8378f2f525260bcdb | []
| no_license | mit-crpg/benchmarks | 55f38e569699554d07df254103e2f828dc5b4ff8 | 58e15679ec684b9e2f552df58099e3648b5708cc | refs/heads/master | 2022-05-17T12:27:45.590757 | 2022-05-09T15:07:00 | 2022-05-09T15:07:00 | 2,704,358 | 23 | 30 | null | 2019-11-11T16:35:27 | 2011-11-03T19:04:29 | Python | UTF-8 | Python | false | false | 926 | py | import openmc
mats = openmc.Materials()
mat = openmc.Material(1)
mat.name = "Plutonium nitrate solution"
mat.set_density('sum')
mat.add_nuclide('Pu238', 1.5406e-08)
mat.add_nuclide('Pu239', 2.4294e-04)
mat.add_nuclide('Pu240', 1.1886e-05)
mat.add_nuclide('Pu241', 7.7338e-07)
mat.add_nuclide('Pu242', 2.2727e-08)
mat.add_nuclide('N14', 1.2577e-03)
mat.add_nuclide('H1', 6.3655e-02)
mat.add_nuclide('O16', 3.5483e-02)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mat = openmc.Material(2)
mat.name = "304L stainless steel"
mat.set_density('sum')
mat.add_element('Fe', 5.9355e-02)
mat.add_element('Cr', 1.7428e-02)
mat.add_element('Ni', 7.7203e-03)
mat.add_element('Mn', 1.7363e-03)
mats.append(mat)
mat = openmc.Material(3)
mat.name = "Water at 25 C"
mat.set_density('sum')
mat.add_nuclide('H1', 6.6655e-02)
mat.add_nuclide('O16', 3.3327e-02)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mats.export_to_xml()
| [
"[email protected]"
]
| |
f7bdc9446ef34ddd93a77d2b6caff30f2bd83d5c | 26536ad8f07242ea5411a02117adc80462cc1173 | /ssseg/modules/models/nonlocalnet/nonlocalnet.py | 6c52ff731e0d8a7bb1f2e4ed9300222c35906f67 | [
"MIT"
]
| permissive | yawudede/sssegmentation | 451b34c7e383b61d74b483c3048c0ed760821956 | b7fb5bd955a59cda0cfa20ac0c51aea67bfe0e30 | refs/heads/main | 2023-01-30T20:28:10.976883 | 2020-12-16T08:45:49 | 2020-12-16T08:45:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,243 | py | '''
Function:
Implementation of NonLocalNet
Author:
Zhenchao Jin
'''
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from ...backbones import *
from ..base import BaseModel
from .nonlocalblock import NonLocal2d
'''NonLocalNet'''
class NonLocalNet(BaseModel):
def __init__(self, cfg, **kwargs):
super(NonLocalNet, self).__init__(cfg, **kwargs)
align_corners, norm_cfg, act_cfg = self.align_corners, self.norm_cfg, self.act_cfg
# build non-local block
nl_cfg = cfg['nonlocal']
self.conv_before_nl = nn.Sequential(
nn.Conv2d(nl_cfg['in_channels'], nl_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
BuildNormalizationLayer(norm_cfg['type'], (nl_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
self.nl_block = NonLocal2d(
in_channels=nl_cfg['out_channels'],
reduction=nl_cfg['reduction'],
use_scale=nl_cfg['use_scale'],
mode=nl_cfg['mode'],
norm_cfg=copy.deepcopy(norm_cfg),
act_cfg=copy.deepcopy(act_cfg),
)
self.conv_after_nl = nn.Sequential(
nn.Conv2d(nl_cfg['out_channels'], nl_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
BuildNormalizationLayer(norm_cfg['type'], (nl_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
)
# build decoder
decoder_cfg = cfg['decoder']
self.decoder = nn.Sequential(
nn.Conv2d(decoder_cfg['in_channels'], decoder_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
BuildNormalizationLayer(norm_cfg['type'], (decoder_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
nn.Dropout2d(decoder_cfg['dropout']),
nn.Conv2d(decoder_cfg['out_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
)
# build auxiliary decoder
auxiliary_cfg = cfg['auxiliary']
self.auxiliary_decoder = nn.Sequential(
nn.Conv2d(auxiliary_cfg['in_channels'], auxiliary_cfg['out_channels'], kernel_size=3, stride=1, padding=1, bias=False),
BuildNormalizationLayer(norm_cfg['type'], (auxiliary_cfg['out_channels'], norm_cfg['opts'])),
BuildActivation(act_cfg['type'], **act_cfg['opts']),
nn.Dropout2d(auxiliary_cfg['dropout']),
nn.Conv2d(auxiliary_cfg['out_channels'], cfg['num_classes'], kernel_size=1, stride=1, padding=0)
)
# freeze normalization layer if necessary
if cfg.get('is_freeze_normlayer', False): self.freezenormlayer()
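    # Illustrative sketch only (not from this repository): the cfg dict consumed in
    # __init__ above is expected to carry at least the keys read there. The channel
    # sizes, mode and dropout values below are assumptions for illustration.
    #
    # cfg = {
    #     'num_classes': 21,
    #     'nonlocal': {'in_channels': 2048, 'out_channels': 512,
    #                  'reduction': 2, 'use_scale': True, 'mode': 'embedded_gaussian'},
    #     'decoder': {'in_channels': 2048 + 512, 'out_channels': 512, 'dropout': 0.1},
    #     'auxiliary': {'in_channels': 1024, 'out_channels': 512, 'dropout': 0.1},
    # }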
'''forward'''
def forward(self, x, targets=None, losses_cfg=None):
h, w = x.size(2), x.size(3)
# feed to backbone network
x1, x2, x3, x4 = self.backbone_net(x)
# feed to non-local block
feats = self.conv_before_nl(x4)
feats = self.nl_block(feats)
feats = self.conv_after_nl(feats)
# feed to decoder
feats = torch.cat([x4, feats], dim=1)
preds = self.decoder(feats)
# feed to auxiliary decoder and return according to the mode
if self.mode == 'TRAIN':
preds = F.interpolate(preds, size=(h, w), mode='bilinear', align_corners=self.align_corners)
preds_aux = self.auxiliary_decoder(x3)
preds_aux = F.interpolate(preds_aux, size=(h, w), mode='bilinear', align_corners=self.align_corners)
return self.calculatelosses(
predictions={'loss_cls': preds, 'loss_aux': preds_aux},
targets=targets,
losses_cfg=losses_cfg
)
return preds
'''return all layers'''
def alllayers(self):
return {
'backbone_net': self.backbone_net,
'conv_before_nl': self.conv_before_nl,
'nl_block': self.nl_block,
'conv_after_nl': self.conv_after_nl,
'decoder': self.decoder,
'auxiliary_decoder': self.auxiliary_decoder
} | [
"[email protected]"
]
| |
26f66d348266283b60f81e944067b3f15c0bf29f | e03174f2b447f998415e9629821efd85117076f7 | /recipe_app/urls.py | 00bb48962163a84840617566218bd0418b587349 | [
"MIT"
]
| permissive | PatrickCmd/Recipe-API-Django-GraphQL | 64defbf46ba69a1cefec367228e7ef281071e4f8 | bed1f1ebab88615ca62ea3846fbeb8e1a69c09e6 | refs/heads/main | 2023-03-20T07:57:04.214175 | 2021-03-02T19:05:22 | 2021-03-02T19:08:51 | 343,881,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | """recipe_app URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from graphene_django.views import GraphQLView
urlpatterns = [
path('admin/', admin.site.urls),
path('graphql/', csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
| [
"[email protected]"
]
| |
94a022a9f21e7f396b42b3a6c186a2c6f0e4cf76 | 4015291afebfd346da3fee4b1d5a775882b5b461 | /packages/models-library/src/models_library/services_ui.py | 2933c09a2120995bb48d1c780f35c5ca452fbded | [
"MIT"
]
| permissive | pcrespov/osparc-simcore | 3a8a6b5252038542f515c7e90d983ac6f1fb4de7 | eb5e00bc2cf4acfe81f5dc422a5e50a4646c9596 | refs/heads/master | 2023-08-06T04:33:38.594066 | 2023-07-12T09:47:00 | 2023-07-12T09:47:00 | 130,357,545 | 0 | 1 | MIT | 2023-04-18T08:04:27 | 2018-04-20T12:10:41 | Python | UTF-8 | Python | false | false | 895 | py | from enum import Enum
from typing import Union
from pydantic import BaseModel, Extra, Field
from pydantic.types import PositiveInt
class WidgetType(str, Enum):
TextArea = "TextArea"
SelectBox = "SelectBox"
class TextArea(BaseModel):
min_height: PositiveInt = Field(
..., alias="minHeight", description="minimum Height of the textarea"
)
class Config:
extra = Extra.forbid
class Structure(BaseModel):
key: Union[str, bool, float]
label: str
class Config:
extra = Extra.forbid
class SelectBox(BaseModel):
structure: list[Structure] = Field(..., min_items=1)
class Config:
extra = Extra.forbid
class Widget(BaseModel):
widget_type: WidgetType = Field(
..., alias="type", description="type of the property"
)
details: Union[TextArea, SelectBox]
class Config:
extra = Extra.forbid
| [
"[email protected]"
]
| |
f91540884b5e4959ef73492c7f863d2922eccf94 | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/collective/fleet/test_fleet_lamb_meta_optimizer.py | c32135bafc1922c15f3cde7cad759415d8939996 | [
"Apache-2.0"
]
| permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 6,035 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import paddle
from paddle import fluid
from paddle.distributed import fleet
from paddle.distributed.fleet.base import role_maker
paddle.enable_static()
class TestFleetLambMetaOptimizer(unittest.TestCase):
def setUp(self):
os.environ["PADDLE_TRAINER_ID"] = "1"
os.environ[
"PADDLE_TRAINER_ENDPOINTS"
] = "127.0.0.1:36001,127.0.0.1:36002"
def net(self, main_prog, startup_prog):
with fluid.program_guard(main_prog, startup_prog):
with fluid.unique_name.guard():
input_x = paddle.static.data(
name="x", shape=[-1, 32], dtype='float32'
)
input_y = paddle.static.data(
name="y", shape=[-1, 1], dtype='int64'
)
fc_1 = paddle.static.nn.fc(
x=input_x, size=64, activation='tanh'
)
fc_2 = paddle.static.nn.fc(x=fc_1, size=256, activation='tanh')
prediction = paddle.static.nn.fc(
x=[fc_2], size=2, activation='softmax'
)
cost = paddle.nn.functional.cross_entropy(
input=prediction,
label=input_y,
reduction='none',
use_softmax=False,
)
avg_cost = paddle.mean(x=cost)
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
return avg_cost, strategy
def test_lamb_optimizer(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('lamb', ops)
def test_lamb_not_apply_with_momentum(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Momentum(
learning_rate=0.1, momentum=0.9
)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertNotIn('lamb', ops)
def test_lamb_exclude_fn(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
startup_prog = fluid.Program()
train_prog = fluid.Program()
avg_cost, strategy = self.net(train_prog, startup_prog)
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': ['.b_0'],
}
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops_without_wd = [
op
for op in avg_cost.block.ops
if op.type == 'lamb' and op.attr('op_role_var')[0].endswith('.b_0')
]
for op in ops_without_wd:
self.assertEqual(op.attr('weight_decay'), 0)
def test_lamb_apply_with_amp(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role)
input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32')
input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64')
fc_1 = paddle.static.nn.fc(x=input_x, size=64, activation='tanh')
fc_2 = paddle.static.nn.fc(x=fc_1, size=64, activation='tanh')
prediction = paddle.static.nn.fc(x=[fc_2], size=2, activation='softmax')
cost = paddle.nn.functional.cross_entropy(
input=prediction, label=input_y, reduction='none', use_softmax=False
)
avg_cost = paddle.mean(x=cost)
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.amp = True
strategy.amp_configs = {
"init_loss_scaling": 32768,
"decr_every_n_nan_or_inf": 2,
"incr_every_n_steps": 1000,
"incr_ratio": 2.0,
"use_dynamic_loss_scaling": True,
"decr_ratio": 0.5,
"custom_white_list": ['softmax'],
"custom_black_list": ['tanh'],
}
strategy.lamb = True
strategy.lamb_configs = {
'lamb_weight_decay': 0.01,
'exclude_from_weight_decay': [],
}
optimizer = paddle.fluid.optimizer.Adam(learning_rate=0.01)
optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy)
optimizer.minimize(avg_cost)
ops = [op.type for op in avg_cost.block.ops]
self.assertIn('lamb', ops)
self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
21e5c7969a5e8530ef51ea85feca0c3bfffdd174 | 5c1531b47fb4dc4d7e5998d44f7200bf1786b12b | /074_search_2d_matrix/search_2d_matrix.py | 3c4f9a7a01a2733e9c54637410c7700ced156771 | []
| no_license | Web-Dev-Collaborative/Leetcode-JS-PY-MD | d1f560051aad1896a80eccdd4b4fbb389e7033e3 | 675b94fa5da8d40f0ea79efe6d3ef1393221425f | refs/heads/master | 2023-09-01T22:30:32.313793 | 2021-10-26T02:17:03 | 2021-10-26T02:17:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 590 | py | import bisect
class Solution:
# @param {integer[][]} matrix
# @param {integer} target
# @return {boolean}
def searchMatrix(self, matrix, target):
row = bisect.bisect_left([r[0] for r in matrix], target)
if row == len(matrix):
row = row - 1
else:
if matrix[row][0] == target:
return True
else:
row = row - 1
col = bisect.bisect_left(matrix[row], target)
if col == len(matrix[0]):
return False
else:
return matrix[row][col] == target
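# Example usage (illustrative, not part of the original solution):
#   s = Solution()
#   s.searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 3)   # True
#   s.searchMatrix([[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 50]], 13)  # False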
| [
"[email protected]"
]
| |
ad120a33f5a38c61f5bf51b963ca28fe9bb7181e | 9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb | /sdk/compute/azure-mgmt-avs/generated_samples/workload_networks_list_virtual_machines.py | 5c0476b45004e198a88a5953076a48e7cd6a0852 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
]
| permissive | openapi-env-test/azure-sdk-for-python | b334a2b65eeabcf9b7673879a621abb9be43b0f6 | f61090e96094cfd4f43650be1a53425736bd8985 | refs/heads/main | 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 | MIT | 2023-09-08T08:38:48 | 2019-11-18T07:09:24 | Python | UTF-8 | Python | false | false | 1,593 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.avs import AVSClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-avs
# USAGE
python workload_networks_list_virtual_machines.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = AVSClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-0000-0000-0000-000000000000",
)
response = client.workload_networks.list_virtual_machines(
resource_group_name="group1",
private_cloud_name="cloud1",
)
for item in response:
print(item)
# x-ms-original-file: specification/vmware/resource-manager/Microsoft.AVS/stable/2022-05-01/examples/WorkloadNetworks_ListVirtualMachines.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ab4aa406b1c17f7f9b9d681a9994f81bb61e1d9d | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/fc/inputothererrorshist5min.py | f32c1c9ff33549d1579e1e8c16dd6a037f659a95 | []
| no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 33,113 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class InputOtherErrorsHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.fc.InputOtherErrorsHist5min", "input other errors")
counter = CounterMeta("disparity8b10b", CounterCategory.COUNTER, "link", "input 8b10b disparity")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "disparity8b10bCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "disparity8b10bPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "disparity8b10bMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "disparity8b10bMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "disparity8b10bAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "disparity8b10bSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "disparity8b10bThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "disparity8b10bTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "disparity8b10bRate"
meta._counters.append(counter)
counter = CounterMeta("elp", CounterCategory.COUNTER, "link", "input elp")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "elpCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "elpPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "elpMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "elpMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "elpAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "elpSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "elpThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "elpTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "elpRate"
meta._counters.append(counter)
counter = CounterMeta("framing", CounterCategory.COUNTER, "link", "input framing errors")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "framingCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "framingPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "framingMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "framingMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "framingAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "framingSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "framingThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "framingTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "framingRate"
meta._counters.append(counter)
counter = CounterMeta("eisl", CounterCategory.COUNTER, "link", "input eisl")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "eislCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "eislPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "eislMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "eislMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "eislAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "eislSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "eislThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "eislTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "eislRate"
meta._counters.append(counter)
meta.moClassName = "fcInputOtherErrorsHist5min"
meta.rnFormat = "HDfcInputOtherErrors5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical input other errors stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.l1.FcPhysIf")
meta.parentClasses.add("cobra.model.pc.FcAggrIf")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.fc.InputOtherErrorsHist")
meta.rnPrefixes = [
('HDfcInputOtherErrors5min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "disparity8b10bAvg", "disparity8b10bAvg", 43226, PropCategory.IMPLICIT_AVG)
prop.label = "input 8b10b disparity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bAvg", prop)
prop = PropMeta("str", "disparity8b10bCum", "disparity8b10bCum", 43222, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "input 8b10b disparity cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bCum", prop)
prop = PropMeta("str", "disparity8b10bMax", "disparity8b10bMax", 43225, PropCategory.IMPLICIT_MAX)
prop.label = "input 8b10b disparity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bMax", prop)
prop = PropMeta("str", "disparity8b10bMin", "disparity8b10bMin", 43224, PropCategory.IMPLICIT_MIN)
prop.label = "input 8b10b disparity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bMin", prop)
prop = PropMeta("str", "disparity8b10bPer", "disparity8b10bPer", 43223, PropCategory.IMPLICIT_PERIODIC)
prop.label = "input 8b10b disparity periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bPer", prop)
prop = PropMeta("str", "disparity8b10bRate", "disparity8b10bRate", 43230, PropCategory.IMPLICIT_RATE)
prop.label = "input 8b10b disparity rate"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bRate", prop)
prop = PropMeta("str", "disparity8b10bSpct", "disparity8b10bSpct", 43227, PropCategory.IMPLICIT_SUSPECT)
prop.label = "input 8b10b disparity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bSpct", prop)
prop = PropMeta("str", "disparity8b10bThr", "disparity8b10bThr", 43228, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "input 8b10b disparity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("disparity8b10bThr", prop)
prop = PropMeta("str", "disparity8b10bTr", "disparity8b10bTr", 43229, PropCategory.IMPLICIT_TREND)
prop.label = "input 8b10b disparity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("disparity8b10bTr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "eislAvg", "eislAvg", 43247, PropCategory.IMPLICIT_AVG)
prop.label = "input eisl average value"
prop.isOper = True
prop.isStats = True
meta.props.add("eislAvg", prop)
prop = PropMeta("str", "eislCum", "eislCum", 43243, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "input eisl cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("eislCum", prop)
prop = PropMeta("str", "eislMax", "eislMax", 43246, PropCategory.IMPLICIT_MAX)
prop.label = "input eisl maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("eislMax", prop)
prop = PropMeta("str", "eislMin", "eislMin", 43245, PropCategory.IMPLICIT_MIN)
prop.label = "input eisl minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("eislMin", prop)
prop = PropMeta("str", "eislPer", "eislPer", 43244, PropCategory.IMPLICIT_PERIODIC)
prop.label = "input eisl periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("eislPer", prop)
prop = PropMeta("str", "eislRate", "eislRate", 43251, PropCategory.IMPLICIT_RATE)
prop.label = "input eisl rate"
prop.isOper = True
prop.isStats = True
meta.props.add("eislRate", prop)
prop = PropMeta("str", "eislSpct", "eislSpct", 43248, PropCategory.IMPLICIT_SUSPECT)
prop.label = "input eisl suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("eislSpct", prop)
prop = PropMeta("str", "eislThr", "eislThr", 43249, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "input eisl thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("eislThr", prop)
prop = PropMeta("str", "eislTr", "eislTr", 43250, PropCategory.IMPLICIT_TREND)
prop.label = "input eisl trend"
prop.isOper = True
prop.isStats = True
meta.props.add("eislTr", prop)
prop = PropMeta("str", "elpAvg", "elpAvg", 43268, PropCategory.IMPLICIT_AVG)
prop.label = "input elp average value"
prop.isOper = True
prop.isStats = True
meta.props.add("elpAvg", prop)
prop = PropMeta("str", "elpCum", "elpCum", 43264, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "input elp cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("elpCum", prop)
prop = PropMeta("str", "elpMax", "elpMax", 43267, PropCategory.IMPLICIT_MAX)
prop.label = "input elp maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("elpMax", prop)
prop = PropMeta("str", "elpMin", "elpMin", 43266, PropCategory.IMPLICIT_MIN)
prop.label = "input elp minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("elpMin", prop)
prop = PropMeta("str", "elpPer", "elpPer", 43265, PropCategory.IMPLICIT_PERIODIC)
prop.label = "input elp periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("elpPer", prop)
prop = PropMeta("str", "elpRate", "elpRate", 43272, PropCategory.IMPLICIT_RATE)
prop.label = "input elp rate"
prop.isOper = True
prop.isStats = True
meta.props.add("elpRate", prop)
prop = PropMeta("str", "elpSpct", "elpSpct", 43269, PropCategory.IMPLICIT_SUSPECT)
prop.label = "input elp suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("elpSpct", prop)
prop = PropMeta("str", "elpThr", "elpThr", 43270, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "input elp thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("elpThr", prop)
prop = PropMeta("str", "elpTr", "elpTr", 43271, PropCategory.IMPLICIT_TREND)
prop.label = "input elp trend"
prop.isOper = True
prop.isStats = True
meta.props.add("elpTr", prop)
prop = PropMeta("str", "framingAvg", "framingAvg", 43289, PropCategory.IMPLICIT_AVG)
prop.label = "input framing errors average value"
prop.isOper = True
prop.isStats = True
meta.props.add("framingAvg", prop)
prop = PropMeta("str", "framingCum", "framingCum", 43285, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "input framing errors cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("framingCum", prop)
prop = PropMeta("str", "framingMax", "framingMax", 43288, PropCategory.IMPLICIT_MAX)
prop.label = "input framing errors maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("framingMax", prop)
prop = PropMeta("str", "framingMin", "framingMin", 43287, PropCategory.IMPLICIT_MIN)
prop.label = "input framing errors minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("framingMin", prop)
prop = PropMeta("str", "framingPer", "framingPer", 43286, PropCategory.IMPLICIT_PERIODIC)
prop.label = "input framing errors periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("framingPer", prop)
prop = PropMeta("str", "framingRate", "framingRate", 43293, PropCategory.IMPLICIT_RATE)
prop.label = "input framing errors rate"
prop.isOper = True
prop.isStats = True
meta.props.add("framingRate", prop)
prop = PropMeta("str", "framingSpct", "framingSpct", 43290, PropCategory.IMPLICIT_SUSPECT)
prop.label = "input framing errors suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("framingSpct", prop)
prop = PropMeta("str", "framingThr", "framingThr", 43291, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "input framing errors thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("framingThr", prop)
prop = PropMeta("str", "framingTr", "framingTr", 43292, PropCategory.IMPLICIT_TREND)
prop.label = "input framing errors trend"
prop.isOper = True
prop.isStats = True
meta.props.add("framingTr", prop)
prop = PropMeta("str", "index", "index", 42815, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1FcIfToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
c8626cacf72ea2f02508cbcbc1bb016ff3d83bdb | 1f94767da0b3d9260cf2548014ff264127704796 | /ml/rl/training/parametric_dqn_trainer.py | e7d2a7ba67969378c6bcb4a2bea436b77c2f18aa | [
"BSD-3-Clause"
]
| permissive | johncliu/Horizon | 9ea6de069a0294f6e97a3821137394be9ae66c34 | cfa7a873ada5de3bb01e78e2f237d9849b8270b2 | refs/heads/master | 2022-11-26T21:15:07.795468 | 2018-12-09T00:27:42 | 2018-12-09T00:29:27 | 160,994,599 | 0 | 0 | NOASSERTION | 2022-11-21T22:30:40 | 2018-12-09T01:44:45 | Python | UTF-8 | Python | false | false | 12,392 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
from copy import deepcopy
from typing import Dict, Tuple
import torch
import torch.nn.functional as F
from ml.rl.models.dueling_q_network import DuelingQNetwork
from ml.rl.models.fully_connected_network import FullyConnectedNetwork
from ml.rl.preprocessing.normalization import (
NormalizationParameters,
get_num_output_features,
)
from ml.rl.thrift.core.ttypes import (
AdditionalFeatureTypes,
ContinuousActionModelParameters,
)
from ml.rl.training.dqn_trainer_base import DQNTrainerBase
from ml.rl.training.parametric_dqn_predictor import ParametricDQNPredictor
from ml.rl.training.parametric_inner_product import ParametricInnerProduct
from ml.rl.training.rl_trainer_pytorch import (
DEFAULT_ADDITIONAL_FEATURE_TYPES,
RLTrainer,
)
from ml.rl.training.training_data_page import TrainingDataPage
logger = logging.getLogger(__name__)
class ParametricDQNTrainer(DQNTrainerBase):
def __init__(
self,
parameters: ContinuousActionModelParameters,
state_normalization_parameters: Dict[int, NormalizationParameters],
action_normalization_parameters: Dict[int, NormalizationParameters],
use_gpu: bool = False,
additional_feature_types: AdditionalFeatureTypes = DEFAULT_ADDITIONAL_FEATURE_TYPES,
metrics_to_score=None,
gradient_handler=None,
use_all_avail_gpus: bool = False,
) -> None:
self.double_q_learning = parameters.rainbow.double_q_learning
self.warm_start_model_path = parameters.training.warm_start_model_path
self.minibatch_size = parameters.training.minibatch_size
self.state_normalization_parameters = state_normalization_parameters
self.action_normalization_parameters = action_normalization_parameters
self.num_state_features = get_num_output_features(
state_normalization_parameters
)
self.num_action_features = get_num_output_features(
action_normalization_parameters
)
self.num_features = self.num_state_features + self.num_action_features
# ensure state and action IDs have no intersection
overlapping_features = set(state_normalization_parameters.keys()) & set(
action_normalization_parameters.keys()
)
assert len(overlapping_features) == 0, (
"There are some overlapping state and action features: "
+ str(overlapping_features)
)
reward_network_layers = deepcopy(parameters.training.layers)
reward_network_layers[0] = self.num_features
reward_network_layers[-1] = 1
if parameters.rainbow.dueling_architecture:
parameters.training.layers[0] = self.num_state_features
parameters.training.layers[-1] = 1
elif parameters.training.factorization_parameters is None:
parameters.training.layers[0] = self.num_features
parameters.training.layers[-1] = 1
else:
parameters.training.factorization_parameters.state.layers[
0
] = self.num_state_features
parameters.training.factorization_parameters.action.layers[
0
] = self.num_action_features
RLTrainer.__init__(
self,
parameters,
use_gpu,
additional_feature_types,
metrics_to_score,
gradient_handler,
)
self.q_network = self._get_model(
parameters.training, parameters.rainbow.dueling_architecture
)
self.q_network_target = deepcopy(self.q_network)
self._set_optimizer(parameters.training.optimizer)
self.q_network_optimizer = self.optimizer_func(
self.q_network.parameters(),
lr=parameters.training.learning_rate,
weight_decay=parameters.training.l2_decay,
)
self.reward_network = FullyConnectedNetwork(
reward_network_layers, parameters.training.activations
)
self.reward_network_optimizer = self.optimizer_func(
self.reward_network.parameters(), lr=parameters.training.learning_rate
)
if self.use_gpu:
self.q_network.cuda()
self.q_network_target.cuda()
self.reward_network.cuda()
if use_all_avail_gpus:
self.q_network = torch.nn.DataParallel(self.q_network)
self.q_network_target = torch.nn.DataParallel(self.q_network_target)
self.reward_network = torch.nn.DataParallel(self.reward_network)
def _get_model(self, training_parameters, dueling_architecture=False):
if dueling_architecture:
return DuelingQNetwork(
training_parameters.layers,
training_parameters.activations,
action_dim=self.num_action_features,
)
elif training_parameters.factorization_parameters is None:
return FullyConnectedNetwork(
training_parameters.layers,
training_parameters.activations,
use_noisy_linear_layers=training_parameters.use_noisy_linear_layers,
)
else:
return ParametricInnerProduct(
FullyConnectedNetwork(
training_parameters.factorization_parameters.state.layers,
training_parameters.factorization_parameters.state.activations,
),
FullyConnectedNetwork(
training_parameters.factorization_parameters.action.layers,
training_parameters.factorization_parameters.action.activations,
),
self.num_state_features,
self.num_action_features,
)
def get_detached_q_values(
self, state_action_pairs
) -> Tuple[torch.Tensor, torch.Tensor]:
""" Gets the q values from the model and target networks """
with torch.no_grad():
q_values = self.q_network(state_action_pairs)
q_values_target = self.q_network_target(state_action_pairs)
return q_values, q_values_target
def train(self, training_samples: TrainingDataPage) -> None:
if self.minibatch == 0:
# Assume that the tensors are the right shape after the first minibatch
assert (
training_samples.states.shape[0] == self.minibatch_size
), "Invalid shape: " + str(training_samples.states.shape)
assert (
training_samples.next_states.shape == training_samples.states.shape
), "Invalid shape: " + str(training_samples.next_states.shape)
assert (
training_samples.not_terminal.shape == training_samples.rewards.shape
), "Invalid shape: " + str(training_samples.not_terminal.shape)
assert (
training_samples.actions.shape[0] == self.minibatch_size
), "Invalid shape: " + str(training_samples.actions.shape)
assert (
training_samples.possible_next_actions_mask.shape[0]
== self.minibatch_size
), "Invalid shape: " + str(
training_samples.possible_next_actions_mask.shape
)
assert (
training_samples.possible_next_actions_mask.shape
== training_samples.actions.shape
), (
"Invalid shape: "
+ str(training_samples.possible_next_actions_mask.shape)
+ " != "
+ str(training_samples.actions.shape)
)
assert (
training_samples.possible_next_actions_state_concat.shape[0]
== training_samples.possible_next_actions_mask.shape[0]
* training_samples.possible_next_actions_mask.shape[1]
), (
"Invalid shape: "
+ str(training_samples.possible_next_actions_state_concat.shape)
+ " != "
+ str(training_samples.possible_next_actions_mask.shape)
)
assert (
training_samples.possible_next_actions_state_concat.shape[0]
== training_samples.next_actions.shape[0]
* training_samples.next_actions.shape[1]
), (
"Invalid shape: "
+ str(training_samples.possible_next_actions_state_concat.shape)
+ " != "
+ str(training_samples.next_actions.shape)
)
self.minibatch += 1
states = training_samples.states.detach().requires_grad_(True)
actions = training_samples.actions
state_action_pairs = torch.cat((states, actions), dim=1)
rewards = training_samples.rewards
discount_tensor = torch.full(
training_samples.time_diffs.shape, self.gamma
).type(self.dtype)
not_done_mask = training_samples.not_terminal
if self.use_seq_num_diff_as_time_diff:
discount_tensor = discount_tensor.pow(training_samples.time_diffs)
if self.maxq_learning:
all_next_q_values, all_next_q_values_target = self.get_detached_q_values(
training_samples.possible_next_actions_state_concat
)
# Compute max a' Q(s', a') over all possible actions using target network
next_q_values, _ = self.get_max_q_values(
all_next_q_values,
all_next_q_values_target,
training_samples.possible_next_actions_mask,
)
else:
# SARSA
next_q_values, _ = self.get_detached_q_values(
torch.cat(
(training_samples.next_states, training_samples.next_actions), dim=1
)
)
assert next_q_values.shape == not_done_mask.shape, (
"Invalid shapes: "
+ str(next_q_values.shape)
+ " != "
+ str(not_done_mask.shape)
)
filtered_max_q_vals = next_q_values * not_done_mask
if self.minibatch < self.reward_burnin:
target_q_values = rewards
else:
assert discount_tensor.shape == filtered_max_q_vals.shape, (
"Invalid shapes: "
+ str(discount_tensor.shape)
+ " != "
+ str(filtered_max_q_vals.shape)
)
target_q_values = rewards + (discount_tensor * filtered_max_q_vals)
# Get Q-value of action taken
q_values = self.q_network(state_action_pairs)
all_action_scores = q_values.detach()
self.model_values_on_logged_actions = q_values.detach()
value_loss = self.q_network_loss(q_values, target_q_values)
self.loss = value_loss.detach()
self.q_network_optimizer.zero_grad()
value_loss.backward()
if self.gradient_handler:
self.gradient_handler(self.q_network.parameters())
self.q_network_optimizer.step()
if self.minibatch < self.reward_burnin:
# Reward burnin: force target network
self._soft_update(self.q_network, self.q_network_target, 1.0)
else:
# Use the soft update rule to update target network
self._soft_update(self.q_network, self.q_network_target, self.tau)
# get reward estimates
reward_estimates = self.reward_network(state_action_pairs)
reward_loss = F.mse_loss(reward_estimates, rewards)
self.reward_network_optimizer.zero_grad()
reward_loss.backward()
self.reward_network_optimizer.step()
self.loss_reporter.report(
td_loss=self.loss,
reward_loss=reward_loss,
model_values_on_logged_actions=all_action_scores,
)
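    # Worked example of the TD target computed in train() above (numbers are
    # illustrative only): with reward r = 1.0, gamma = 0.9 and a masked
    # max_a' Q_target(s', a') = 2.0 for a non-terminal step, the target is
    # r + gamma * max_q = 1.0 + 0.9 * 2.0 = 2.8; for a terminal step the
    # not_terminal mask zeroes the bootstrap term and the target reduces to r.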
def predictor(self) -> ParametricDQNPredictor:
"""Builds a ParametricDQNPredictor."""
return ParametricDQNPredictor.export(
self,
self.state_normalization_parameters,
self.action_normalization_parameters,
self._additional_feature_types.int_features,
self.use_gpu,
)
def export(self) -> ParametricDQNPredictor:
return self.predictor()
| [
"[email protected]"
]
| |
df4213385952a300718dc7d4f472408281b8134b | 525dc175d55c2f5f33f87df6915f3633537da17c | /oas_dev/util/plot/Nd_plot.py | 06a934191347906689c4cfcd78d717675e010ea2 | [
"CC0-1.0"
]
| permissive | sarambl/OAS-DEV | 1b4c020ff862075034536ea38f30a131968791fb | 8dec6d29ef23dee8135bc937cd6ee1ef5b64d304 | refs/heads/master | 2023-04-09T07:59:31.051158 | 2021-10-26T12:20:04 | 2021-10-26T12:20:04 | 310,578,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,605 | py | #import sensitivity_scripts
import oas_dev.util.plot.make_break_axis as mba
from matplotlib import colors, pyplot
from oas_dev.util.plot import plot_settings
#to_make_figures_paper.make_break_axis as mba
from oas_dev.util import practical_functions
import seaborn as sns
#import analysis_tools.Nd_pkg
#from oas_dev.util.Nd import Nd_pkg
from oas_dev.util.plot.plot_settings import set_equal_axis
import matplotlib.pyplot as plt
import numpy as np
sns.reset_orig()
def plot_Nd_bars_in_ax_CTRL(ax, model, pl_pd, cmap, header=None):
"""
Plots Absolute of CTRL column
:param ax:
:param model:
:param pl_pd:
:param cmap:
:return:
"""
if header is None:
header=model
kwargs = {'fontsize': 14, 'color': cmap, 'edgecolor': 'k', 'grid': {'b': True, 'axis': 'y'}}
pl_pd['CTRL'].transpose().plot.bar(ax=ax, title='%s: CTRL' % header, width=1, **kwargs) # , color=cmap,
    ax.set_title('%s: CTRL' % header, fontsize=14)
ax.xaxis.grid(False)
def plot_Nd_bars_in_ax_DIFF(ax, model, pl_pd, relative, cmap, header=None):
"""
Plots difference to CTRL column either relative or absolute
:param ax:
:param model:
:param pl_pd:
:param relative: if relative, plots relative difference to CTRL
:param cmap:
:return:
"""
if header is None:
header=model
kwargs = {'fontsize': 14, 'color': cmap, 'edgecolor': 'k', 'grid': {'b': True, 'axis': 'y'}}
#ax.set_title('%s: CTRL' % model, fontsize=14)
if relative:
plt_diff = pl_pd.drop('CTRL', axis=1).sub(pl_pd['CTRL'], axis=0).div(np.abs(pl_pd['CTRL']), axis=0) * 100.
else:
plt_diff = pl_pd.drop('CTRL', axis=1).sub(pl_pd['CTRL'], axis=0) # .div(pl_pd['CTRL'])
if relative:
kwargs['title'] = '%s: relative difference' % header
kwargs['width'] = 0.85
kwargs['legend'] = False
kwargs['ax'] = ax
else:
kwargs['title'] = '%s: difference' % header
kwargs['width'] = 0.9
kwargs['legend'] = False
kwargs['ax'] = ax
kwargs['ax'] = ax
plt_diff.transpose().plot.bar(**kwargs)
ax.set_title(kwargs['title'], fontsize=14)
ax.xaxis.grid(False)
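# Illustrative sketch (not called anywhere): the CTRL-relative transform used by
# plot_Nd_bars_in_ax_DIFF, applied to a toy DataFrame. The case names, size-range
# labels and values below are made up for the example.
def _relative_difference_example():
    import pandas as pd
    pl_pd = pd.DataFrame({'CTRL': [100.0, 50.0], 'CASE1': [110.0, 45.0]},
                         index=['N$_{50-100}$', 'N$_{100-200}$'])
    # Each case becomes its percentage deviation from CTRL: here +10% and -10%.
    return pl_pd.drop('CTRL', axis=1).sub(pl_pd['CTRL'], axis=0).div(np.abs(pl_pd['CTRL']), axis=0) * 100.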
def plot_Nd_bars_all_models_break_ECHAM(nested_pd_model, models, area, N_vars,
relative=True, sharey_for_diffrel=False,
sharey_for_ctrl=False, fn_bs='',
plt_path='plots/bars_non_stacked/',
cmap = 'colorblind',
format = 'png'):
"""
Break axis for ECHAM
:param nested_pd_model:
:param models:
:param area:
:param N_vars:
:param relative:
:param sharey_for_diffrel:
:param sharey_for_ctrl:
    :param fn_bs:
:param plt_path:
:param cmap:
:return:
"""
plt_path = plt_path + area + '/'
practical_functions.make_folders(plt_path)
filen_base = plt_path + fn_bs
cmap = sns.color_palette(cmap, len(N_vars))
# If relative, break only ctrl:
if relative:
fig, gs, axs = mba.make_my_grid_subplots([1, 5], [4, 4, 1, 3])
else:
fig, gs, axs = mba.make_my_grid_subplots([3, 8], [4, 4, 1, 3])
for ax in axs.flatten():
ax.grid(True, axis='x')
ii = 0
filen = filen_base # plt_path+'bars_Nd_ctr_diff'
for model in models + ['ECHAM']:
pl_pd = nested_pd_model[model] # .drop(['N$_{d<20}$'])#, axis=1)#, axis=0)#.transpose()
pl_pd.index.name = None
plot_Nd_bars_in_ax_CTRL(axs[ii,0], model, pl_pd, cmap)
plot_Nd_bars_in_ax_DIFF(axs[ii,1], model, pl_pd, relative, cmap)
if relative:
axs[ii, 0].set_ylabel('#/cm$^3$', fontsize=14)
axs[ii, 1].set_ylabel('%', fontsize=14)
else:
axs[ii, 0].set_ylabel('#/cm$^3$', fontsize=14)
axs[ii, 1].set_ylabel('#/cm$^3$', fontsize=14)
filen = filen + '_' + model
if ii < len(models): # remove tick labels
axs[ii, 1].get_xaxis().set_ticklabels([])
axs[ii, 0].get_xaxis().set_ticklabels([])
ii += 1
if relative:
model = 'ECHAM'
ax = fig.add_subplot(gs[8:, 1:])
pl_pd = nested_pd_model[model]
pl_pd.index.name = None
plot_Nd_bars_in_ax_DIFF(ax, model, pl_pd, relative, cmap)
ax.set_ylabel('%', fontsize=14)
fig.delaxes(axs[2, 1])
fig.delaxes(axs[3, 1])
plot_settings.insert_abc_where(ax, 14, 2 * 2 + 1, ratioxy=1.2)
axs[2, 0].set_ylabel('', visible=False)
axs[3, 0].set_ylabel(' #/cm$^3$', fontsize=14) # , visible=False)
if not relative:
axs[2, 1].set_ylabel('', visible=False)
axs[3, 1].set_ylabel(' #/cm$^3$', fontsize=14) # , visible=False)
for ax in axs[3, :]:
ax.title.set_visible(False)
if sharey_for_diffrel:
set_equal_axis(axs[:, 1], which='y')
if sharey_for_ctrl:
set_equal_axis(axs[:, 0], which='y')
for ii in np.arange(len(models)-1):
plot_settings.insert_abc_where(axs[ii, 0], 14, ii * 2,ratioxy=.8 )
plot_settings.insert_abc_where( axs[ii, 1], 14, ii * 2 + 1, ratioxy=1.2)
plot_settings.insert_abc_where(axs[2, 0], 14, 2 * 2, ratioxy=.8)
if not relative:
plot_settings.insert_abc_where( axs[2, 1], 14, 2 * 2 + 1, ratioxy=3.)
if relative:
filen = filen + '_rel.%s'%format
else:
filen = filen + '.%s'%format
print(filen)
if relative:
mba.broken_axis(axs[2:, 0], [1000, 5500])
else:
mba.broken_axis(axs[2:, 0], [1000, 5500])
mba.broken_axis(axs[2:, 1], [21, 25])
gs.tight_layout(fig, pad=0.3)
print('Saving file to: %s' % filen)
plt.savefig(filen, dpi=300)
plt.show()
def plot_Nd_bars_all_models(nested_pd_model, models, area, N_vars, relative=True, sharey_for_diffrel=False,
sharey_for_ctrl=False, without_be20=True, plt_path='plots/bars_non_stacked/',
cmap='colorblind', format='png'):
"""
:param nested_pd_model:
:param models:
:param area:
:param N_vars:
:param relative:
:param sharey_for_diffrel:
:param sharey_for_ctrl:
:param without_be20:
:param plt_path:
:param cmap:
:return:
"""
plt_path=plt_path +area+'/'
practical_functions.make_folders(plt_path)
filen_base=plt_path+'bars_Nd_ctr_diff'
cmap = sns.color_palette(cmap, len(N_vars))
fig, axs = plt.subplots(len(models),2, figsize=[12,11],gridspec_kw = {'width_ratios':[1, 5]})
ii=0
filen=filen_base #plt_path+'bars_Nd_ctr_diff'
if without_be20:
filen=filen+'no_sub20'
for model in models:
if without_be20 and ('N$_{d<20}$' in nested_pd_model[model].index):
pl_pd= nested_pd_model[model].drop(['N$_{d<20}$'])#, axis=1)#, axis=0)#.transpose()
else:
pl_pd = nested_pd_model[model]#.drop(['N$_{d<20}$'])#, axis=1)#, axis=0)#.transpose()
pl_pd.index.name = None
plot_Nd_bars_in_ax_CTRL(axs[ii,0], model, pl_pd, cmap)
plot_Nd_bars_in_ax_DIFF(axs[ii,1], model, pl_pd, relative, cmap)
if relative:
axs[ii,0].set_ylabel('#/cm$^3$', fontsize=14)
axs[ii,1].set_ylabel('%', fontsize=14)
else:
axs[ii,0].set_ylabel('#/cm$^3$', fontsize=14)
filen = filen+'_'+model
if ii<len(models)-1:
axs[ii,1].get_xaxis().set_ticklabels([])
axs[ii,0].get_xaxis().set_ticklabels([])
ii+=1
if sharey_for_diffrel:
set_equal_axis(axs[:, 1], which='y')
if sharey_for_ctrl:
set_equal_axis(axs[:, 0], which='y')
for ii in np.arange(len(models)):
plot_settings.insert_abc_where(axs[ii, 0], 14, ii * 2,ratioxy=.8 )
plot_settings.insert_abc_where( axs[ii, 1], 14, ii * 2 + 1, ratioxy=1.2)
#sensitivity_scripts.plot_settings.insert_abc(axs[ii,0],14,ii*2)
#sensitivity_scripts.plot_settings.insert_abc(axs[ii,1],14,ii*2+1)
if relative:
filen = filen+'_rel.%s'%format
else:
filen = filen+'.%s'%format
print(filen)
plt.tight_layout(pad=2.)
plt.savefig(filen, dpi=300)
plt.show()
def plot_Nd_bars_n_areas(nested_pd_areas, model, areas, N_vars, cases, areas_label, relative=True, sharey_for_diffrel=False,
sharey_for_ctrl=False, without_be20=True, plt_path='plots/bars_non_stacked/',
cmap='colorblind', fn_base='bars_Nd_areas', format='png'):
"""
    :param nested_pd_areas:
    :param model:
    :param areas:
:param N_vars:
:param relative:
:param sharey_for_diffrel:
:param sharey_for_ctrl:
:param without_be20:
:param plt_path:
:param cmap:
:return:
"""
areas_str= '_'.join(areas)
plt_path = plt_path+'m_areas/'
practical_functions.make_folders(plt_path)
filen_base = plt_path + fn_base + areas_str
cmap = sns.color_palette(cmap, len(N_vars))
if len(cases)==3:#Yields_only:
fig, axs = plt.subplots(2,2, figsize=[8,4*len(areas)],gridspec_kw = {'width_ratios':[1, 2]})
else:
fig, axs = plt.subplots(2,2, figsize=[11,5*len(areas)],gridspec_kw = {'width_ratios':[1, 4]})
#fig, axs = plt.subplots(len(areas), 2, figsize=[12, 11], gridspec_kw={'width_ratios': [1, 5]})
ii = 0
filen = filen_base # plt_path+'bars_Nd_ctr_diff'
if without_be20:
filen = filen + 'no_sub20'
for area, ii in zip(areas,range(0, len(areas))):
nested_pd_model = nested_pd_areas[area][model][cases]
if without_be20 and ('N$_{d<20}$' in nested_pd_model.index):
pl_pd = nested_pd_model.drop(['N$_{d<20}$']) # , axis=1)#, axis=0)#.transpose()
else:
pl_pd = nested_pd_model # .drop(['N$_{d<20}$'])#, axis=1)#, axis=0)#.transpose()
pl_pd.index.name = None
plot_Nd_bars_in_ax_CTRL(axs[ii, 0], model, pl_pd, cmap, header=areas_label[area])
plot_Nd_bars_in_ax_DIFF(axs[ii, 1], model, pl_pd, relative, cmap, header=areas_label[area])
if relative:
axs[ii, 0].set_ylabel('#/cm$^3$', fontsize=14)
axs[ii, 1].set_ylabel('%', fontsize=14)
else:
axs[ii, 0].set_ylabel('#/cm$^3$', fontsize=14)
axs[ii, 1].set_ylabel('#/cm$^3$', fontsize=14)
filen = filen + '_' + model
if ii < len(areas) - 1:
axs[ii, 1].get_xaxis().set_ticklabels([])
axs[ii, 0].get_xaxis().set_ticklabels([])
ii += 1
if sharey_for_diffrel:
set_equal_axis(axs[:, 1], which='y')
if sharey_for_ctrl:
set_equal_axis(axs[:, 0], which='y')
for ii in np.arange(len(areas)):
plot_settings.insert_abc_where(axs[ii, 0], 14, ii * 2, ratioxy=.8)
plot_settings.insert_abc_where(axs[ii, 1], 14, ii * 2 + 1, ratioxy=1.1)
# sensitivity_scripts.plot_settings.insert_abc(axs[ii,0],14,ii*2)
# sensitivity_scripts.plot_settings.insert_abc(axs[ii,1],14,ii*2+1)
if relative:
filen = filen + '_rel.%s'%format
else:
filen = filen + '.%s'%format
print(filen)
plt.tight_layout(pad=2.)
plt.savefig(filen, dpi=300)
plt.show()
def plot_sizedist_time(ds, ss_start_t, ss_end_t,
location=None,
var=None,
ax=None,
figsize=[5,10],
vmin=None, vmax=None,
norm_fun=colors.LogNorm,
**plt_kwargs):
if ax is None:
fig, ax = plt.subplots(1, figsize=figsize)
def_kwargs={'ylim':[3,1e3], 'yscale':'log', 'ax':ax}
for key in def_kwargs.keys():
if key not in plt_kwargs.keys():
plt_kwargs[key]=def_kwargs[key]
if location is not None:
ds = ds.sel(location=location)
if 'dNdlogD_sec' in ds:
ds['dNdlogD'] = ds['dNdlogD_sec'] + ds['dNdlogD_mod']
else:
ds['dNdlogD']= ds['dNdlogD_mod']
ds['dNdlogD'].attrs = ds['dNdlogD_mod'].attrs
ds['dNdlogD'].attrs['long_name'] = 'dNdlogD'
if var is None:
var = 'dNdlogD'
if 'norm' not in plt_kwargs:
plt_kwargs['norm']=norm_fun(vmin=vmin, vmax=vmax)
da =ds[var].mean('lev', keep_attrs=True)# ds[var]#.sel(time=slice(ss_start_t,ss_end_t))
#return da
da.plot(x='time',**plt_kwargs)
if 'case_name' in da.attrs:
tit = da.attrs['case_name']+', '+ location
else:
tit = location
ax.set_title(tit) | [
"[email protected]"
]
| |
8b6353a0c90e0e69cd17cfbd061e0b152fd0363a | 786027545626c24486753351d6e19093b261cd7d | /ghidra9.2.1_pyi/ghidra/app/util/recognizer/DmgRecognizer.pyi | 38a1f498d2fec2009f3d8d943ac5a215070df3d4 | [
"MIT"
]
| permissive | kohnakagawa/ghidra_scripts | 51cede1874ef2b1fed901b802316449b4bf25661 | 5afed1234a7266c0624ec445133280993077c376 | refs/heads/main | 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 797 | pyi | from typing import List
import ghidra.app.util.recognizer
import java.lang
class DmgRecognizer(object, ghidra.app.util.recognizer.Recognizer):
def __init__(self): ...
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getPriority(self) -> int: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def numberOfBytesRequired(self) -> int: ...
def recognize(self, __a0: List[int]) -> unicode: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def priority(self) -> int: ...
| [
"[email protected]"
]
| |
b401270ebad54df08a47715a824853f516267bdc | a3746020cf091f433beb41bde1b62818b4de569b | /past/rule_analysis/rule/text/check_using_revoke.py | b8b835f7f2fc5a709f3ae4af5bdc769296baefa9 | []
| no_license | kk71/sqlaudit | 59bab5765a67f56f1dd2f3103812051c5acbbc49 | 747aaa02573a9c2b46a9e14415d27c0ab8e6158c | refs/heads/master | 2023-02-04T18:38:46.125746 | 2020-06-05T09:49:46 | 2020-06-05T09:49:46 | 323,559,338 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | # Author: kk.Fang([email protected])
import re
from .utils import judge_if_ddl
def execute_rule(sql, db_model=None, **kwargs):
if not judge_if_ddl(sql):
return False
    if re.search(r'revoke\s+', sql, re.I):
return True
return False
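# Illustrative sketch (made-up SQL): how a caller might exercise this rule.
# Whether the REVOKE statement counts as DDL is decided by judge_if_ddl.
def _example_rule_check():
    return (
        execute_rule('revoke select on tab1 from user1'),  # True when judge_if_ddl accepts it
        execute_rule('select * from tab1'),                # False: not DDL and no REVOKE
    )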
| [
"[email protected]"
]
| |
d8334689fb752ac8c03db533028e606d863cb0fe | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_add_segment_bind_response_wrapper.py | 2ac6343754d5d8b7139947919a07f681b7905ae0 | [
"Apache-2.0"
]
| permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 1,100 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.advancedsegmentbind.model.add_segment_bind_response_wrapper_body import AddSegmentBindResponseWrapperBody
from baiduads.common.model.api_response_header import ApiResponseHeader
globals()['AddSegmentBindResponseWrapperBody'] = AddSegmentBindResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
from baiduads.advancedsegmentbind.model.add_segment_bind_response_wrapper import AddSegmentBindResponseWrapper
class TestAddSegmentBindResponseWrapper(unittest.TestCase):
"""AddSegmentBindResponseWrapper unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAddSegmentBindResponseWrapper(self):
"""Test AddSegmentBindResponseWrapper"""
# FIXME: construct object with mandatory attributes with example values
# model = AddSegmentBindResponseWrapper() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
13f1ee4a2434f217b952e800c62824c3622ae5c8 | c3d4f7e811b39de9d6f8fa8b013ecd1c13ed46b1 | /2018-01/01_Jan/19/_08_bases_.py | e3d2e736ed0920f3ea0979e26cc97440af0999d2 | [
"Apache-2.0"
]
| permissive | z727354123/pyCharmTest | b58ebb78c3b51633ed6894009565ec84c8441509 | 577aad45c5bf7bef055db0788b9f480529a04186 | refs/heads/master | 2023-03-10T23:59:20.098707 | 2023-03-01T05:12:48 | 2023-03-01T05:12:48 | 99,561,307 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | # _*_ encoding=utf-8 _*__*_ encoding=utf-8 _*_
# 元组
class A:
_name = "A"
    @classmethod
    def printName(cls):
        print(cls._name)
class B:
_name = "B"
class StartA(A, B):
pass
class StartB(B, A):
pass
print(StartA._name)
print(StartB._name)
StartA.printName()
StartB.printName()
print(A.__dict__)
class B:
_name = "B"
class BSon(B):
pass
print(B.__dict__)
print(BSon.__dict__)
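# Illustrative addition: the attribute lookups above follow each class's MRO
# (method resolution order), which can be inspected directly.
print(StartA.__mro__)  # StartA -> A -> B -> object, so StartA._name is "A"
print(StartB.__mro__)  # StartB -> B -> A -> object, so StartB._name is "B"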
print(BSon.__weakref__ is B.__weakref__) | [
"[email protected]"
]
| |
aff8d834ffd907d4713b0f8caee9d5a834be77ab | a1c8731a8527872042bd46340d8d3e6d47596732 | /programming-laboratory-I/2r9q/jasei.py | 7aa512c05333853cbb941be1c35fa9fe5bede241 | [
"MIT"
]
| permissive | MisaelAugusto/computer-science | bbf98195b0ee954a7ffaf58e78f4a47b15069314 | d21335a2dc824b54ffe828370f0e6717fd0c7c27 | refs/heads/master | 2022-12-04T08:21:16.052628 | 2020-08-31T13:00:04 | 2020-08-31T13:00:04 | 287,621,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # coding: utf-8
# Student: Misael Augusto
# Student ID: 117110525
# Problem: I already know how to play this song
def sei_tocar_musica(musica, acordes):
n = 0
for i in range(len(musica)):
for j in range(len(acordes)):
if musica[i] == acordes[j]:
n += 1
break
if n == len(musica):
return True
else:
return False
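# Illustrative check (made-up chords): a song is playable only when every one of
# its chords belongs to the known set.
print(sei_tocar_musica(["C", "G"], ["C", "D", "E", "G"]))  # True
print(sei_tocar_musica(["C", "B"], ["C", "D", "E", "G"]))  # False: "B" is unknown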
| [
"[email protected]"
]
| |
3ae06584c472949daf17c71997368ef6a6d112a0 | 59ac1d0f09ebfb527701031f3ab2cfbfb8055f51 | /soapsales/basedata/serializers.py | 502074108cd066ca5c0ee996f46d869ec1458bb7 | []
| no_license | DUMBALINYOLO/erpmanu | d4eb61b66cfa3704bd514b58580bdfec5639e3b0 | db979bafcc7481f60af467d1f48d0a81bbbfc1aa | refs/heads/master | 2023-04-28T13:07:45.593051 | 2021-05-12T09:30:23 | 2021-05-12T09:30:23 | 288,446,097 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from rest_framework import serializers
from .models import (
Note,
Organization,
UnitOfMeasure
)
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = "__all__"
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = "__all__"
class UnitOfMeasureSerializer(serializers.ModelSerializer):
class Meta:
model = UnitOfMeasure
fields = ['symbol', 'verbose_name', 'scale_factor', 'unit_type']
class UnitOfMeasureSerializer(serializers.ModelSerializer):
class Meta:
model = UnitOfMeasure
fields = [
'symbol',
'verbose_name',
'scale_factor',
'unit_type'
]
class UnitOfMeasureListSerializer(serializers.ModelSerializer):
unit_type = serializers.SerializerMethodField()
class Meta:
model = UnitOfMeasure
fields = [
'symbol',
'verbose_name',
'scale_factor',
'unit_type'
]
def get_unit_type(self, obj):
return obj.get_unit_type_display()
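# Illustrative usage sketch (the queryset is an assumption): serializing with the
# list serializer resolves unit_type to its display label via get_unit_type above.
def serialize_units(queryset):
    return UnitOfMeasureListSerializer(queryset, many=True).data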
| [
"[email protected]"
]
| |
19b006dc67b046dbbd412173f9ecb217d47c117f | 3c797162b544aba5122c8eb85dddd3089f462065 | /vgg.py | 44c590d966b6ad101198d73f5ae3b7309651c99e | [
"Apache-2.0"
]
| permissive | MorvanZhou/Computer-Vision | cf5f6dfbc0dd534172f67d812874c72b8fccb75e | f0cb97c099ed4ec363c72ee8aae8c93315bef276 | refs/heads/main | 2023-02-06T12:05:29.553579 | 2020-12-24T12:01:09 | 2020-12-24T12:01:09 | 324,119,891 | 32 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,862 | py | # [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
# dependency file: https://github.com/MorvanZhou/Computer-Vision/requirements.txt
from tensorflow import keras
from tensorflow.keras import layers
from utils import load_mnist, save_model_structure, save_model_weights
# get data
(x_train, y_train), (x_test, y_test) = load_mnist()
# define model
# like LeNet with more layers and activations
model = keras.Sequential([
layers.Conv2D(filters=8, kernel_size=3, strides=1, padding="same", input_shape=(28, 28, 1)), # [n, 28, 28, 8]
layers.Conv2D(filters=8, kernel_size=3, strides=1, padding="same", input_shape=(28, 28, 1)), # [n, 28, 28, 8]
layers.ReLU(),
layers.MaxPool2D(pool_size=2, strides=2), # [n, 14, 14, 8]
layers.Conv2D(16, 3, 1, "same"), # [n, 14, 14, 16]
layers.Conv2D(16, 3, 1, "same"), # [n, 14, 14, 16]
layers.ReLU(),
layers.MaxPool2D(2, 2), # [n, 7, 7, 16]
layers.Flatten(), # [n, 7*7*16]
layers.Dense(32), # [n, 32]
layers.ReLU(),
    layers.Dense(10)  # [n, 10]
], name="VGG")
# show model
model.summary()
save_model_structure(model)
# define loss and optimizer
loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
opt = keras.optimizers.Adam(0.001)
accuracy = keras.metrics.SparseCategoricalAccuracy()
model.compile(optimizer=opt, loss=loss, metrics=[accuracy])
# training and validation
model.fit(x=x_train, y=y_train, batch_size=32, epochs=3, validation_data=(x_test, y_test))
# save model
save_model_weights(model)
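# Illustrative follow-up (assumes x_test keeps the [n, 28, 28, 1] layout loaded above):
# turn the trained model's logits into class predictions for a few test digits.
pred_logits = model.predict(x_test[:5])
print("predicted:", pred_logits.argmax(axis=1), "true:", y_test[:5])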
| [
"[email protected]"
]
| |
b838de11fe10bfe0911408ce552355f4cee5e046 | 8b04ede84df2f20b3151db4ecdeee60ce9d33765 | /yapypy/utils/yapypy_tokenize37.py | 8d36a2b4550e9ab9d0d5e21a636cd0d0a99fdcfe | [
"MIT"
]
| permissive | nikita215/YAPyPy | 6dafac5d8340df6b1539c34bdd9bd592b6037d69 | bd432994126f2d9c8f13cd2d9d7b36a0a524c43b | refs/heads/master | 2020-06-08T15:38:18.517825 | 2018-10-29T01:16:45 | 2018-10-29T01:16:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,553 | py | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <[email protected]>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
from builtins import open as _builtin_open
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
from itertools import chain
import itertools as _itertools
import re
import sys
from token import *
cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)', re.ASCII)
blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
import token
__all__ = token.__all__ + ["tokenize", "detect_encoding",
"untokenize", "TokenInfo"]
del token
COLONEQUAL = N_TOKENS
N_TOKENS += 1
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'...': ELLIPSIS,
'->': RARROW,
'@': AT,
'@=': ATEQUAL,
':=': COLONEQUAL
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
def __repr__(self):
annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
self._replace(type=annotated_type))
@property
def exact_type(self):
if self.type == OP and self.string in EXACT_TOKEN_TYPES:
return EXACT_TOKEN_TYPES[self.string]
else:
return self.type
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permuations (include 'fr', but not
# 'rf'). The various permutations will be generated.
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&@|^=<>:]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
endpats[_prefix + "'"] = Single
endpats[_prefix + '"'] = Double
endpats[_prefix + "'''"] = Single3
endpats[_prefix + '"""'] = Double3
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
for u in (t + '"', t + "'"):
single_quoted.add(u)
for u in (t + '"""', t + "'''"):
triple_quoted.add(u)
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
class Untokenizer:
def __init__(self):
self.tokens = []
self.prev_row = 1
self.prev_col = 0
self.encoding = None
def add_whitespace(self, start):
row, col = start
if row < self.prev_row or row == self.prev_row and col < self.prev_col:
raise ValueError("start ({},{}) precedes previous end ({},{})"
.format(row, col, self.prev_row, self.prev_col))
row_offset = row - self.prev_row
if row_offset:
self.tokens.append("\\\n" * row_offset)
self.prev_col = 0
col_offset = col - self.prev_col
if col_offset:
self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
it = iter(iterable)
indents = []
startline = False
for t in it:
if len(t) == 2:
self.compat(t, it)
break
tok_type, token, start, end, line = t
if tok_type == ENCODING:
self.encoding = token
continue
if tok_type == ENDMARKER:
break
if tok_type == INDENT:
indents.append(token)
continue
elif tok_type == DEDENT:
indents.pop()
self.prev_row, self.prev_col = end
continue
elif tok_type in (NEWLINE, NL):
startline = True
elif startline and indents:
indent = indents[-1]
if start[1] >= len(indent):
self.tokens.append(indent)
self.prev_col = len(indent)
startline = False
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
if tok_type in (NEWLINE, NL):
self.prev_row += 1
self.prev_col = 0
return "".join(self.tokens)
def compat(self, token, iterable):
indents = []
toks_append = self.tokens.append
startline = token[0] in (NEWLINE, NL)
prevstring = False
for tok in chain([token], iterable):
toknum, tokval = tok[:2]
if toknum == ENCODING:
self.encoding = tokval
continue
if toknum in (NAME, NUMBER):
tokval += ' '
# Insert a space between two consecutive strings
if toknum == STRING:
if prevstring:
tokval = ' ' + tokval
prevstring = True
else:
prevstring = False
if toknum == INDENT:
indents.append(tokval)
continue
elif toknum == DEDENT:
indents.pop()
continue
elif toknum in (NEWLINE, NL):
startline = True
elif startline and indents:
toks_append(indents[-1])
startline = False
toks_append(tokval)
def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
Round-trip invariant for limited input:
# Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2
"""
ut = Untokenizer()
out = ut.untokenize(iterable)
if ut.encoding is not None:
out = out.encode(ut.encoding)
return out
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = 'utf-8'
def read_or_stop():
try:
return readline()
except StopIteration:
return b''
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode('utf-8')
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = '{} for {!r}'.format(msg, filename)
raise SyntaxError(msg)
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = "unknown encoding for {!r}: {}".format(filename,
encoding)
raise SyntaxError(msg)
if bom_found:
if encoding != 'utf-8':
# This behaviour mimics the Python interpreter
if filename is None:
msg = 'encoding problem: utf-8'
else:
msg = 'encoding problem for {!r}: utf-8'.format(filename)
raise SyntaxError(msg)
encoding += '-sig'
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
default = 'utf-8-sig'
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return default, [first, second]
def open(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = _builtin_open(filename, 'rb')
try:
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = 'r'
return text
except:
buffer.close()
raise
def tokenize(readline):
"""
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternatively, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
# This import is here to avoid problems when the itertools module is not
# built yet and tokenize is imported.
from itertools import chain, repeat
encoding, consumed = detect_encoding(readline)
rl_gen = iter(readline, b"")
empty = repeat(b"")
return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
lnum = parenlev = continued = 0
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
if encoding is not None:
if encoding == "utf-8-sig":
# BOM will already have been stripped.
encoding = "utf-8"
yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
while True: # loop over lines in stream
try:
line = readline()
except StopIteration:
line = b''
if encoding is not None:
line = line.decode(encoding)
lnum += 1
pos, max = 0, len(line)
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield TokenInfo(ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
yield TokenInfo(COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
pos += len(comment_token)
yield TokenInfo(NL, line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = _compile(PseudoToken).match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
if start == end:
continue
token, initial = line[start:end], line[start]
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
if parenlev > 0:
yield TokenInfo(NL, token, spos, epos, line)
else:
yield TokenInfo(NEWLINE, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = _compile(endpats[token])
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
# Check up to the first 3 chars of the token to see if
# they're in the single_quoted set. If so, they start
# a string.
# We're using the first 3, because we're looking for
# "rb'" (for example) at the start of the token. If
# we switch to longer prefixes, this needs to be
# adjusted.
# Note that initial == token[:1].
# Also note that single quote checking must come after
# triple quote checking (above).
elif (initial in single_quoted or
token[:2] in single_quoted or
token[:3] in single_quoted):
if token[-1] == '\n': # continued string
strstart = (lnum, start)
# Again, using the first 3 chars of the
# token. This is looking for the matching end
# regex for the correct type of quote
# character. So it's really looking for
# endpats["'"] or endpats['"'], by trying to
# skip string prefix characters, if any.
endprog = _compile(endpats.get(initial) or
endpats.get(token[1]) or
endpats.get(token[2]))
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, epos, line)
elif initial.isidentifier(): # ordinary name
yield TokenInfo(NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos += 1
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
return _tokenize(readline, None)
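# Illustrative sketch (not part of the original module): tokenizing a one-line,
# made-up source that uses ':=' shows the COLONEQUAL exact type registered above.
def _example_walrus_tokens():
    from io import BytesIO
    toks = list(tokenize(BytesIO(b"x := 1\n").readline))
    # Pair each token's text with whether it resolved to COLONEQUAL.
    return [(t.string, t.exact_type == COLONEQUAL) for t in toks]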
def main():
import argparse
# Helper error handling routines
def perror(message):
print(message, file=sys.stderr)
def error(message, filename=None, location=None):
if location:
args = (filename,) + location + (message,)
perror("%s:%d:%d: error: %s" % args)
elif filename:
perror("%s: error: %s" % (filename, message))
else:
perror("error: %s" % message)
sys.exit(1)
# Parse the arguments and options
parser = argparse.ArgumentParser(prog='python -m tokenize')
parser.add_argument(dest='filename', nargs='?',
metavar='filename.py',
help='the file to tokenize; defaults to stdin')
parser.add_argument('-e', '--exact', dest='exact', action='store_true',
help='display token names using the exact type')
args = parser.parse_args()
try:
# Tokenize the input
if args.filename:
filename = args.filename
with _builtin_open(filename, 'rb') as f:
tokens = list(tokenize(f.readline))
else:
filename = "<stdin>"
tokens = _tokenize(sys.stdin.readline, None)
# Output the tokenization
for token in tokens:
token_type = token.type
if args.exact:
token_type = token.exact_type
token_range = "%d,%d-%d,%d:" % (token.start + token.end)
print("%-20s%-15s%-15r" %
(token_range, tok_name[token_type], token.string))
except IndentationError as err:
line, column = err.args[1][1:3]
error(err.args[0], filename, (line, column))
except TokenError as err:
line, column = err.args[1]
error(err.args[0], filename, (line, column))
except SyntaxError as err:
error(err, filename)
except OSError as err:
error(err)
except KeyboardInterrupt:
print("interrupted\n")
except Exception as err:
perror("unexpected error: %s" % err)
raise
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
8473cf96f72a7fa8bc7ff272a5dc599372da20de | 00c2e8163b2292348ac8337462e71e665039044b | /article/migrations/0004_auto_20200730_1618.py | c6b277529da7a46020789ec39abb33e7151d9d33 | []
| no_license | InjiChoi/third_crud | 6666c8ad4e0c6d40555b0a5c6a5a82fe45b54cc0 | 541f135dc1a328be35aa404ea28ef1583e5ba8f3 | refs/heads/master | 2022-11-29T06:13:14.602168 | 2020-08-13T07:28:35 | 2020-08-13T07:28:35 | 287,207,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # Generated by Django 2.2.9 on 2020-07-30 07:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('article', '0003_comment'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='article',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='article.Article'),
),
]
| [
"[email protected]"
]
| |
bea5f7c08bd226ba60f7bb07cbfa676a5538d5bf | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /EFwDXErjDywXp56WG_11.py | ccaa28a482253e024a286e75e335e487bfb65c08 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | """
Create a function that takes a string and returns `True` or `False`, depending
on whether the characters are in order or not.
### Examples
is_in_order("abc") ➞ True
is_in_order("edabit") ➞ False
is_in_order("123") ➞ True
is_in_order("xyzz") ➞ True
### Notes
You don't have to handle empty strings.
"""
def is_in_order(txt):
    x = "".join(sorted(txt))
    return x == txt
| [
"[email protected]"
]
| |
c33158dd9ad65177efc24faced5f5bd61a02f722 | cc9a87e975546e2ee2957039cceffcb795850d4f | /HelloAdam/HelloAdam20TeknikLooping/__init__.py | 1aa7fb69d6a33a47a6b599344069d22eb1132404 | []
| no_license | CodeHunterDev/Belajar-Python | 304d3243801b91b3605d2b9bd09e49a30735e51b | 9dd2ffb556eed6b2540da19c5f206fedb218ae99 | refs/heads/master | 2023-03-19T22:12:46.330272 | 2020-02-04T08:02:00 | 2020-02-04T08:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | # Copyright (c) 2020. Adam Arthur Faizal
from sys import copyright
print("====== TEKNIK LOOPING ======\n")
hewan = ["Kucing", "Kelinci", "Sapi", "Ular", "Burung"]
print("Daftar hewan :", hewan)
buah = ["Mangga", "Stroberi", "Pepaya", "Melon"]
print("Daftar buah :", buah)
# Enumerate
print("--- Enumerate ---")
for nomer, nama in enumerate(hewan):
print(nomer + 1, ":", nama)
# Zip
print("--- Zip ---")
for namahewan, namabuah in zip(hewan, buah):
print(namahewan, ":", namabuah)
# The same techniques applied to a set and a dictionary
# Set
barang = {"TV", "Lemari", "Meja", "Kursi", "Kipas Angin"}
print("Daftar barang :", barang)
for namabarang in sorted(barang):
print(namabarang)
# Dictionary
playlist = {"Linkin Park": "In The End", "Avenged Sevenfold": "So Far Away", "Maroon 5": "Payphone", "Slipknot": "Snuff", "Asking Alexandria": "Moving On"}
for band, lagu in enumerate(playlist.items()):
print(band, "-", lagu)
# Miscellaneous
print("--- Lain-lain ---")
for i in reversed(range(1, 10, 1)):
print(i)
print('\n')
print(copyright)
# by Mbah Putih Mulyosugito
| [
"[email protected]"
]
| |
5ce496de26a11bcee871ef17fd2aababae82c386 | 9440d951de2c49c068f3dd1f760a94cce156aa92 | /chemical/main.py | 63fa4d969d8832f9ee44fd227449fbd4bd03dbc7 | []
| no_license | dxcv/crusader | ce0e8ed31e0a5500f76a36a43e4431af4b8cd2bd | e0f9f0f132405250a39ad8c3db9be4df1e02a7d4 | refs/heads/master | 2020-07-04T22:40:30.623026 | 2019-06-14T03:00:16 | 2019-06-14T03:00:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,604 | py | # encoding: utf-8
import pandas as pd
import numpy as np
import chemical.const as const
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, NumeralTickFormatter
from bokeh.plotting import figure
col_df = pd.read_excel(const.LIST_FNAME)
dic = {k: v for k, v in zip(col_df[u'名称'], col_df[u'代码'])}
# Ethylene and polyethylene output
source_eth = ColumnDataSource(data=dict(date=[], eth=[], pol=[]))
def update_eth():
df = pd.read_excel(u'%s/乙烯产量.xlsx'%(const.DATA_DIR))
tdf = pd.read_excel(u'%s/聚乙烯产量.xlsx'%(const.DATA_DIR))
df = df.merge(tdf, how='outer', left_index=True, right_index=True)
df = df.fillna(method='ffill')
source_eth.data = {'date': df.index,
'eth': df[dic[u'乙烯产量']] / 100,
'pol': df[dic[u'聚乙烯产量']] / 100}
# Soda ash output
source_soda = ColumnDataSource(data=dict(date=[], prod=[]))
def update_soda():
df = pd.read_excel(u'%s/纯碱产量.xlsx'%(const.DATA_DIR))
source_soda.data = {'date': df.index, 'prod': df[dic[u'纯碱产量']] / 100}
# Urea output and exports
source_car = ColumnDataSource(data=dict(date=[], prod=[], out=[]))
def update_car():
df = pd.read_excel(u'%s/尿素产量.xlsx'%(const.DATA_DIR))
tdf = pd.read_excel(u'%s/尿素出口量.xlsx'%(const.DATA_DIR))
df = df.merge(tdf, how='outer', left_index=True, right_index=True)
df = df[df <= 200]
df = df.fillna(method='ffill')
source_car.data = {'date': df.index,
'prod': df[dic[u'尿素产量']] / 100,
'out':df[dic[u'尿素出口量']] / 100}
# Potash fertilizer output and exports
source_pot = ColumnDataSource(data=dict(date=[], prod=[], out=[]))
def update_pot():
df = pd.read_excel(u'%s/钾肥产量.xlsx'%(const.DATA_DIR))
tdf = pd.read_excel(u'%s/钾肥出口量.xlsx'%(const.DATA_DIR))
tdf = tdf.pct_change(12) * 100
df = df.merge(tdf, how='outer', left_index=True, right_index=True)
df = df[df <= 200]
df = df.fillna(method='ffill')
source_pot.data = {'date': df.index,
'prod': df[dic[u'钾肥产量']] / 100,
'out': df[dic[u'钾肥出口量']] / 100}
# Polyester output
source_ter = ColumnDataSource(data=dict(date=[], prod=[]))
def update_ter():
df = pd.read_excel(u'%s/涤纶产量.xlsx'%(const.DATA_DIR))
source_ter.data = {'date': df.index, 'prod': df[dic[u'涤纶产量']] / 100}
# Southeast Asia ethylene and LDPE prices
source_ep = ColumnDataSource(data=dict(date=[], p=[], ldpe=[]))
def update_ep():
df = pd.read_excel(u'%s/东南亚乙烯价格.xlsx'%(const.DATA_DIR))
tdf = pd.read_excel(u'%s/东南亚LDPE价格.xlsx'%(const.DATA_DIR))
df = df.merge(tdf, how='outer', left_index=True, right_index=True)
df = df.fillna(method='ffill')
df = df[df.diff() != 0].dropna()
source_ep.data = {'date': df.index,
'p': df[dic[u'东南亚乙烯价格']],
'ldpe': df[dic[u'东南亚LDPE价格']]}
# Light soda ash price
source_lsoda = ColumnDataSource(data=dict(date=[], p=[]))
def update_lsoda():
df = pd.read_excel(u'%s/轻质纯碱价格.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_lsoda.data = {'date': df.index, 'p': df[dic[u'轻质纯碱价格']]}
# Domestic urea ex-factory price
source_dcar = ColumnDataSource(data=dict(date=[], p=[]))
def update_dcar():
df = pd.read_excel(u'%s/国内尿素出厂价.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_dcar.data = {'date': df.index, 'p': df[dic[u'国内尿素出厂价']]}
# Salt Lake potash ex-factory price
source_potp = ColumnDataSource(data=dict(date=[], p=[]))
def update_potp():
df = pd.read_excel(u'%s/盐湖钾肥出厂价.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_potp.data = {'date': df.index, 'p': df[dic[u'盐湖钾肥出厂价']]}
# Potassium chloride Vancouver FOB
source_chpot = ColumnDataSource(data=dict(date=[], p=[]))
def update_chpot():
df = pd.read_excel(u'%s/氯化钾温哥华FOB.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_chpot.data = {'date': df.index, 'p': df[dic[u'氯化钾温哥华FOB']]}
# Polyester staple fiber price
source_ster = ColumnDataSource(data=dict(date=[], p=[]))
def update_ster():
df = pd.read_excel(u'%s/涤纶短纤价格.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_ster.data = {'date': df.index, 'p': df[dic[u'涤纶短纤价格']]}
# Singapore naphtha price
source_nap = ColumnDataSource(data=dict(date=[], p=[]))
def update_nap():
df = pd.read_excel(u'%s/新加坡石脑油价格.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_nap.data = {'date': df.index, 'p': df[dic[u'新加坡石脑油价格']]}
# PTA price
source_pta = ColumnDataSource(data=dict(date=[], p=[]))
def update_pta():
df = pd.read_excel(u'%s/PTA价格.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_pta.data = {'date': df.index, 'p': df[dic[u'PTA价格']]}
# MEG price
source_meg = ColumnDataSource(data=dict(date=[], p=[]))
def update_meg():
df = pd.read_excel(u'%s/MEG价格.xlsx'%(const.DATA_DIR))
df = df[df.diff() != 0].dropna()
source_meg.data = {'date': df.index, 'p': df[dic[u'MEG价格']]}
# OECD industrial production index
source_oecd = ColumnDataSource(data=dict(date=[], index=[]))
def update_oecd():
df = pd.read_excel(u'%s/OECD工业生产指数.xlsx'%(const.DATA_DIR))
source_oecd.data = {'date': df.index, 'index': df[dic[u'OECD工业生产指数']]}
# Flat glass output
source_glass = ColumnDataSource(data=dict(date=[], prod=[]))
def update_glass():
df = pd.read_excel(u'%s/平板玻璃产量.xlsx'%(const.DATA_DIR))
source_glass.data = {'date': df.index, 'prod': df[dic[u'平板玻璃产量']] / 100}
# Automobile output
source_auto = ColumnDataSource(data=dict(date=[], prod=[]))
def update_auto():
df = pd.read_excel(u'%s/汽车产量.xlsx'%(const.DATA_DIR))
source_auto.data = {'date': df.index, 'prod': df[dic[u'汽车产量']] / 100}
# Real estate new construction starts (floor space) and real estate investment growth
source_est = ColumnDataSource(data=dict(date=[], new=[], inc=[]))
def update_est():
df = pd.read_excel(u'%s/房地产新开工面积.xlsx'%(const.DATA_DIR))
tdf = pd.read_excel(u'%s/房地产投资开发增速.xlsx'%(const.DATA_DIR))
df = df.merge(tdf, how='outer', left_index=True, right_index=True)
df = df.fillna(method='ffill')
source_est.data = {'date': df.index,
'new': df[dic[u'房地产新开工面积']] / 100,
'inc': df[dic[u'房地产投资开发增速']] / 100}
# International corn futures price
source_corn = ColumnDataSource(data=dict(date=[], p=[]))
def update_corn():
df = pd.read_excel(u'%s/国际玉米期货价格.xlsx'%(const.DATA_DIR))
source_corn.data = {'date': df.index, 'p': df[dic[u'国际玉米期货价格']]}
# Textile export volume
source_spin = ColumnDataSource(data=dict(date=[], out=[]))
def update_spin():
df = pd.read_excel(u'%s/纺织品出口量.xlsx'%(const.DATA_DIR))
source_spin.data = {'date': df.index, 'out': df[dic[u'纺织品出口量']] / 100}
# Qinhuangdao port 6000 kcal Datong premium blend coal price
source_coal = ColumnDataSource(data=dict(date=[], p=[]))
def update_coal():
df = pd.read_excel(u'%s/秦皇岛港6000大卡大同优混平仓价.xlsx'%(const.DATA_DIR))
source_coal.data = {'date': df.index, 'p': df[dic[u'秦皇岛港6000大卡大同优混平仓价']]}
def update_all():
update_eth()
update_soda()
update_car()
update_pot()
update_ter()
update_ep()
update_lsoda()
update_dcar()
update_potp()
update_chpot()
update_ster()
update_nap()
update_pta()
update_meg()
update_oecd()
update_glass()
update_auto()
update_est()
update_corn()
update_spin()
update_coal()
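# Illustrative helper (not used above): every update_* function repeats the same
# read -> outer-merge -> forward-fill pattern; a generic version could look like
# this. It assumes the same '<DATA_DIR>/<name>.xlsx' file convention used
# throughout this script.
def load_series(*names):
    frames = [pd.read_excel(u'%s/%s.xlsx' % (const.DATA_DIR, name)) for name in names]
    df = frames[0]
    for tdf in frames[1:]:
        df = df.merge(tdf, how='outer', left_index=True, right_index=True)
    return df.fillna(method='ffill')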
def get_plot(title, pct=False):
tools = "pan,wheel_zoom,box_select,reset"
plot = figure(plot_height=500, plot_width=1200, tools=tools, x_axis_type='datetime')
plot.title.text_font_size = "15pt"
plot.title.text_font = "Microsoft YaHei"
plot.yaxis.minor_tick_line_color = None
plot.title.text = title
if pct:
plot.yaxis.formatter = NumeralTickFormatter(format='0.00%')
else:
plot.yaxis.formatter = NumeralTickFormatter(format='0.00')
return plot
plot_eth = get_plot(u'乙烯、聚乙烯产量同比(月)', pct=True)
plot_eth.line('date', 'eth', source=source_eth, line_width=2, legend=u'乙烯产量')
plot_eth.line('date', 'pol', source=source_eth, line_width=2, color='green', legend=u'聚乙烯产量')
plot_soda = get_plot(u'纯碱产量(月)', pct=True)
plot_soda.line('date', 'prod', source=source_soda, line_width=2, legend=u'纯碱产量')
plot_car = get_plot(u'尿素产量、出口量同比(月)', pct=True)
plot_car.line('date', 'prod', source=source_car, line_width=2, legend=u'尿素产量')
plot_car.line('date', 'out', source=source_car, line_width=2, color='green', legend=u'尿素出口量')
plot_pot = get_plot(u'钾肥产量、出口量同比(月)', pct=True)
plot_pot.line('date', 'prod', source=source_pot, line_width=2, legend=u'钾肥产量')
plot_pot.line('date', 'out', source=source_pot, line_width=2, color='green', legend=u'钾肥出口量')
plot_ter = get_plot(u'涤纶产量(季)', pct=True)
plot_ter.line('date', 'prod', source=source_ter, line_width=2, legend=u'涤纶产量')
plot_ep = get_plot(u'东南亚乙烯、LDPE价格(周)')
plot_ep.line('date', 'p', source=source_ep, line_width=2, legend=u'东南亚乙烯价格')
plot_ep.line('date', 'ldpe', source=source_ep, line_width=2, color='green', legend=u'东南亚LDPE价格')
plot_lsoda = get_plot(u'轻质纯碱价格(周)')
plot_lsoda.line('date', 'p', source=source_lsoda, line_width=2, legend=u'轻质纯碱价格')
plot_dcar = get_plot(u'国内尿素出厂价(周)')
plot_dcar.line('date', 'p', source=source_dcar, line_width=2, legend=u'国内尿素出厂价')
plot_potp = get_plot(u'盐湖钾肥出厂价(季)')
plot_potp.line('date', 'p', source=source_potp, line_width=2, legend=u'盐湖钾肥出厂价')
plot_chpot = get_plot(u'氯化钾温哥华FOB(周)')
plot_chpot.line('date', 'p', source=source_chpot, line_width=2, legend=u'氯化钾温哥华FOB')
plot_ster = get_plot(u'涤纶短纤价格(周)')
plot_ster.line('date', 'p', source=source_ster, line_width=2, legend=u'涤纶短纤价格')
plot_nap = get_plot(u'新加坡石脑油价格(日)')
plot_nap.line('date', 'p', source=source_nap, line_width=2, legend=u'新加坡石脑油价格')
plot_pta = get_plot(u'PTA价格(日)')
plot_pta.line('date', 'p', source=source_pta, line_width=2, legend=u'PTA价格')
plot_meg = get_plot(u'MEG价格(日)')
plot_meg.line('date', 'p', source=source_meg, line_width=2, legend=u'MEG价格')
plot_oecd = get_plot(u'OECD工业生产指数(月)')
plot_oecd.line('date', 'index', source=source_oecd, line_width=2, legend=u'OECD工业生产指数')
plot_glass = get_plot(u'平板玻璃产量同比(月)', pct=True)
plot_glass.line('date', 'prod', source=source_glass, line_width=2, legend=u'平板玻璃产量')
plot_auto = get_plot(u'汽车产量同比(月)', pct=True)
plot_auto.line('date', 'prod', source=source_auto, line_width=2, legend=u'汽车产量')
plot_est = get_plot(u'房地产新开工面积、房地产投资开发增速同比(月)', pct=True)
plot_est.line('date', 'new', source=source_est, line_width=2, legend=u'房地产新开工面积')
plot_est.line('date', 'inc', source=source_est, line_width=2, color='green', legend=u'房地产投资开发增速')
plot_corn = get_plot(u'国际玉米期货价格(日)')
plot_corn.line('date', 'p', source=source_corn, line_width=2, legend=u'国际玉米期货价格')
plot_spin = get_plot(u'纺织品出口量同比(月)', pct=True)
plot_spin.line('date', 'out', source=source_spin, line_width=2, legend=u'纺织品出口量')
plot_coal = get_plot(u'秦皇岛港6000大卡大同优混平仓价(周)')
plot_coal.line('date', 'p', source=source_coal, line_width=2, legend=u'秦皇岛港6000大卡大同优混平仓价')
update_all()
curdoc().add_root(column(plot_eth, plot_soda, plot_car, plot_pot, plot_ter, plot_ep, plot_lsoda, plot_dcar,
plot_potp, plot_chpot, plot_ster, plot_nap, plot_pta, plot_meg, plot_oecd,
plot_glass, plot_auto, plot_est, plot_corn, plot_spin, plot_coal))
curdoc().title = u'化工中观数据库'
| [
"[email protected]"
]
| |
bd0e36d73cf11b2003e2087c21a73cdb2c8a846a | a290925e8c3103bb84327f6f38f0b4ffd7945c1d | /electramodels/modeling_outputs.py | 7a6a05791d9cedb361b80771f7bf71421f46fdec | []
| no_license | enterpriseih/lightningHotpotQA | 6db502747b2b7a876e7f32743b839c65f851ee49 | b3a992f27a1c2b7881e6ab0c16132c20fb880f8d | refs/heads/master | 2023-08-24T05:38:32.419496 | 2021-05-27T01:09:29 | 2021-05-27T01:09:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 52,405 | py | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from electramodels.file_utils import ModelOutput
@dataclass
class BaseModelOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
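# --- Editor's illustrative sketch (not part of the original module) ---
# ModelOutput subclasses can be read like dataclasses, dicts, or tuples: fields are
# reachable by attribute, by key, or by integer index, and fields left as None are
# dropped from the tuple view. The helper below only uses torch and BaseModelOutput
# from this module; it is a minimal sketch under those assumptions, not library API.
def _example_base_model_output_access():
    hidden = torch.zeros(2, 4, 8)  # batch of 2, sequence length 4, hidden size 8
    out = BaseModelOutput(last_hidden_state=hidden)
    assert out.last_hidden_state is out["last_hidden_state"]  # attribute == key access
    assert out[0] is hidden                                    # positional access
    assert len(out.to_tuple()) == 1                            # None fields are skipped
    return out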
@dataclass
class BaseModelOutputWithPooling(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPast(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithCrossAttentions(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPoolingAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that also contains a pooling of the last hidden states.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence
prediction (classification) objective during pretraining.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
last_hidden_state: torch.FloatTensor = None
pooler_output: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseModelOutputWithPastAndCrossAttentions(ModelOutput):
"""
Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding).
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
``config.is_encoder_decoder=True`` 2 additional tensors of shape :obj:`(batch_size, num_heads,
encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
``config.is_encoder_decoder=True`` in the cross-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` and ``config.add_cross_attention=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqModelOutput(ModelOutput):
"""
    Base class for model encoder's outputs that also contains: pre-computed hidden states that can speed up sequential
decoding.
Args:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
If :obj:`past_key_values` is used only the last hidden-state of the sequences of shape :obj:`(batch_size,
1, hidden_size)` is output.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutput(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class CausalLMOutputWithPast(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
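# --- Editor's illustrative sketch (not part of the original module) ---
# A typical consumer of CausalLMOutputWithPast is an incremental (greedy) decoding
# loop: past_key_values from the previous step are fed back so only the newest token
# is re-encoded. `model` is a placeholder for any causal LM that returns this class
# and accepts `past_key_values`/`use_cache`; it is an assumption, not defined here.
def _example_greedy_decode(model, input_ids, max_new_tokens=20):
    generated = input_ids
    past_key_values = None
    for _ in range(max_new_tokens):
        step_input = generated if past_key_values is None else generated[:, -1:]
        out = model(input_ids=step_input, past_key_values=past_key_values, use_cache=True)
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        generated = torch.cat([generated, next_token], dim=-1)
        past_key_values = out.past_key_values  # reuse the cache on the next step
    return generated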
@dataclass
class CausalLMOutputWithCrossAttentions(ModelOutput):
"""
Base class for causal language model (or autoregressive) outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss (for next-token prediction).
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Cross attentions weights after the attention softmax, used to compute the weighted average in the
cross-attention heads.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`torch.FloatTensor` tuples of length :obj:`config.n_layers`, with each tuple containing the
cached key, value states of the self-attention and the cross-attention layers if model is used in
encoder-decoder setting. Only relevant if ``config.is_decoder = True``.
Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutputWithPast(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
:obj:`past_key_values` input) to speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MaskedLMOutput(ModelOutput):
"""
Base class for masked language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Masked language modeling (MLM) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqLMOutput(ModelOutput):
"""
Base class for sequence-to-sequence language models outputs.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Language modeling loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class NextSentencePredictorOutput(ModelOutput):
"""
Base class for outputs of models predicting if two sentences are consecutive or not.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`next_sentence_label` is provided):
Next sequence prediction (classification) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class SequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class Seq2SeqSequenceClassifierOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class MultipleChoiceModelOutput(ModelOutput):
"""
Base class for outputs of multiple choice models.
Args:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class TokenClassifierOutput(ModelOutput):
"""
Base class for outputs of token classification models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) :
Classification loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class QuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
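# --- Editor's illustrative sketch (not part of the original module) ---
# start_logits/end_logits are per-token scores; the simplest way to turn them into an
# answer span is an independent argmax over each (real pipelines also check that
# start <= end and combine scores). `outputs` is assumed to be a
# QuestionAnsweringModelOutput for a single example.
def _example_extract_span(outputs):
    start_index = int(outputs.start_logits[0].argmax())
    end_index = int(outputs.end_logits[0].argmax())
    # The answer corresponds to input_ids[0, start_index : end_index + 1].
    return start_index, end_index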
@dataclass
class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):
"""
Base class for outputs of sequence-to-sequence question answering models.
Args:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-start scores (before SoftMax).
end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`):
Span-end scores (before SoftMax).
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
Tuple of :obj:`tuple(torch.FloatTensor)` of length :obj:`config.n_layers`, with each tuple having 2 tensors
of shape :obj:`(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
shape :obj:`(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see :obj:`past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
start_logits: torch.FloatTensor = None
end_logits: torch.FloatTensor = None
past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
| [
"[email protected]"
]
| |
479662d20de07396bd35fcfe7f488ed18826fad5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02855/s718736811.py | 8ddd9b2385d3289e1813e674e14423b272d3ea98 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | import sys
input = sys.stdin.readline
def main():
H, W, K = map(int, input().split())
cake = [[0]*W for _ in range(H)]
sb = []
for y in range(H):
s = input().strip()
for x, c in enumerate(s):
if c == "#":
sb.append((y, x))
for i, (y, x) in enumerate(sb):
cake[y][x] = i + 1
for i, s in enumerate(sb):
i += 1
y = s[0]
x0 = x1 = s[1]
for x in range(s[1]-1, -1, -1):
if cake[y][x] != 0:
break
cake[y][x] = i
x0 = x
for x in range(s[1]+1, W):
if cake[y][x] != 0:
break
cake[y][x] = i
x1 = x
for y in range(s[0]-1, -1, -1):
if cake[y][x0:x1+1].count(0) != x1-x0+1:
break
for x in range(x0, x1+1):
cake[y][x] = i
for y in range(s[0]+1, H):
if cake[y][x0:x1+1].count(0) != x1-x0+1:
break
for x in range(x0, x1+1):
cake[y][x] = i
for c in cake:
print(*c)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
f463a74f74787be2e051c8d2a6a94d0fb9b4f288 | 76938f270e6165514162856b2ed33c78e3c3bcb5 | /lib/coginvasion/hood/SafeZoneLoader.py | 002fba48993eee06b78f6836ee332a27de837fa4 | []
| no_license | coginvasion/src | 9a5ec682845cc4c9c013fcc35e9b379bd4360b6c | 2d7fcdb0cd073050250cb51292ee48300a9fe19f | refs/heads/master | 2021-01-19T06:50:11.786112 | 2015-11-08T12:28:52 | 2015-11-08T12:28:52 | 61,545,543 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,874 | py | # Embedded file name: lib.coginvasion.hood.SafeZoneLoader
"""
Filename: SafeZoneLoader.py
Created by: blach (14Dec14)
"""
from pandac.PandaModules import *
from direct.fsm.StateData import StateData
from direct.fsm.ClassicFSM import ClassicFSM
from direct.fsm.State import State
from QuietZoneState import QuietZoneState
from lib.coginvasion.manager.SettingsManager import SettingsManager
from direct.directnotify.DirectNotifyGlobal import directNotify
from lib.coginvasion.base.ShadowCreator import ShadowCreator
import ToonInterior
import LinkTunnel
class SafeZoneLoader(StateData):
notify = directNotify.newCategory('SafeZoneLoader')
def __init__(self, hood, parentFSMState, doneEvent):
StateData.__init__(self, doneEvent)
self.hood = hood
self.parentFSMState = parentFSMState
self.fsm = ClassicFSM('safeZoneLoader', [State('off', self.enterOff, self.exitOff),
State('playground', self.enterPlayground, self.exitPlayground, ['quietZone']),
State('toonInterior', self.enterToonInterior, self.exitToonInterior, ['quietZone']),
State('quietZone', self.enterQuietZone, self.exitQuietZone, ['playground', 'toonInterior'])], 'off', 'off')
self.placeDoneEvent = 'placeDone'
self.place = None
self.playground = None
self.battleMusic = None
self.invasionMusic = None
self.invasionMusicFiles = None
self.interiorMusic = None
self.bossBattleMusic = None
self.music = None
self.tournamentMusic = None
self.linkTunnels = []
return
def findAndMakeLinkTunnels(self):
for tunnel in self.geom.findAllMatches('**/*linktunnel*'):
dnaRootStr = tunnel.getName()
link = LinkTunnel.SafeZoneLinkTunnel(tunnel, dnaRootStr)
self.linkTunnels.append(link)
def load(self):
StateData.load(self)
if self.pgMusicFilename:
self.music = base.loadMusic(self.pgMusicFilename)
if self.battleMusicFile:
self.battleMusic = base.loadMusic(self.battleMusicFile)
if self.invasionMusicFiles:
self.invasionMusic = None
if self.bossBattleMusicFile:
self.bossBattleMusic = base.loadMusic(self.bossBattleMusicFile)
if self.interiorMusicFilename:
self.interiorMusic = base.loadMusic(self.interiorMusicFilename)
if self.tournamentMusicFiles:
self.tournamentMusic = None
self.createSafeZone(self.dnaFile)
self.parentFSMState.addChild(self.fsm)
width, height, fs, music, sfx, tex_detail, model_detail, aa, af = SettingsManager().getSettings('settings.json')
if af == 'on':
self.notify.info('Anisotropic Filtering is on, applying to textures.')
for nodepath in self.geom.findAllMatches('*'):
try:
for node in nodepath.findAllMatches('**'):
try:
node.findTexture('*').setAnisotropicDegree(8)
except:
pass
except:
continue
return
def unload(self):
StateData.unload(self)
self.parentFSMState.removeChild(self.fsm)
del self.parentFSMState
self.geom.removeNode()
del self.geom
del self.fsm
del self.hood
del self.playground
del self.music
del self.interiorMusic
del self.battleMusic
del self.bossBattleMusic
del self.tournamentMusic
self.ignoreAll()
ModelPool.garbageCollect()
TexturePool.garbageCollect()
def enter(self, requestStatus):
StateData.enter(self)
if base.localAvatar.zoneId < 61000:
self.findAndMakeLinkTunnels()
self.fsm.enterInitialState()
messenger.send('enterSafeZone')
self.setState(requestStatus['where'], requestStatus)
partyGate = self.geom.find('**/prop_party_gate_DNARoot')
if not partyGate.isEmpty():
partyGate.removeNode()
del partyGate
petShop = self.geom.find('**/prop_pet_shop_DNARoot')
if not petShop.isEmpty():
petShop.removeNode()
del petShop
def exit(self):
StateData.exit(self)
messenger.send('exitSafeZone')
for link in self.linkTunnels:
link.cleanup()
self.linkTunnels = []
def setState(self, stateName, requestStatus):
self.fsm.request(stateName, [requestStatus])
def createSafeZone(self, dnaFile):
if self.szStorageDNAFile:
loader.loadDNAFile(self.hood.dnaStore, self.szStorageDNAFile)
node = loader.loadDNAFile(self.hood.dnaStore, dnaFile)
if node.getNumParents() == 1:
self.geom = NodePath(node.getParent(0))
self.geom.reparentTo(hidden)
else:
self.geom = hidden.attachNewNode(node)
self.makeDictionaries(self.hood.dnaStore)
if self.__class__.__name__ not in ('TTSafeZoneLoader',):
self.geom.flattenMedium()
gsg = base.win.getGsg()
if gsg:
self.geom.prepareScene(gsg)
def makeDictionaries(self, dnaStore):
self.nodeList = []
for i in xrange(dnaStore.getNumDNAVisGroups()):
groupFullName = dnaStore.getDNAVisGroupName(i)
groupName = base.cr.hoodMgr.extractGroupName(groupFullName)
groupNode = self.geom.find('**/' + groupFullName)
if groupNode.isEmpty():
self.notify.error('Could not find visgroup')
if self.__class__.__name__ not in ('TTSafeZoneLoader',):
groupNode.flattenMedium()
self.nodeList.append(groupNode)
self.hood.dnaStore.resetPlaceNodes()
self.hood.dnaStore.resetDNAGroups()
self.hood.dnaStore.resetDNAVisGroups()
self.hood.dnaStore.resetDNAVisGroupsAI()
def enterPlayground(self, requestStatus):
try:
self.hood.stopSuitEffect()
except:
pass
self.acceptOnce(self.placeDoneEvent, self.handlePlaygroundDone)
self.place = self.playground(self, self.fsm, self.placeDoneEvent)
self.place.load()
def exitPlayground(self):
self.ignore(self.placeDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
def handlePlaygroundDone(self):
status = self.place.doneStatus
if self.hood.isSameHood(status) and status['loader'] == 'safeZoneLoader' and status['where'] not in ('minigame',):
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
def enterToonInterior(self, requestStatus):
self.acceptOnce(self.placeDoneEvent, self.handleToonInteriorDone)
self.place = ToonInterior.ToonInterior(self, self.fsm, self.placeDoneEvent)
self.place.load()
def enterThePlace(self, requestStatus):
base.cr.playGame.setPlace(self.place)
self.place.enter(requestStatus)
def exitToonInterior(self):
self.ignore(self.placeDoneEvent)
self.place.exit()
self.place.unload()
self.place = None
base.cr.playGame.setPlace(self.place)
return
def handleToonInteriorDone(self):
status = self.place.doneStatus
if status['loader'] == 'safeZoneLoader' and self.hood.isSameHood(status) and status['shardId'] == None or status['how'] == 'doorOut':
self.fsm.request('quietZone', [status])
else:
self.doneStatus = status
messenger.send(self.doneEvent)
return
def enterQuietZone(self, requestStatus):
self.fsm.request(requestStatus['where'], [requestStatus], exitCurrent=0)
self.quietZoneDoneEvent = uniqueName('quietZoneDone')
self.acceptOnce(self.quietZoneDoneEvent, self.handleQuietZoneDone)
self.quietZoneStateData = QuietZoneState(self.quietZoneDoneEvent)
self.quietZoneStateData.load()
self.quietZoneStateData.enter(requestStatus)
def exitQuietZone(self):
self.ignore(self.quietZoneDoneEvent)
del self.quietZoneDoneEvent
self.quietZoneStateData.exit()
self.quietZoneStateData.unload()
self.quietZoneStateData = None
return
def handleQuietZoneDone(self):
status = self.quietZoneStateData.getDoneStatus()
self.exitQuietZone()
if status['where'] == 'estate' or status['loader'] == 'townLoader':
self.doneStatus = status
messenger.send(self.doneEvent)
else:
self.enterThePlace(status)
def enterOff(self):
pass
def exitOff(self):
pass | [
"[email protected]"
]
| |
a82bee3b593d95974bad78f55ee6156122db8fd3 | f998a574343292d050777f616b408a74fde05738 | /eshop_docker/eshop/extra_apps/social_core/pipeline/mail.py | 0011dff7ec36b703d0f114d0e63bcffcaea7276f | []
| no_license | Boomshakal/Django | 7987e0572fc902bd56360affea0b5087a4cb04a7 | a149691c472eab3440028bf2460cd992acec0f8a | refs/heads/master | 2023-01-11T06:16:29.283428 | 2022-12-23T08:00:05 | 2022-12-23T08:00:05 | 199,360,433 | 0 | 0 | null | 2020-06-06T09:37:02 | 2019-07-29T02:01:09 | Python | UTF-8 | Python | false | false | 1,363 | py | from ..exceptions import InvalidEmail
from .partial import partial
@partial
def mail_validation(backend, details, is_new=False, *args, **kwargs):
requires_validation = backend.REQUIRES_EMAIL_VALIDATION or \
backend.setting('FORCE_EMAIL_VALIDATION', False)
send_validation = details.get('email') and \
(is_new or backend.setting('PASSWORDLESS', False))
if requires_validation and send_validation:
data = backend.strategy.request_data()
if 'verification_code' in data:
backend.strategy.session_pop('email_validation_address')
if not backend.strategy.validate_email(details['email'],
data['verification_code']):
raise InvalidEmail(backend)
else:
current_partial = kwargs.get('current_partial')
backend.strategy.send_email_validation(backend,
details['email'],
current_partial.token)
backend.strategy.session_set('email_validation_address',
details['email'])
return backend.strategy.redirect(
backend.strategy.setting('EMAIL_VALIDATION_URL')
)
| [
"[email protected]"
]
| |
221c800690ad2075781c1a540faef149ed4178c1 | 0a973640f0b02d7f3cf9211fcce33221c3a50c88 | /.history/src/eastmoney_20210204151516.py | 5a538aeca4821250f7da0e18299c626362fd9943 | []
| no_license | JiajunChen123/IPO_under_review_crawler | 5468b9079950fdd11c5e3ce45af2c75ccb30323c | 031aac915ebe350ec816c05a29b5827fde588567 | refs/heads/main | 2023-02-26T08:23:09.622725 | 2021-02-04T10:11:16 | 2021-02-04T10:11:16 | 332,619,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,012 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : easy-money.py
@Time : 2021/02/04 09:03:02
@Author : Jiajun Chen
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
'''
# EastMoney (东方财富网): IPO first-filing (首发申报) and review-meeting information
import re
import pickle
from datetime import datetime
from urllib.parse import urlencode
import pandas as pd
import requests
import csv
import time
from bs4 import BeautifulSoup
import os.path
from utils import save_pickle,load_pickle
base_url = 'https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?'
eastmoney_raw_data_path = './data/EastMoney/eastmoney_raw_data.csv'
zzsc_csv_path = './data/EastMoney/eastmoney_zzsc.csv'
zzsc_pkl_path = './saved_config/eastmoney_zzsc.pkl'
szzxb_stocksInfo_path = './saved_config/szzxb_stocksInfo.pkl'
shzb_stocksInfo_path = './saved_config/shzb_stocksInfo.pkl'
zb_zxb_stocksInfo_path = './saved_config/zb_zxb_stocksInfo.pkl'
eastmoney_meeting_path = './data/EastMoney/eastmoney_data_meeting.csv'
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'}
def update_date():
'''
    Fetch the date of the most recent data update.
'''
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
newDate = soup.find('option').get_text()
return newDate
def dateList_gen():
'''
fetch all existing date_data
'''
r = requests.get('http://data.eastmoney.com/xg/xg/sbqy.html',
headers=headers)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
dateList = [i.text for i in soup.findAll('option')]
return dateList
def update_eastmoneyData():
'''
    Update the East Money IPO initial-filing data.
'''
dataList = dateList_gen()
    # If the CSV file does not exist yet, create it with a header row
if not os.path.isfile('./data/EastMoney/eastmoneyRawData.csv'):
columns = ['机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接']
with open('./data/EastMoney/eastmoneyRawData.csv','w') as f:
writer = csv.DictWriter(f, fieldnames=columns)
writer.writeheader()
for date in reversed(dataList):
if not os.path.isfile('./data/EastMoney/首发信息/{}.csv'.format(date)):
print('find new date:{}, fetching.....'.format(date))
df = get_eastmoneyData(date)
df.to_csv('./data/EastMoney/eastmoneyRawData.csv', mode='a', header=False,index=False,encoding='utf-8-sig')
return
def get_eastmoneyData(date):
query = {'type': 'NS',
'sty': 'NSFR',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '1',
'fd' : date,
'rt': '53721774'
}
rs = requests.get(base_url, params=query, headers=headers)
js = rs.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(js[:-1])
temp = [i.split(',') for i in data]
columns = [
'会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
'是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df = df[[
'机构名称', '类型', '板块', '注册地', '保荐机构', '保荐代表人', '律师事务所', '签字律师', '会计师事务所',
'签字会计师', '是否提交财务自查报告', '所属行业', '日期', 'xxx', '时间戳', '简称', '文件链接'
]]
df = df[df['板块'] != '创业板']
df.replace({'是否提交财务自查报告': ' '}, '是')
df.replace({'是否提交财务自查报告': '不适用'}, '是')
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df.to_csv('C:/Users/chen/Desktop/IPO_info/data/EastMoney/首发信息/{}.csv'.format(date),index=False, encoding='utf-8-sig')
return df
def update_zzscData():
today = datetime.today().date().strftime('%Y-%m-%d')
newDate = dateList_gen()[0]
if today != newDate:
print('正在更新终止审查数据。。。')
try:
zzsc_dict = load_pickle(zzsc_pkl_path)
data = get_zzscData(newDate)
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
except:
zzsc_dict = gen_zzscDict()
else:
zzsc_df = pd.DataFrame(zzsc_dict.items(), columns=['机构名称', '决定终止审查时间'])
zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\*', '', regex=True)
zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'股份有限公司', '', regex=True)
zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\(', '(', regex=True)
zzsc_df['机构名称'] = zzsc_df['机构名称'].replace(r'\)', ')', regex=True)
zzsc_df.to_csv(zzsc_csv_path,
encoding='utf-8-sig',
index=False)
save_pickle(zzsc_dict,zzsc_pkl_path)
lastDate = newDate
else:
zzsc_df = pd.read_csv(zzsc_csv_path)
return zzsc_df
def gen_zzscDict():
dateList = dateList_gen()
zzsc_dict = {}
for date in dateList:
data = get_zzscData(date)
for i in data:
name = i.split(',')[1]
if name not in zzsc_dict:
zzsc_dict[name] = i.split(',')[2]
else:
continue
save_pickle(zzsc_dict,zzsc_pkl_path)
return zzsc_dict
def get_zzscData(date):
query = {
'type': 'NS',
'sty': 'NSSE',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '500',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': '4',
'stat': 'zzsc',
'fd': date,
'rt': '53727636'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
if rss.text == 'var IBhynDx={pages:0,data:[{stats:false}]}':
return ''
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
return data
def get_meetingData():
today = datetime.today().date().strftime('%Y-%m-%d')
newDate = dateList_gen()[0]
if today == newDate or not os.path.isfile(eastmoney_meeting_path):
print('正在更新数据。。。')
meetingInfo = []
        for marketType in ['2', '4']:  # 2 = Main Board, 4 = SME Board
query = {
'type': 'NS',
'sty': 'NSSH',
'st': '1',
'sr': '-1',
'p': '1',
'ps': '5000',
'js': 'var IBhynDx={pages:(pc),data:[(x)]}',
'mkt': marketType,
'rt': '53723990'
}
url = base_url + urlencode(query)
rss = requests.get(url, headers=headers)
jss = rss.text.split('var IBhynDx={pages:1,data:')[1]
data = eval(jss[:-1])
meetingInfo.extend(data)
temp = [j.split(',') for j in meetingInfo]
columns = [
'时间戳', 'yyy', '公司代码', '机构名称', '详情链接', '申报日期', '上会日期', '申购日期', '上市日期',
'9', '拟发行数量', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '当前状态', '上市地点',
'主承销商', '承销方式', '发审委委员', '网站', '简称'
]
df = pd.DataFrame(temp, columns=columns)
df['文件链接'] = df['时间戳'].apply(
lambda x: "https://notice.eastmoney.com/pdffile/web/H2_" + x + "_1.pdf"
)
df['详情链接'] = df['公司代码'].apply(
lambda x: "data.eastmoney.com/xg/gh/detail/" + x + ".html")
df = df[[
'机构名称', '当前状态', '上市地点', '拟发行数量', '申报日期', '上会日期', '申购日期', '上市日期',
'主承销商', '承销方式', '9', '发行前总股本', '发行后总股本', '13', '占发行后总股本比例', '发审委委员',
'网站', '公司代码', 'yyy', '时间戳', '简称', '详情链接', '文件链接'
]]
df['机构名称'] = df['机构名称'].replace(r'\*', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'股份有限公司', '', regex=True)
df['机构名称'] = df['机构名称'].replace(r'\(', '(', regex=True)
df['机构名称'] = df['机构名称'].replace(r'\)', ')', regex=True)
df.to_csv(
eastmoney_meeting_path,
index=False,
encoding='utf-8-sig')
else:
df = pd.read_csv(eastmoney_meeting_path,keep_default_na=False)
return df
def eastmoney_cleanUP():
east_money = pd.read_csv(eastmoney_raw_data_path, keep_default_na=False)
east_money.replace({'是否提交财务自查报告': ' '}, '是')
east_money.replace({'是否提交财务自查报告': '不适用'}, '是')
east_money['机构名称'] = east_money['机构名称'].replace(r'\*', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'股份有限公司', '', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\(', '(', regex=True)
east_money['机构名称'] = east_money['机构名称'].replace(r'\)', ')', regex=True)
east_money = east_money[east_money['板块'] != '创业板']
east_money['类型'] = pd.Categorical(east_money['类型'],
categories=["已受理","已反馈","预先披露更新","中止审查","已提交发审会讨论,暂缓表决",
"已上发审会,暂缓表决","已通过发审会"],ordered=True)
east_money.sort_values(['机构名称','保荐机构','类型','日期'], inplace=True)
# east_money.to_csv('./pre_cleab.csv',encoding='utf-8-sig',index=False)
east_money.drop_duplicates(subset=['机构名称','保荐机构', '类型',],
keep='first',
inplace=True)
east_money.to_csv(
'./data/EastMoney/eastmoney_data_cleaned_v2.csv',
encoding='utf-8-sig',
index=False)
return east_money
def gen_finalData(cleaned_easymoney_df, meetingInfo_df, zzsc_df):
'''
    Main Board / SME Board record template (dict keys are kept in Chinese because the
    code below uses them verbatim):
    主板、中小板 = {'机构名称':'',
'简称':'',
'Wind代码':'',
'统一社会信用代码':'',
'板块':'',
'注册地':'',
'所属行业':'',
'经营范围':'',
'预先披露':'[日期]',
'已反馈':'[日期]',
'预先披露更新':'[日期]',
'发审会':{'中止审查':'[日期]',
'已上发审会,暂缓表决':'[日期]',
'已提交发审会讨论,暂缓表决:'[日期]',
'已通过发审会':'[日期]'},
'终止审查':'[日期]',
'上市日期':'[日期]',
'保荐机构':'',
'律师事务所':,
'会计师事务所':'',
'发行信息':{'拟发行数量':'',
'发行前总股本':'',
'发行后总股本':''},
'反馈文件':'[链接]'
}
'''
    all_data = {}  # aggregated results, keyed by company name
ekk = cleaned_easymoney_df.values.tolist()
for i in ekk:
if i[0] not in all_data:
all_data[i[0]] = {
'机构名称': i[0] + '股份有限公司',
'简称': i[15],
'Wind代码': '',
'统一社会信用代码': '',
'板块': i[2],
'注册地': '',
'所属行业': '',
'经营范围': '',
'预先披露': '',
'已反馈': '',
'预先披露更新': '',
'发审会': {
'中止审查': '',
'已上发审会,暂缓表决': '',
'已提交发审会讨论,暂缓表决': '',
'已通过发审会': ''
},
'终止审查': '',
'上市日期': '',
'保荐机构': i[4],
'保荐代表人': '',
'律师事务所': i[6],
'签字律师': '',
'会计师事务所': i[8],
'签字会计师': '',
'发行信息': {
'拟发行数量(万)': '',
'发行前总股本(万)': '',
'发行后总股本(万)': ''
},
'反馈文件': ''
}
if i[1] == '已受理':
all_data[i[0]]['预先披露'] = i[12]
elif i[1] == '已反馈':
all_data[i[0]]['已反馈'] = i[12]
elif i[1] == '预先披露更新':
all_data[i[0]]['预先披露更新'] = i[12]
elif i[1] == '已通过发审会':
all_data[i[0]]['发审会']['已通过发审会'] = i[12]
elif i[1] == '已提交发审会讨论,暂缓表决':
all_data[i[0]]['发审会']['已提交发审会讨论,暂缓表决'] = i[12]
elif i[1] == '已上发审会,暂缓表决':
all_data[i[0]]['发审会']['已上发审会,暂缓表决'] = i[12]
elif i[1] == '中止审查':
all_data[i[0]]['发审会']['中止审查'] = i[12]
if all_data[i[0]]['注册地'] == '' and i[3] != '':
all_data[i[0]]['注册地'] = i[3]
if all_data[i[0]]['所属行业'] == '' and i[11] != '':
all_data[i[0]]['所属行业'] = i[11]
if all_data[i[0]]['保荐代表人'] == '' and i[5] != '':
all_data[i[0]]['保荐代表人'] = i[5]
if all_data[i[0]]['签字律师'] == '' and i[7] != '':
all_data[i[0]]['签字律师'] = i[7]
if all_data[i[0]]['签字会计师'] == '' and i[9] != '':
all_data[i[0]]['签字会计师'] = i[9]
    # Merge in review-meeting information
ekk2 = meetingInfo_df.values.tolist()
error_set = {}
for i in ekk2:
i[0] = i[0].replace(r'股份有限公司', '')
if i[0] not in all_data:
print("Error: Cannot find ", i[0])
error_set.update({i[0]: i[5]})
continue
if i[1] == '上会未通过':
all_data[i[0]]['发审会']['上会未通过'] = i[5]
elif i[1] == '取消审核':
all_data[i[0]]['发审会']['取消审核'] = i[5]
elif i[1] == '上会通过':
all_data[i[0]]['发审会']['已通过发审会'] = i[5]
if i[7] != '':
all_data[i[0]]['上市时间'] = i[7]
all_data[i[0]]['发行信息']['拟发行数量'] = "{:.2f}".format(int(i[3]) / 10000)
all_data[i[0]]['发行信息']['发行前总股本'] = "{:.2f}".format(int(i[11]) / 10000)
all_data[i[0]]['发行信息']['发行后总股本'] = "{:.2f}".format(int(i[12]) / 10000)
    # Merge in terminated-review information
ekk3 = zzsc_df.values.tolist()
for i in ekk3:
name = i[0].replace(r'股份有限公司', '')
if name not in all_data:
print("Error: Cannot find in zzsc", i[0])
error_set.update({name: i[1]})
continue
all_data[name]['终止审查'] = i[1]
save_pickle(all_data, zb_zxb_stocksInfo_path)
return all_data
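# Hedged example (added, not part of the original script): one way the functions above
# are intended to compose. All names below are defined in this module; file locations
# come from the path constants at the top.
def example_full_refresh():
    east_money_df = eastmoney_cleanUP()
    meeting_df = get_meetingData()
    zzsc_df = update_zzscData()
    return gen_finalData(east_money_df, meeting_df, zzsc_df)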
# def update_all():
# try:
# with open('','rb') as file:
# zb_zxb_dict = pickle.load(file)
# _,temp = update_eastmoneyData()
# for i in temp:
# if i not in zb_zxb_dict:
# pass
# else:
# # columns = [
# # '会计师事务所', '保荐代表人', '保荐机构', 'xxx', '律师事务所', '日期', '所属行业', '板块',
# # '是否提交财务自查报告', '注册地', '类型', '机构名称', '签字会计师', '签字律师', '时间戳', '简称'
# # ]
# i[]
if __name__ == '__main__':
# newDate = update_date()
# # update_eastmoneyData(newDate)
# east_money_df = eastmoney_cleanUP()
# meetingInfo_df = get_meetingData(newDate)
# zzsc_df = update_zzscData(newDate)
# # dateList = date_gen()
# # get_eastmoneyData(dateList)
# # east_money_df = eastmoney_cleanUP()
# # east_money_df = pd.read_csv('./EastMoney/easymoney_data_new.csv',keep_default_na=False)
# # meetingInfo_df = pd.read_csv('./EastMoney/eastmoney_data_meeting.csv',keep_default_na=False)
# # meetingInfo_df = get_meetingData()
# # zzsc_df = pd.read_csv('./EastMoney/zzsc.csv')
# all_data,_,_ = gen_finalData(east_money_df,meetingInfo_df,zzsc_df)
# print('Complete!')
eastmoney_cleanUP() | [
"[email protected]"
]
| |
f5003698eeb746238b83debfde8aa260cb48bdfd | ec56e3a57fb71f3fc4f19b168d3fa34cebb781ab | /tcga_encoder/analyses/old/tsne_from_z_space.py | 34915972bbefbbd33094bd0c0e2d2d99481dc0c7 | [
"MIT"
]
| permissive | tedmeeds/tcga_encoder | 64d60148b0c69092cb499abec22618e740ba8b6c | 805f9a5bcc422a43faea45baa0996c88d346e3b4 | refs/heads/master | 2021-01-13T04:50:42.643743 | 2017-08-25T13:09:38 | 2017-08-25T13:09:38 | 78,621,753 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,225 | py | from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
def main( data_location, results_location ):
data_path = os.path.join( HOME_DIR ,data_location ) #, "data.h5" )
results_path = os.path.join( HOME_DIR, results_location )
data_filename = os.path.join( data_path, "data.h5")
fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
tsne_dir = os.path.join( results_path, "tsne" )
check_and_mkdir(tsne_dir)
print "HOME_DIR: ", HOME_DIR
print "data_filename: ", data_filename
print "fill_filename: ", fill_filename
print "LOADING stores"
data_store = pd.HDFStore( data_filename, "r" )
fill_store = pd.HDFStore( fill_filename, "r" )
Z_train = fill_store["/Z/TRAIN/Z/mu"]
Z_val = fill_store["/Z/VAL/Z/mu"]
tissue = data_store["/CLINICAL/TISSUE"]
barcodes_train = Z_train.index.values
tissue_train = data_store["/CLINICAL/TISSUE"].loc[barcodes_train]
tissues = tissue_train.columns
tissue_idx = np.argmax( tissue_train.values, 1 )
#pdb.set_trace()
#class sklearn.manifold.TSNE(n_components=2, perplexity=30.0, early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000, n_iter_without_progress=30, min_grad_norm=1e-07, metric='euclidean', init='random', verbose=0, random_state=None, method='barnes_hut', angle=0.5)
print "Running TSNE"
Z_normed = Z_train.values# - Z_train.values.mean(0)
#Z_normed = Z_normed - Z_train.values.std(0)
Z_normed = Z_normed[:,:100]
perplexity=30
nbr = 2000
np.random.seed(1)
I = np.random.permutation( len(Z_normed ))[:nbr]
tsne = TSNE(n_components=3,verbose=1, learning_rate=1000, perplexity=perplexity, method='exact')
#embedded,dummy = locally_linear_embedding(Z_normed[I,:], n_neighbors=10, n_components=4)
n_components =5
w = np.random.randn( Z_normed.shape[1],n_components)
embedded = np.dot( Z_normed[I,:], w )
#pdb.set_trace()
np.savetxt( tsne_dir + "/z.csv", Z_normed[I,:], fmt='%.3f',delimiter=',')
labels = [tissues[idx] for idx in tissue_idx[I]]
np.savetxt( tsne_dir + "/labels.csv", labels, fmt='%s',delimiter=',')
embedded = tsne.fit_transform( embedded )
print "DONE!"
# z_2d = bh_sne(Z_n,perplexity=30)
colors = "bgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycmbgrkycm"
markers = "ooooooosssssssvvvvvvvppppppphhhhhhhDDDDDDDooooooosssssssvvvvvvvppppppphhhhhhhDDDDDDD"
pp.figure( figsize=(12,12))
for t_idx in range( len(tissues) ):
ids = tissue_idx[I] == t_idx
#'o', mec="r", mew="2",ms=30,fillstyle="none"
if len(ids) >=10:
pp.plot( embedded[ids,:][:10,0], embedded[ids,:][:10,1], markers[t_idx], mec=colors[t_idx], mew="2", ms=10, fillstyle="none", alpha=0.5 )
#pp.show()
pp.savefig( tsne_dir + "/tsne_perplexity_%d.png"%(perplexity), format='png', dpi=300 )
#pdb.set_trace()
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
main( data_location, results_location ) | [
"[email protected]"
]
| |
713e7e523ab6077cab397b0e284922c44b0230b9 | 66bad57fb663349518cfc5a19dd34e00b4672165 | /sn_projection/train_sn_projection.py | daa59db9d86ea7b2b2d08faa821e8143516c8daf | []
| no_license | B-Step62/anime-GANs | 233ac55ae5b0b2031757d7c4d908a65952a25c8d | 8ea05e19121612bb8adb2dd4c7453423e198f320 | refs/heads/master | 2020-04-22T12:13:59.644268 | 2019-03-26T07:17:43 | 2019-03-26T07:17:43 | 170,365,673 | 6 | 2 | null | 2019-10-21T06:05:40 | 2019-02-12T18:02:21 | Python | UTF-8 | Python | false | false | 9,002 | py | import os
import sys
import glob
import argparse
import shutil
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torchvision.utils import save_image
sys.path.append(os.pardir)
from models import sn_projection
from common.dataset.dataset import MultiClassFaceDataset
from common.utils.config import Config
def parse_args():
parser = argparse.ArgumentParser(description='MultiClassGAN')
parser.add_argument('config', type=str)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
return args
def main():
global device, cfg
args = parse_args()
cfg = Config.from_file(args.config)
out = cfg.train.out
if not os.path.exists(out):
os.makedirs(out)
loss_type = cfg.train.loss_type
# save config and command
commands = sys.argv
with open(f'{out}/command.txt', 'w') as f:
f.write('## Command ################\n\n')
f.write(f'python {commands[0]} ')
for command in commands[1:]:
f.write(command + ' ')
f.write('\n\n\n')
f.write('## Args ###################\n\n')
for name in vars(args):
f.write(f'{name} = {getattr(args, name)}\n')
shutil.copy(args.config, f'./{out}')
# Set device
cuda = torch.cuda.is_available()
if cuda and args.gpu >= 0:
print('# cuda available! #')
device = torch.device(f'cuda:{args.gpu}')
else:
device = 'cpu'
gen = getattr(sn_projection, cfg.models.generator.name)(z_dim=cfg.models.generator.z_dim, norm=cfg.models.generator.norm, n_classes=cfg.train.n_classes).to(device)
dis = getattr(sn_projection, cfg.models.discriminator.name)(norm=cfg.models.discriminator.norm, n_classes=cfg.train.n_classes).to(device)
train_dataset = MultiClassFaceDataset(cfg)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=cfg.train.batchsize,
shuffle=True,
num_workers=32,
pin_memory=True,
drop_last=True)
print(f'train dataset contains {len(train_dataset)} images.')
opt_gen = Adam(gen.parameters(), lr=cfg.train.parameters.g_lr, betas=(0., 0.999))
opt_dis = Adam(dis.parameters(), lr=cfg.train.parameters.d_lr, betas=(0., 0.999))
if loss_type == 'ls':
criterion = torch.nn.MSELoss().to(device)
elif loss_type == 'hinge':
criterion = torch.nn.ReLU().to(device)
iteration = 0
batchsize = cfg.train.batchsize
iterations_per_epoch = len(train_loader)
epochs = cfg.train.iterations // iterations_per_epoch
for epoch in range(epochs):
gen.train()
dis.train()
y_real = Variable(torch.ones(batchsize, 1)).to(device)
y_fake = Variable(torch.zeros(batchsize, 1)).to(device)
for i, batch in enumerate(train_loader):
for j in range(cfg.train.discriminator_iter):
# Update Generator
if j == 0:
z = Variable(torch.randn((batchsize, cfg.models.generator.z_dim))).to(device)
x_fake_label = Variable(torch.randint(0, cfg.train.n_classes, (batchsize,), dtype=torch.long)).to(device)
x_fake = gen(z, y=x_fake_label)
d_fake = dis(x_fake, y=x_fake_label)
if loss_type == 'ls':
g_loss = criterion(d_fake, y_real)
elif loss_type == 'wgan-gp':
g_loss = - torch.mean(d_fake)
elif loss_type == 'hinge':
g_loss = - torch.mean(d_fake)
opt_gen.zero_grad()
g_loss.backward()
opt_gen.step()
# Update Dicscriminator
x_real_data = torch.zeros((batchsize, 3, cfg.train.target_size, cfg.train.target_size))
x_real_label_data = torch.zeros(batchsize, dtype=torch.long)
for k in range(batchsize):
x_real_data[k,:,:,:] += batch[0][k]
x_real_label_data[k] += batch[1][k]
x_real = Variable(x_real_data).to(device)
x_real_label = Variable(x_real_label_data).to(device)
z = Variable(torch.randn((batchsize, cfg.models.generator.z_dim))).to(device)
x_fake_label = x_real_label#Variable(torch.randint(0, cfg.train.n_classes, (batchsize,), dtype=torch.long)).to(device)
with torch.no_grad():
x_fake = gen(z, x_fake_label).detach()
d_real = dis(x_real, y=x_real_label)
d_fake = dis(x_fake, y=x_fake_label)
if loss_type == 'ls':
d_loss_fake = criterion(d_fake, y_fake)
d_loss_real = criterion(d_real, y_real)
elif loss_type == 'wgan-gp':
d_loss_fake = torch.mean(d_fake)
d_loss_real = - torch.mean(d_real)
elif loss_type == 'hinge':
d_loss_fake = F.relu(1.0 + d_fake).mean()
d_loss_real = F.relu(1.0 - d_real).mean()
d_loss = d_loss_fake + d_loss_real
if loss_type == 'wgan-gp':
d_loss_gp = gradient_penalty(x_real, x_fake, x_real_label, dis)
d_loss += cfg.train.parameters.lambda_gp * d_loss_gp + 0.1 * torch.mean(d_real * d_real)
opt_dis.zero_grad()
d_loss.backward()
opt_dis.step()
g_lr = poly_lr_scheduler(opt_gen, cfg.train.parameters.g_lr, iteration, lr_decay_iter=10, max_iter=cfg.train.iterations)
d_lr = poly_lr_scheduler(opt_dis, cfg.train.parameters.d_lr, iteration, lr_decay_iter=10, max_iter=cfg.train.iterations)
iteration += 1
if iteration % cfg.train.print_interval == 0:
if loss_type == 'wgan-gp':
print(f'Epoch:[{epoch}][{iteration}/{cfg.train.iterations}] Loss dis:{d_loss:.5f} dis-gp:{d_loss_gp} gen:{g_loss:.5f}')
else:
print(f'Epoch:[{epoch}][{iteration}/{cfg.train.iterations}] Loss dis:{d_loss:.5f} gen:{g_loss:.5f}')
if iteration % cfg.train.save_interval == 0:
if not os.path.exists(os.path.join(out, 'checkpoint')):
os.makedirs(os.path.join(out, 'checkpoint'))
path = os.path.join(out, 'checkpoint', f'iter_{iteration:04d}.pth.tar')
state = {'gen_state_dict':gen.state_dict(),
'dis_state_dict':dis.state_dict(),
'opt_gen_state_dict':opt_gen.state_dict(),
'opt_dis_state_dict':opt_dis.state_dict(),
'iteration':iteration,
}
torch.save(state, path)
if iteration % cfg.train.preview_interval == 0:
x_fake = (x_fake[:min(32, batchsize),:,:,:] + 1.0) * 0.5
save_image(x_fake.data.cpu(), os.path.join(out, 'preview', f'iter_{iteration:04d}.png'))
if iteration == 1:
if not os.path.exists(os.path.join(out, 'preview')):
os.makedirs(os.path.join(out, 'preview'))
x_real = (x_real[:min(32, batchsize),:,:,:] + 1.0) * 0.5
save_image(x_real.data.cpu(), os.path.join(out, 'preview', f'real.png'))
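# Added explanatory comment (not in the original): gradient_penalty implements the
# WGAN-GP regularizer of Gulrajani et al., E[(||grad_{x_hat} D(x_hat)||_2 - 1)^2],
# where x_hat is a random interpolation between a real and a generated sample. In the
# training loop above it is added to the discriminator loss, scaled by lambda_gp,
# only when loss_type == 'wgan-gp'.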
def gradient_penalty(x_real, x_fake, y, dis):
epsilon = torch.rand(x_real.shape[0], 1, 1, 1).to(device).expand_as(x_real)
x_hat = Variable(epsilon * x_real.data + (1 - epsilon) * x_fake.data, requires_grad=True)
d_hat = dis(x_hat, y=y)
grad = torch.autograd.grad(outputs=d_hat,
inputs=x_hat,
grad_outputs=torch.ones(d_hat.shape).to(device),
retain_graph=True,
create_graph=True,
only_inputs=True)[0]
grad = grad.view(grad.shape[0], -1)
grad_norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
d_loss_gp = torch.mean((grad_norm - 1) ** 2)
return d_loss_gp
def poly_lr_scheduler(optimizer, init_lr, iteration, lr_decay_iter=1,
max_iter=100, power=0.9):
"""Polynomial decay of learning rate
            :param init_lr: base learning rate
            :param iteration: current iteration
            :param lr_decay_iter: how frequently decay occurs, default is 1
            :param max_iter: maximum number of iterations
            :param power: polynomial power
"""
if iteration % lr_decay_iter or iteration > max_iter:
return optimizer
lr = init_lr*(1 - iteration/max_iter)**power
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
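# Worked example (added): with init_lr = 2e-4, max_iter = 100000 and power = 0.9,
# iteration 50000 gives lr = 2e-4 * (1 - 0.5) ** 0.9 ~ 2e-4 * 0.536 ~ 1.07e-4,
# i.e. the schedule decays smoothly towards 0 as iteration approaches max_iter.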
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
f1794c53533ed59e09135659ebffe26264600476 | 04a26d4908cef6d94f2e491de4be0d0ad7cf82a8 | /summerforprogress/settings/dev.py | 85a05591de96d9fa677d8a7ea2b84fde4b75ac35 | []
| no_license | Our-Revolution/summer-for-progress | 468293144c38ca5de0e3147ad7eb14bd9265dc7b | b3fae3254d0bf1151470147be6118ea85d6c1cab | refs/heads/master | 2021-01-01T15:53:59.444536 | 2018-03-09T19:52:13 | 2018-03-09T19:52:13 | 97,728,229 | 0 | 0 | null | 2018-03-09T19:52:14 | 2017-07-19T14:48:17 | CSS | UTF-8 | Python | false | false | 58 | py | from base import *
DEBUG = True
COMPRESS_ENABLED = True
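# Hedged note (added): with this split-settings layout the dev module is usually
# selected through the environment, e.g.
#   DJANGO_SETTINGS_MODULE=summerforprogress.settings.dev python manage.py runserver
# (module path inferred from this file's location; the invocation is illustrative).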
| [
"[email protected]"
]
| |
e96ba09963ac7c70cb2539d107d9a761a45db37f | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/machinelearning/azure-mgmt-machinelearningservices/azure/mgmt/machinelearningservices/aio/operations/_online_deployments_operations.py | b3dd667cf623ad3ff04388eae957841a138bff5a | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 55,662 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from io import IOBase
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._online_deployments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_logs_request,
build_get_request,
build_list_request,
build_list_skus_request,
build_update_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OnlineDeploymentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.machinelearningservices.aio.MachineLearningServicesMgmtClient`'s
:attr:`online_deployments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
order_by: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.OnlineDeployment"]:
"""List Inference Endpoint Deployments.
List Inference Endpoint Deployments.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param order_by: Ordering of list. Default value is None.
:type order_by: str
:param top: Top of list. Default value is None.
:type top: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OnlineDeployment or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeploymentTrackedResourceArmPaginatedResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
subscription_id=self._config.subscription_id,
order_by=order_by,
top=top,
skip=skip,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OnlineDeploymentTrackedResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments"
}
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
) -> None:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_delete_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
@distributed_trace_async
async def begin_delete(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete Inference Endpoint Deployment (asynchronous).
Delete Inference Endpoint Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[None] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._delete_initial( # type: ignore
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response): # pylint: disable=inconsistent-return-statements
if cls:
return cls(pipeline_response, None, {})
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod, AsyncARMPolling(lro_delay, lro_options={"final-state-via": "location"}, **kwargs)
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_delete.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
@distributed_trace_async
async def get(
self, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str, **kwargs: Any
) -> _models.OnlineDeployment:
"""Get Inference Deployment Deployment.
Get Inference Deployment Deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OnlineDeployment or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.OnlineDeployment
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
request = build_get_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
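    # Hedged usage sketch (added commentary, not generated code): these operations are
    # reached through the async management client's `online_deployments` attribute.
    # Resource names below are placeholders.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.machinelearningservices.aio import MachineLearningServicesMgmtClient
    #
    #   async with MachineLearningServicesMgmtClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #       deployment = await client.online_deployments.get(
    #           resource_group_name="<rg>",
    #           workspace_name="<workspace>",
    #           endpoint_name="<endpoint>",
    #           deployment_name="<deployment>",
    #       )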
async def _update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
**kwargs: Any
) -> Optional[_models.OnlineDeployment]:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[Optional[_models.OnlineDeployment]] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IOBase, bytes)):
_content = body
else:
_json = self._serialize.body(body, "PartialMinimalTrackedResourceWithSku")
request = build_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if response.status_code == 202:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
)
response_headers["Location"] = self._deserialize("str", response.headers.get("Location"))
response_headers["Retry-After"] = self._deserialize("int", response.headers.get("Retry-After"))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
@overload
async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: _models.PartialMinimalTrackedResourceWithSku,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Update Online Deployment (asynchronous).
Update Online Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSku
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Update Online Deployment (asynchronous).
Update Online Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: Union[_models.PartialMinimalTrackedResourceWithSku, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Update Online Deployment (asynchronous).
Update Online Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Online Endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Online Endpoint entity to apply during operation. Is either a
PartialMinimalTrackedResourceWithSku type or a IO type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.PartialMinimalTrackedResourceWithSku or
IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
async def _create_or_update_initial(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: Union[_models.OnlineDeployment, IO],
**kwargs: Any
) -> _models.OnlineDeployment:
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IOBase, bytes)):
_content = body
else:
_json = self._serialize.body(body, "OnlineDeployment")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self._create_or_update_initial.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if response.status_code == 201:
response_headers["x-ms-async-operation-timeout"] = self._deserialize(
"duration", response.headers.get("x-ms-async-operation-timeout")
)
response_headers["Azure-AsyncOperation"] = self._deserialize(
"str", response.headers.get("Azure-AsyncOperation")
)
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers) # type: ignore
return deserialized # type: ignore
_create_or_update_initial.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: _models.OnlineDeployment,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Create or update Inference Endpoint Deployment (asynchronous).
Create or update Inference Endpoint Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Required.
:type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Create or update Inference Endpoint Deployment (asynchronous).
Create or update Inference Endpoint Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: Union[_models.OnlineDeployment, IO],
**kwargs: Any
) -> AsyncLROPoller[_models.OnlineDeployment]:
"""Create or update Inference Endpoint Deployment (asynchronous).
Create or update Inference Endpoint Deployment (asynchronous).
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param body: Inference Endpoint entity to apply during operation. Is either a OnlineDeployment
type or a IO type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.OnlineDeployment or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either OnlineDeployment or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.machinelearningservices.models.OnlineDeployment]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.OnlineDeployment] = kwargs.pop("cls", None)
polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
cont_token: Optional[str] = kwargs.pop("continuation_token", None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
body=body,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
headers=_headers,
params=_params,
**kwargs
)
kwargs.pop("error_map", None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize("OnlineDeployment", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True:
polling_method: AsyncPollingMethod = cast(
AsyncPollingMethod,
AsyncARMPolling(lro_delay, lro_options={"final-state-via": "original-uri"}, **kwargs),
)
elif polling is False:
polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
else:
polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output,
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) # type: ignore
begin_create_or_update.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}"
}
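# NOTE: a minimal, hypothetical usage sketch for the long-running operation
# documented above. The client variable `ml_client`, the resource names and
# the `deployment` body are illustrative assumptions only; they are not
# defined in this module.
#
#     poller = await ml_client.online_deployments.begin_create_or_update(
#         resource_group_name="my-rg",
#         workspace_name="my-workspace",
#         endpoint_name="my-endpoint",
#         deployment_name="blue",
#         body=deployment,  # an OnlineDeployment model or a JSON stream (IO)
#     )
#     deployment = await poller.result()  # waits for the LRO to complete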
@overload
async def get_logs(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: _models.DeploymentLogsRequest,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentLogs:
"""Polls an Endpoint operation.
Polls an Endpoint operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Required.
:type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def get_logs(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.DeploymentLogs:
"""Polls an Endpoint operation.
Polls an Endpoint operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Required.
:type body: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def get_logs(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
body: Union[_models.DeploymentLogsRequest, IO],
**kwargs: Any
) -> _models.DeploymentLogs:
"""Polls an Endpoint operation.
Polls an Endpoint operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: The name and identifier for the endpoint. Required.
:type deployment_name: str
:param body: The request containing parameters for retrieving logs. Is either a
DeploymentLogsRequest type or a IO type. Required.
:type body: ~azure.mgmt.machinelearningservices.models.DeploymentLogsRequest or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentLogs or the result of cls(response)
:rtype: ~azure.mgmt.machinelearningservices.models.DeploymentLogs
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
cls: ClsType[_models.DeploymentLogs] = kwargs.pop("cls", None)
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(body, (IOBase, bytes)):
_content = body
else:
_json = self._serialize.body(body, "DeploymentLogsRequest")
request = build_get_logs_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.get_logs.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("DeploymentLogs", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_logs.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/getLogs"
}
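# NOTE: a hedged, hypothetical sketch of fetching deployment logs with the
# method above. The client variable and resource names are assumptions, and
# the `tail` field of DeploymentLogsRequest is assumed to cap the number of
# returned log lines.
#
#     logs = await ml_client.online_deployments.get_logs(
#         resource_group_name="my-rg",
#         workspace_name="my-workspace",
#         endpoint_name="my-endpoint",
#         deployment_name="blue",
#         body=_models.DeploymentLogsRequest(tail=100),
#     )
#     print(logs.content)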
@distributed_trace
def list_skus(
self,
resource_group_name: str,
workspace_name: str,
endpoint_name: str,
deployment_name: str,
count: Optional[int] = None,
skip: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.SkuResource"]:
"""List Inference Endpoint Deployment Skus.
List Inference Endpoint Deployment Skus.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param workspace_name: Name of Azure Machine Learning workspace. Required.
:type workspace_name: str
:param endpoint_name: Inference endpoint name. Required.
:type endpoint_name: str
:param deployment_name: Inference Endpoint Deployment name. Required.
:type deployment_name: str
:param count: Number of Skus to be retrieved in a page of results. Default value is None.
:type count: int
:param skip: Continuation token for pagination. Default value is None.
:type skip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuResource or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.SkuResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.SkuResourceArmPaginatedResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_skus_request(
resource_group_name=resource_group_name,
workspace_name=workspace_name,
endpoint_name=endpoint_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
count=count,
skip=skip,
api_version=api_version,
template_url=self.list_skus.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SkuResourceArmPaginatedResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_skus.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/onlineEndpoints/{endpointName}/deployments/{deploymentName}/skus"
}
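# NOTE: a hedged, hypothetical sketch of consuming the AsyncItemPaged returned
# by list_skus above; the client variable and resource names are assumptions.
#
#     async for sku in ml_client.online_deployments.list_skus(
#         resource_group_name="my-rg",
#         workspace_name="my-workspace",
#         endpoint_name="my-endpoint",
#         deployment_name="blue",
#     ):
#         print(sku)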
| [
"[email protected]"
]
| |
9aafc60a18e81c3c46b7f61ce8584b6642ac2b85 | 5f86944bdf1b810a84c63adc6ed01bbb48d2c59a | /kubernetes/client/models/v1alpha1_cluster_role_binding.py | 0a4350d713ee282f409da55e0d78e80cd0581b15 | [
"Apache-2.0"
]
| permissive | m4ttshaw/client-python | 384c721ba57b7ccc824d5eca25834d0288b211e2 | 4eac56a8b65d56eb23d738ceb90d3afb6dbd96c1 | refs/heads/master | 2021-01-13T06:05:51.564765 | 2017-06-21T08:31:03 | 2017-06-21T08:31:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,473 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1ClusterRoleBinding(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, role_ref=None, subjects=None):
"""
V1alpha1ClusterRoleBinding - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'role_ref': 'V1alpha1RoleRef',
'subjects': 'list[V1alpha1Subject]'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'role_ref': 'roleRef',
'subjects': 'subjects'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._role_ref = role_ref
self._subjects = subjects
@property
def api_version(self):
"""
Gets the api_version of this V1alpha1ClusterRoleBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1alpha1ClusterRoleBinding.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1alpha1ClusterRoleBinding.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1alpha1ClusterRoleBinding.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1alpha1ClusterRoleBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1alpha1ClusterRoleBinding.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1alpha1ClusterRoleBinding.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1alpha1ClusterRoleBinding.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1alpha1ClusterRoleBinding.
Standard object's metadata.
:return: The metadata of this V1alpha1ClusterRoleBinding.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1alpha1ClusterRoleBinding.
Standard object's metadata.
:param metadata: The metadata of this V1alpha1ClusterRoleBinding.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def role_ref(self):
"""
Gets the role_ref of this V1alpha1ClusterRoleBinding.
RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.
:return: The role_ref of this V1alpha1ClusterRoleBinding.
:rtype: V1alpha1RoleRef
"""
return self._role_ref
@role_ref.setter
def role_ref(self, role_ref):
"""
Sets the role_ref of this V1alpha1ClusterRoleBinding.
RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.
:param role_ref: The role_ref of this V1alpha1ClusterRoleBinding.
:type: V1alpha1RoleRef
"""
if role_ref is None:
raise ValueError("Invalid value for `role_ref`, must not be `None`")
self._role_ref = role_ref
@property
def subjects(self):
"""
Gets the subjects of this V1alpha1ClusterRoleBinding.
Subjects holds references to the objects the role applies to.
:return: The subjects of this V1alpha1ClusterRoleBinding.
:rtype: list[V1alpha1Subject]
"""
return self._subjects
@subjects.setter
def subjects(self, subjects):
"""
Sets the subjects of this V1alpha1ClusterRoleBinding.
Subjects holds references to the objects the role applies to.
:param subjects: The subjects of this V1alpha1ClusterRoleBinding.
:type: list[V1alpha1Subject]
"""
if subjects is None:
raise ValueError("Invalid value for `subjects`, must not be `None`")
self._subjects = subjects
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1ClusterRoleBinding):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
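# NOTE: a minimal, hedged usage sketch (not part of the generated client). It
# assumes V1alpha1RoleRef, V1alpha1Subject and V1ObjectMeta are importable from
# kubernetes.client in this same generated package, as the swagger_types above
# indicate; the names used below are purely illustrative.
def _example_cluster_role_binding():
    from kubernetes.client import V1alpha1RoleRef, V1alpha1Subject, V1ObjectMeta
    role_ref = V1alpha1RoleRef(api_group="rbac.authorization.k8s.io",
                               kind="ClusterRole", name="cluster-admin")
    subject = V1alpha1Subject(kind="ServiceAccount", name="default",
                              namespace="kube-system")
    binding = V1alpha1ClusterRoleBinding(api_version="rbac.authorization.k8s.io/v1alpha1",
                                         kind="ClusterRoleBinding",
                                         metadata=V1ObjectMeta(name="example-binding"),
                                         role_ref=role_ref,
                                         subjects=[subject])
    return binding.to_dict()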
| [
"[email protected]"
]
| |
2a51fc184829e1da5455b36180993db24eb91034 | d0ca6faf4b672be1d97b6cf6302430a3dc970895 | /apps/spark/src/spark/migrations/0001_initial.py | f94b6466d45d50367513e891f3d02575f4ac7780 | [
"Apache-2.0"
]
| permissive | jesman/hue | 8aaea0a6134e1624c12145159fae94d6e01e5db4 | 21edfc1b790510e512216ab5cc8aeb1a84255de3 | refs/heads/master | 2021-01-14T13:48:06.054767 | 2013-11-26T22:26:32 | 2013-11-26T23:10:41 | 14,733,058 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,825 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SparkScript'
db.create_table('spark_sparkscript', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('data', self.gf('django.db.models.fields.TextField')(default='{"name": "", "parameters": [], "script": "", "hadoopProperties": [], "type": "python", "properties": [], "resources": [], "job_id": null}')),
))
db.send_create_signal('spark', ['SparkScript'])
def backwards(self, orm):
# Deleting model 'SparkScript'
db.delete_table('spark_sparkscript')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'desktop.document': {
'Meta': {'object_name': 'Document'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'default': "''"}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'doc_owner'", 'to': "orm['auth.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['desktop.DocumentTag']", 'db_index': 'True', 'symmetrical': 'False'}),
'version': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
},
'desktop.documenttag': {
'Meta': {'object_name': 'DocumentTag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
'spark.sparkscript': {
'Meta': {'object_name': 'SparkScript'},
'data': ('django.db.models.fields.TextField', [], {'default': '\'{"name": "", "parameters": [], "script": "", "hadoopProperties": [], "type": "python", "properties": [], "resources": [], "job_id": null}\''}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['spark'] | [
"[email protected]"
]
| |
3180b25bad018eec1119e875c24d615762cfd99d | 7add1f8fc31b09bb79efd2b25cc15e23666c1d1d | /tfx/dsl/context_managers/context_manager_test.py | 6cd12f340655d602f1e13a0924a240f2424f8509 | [
"Apache-2.0"
]
| permissive | twitter-forks/tfx | b867e9fee9533029ca799c4a4c5d1c5430ba05fe | cb3561224c54a5dad4d5679165d5b3bafc8b451b | refs/heads/master | 2021-11-19T18:45:09.157744 | 2021-10-19T00:02:34 | 2021-10-19T00:02:34 | 205,426,993 | 2 | 1 | Apache-2.0 | 2021-10-18T21:03:50 | 2019-08-30T17:21:03 | Python | UTF-8 | Python | false | false | 7,687 | py | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.dsl.context_manager."""
import threading
from typing import Dict, Any
import tensorflow as tf
from tfx.dsl.components.base import base_node
from tfx.dsl.context_managers import context_manager
class _FakeNode(base_node.BaseNode):
@property
def inputs(self) -> Dict[str, Any]:
return {}
@property
def outputs(self) -> Dict[str, Any]:
return {}
@property
def exec_properties(self) -> Dict[str, Any]:
return {}
class _FakeContext(context_manager.DslContext):
pass
class _FakeContextManager(context_manager.DslContextManager):
def create_context(self) -> _FakeContext:
return _FakeContext()
def enter(self, context: _FakeContext) -> _FakeContext:
return context
class ContextManagerTest(tf.test.TestCase):
def reset_registry(self) -> context_manager._DslContextRegistry:
result = context_manager._registry = context_manager._DslContextRegistry()
return result
def testContext_ContextIdAttrFactory(self):
# ID is in format <classname>:<num> where <num> is incremental
# regardless of the depth of the contexts.
self.reset_registry()
with _FakeContextManager() as c1:
self.assertEqual(c1.id, '_FakeContext:1')
with _FakeContextManager() as c2:
self.assertEqual(c2.id, '_FakeContext:2')
with _FakeContextManager() as c3:
self.assertEqual(c3.id, '_FakeContext:3')
with _FakeContextManager() as c4:
self.assertEqual(c4.id, '_FakeContext:4')
# ID count is reset after registry reset.
self.reset_registry()
with _FakeContextManager() as c5:
self.assertEqual(c5.id, '_FakeContext:1')
def testContext_ParentAttrFactory(self):
registry = self.reset_registry()
bg = registry.background_context
with _FakeContextManager() as c1:
self.assertIs(c1.parent, bg)
with _FakeContextManager() as c2:
self.assertIs(c2.parent, c1)
with _FakeContextManager() as c3:
self.assertEqual(c3.parent, c2)
with _FakeContextManager() as c4:
self.assertIs(c4.parent, c1)
def testContext_Ancestors(self):
registry = self.reset_registry()
bg = registry.background_context
self.assertEqual(list(bg.ancestors), [])
with _FakeContextManager() as c1:
self.assertEqual(list(c1.ancestors), [bg])
with _FakeContextManager() as c2:
self.assertEqual(list(c2.ancestors), [bg, c1])
with _FakeContextManager() as c3:
self.assertEqual(list(c3.ancestors), [bg, c1, c2])
with _FakeContextManager() as c4:
self.assertEqual(list(c4.ancestors), [bg, c1])
def testRegistry_AllContexts(self):
registry = self.reset_registry()
bg = registry.background_context
self.assertEqual(registry.all_contexts, [bg])
with _FakeContextManager() as c1:
self.assertEqual(registry.all_contexts, [bg, c1])
with _FakeContextManager() as c2:
self.assertEqual(registry.all_contexts, [bg, c1, c2])
with _FakeContextManager() as c3:
self.assertEqual(registry.all_contexts, [bg, c1, c2, c3])
with _FakeContextManager() as c4:
self.assertEqual(registry.all_contexts, [bg, c1, c2, c3, c4])
def testRegistry_ActiveContexts(self):
registry = self.reset_registry()
bg = registry.background_context
self.assertEqual(registry.active_contexts, [bg])
with _FakeContextManager() as c1:
self.assertEqual(registry.active_contexts, [bg, c1])
with _FakeContextManager() as c2:
self.assertEqual(registry.active_contexts, [bg, c1, c2])
with _FakeContextManager() as c3:
self.assertEqual(registry.active_contexts, [bg, c1, c2, c3])
with _FakeContextManager() as c4:
self.assertEqual(registry.active_contexts, [bg, c1, c4])
def testRegistry_NodeAndContextAssociations(self):
registry = self.reset_registry()
bg = registry.background_context
n0 = _FakeNode()
with _FakeContextManager() as c1:
n1 = _FakeNode()
with _FakeContextManager() as c2:
n2 = _FakeNode()
with _FakeContextManager() as c3:
n3 = _FakeNode()
with _FakeContextManager() as c4:
n4 = _FakeNode()
# Associated nodes for each context
self.assertEqual(registry.get_nodes(bg), [n0, n1, n2, n3, n4])
self.assertEqual(registry.get_nodes(c1), [n1, n2, n3, n4])
self.assertEqual(registry.get_nodes(c2), [n2, n3])
self.assertEqual(registry.get_nodes(c3), [n3])
self.assertEqual(registry.get_nodes(c4), [n4])
# Convenient property for calling registry.get_nodes()
self.assertEqual(bg.nodes, [n0, n1, n2, n3, n4])
self.assertEqual(c1.nodes, [n1, n2, n3, n4])
self.assertEqual(c2.nodes, [n2, n3])
self.assertEqual(c3.nodes, [n3])
self.assertEqual(c4.nodes, [n4])
# Associated contexts for each node
self.assertEqual(registry.get_contexts(n0), [bg])
self.assertEqual(registry.get_contexts(n1), [bg, c1])
self.assertEqual(registry.get_contexts(n2), [bg, c1, c2])
self.assertEqual(registry.get_contexts(n3), [bg, c1, c2, c3])
self.assertEqual(registry.get_contexts(n4), [bg, c1, c4])
def testContextManager_EnterMultipleTimes(self):
cm = _FakeContextManager()
with cm as c1:
pass
with cm as c2:
self.assertNotEqual(c1, c2)
with cm as c3:
self.assertNotEqual(c2, c3)
def testContextManager_EnterReturnValue(self):
class UltimateContextManager(_FakeContextManager):
def enter(self, context: _FakeContext) -> int:
return 42
with UltimateContextManager() as captured:
self.assertEqual(captured, 42)
def testRegistry_MultiThreads(self):
num_threads = 2
b = threading.Barrier(num_threads)
def test():
with _FakeContextManager() as c1:
self.assertEqual(c1.id, '_FakeContext:1')
b.wait()
with _FakeContextManager() as c2:
self.assertEqual(c2.id, '_FakeContext:2')
self.assertEqual(c2.parent, c1)
threads = [threading.Thread(target=test) for i in range(num_threads)]
for thread in threads:
thread.start()
for thread in threads:
thread.join() # Expects no unhandled exceptions.
def testGetNodes(self):
self.reset_registry()
n0 = _FakeNode()
with _FakeContextManager() as c1:
n1 = _FakeNode()
with _FakeContextManager() as c2:
n2 = _FakeNode()
with _FakeContextManager() as c3:
n3 = _FakeNode()
with _FakeContextManager() as c4:
n4 = _FakeNode()
with self.subTest('With argument'):
self.assertEqual(context_manager.get_nodes(c1), [n1, n2, n3, n4])
self.assertEqual(context_manager.get_nodes(c2), [n2, n3])
self.assertEqual(context_manager.get_nodes(c3), [n3])
self.assertEqual(context_manager.get_nodes(c4), [n4])
with self.subTest('Without argument'):
# get_nodes() without argument queries nodes for background context,
# which works as a node registry
self.assertEqual(context_manager.get_nodes(), [n0, n1, n2, n3, n4])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
]
| |
91cc85e0ffa21fb97bb0ff8de417a2d95386a351 | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/6 kyu/EqualSidesOfAnArray/find_even_index_test.py | 8b6028dd54d750990bb81debe7dec85cc4fd15a3 | []
| no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | from find_even_index import find_even_index
import unittest
from random import randint
class TestFindEvenIndex(unittest.TestCase):
def test(self):
self.assertEqual(find_even_index([1,2,3,4,3,2,1]), 3, 'Basic test')
self.assertEqual(find_even_index([1,100,50,-51,1,1]), 1, 'Basic test')
self.assertEqual(find_even_index([1,2,3,4,5,6]), -1, 'Basic test')
self.assertEqual(find_even_index([20,10,30,10,10,15,35]), 3, 'Basic test')
self.assertEqual(find_even_index([20,10,-80,10,10,15,35]), 0, 'Basic test')
self.assertEqual(find_even_index([10,-80,10,10,15,35,20]), 6, 'Basic test')
self.assertEqual(find_even_index(range(1,100)),-1, 'Basic test')
self.assertEqual(find_even_index([0,0,0,0,0]), 0, 'Basic test')
self.assertEqual(find_even_index([-1,-2,-3,-4,-3,-2,-1]), 3, 'Basic test')
self.assertEqual(find_even_index(range(-100,-1)), -1, 'Basic test')
def test_rand(self):
find_even_sol=lambda arr, l=0, r="null", i=0: (lambda r: -1 if i>=len(arr) else i if r==l else find_even_sol(arr, l+arr[i], r-(0 if i+1>=len(arr) else arr[i+1]), i+1))(r if r!="null" else sum(arr[1:]))
contract=lambda arr: (lambda pos: arr[:pos]+[sum(arr[pos:])])(randint(0,len(arr)-1))
for _ in range(40):
left=[randint(-20, 20) for qu in range(randint(10,20))]
right=left[:]
if randint(0,1): left[randint(0,len(left)-1)]+=randint(-20,20)
left=sorted(contract(left), key=lambda a: randint(1,1000))
right=sorted(contract(right), key=lambda a: randint(1,1000))
arr=([]+left[:]+[randint(-20,20)]+right[:])[:]
self.assertEqual(find_even_index(arr[:]), find_even_sol(arr), "It should work for random inputs too")
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
]
| |
8eac4fc825850b11e722174526cd44ce446db0db | 8158f19a3dde47587b27730296c50c670ab8eb5f | /py/lvmtarget/brightstar.py | 2741dfc82e12294cb601d2f26d5f437627f5156e | [
"BSD-3-Clause"
]
| permissive | sdss/lvmtarget | e083ecef5f3df50f7fa606d44c5de17792cbd902 | 8014fe1a60f26475526e2beea943f5444cd78a82 | refs/heads/master | 2021-05-11T19:42:54.477069 | 2019-10-28T19:53:25 | 2019-10-28T19:53:25 | 117,886,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,634 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
lvmtarget.brightstar
=====================
Module for studying and masking bright stars in the sweeps
"""
from __future__ import (absolute_import, division)
from time import time
import numpy as np
import numpy.lib.recfunctions as rfn
import fitsio
from glob import glob
from astropy.coordinates import SkyCoord
from astropy import units as u
import os
import re
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
from . import __version__ as lvmtarget_version
from lvmtarget import io
from lvmtarget.internal import sharedmem
from lvmtarget import desi_mask, targetid_mask
from lvmtarget.targets import encode_targetid
from lvmutil import depend, brick
import healpy as hp
def circles(x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"""Make a scatter plot of circles. Similar to plt.scatter, but the size of circles are in data scale
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
References
----------
With thanks to https://gist.github.com/syrte/592a062c562cd2a98a83
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
patches = [Circle((x_, y_), s_)
for x_, y_, s_ in np.broadcast(x, y, s)]
collection = PatchCollection(patches, **kwargs)
if c is not None:
collection.set_array(np.asarray(c))
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
return collection
def ellipses(x, y, w, h=None, rot=0.0, c='b', vmin=None, vmax=None, **kwargs):
"""Make a scatter plot of ellipses
Parameters
----------
x, y : scalar or array_like, shape (n, )
Center of ellipses.
w, h : scalar or array_like, shape (n, )
Total length (diameter) of horizontal/vertical axis.
`h` is set to be equal to `w` by default, ie. circle.
rot : scalar or array_like, shape (n, )
Rotation in degrees (anti-clockwise).
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
ellipses(a, a, w=4, h=a, rot=a*30, c=a, alpha=0.5, ec='none')
plt.colorbar()
References
----------
With thanks to https://gist.github.com/syrte/592a062c562cd2a98a83
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
if h is None:
h = w
zipped = np.broadcast(x, y, w, h, rot)
patches = [Ellipse((x_, y_), w_, h_, rot_)
for x_, y_, w_, h_, rot_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None:
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
return collection
def max_objid_bricks(targs):
"""For a set of targets, return the maximum value of BRICK_OBJID in each BRICK_ID
Parameters
----------
targs : :class:`recarray`
A recarray of targets as made by :mod:`lvmtarget.cuts.select_targets`
Returns
-------
maxobjid : :class:`dictionary`
A dictionary with keys for each unique BRICKID and values of the maximum OBJID in that brick
"""
#ADM the maximum BRICKID in the passed target set
brickmax = np.max(targs["BRICKID"])
#ADM how many OBJIDs are in each unique brick, starting from 0 and ordered on BRICKID
h = np.histogram(targs["BRICKID"],range=[0,brickmax],bins=brickmax)[0]
#ADM remove zero entries from the histogram
h = h[np.where(h > 0)]
#ADM the index of the maximum OBJID in each brick if the bricks are ordered on BRICKID and OBJID
maxind = np.cumsum(h)-1
#ADM an array of BRICKID, OBJID sorted first on BRICKID and then on OBJID within each BRICKID
ordered = np.array(sorted(zip(targs["BRICKID"],targs["BRICK_OBJID"]), key=lambda x: (x[0], x[1])))
#ADM return a dictionary of the maximum OBJID (values) for each BRICKID (keys)
return dict(ordered[maxind])
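# NOTE: a small, hedged sketch of the dictionary returned by max_objid_bricks();
# the BRICKID/BRICK_OBJID values below are invented purely to illustrate the
# expected input layout and output.
def _example_max_objid_bricks():
    targs = np.zeros(4, dtype=[('BRICKID', '>i4'), ('BRICK_OBJID', '>i4')])
    targs['BRICKID'] = [10, 10, 12, 12]
    targs['BRICK_OBJID'] = [5, 7, 2, 9]
    #ADM expected result: {10: 7, 12: 9}
    return max_objid_bricks(targs)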
def cap_area(theta):
"""True area of a circle of a given radius drawn on the surface of a sphere
Parameters
----------
theta : array_like
(angular) radius of a circle drawn on the surface of the unit sphere (in DEGREES)
Returns
-------
area : array_like
surface area on the sphere included within the passed angular radius
Notes
-----
- The approximate formula pi*theta**2 is only accurate to ~0.0025% at 1o, ~0.25% at 10o,
sufficient for bright star mask purposes. But the equation in this function is more general.
- We recast the input array as float64 to circumvent precision issues with np.cos()
when radii of only a few arcminutes are passed
- Even for passed radii of 1 (0.1) arcsec, float64 is sufficiently precise to give the correct
area to ~0.00043 (~0.043%) using np.cos()
"""
#ADM recast input array as float64
theta = theta.astype('<f8')
#ADM factor to convert steradians to sq.deg.
st2sq = 180.*180./np.pi/np.pi
#ADM return area
return st2sq*2*np.pi*(1-(np.cos(np.radians(theta))))
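# NOTE: a short, hedged check of cap_area() against the small-angle
# approximation pi*theta**2 mentioned in its Notes; the radii are arbitrary
# illustrative values.
def _example_cap_area():
    radii = np.array([1., 10.])     #ADM angular radii in degrees
    exact = cap_area(radii)         #ADM ~[3.14, 313.4] sq. deg.
    approx = np.pi*radii**2         #ADM ~[3.14, 314.2] sq. deg.
    return exact, approx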
def sphere_circle_ra_off(theta,centdec,declocs):
"""Offsets in RA needed for given declinations in order to draw a (small) circle on the sphere
Parameters
----------
theta : :class:`float`
(angular) radius of a circle drawn on the surface of the unit sphere (in DEGREES)
centdec : :class:`float`
declination of the center of the circle to be drawn on the sphere (in DEGREES)
declocs : array_like
declinations of positions on the boundary of the circle at which to calculate RA offsets (in DEGREES)
Returns
-------
raoff : array_like
offsets in RA that correspond to the passed dec locations for the given dec circle center (IN DEGREES)
Notes
-----
- This function is agnostic about the SIGN of the offset. In other words, it can only draw the semi-circle
in theta from -90o->90o, which corresponds to offsets in the POSITIVE RA direction. The user must determine
which offsets are to the negative side of the circle, or call this function twice.
"""
#ADM convert the input angles from degrees to radians
thetar = np.radians(theta)
centdecr = np.radians(centdec)
declocsr = np.radians(declocs)
#ADM determine the offsets in RA from the small circle equation (easy to derive from, e.g. converting
#ADM to Cartesian coordinates and using dot products). The answer is the arccos of the following:
cosoffrar = (np.cos(thetar) - (np.sin(centdecr)*np.sin(declocsr))) / (np.cos(centdecr)*np.cos(declocsr))
#ADM catch cases where the offset angle is very close to 0
offrar = np.arccos(np.clip(cosoffrar,-1,1))
#ADM return the angular offsets in degrees
return np.degrees(offrar)
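# NOTE: a hedged sketch of tracing a full small circle with
# sphere_circle_ra_off(), mirroring the positive-RA offsets to the negative
# side as the Notes describe; the center and radius are arbitrary examples.
def _example_circle_boundary(racent=150., deccent=60., theta=1.):
    decs = deccent + theta*np.linspace(-1., 1., 25)         #ADM decs spanning the circle
    raoff = sphere_circle_ra_off(theta, deccent, decs)      #ADM positive-RA offsets
    ras = np.concatenate([racent + raoff, racent - raoff])  #ADM mirror to the negative side
    decs = np.concatenate([decs, decs])
    return ras, decs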
def collect_bright_stars(bands,maglim,numproc=4,rootdirname='/global/project/projectdirs/cosmo/data/legacysurvey/dr3.1/sweep/3.1',outfilename=None):
"""Extract a structure from the sweeps containing only bright stars in a given band to a given magnitude limit
Parameters
----------
bands : :class:`str`
A magnitude band from the sweeps, e.g., "G", "R", "Z".
Can pass multiple bands as string, e.g. "GRZ", in which case maglim has to be a
list of the same length as the string
maglim : :class:`float`
The upper limit in that magnitude band for which to assemble a list of bright stars.
Can pass a list of magnitude limits, in which case bands has to be a string of the
same length (e.g., "GRZ" for [12.3,12.7,12.6]
numproc : :class:`int`, optional
Number of processes over which to parallelize
rootdirname : :class:`str`, optional, defaults to dr3
Root directory containing either sweeps or tractor files...e.g. for dr3 this might be
/global/project/projectdirs/cosmo/data/legacysurvey/dr3/sweeps/dr3.1
outfilename : :class:`str`, optional, defaults to not writing anything to file
(FITS) File name to which to write the output structure of bright stars
Returns
-------
:class:`recarray`
The structure of bright stars from the sweeps limited in the passed band(s) to the
passed maglim(s).
"""
#ADM set up default logger
from lvmutil.log import get_logger
log = get_logger()
#ADM use io.py to retrieve list of sweeps or tractor files
infiles = io.list_sweepfiles(rootdirname)
if len(infiles) == 0:
infiles = io.list_tractorfiles(rootdirname)
if len(infiles) == 0:
raise IOError('No sweep or tractor files found in {}'.format(rootdirname))
#ADM force the input maglim to be a list (in case a single value was passed)
if type(maglim) == type(16) or type(maglim) == type(16.):
maglim = [maglim]
#ADM set bands to uppercase if passed as lower case
bands = bands.upper()
#ADM the band names as a flux array instead of a string
bandnames = np.array([ "FLUX_"+band for band in bands ])
if len(bandnames) != len(maglim):
raise IOError('bands has to be the same length as maglim and {} does not equal {}'.format(len(bands),len(maglim)))
#ADM change input magnitude(s) to a flux to test against
fluxlim = 10.**((22.5-np.array(maglim))/2.5)
#ADM parallel formalism from this step forward is stolen from cuts.select_targets
#ADM function to grab the bright stars from a given file
def _get_bright_stars(filename):
'''Retrieves bright stars from a sweeps/Tractor file'''
objs = io.read_tractor(filename)
#ADM write the fluxes as an array instead of as named columns
fluxes = objs[bandnames].view(objs[bandnames].dtype[0]).reshape(objs[bandnames].shape + (-1,))
#ADM Retain rows for which ANY band is brighter than maglim
w = np.where(np.any(fluxes > fluxlim,axis=1))
if len(w[0]) > 0:
return objs[w]
#ADM counter for how many files have been processed
#ADM critical to use np.ones because a numpy scalar allows in place modifications
# c.f https://www.python.org/dev/peps/pep-3104/
totfiles = np.ones((),dtype='i8')*len(infiles)
nfiles = np.ones((), dtype='i8')
t0 = time()
log.info('Collecting bright stars from sweeps...')
def _update_status(result):
'''wrapper function for the critical reduction operation,
that occurs on the main parallel process'''
if nfiles%25 == 0:
elapsed = time() - t0
rate = nfiles / elapsed
log.info('{}/{} files; {:.1f} files/sec; {:.1f} total mins elapsed'.format(nfiles, totfiles, rate, elapsed/60.))
nfiles[...] += 1 #this is an in-place modification
return result
#ADM did we ask to parallelize, or not?
if numproc > 1:
pool = sharedmem.MapReduce(np=numproc)
with pool:
starstruc = pool.map(_get_bright_stars, infiles, reduce=_update_status)
else:
starstruc = []
for file in infiles:
starstruc.append(_update_status(_get_bright_stars(file)))
#ADM note that if there were no bright stars in a file then
#ADM the _get_bright_stars function will have returned NoneTypes
#ADM so we need to filter those out
starstruc = [x for x in starstruc if x is not None]
if len(starstruc) == 0:
raise IOError('There are no stars brighter than {} in {} in files in {} with which to make a mask'.format(str(maglim),bands,rootdirname))
#ADM concatenate all of the output recarrays
starstruc = np.hstack(starstruc)
#ADM if the name of a file for output is passed, then write to it
if outfilename is not None:
fitsio.write(outfilename, starstruc, clobber=True)
return starstruc
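# NOTE: a hedged worked example of the magnitude-to-flux conversion used in
# collect_bright_stars() to turn maglim into a flux threshold (sweeps fluxes
# are in nanomaggies).
def _example_maglim_to_fluxlim(maglim=12.):
    fluxlim = 10.**((22.5-maglim)/2.5)   #ADM ~15849 nanomaggies for maglim=12
    return fluxlim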
def model_bright_stars(band,instarfile,rootdirname='/global/project/projectdirs/cosmo/data/legacysurvey/dr3.1/'):
"""Build a dictionary of the fraction of bricks containing a star of a given
magnitude in a given band as function of Galactic l and b
Parameters
----------
band : :class:`str`
A magnitude band from the sweeps, e.g., "G", "R", "Z"
instarfile : :class:`str`
File of bright objects in (e.g.) sweeps, created by collect_bright_stars
rootdirname : :class:`str`, optional, defaults to dr3
Root directory for a data release...e.g. for dr3 this would be
/global/project/projectdirs/cosmo/data/legacysurvey/dr3.1/
Returns
-------
:class:`dictionary`
dictionary of the fraction of bricks containing a star of a given
magnitude in a given band as function of Galactic l. Keys are mag
bin CENTERS, values are arrays running from 0->1 to 359->360
:class:`dictionary`
dictionary of the fraction of bricks containing a star of a given
magnitude in a given band as function of Galactic b. Keys are mag
bin CENTERS, values are arrays running from -90->-89 to 89->90
Notes
-----
- converts using coordinates of the brick center, so is an approximation
"""
#ADM histogram bin edges in Galactic coordinates at resolution of 1 degree
lbinedges = np.arange(361)
bbinedges = np.arange(-90,91)
#ADM set band to uppercase if passed as lower case
band = band.upper()
#ADM read in the bright object file
fx = fitsio.FITS(instarfile)
objs = fx[1].read()
#ADM convert fluxes in band of interest for each object to magnitudes
mags = 22.5-2.5*np.log10(objs["FLUX_"+band])
#ADM Galactic l and b for each object of interest
c = SkyCoord(objs["RA"]*u.degree, objs["DEC"]*u.degree, frame='icrs')
lobjs = c.galactic.l.degree
bobjs = c.galactic.b.degree
#ADM construct histogram bin edges in magnitude in passed band
magstep = 0.1
magmin = -1.5 #ADM magnitude of Sirius to 1 d.p.
magmax = np.max(mags)
magbinedges = np.arange(np.rint((magmax-magmin)/magstep))*magstep+magmin
#ADM read in the data-release specific brick information file
fx = fitsio.FITS(glob(rootdirname+'/survey-bricks-dr*.fits.gz')[0], upper=True)
bricks = fx[1].read(columns=['RA','DEC'])
#ADM convert RA/Dec of the brick center to Galactic coordinates and
#ADM build a histogram of the number of bins at each coordinate...
#ADM using the center is imperfect, so this is approximate at best
c = SkyCoord(bricks["RA"]*u.degree, bricks["DEC"]*u.degree, frame='icrs')
lbrick = c.galactic.l.degree
bbrick = c.galactic.b.degree
lhistobrick = (np.histogram(lbrick,bins=lbinedges))[0]
bhistobrick = (np.histogram(bbrick,bins=bbinedges))[0]
#ADM loop through the magnitude bins and populate a dictionary
#ADM of the number of stars in this magnitude range per brick
ldict, bdict = {}, {}
for mag in magbinedges:
key = "{:.2f}".format(mag+(0.5*magstep))
#ADM range in magnitude
w = np.where( (mags >= mag) & (mags < mag+magstep) )
if len(w[0]):
#ADM histograms of numbers of objects in l, b
lhisto = (np.histogram(lobjs[w],bins=lbinedges))[0]
bhisto = (np.histogram(bobjs[w],bins=bbinedges))[0]
#ADM fractions of objects in l, b per brick
#ADM use a sneaky where so that 0/0 results in 0
lfrac = np.where(lhistobrick > 0, lhisto/lhistobrick, 0)
bfrac = np.where(bhistobrick > 0, bhisto/bhistobrick, 0)
#ADM populate the dictionaries
ldict[key], bdict[key] = lfrac, bfrac
return ldict, bdict
def make_bright_star_mask(bands,maglim,numproc=4,rootdirname='/global/project/projectdirs/cosmo/data/legacysurvey/dr3.1/sweep/3.1',infilename=None,outfilename=None):
"""Make a bright star mask from a structure of bright stars drawn from the sweeps
Parameters
----------
bands : :class:`str`
A magnitude band from the sweeps, e.g., "G", "R", "Z".
Can pass multiple bands as string, e.g. "GRZ", in which case maglim has to be a
list of the same length as the string
maglim : :class:`float`
The upper limit in that magnitude band for which to assemble a list of bright stars.
Can pass a list of magnitude limits, in which case bands has to be a string of the
same length (e.g., "GRZ" for [12.3,12.7,12.6]
numproc : :class:`int`, optional
Number of processes over which to parallelize
rootdirname : :class:`str`, optional, defaults to dr3
Root directory containing either sweeps or tractor files...e.g. for dr3 this might be
/global/project/projectdirs/cosmo/data/legacysurvey/dr3/sweeps/dr3.1
infilename : :class:`str`, optional,
if this exists, then the list of bright stars is read in from the file of this name
if this is not passed, then code defaults to deriving the recarray of bright stars
via a call to collect_bright_stars
outfilename : :class:`str`, optional, defaults to not writing anything to file
(FITS) File name to which to write the output bright star mask
Returns
-------
:class:`recarray`
The bright star mask in the form RA, DEC, TARGETID, IN_RADIUS, NEAR_RADIUS (may also be written to file
if "outfilename" is passed)
The radii are in ARCMINUTES
TARGETID is as calculated in :mod:`lvmtarget.targets.encode_targetid`
Notes
-----
- IN_RADIUS is a smaller radius that corresponds to the IN_BRIGHT_OBJECT bit in data/targetmask.yaml
- NEAR_RADIUS is a radius that corresponds to the NEAR_BRIGHT_OBJECT bit in data/targetmask.yaml
- Currently uses the radius-as-a-function-of-B-mag for Tycho stars from the BOSS mask (in every band) to set
the NEAR_RADIUS:
R = (0.0802B*B - 1.860B + 11.625) (see Eqn. 9 of https://arxiv.org/pdf/1203.6594.pdf)
and half that radius to set the IN_RADIUS.
- It's an open question as to what the correct radii are for DESI observations
"""
#ADM set bands to uppercase if passed as lower case
bands = bands.upper()
#ADM the band names and nobs columns as arrays instead of strings
bandnames = np.array([ "FLUX_"+band for band in bands ])
nobsnames = np.array([ "NOBS_"+band for band in bands ])
#ADM force the input maglim to be a list (in case a single value was passed)
if type(maglim) == type(16) or type(maglim) == type(16.):
maglim = [maglim]
if len(bandnames) != len(maglim):
raise IOError('bands has to be the same length as maglim and {} does not equal {}'.format(len(bandnames),len(maglim)))
#ADM change input magnitude(s) to a flux to test against
fluxlim = 10.**((22.5-np.array(maglim))/2.5)
if infilename is not None:
objs = io.read_tractor(infilename)
else:
objs = collect_bright_stars(bands,maglim,numproc,rootdirname,outfilename)
#ADM write the fluxes and bands as arrays instead of named columns
fluxes = objs[bandnames].view(objs[bandnames].dtype[0]).reshape(objs[bandnames].shape + (-1,))
nobs = objs[nobsnames].view(objs[nobsnames].dtype[0]).reshape(objs[nobsnames].shape + (-1,))
#ADM set any observations with NOBS = 0 to have small flux so glitches don't end up as bright star masks.
w = np.where(nobs == 0)
if len(w[0]) > 0:
fluxes[w] = 0.
#ADM limit to the passed faint limit
w = np.where(np.any(fluxes > fluxlim,axis=1))
fluxes = fluxes[w]
objs = objs[w]
#ADM grab the (GRZ) magnitudes for observations
#ADM and record only the largest flux (smallest magnitude)
fluxmax = np.max(fluxes,axis=1)
mags = 22.5-2.5*np.log10(fluxmax)
#ADM convert the largest magnitude into radii for "in" and "near" bright objects. This will require
#ADM more consideration to determine the truly correct numbers for DESI
near_radius = (0.0802*mags*mags - 1.860*mags + 11.625)
in_radius = 0.5*(0.0802*mags*mags - 1.860*mags + 11.625)
#ADM calculate the TARGETID
targetid = encode_targetid(objid=objs['OBJID'], brickid=objs['BRICKID'], release=objs['RELEASE'])
#ADM create an output recarray that is just RA, Dec, TARGETID and the radius
done = objs[['RA','DEC']].copy()
done = rfn.append_fields(done,["TARGETID","IN_RADIUS","NEAR_RADIUS"],[targetid,in_radius,near_radius],
usemask=False,dtypes=['>i8','<f8','<f8'])
if outfilename is not None:
fitsio.write(outfilename, done, clobber=True)
return done
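# NOTE: a hedged worked example of the Tycho-based radius formula quoted in the
# Notes of make_bright_star_mask(); B = 8 is an arbitrary illustrative magnitude.
def _example_mask_radii(bmag=8.):
    near_radius = 0.0802*bmag*bmag - 1.860*bmag + 11.625   #ADM ~1.88 arcmin for B=8
    in_radius = 0.5*near_radius                            #ADM ~0.94 arcmin for B=8
    return in_radius, near_radius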
def plot_mask(mask,limits=None,radius="IN_RADIUS",over=False,show=True):
"""Make a plot of a mask and either display it or retain the plot object for over-plotting
Parameters
----------
mask : :class:`recarray`
A mask constructed by make_bright_star_mask (or read in from file in the make_bright_star_mask format)
limits : :class:`list`, optional
A list defining the RA/Dec limits of the plot as would be passed to matplotlib.pyplot.axis
radius : :class:`str`, optional
Which of the mask radii to plot ("IN_RADIUS" or "NEAR_RADIUS"). Both can be plotted by calling
this function twice with show=False the first time and over=True the second time
over : :class:`boolean`
If True, then don't set-up the plot commands. Just issue the command to plot the mask so that the
mask will be over-plotted on any existing plot (of targets etc.)
show : :class:`boolean`
If True, then display the plot. Otherwise, just execute the plot commands so the plot can be shown or
saved to file later
Returns
-------
Nothing
"""
#ADM set up the plot
if not over:
plt.figure(figsize=(8,8))
ax = plt.subplot(aspect='equal')
plt.xlabel('RA (o)')
plt.ylabel('Dec (o)')
if limits is not None:
plt.axis(limits)
#ADM draw ellipse patches from the mask information converting radius to degrees
#ADM include the cos(dec) term to expand the RA semi-major axis at higher declination
#ADM note the "ellipses" takes the diameter, not the radius
minoraxis = mask[radius]/60.
majoraxis = minoraxis/np.cos(np.radians(mask["DEC"]))
out = ellipses(mask["RA"], mask["DEC"], 2*majoraxis, 2*minoraxis, alpha=0.2, edgecolor='none')
if show:
plt.show()
return
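# NOTE: a hedged sketch of the two-call pattern described in the plot_mask()
# docstring for displaying both radii; `mask` is assumed to be a recarray from
# make_bright_star_mask() and the RA/Dec limits are arbitrary.
def _example_plot_both_radii(mask):
    plot_mask(mask, limits=[148., 152., 58., 62.], radius="NEAR_RADIUS", show=False)
    plot_mask(mask, radius="IN_RADIUS", over=True, show=True)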
def is_in_bright_star(targs,starmask):
"""Determine whether a set of targets is in a bright star mask
Parameters
----------
targs : :class:`recarray`
A recarray of targets as made by lvmtarget.cuts.select_targets
starmask : :class:`recarray`
A recarray containing a bright star mask as made by lvmtarget.brightstar.make_bright_star_mask
Returns
-------
in_mask : array_like.
True for array entries that correspond to a target that is IN a bright star mask
near_mask : array_like.
True for array entries that correspond to a target that is NEAR a bright star mask
"""
#ADM initialize an array of all False (nothing is yet in a star mask)
in_mask = np.zeros(len(targs), dtype=bool)
near_mask = np.zeros(len(targs), dtype=bool)
#ADM turn the coordinates of the masks and the targets into SkyCoord objects
ctargs = SkyCoord(targs["RA"]*u.degree, targs["DEC"]*u.degree)
cstars = SkyCoord(starmask["RA"]*u.degree, starmask["DEC"]*u.degree)
#ADM this is the largest search radius we should need to consider
#ADM in the future an obvious speed up is to split on radius
#ADM as large radii are rarer but take longer
maxrad = max(starmask["NEAR_RADIUS"])*u.arcmin
#ADM coordinate match the star masks and the targets
idtargs, idstars, d2d, d3d = cstars.search_around_sky(ctargs,maxrad)
#ADM catch the case where nothing fell in a mask
if len(idstars) == 0:
return in_mask, near_mask
#ADM for a matching star mask, find the angular separations that are less than the mask radius
w_in = np.where(d2d.arcmin < starmask[idstars]["IN_RADIUS"])
w_near = np.where(d2d.arcmin < starmask[idstars]["NEAR_RADIUS"])
#ADM any matching idtargs that meet this separation criterion are in a mask (at least one)
    in_mask[idtargs[w_in]] = True
    near_mask[idtargs[w_near]] = True
return in_mask, near_mask
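# Illustrative usage sketch (targs is assumed to come from select_targets and
# starmask from make_bright_star_mask):
#   in_mask, near_mask = is_in_bright_star(targs, starmask)
#   targs_outside_masks = targs[~in_mask]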
def is_bright_star(targs,starmask):
"""Determine whether any of a set of targets are, themselves, a bright star mask
Parameters
----------
targs : :class:`recarray`
A recarray of targets as made by lvmtarget.cuts.select_targets
starmask : :class:`recarray`
A recarray containing a bright star mask as made by lvmtarget.brightstar.make_bright_star_mask
Returns
-------
is_mask : array_like.
True for array entries that correspond to targets that are, themselves, a bright star mask
"""
#ADM initialize an array of all False (nothing yet has been shown to correspond to a star mask)
is_mask = np.zeros(len(targs), dtype=bool)
#ADM calculate the TARGETID for the targets
targetid = encode_targetid(objid=targs['BRICK_OBJID'],
brickid=targs['BRICKID'],
release=targs['RELEASE'])
#ADM super-fast set-based look-up of which TARGETIDs are matches between the masks and the targets
matches = set(starmask["TARGETID"]).intersection(set(targetid))
#ADM determine the indexes of the targets that have a TARGETID in matches
w_mask = [ index for index, item in enumerate(targetid) if item in matches ]
#ADM w_mask now contains the target indices that match to a bright star mask on TARGETID
    is_mask[w_mask] = True
return is_mask
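# Illustrative usage sketch (same assumed inputs as for is_in_bright_star):
#   is_mask = is_bright_star(targs, starmask)
#   targs_that_are_not_masks = targs[~is_mask]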
def generate_safe_locations(starmask,Npersqdeg):
"""Given a bright star mask, generate SAFE (BADSKY) locations at its periphery
Parameters
----------
starmask : :class:`recarray`
A recarray containing a bright star mask as made by :mod:`lvmtarget.brightstar.make_bright_star_mask`
npersqdeg : :class:`int`
The number of safe locations to generate per square degree of each mask
Returns
-------
ra : array_like.
The Right Ascensions of the SAFE (BADSKY) locations
dec : array_like.
The Declinations of the SAFE (BADSKY) locations
Notes
-----
- See the Tech Note at https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=2346 for more details
"""
#ADM the radius of each mask in degrees with a 0.1% kick to get things beyond the mask edges
radius = 1.001*starmask["IN_RADIUS"]/60.
#ADM determine the area of each mask
area = cap_area(radius)
#ADM determine the number of SAFE locations to assign to each
#ADM mask given the passed number of locations per sq. deg.
Nsafe = np.ceil(area*Npersqdeg).astype('i')
#ADM determine Nsafe Dec offsets equally spaced around the perimeter for each mask
offdec = [ rad*np.sin(np.arange(ns)*2*np.pi/ns) for ns, rad in zip(Nsafe,radius) ]
#ADM use offsets to determine DEC positions
dec = starmask["DEC"] + offdec
#ADM determine the offsets in RA at these Decs given the mask center Dec
offrapos = [ sphere_circle_ra_off(th,cen,declocs) for th,cen,declocs in zip(radius,starmask["DEC"],dec) ]
#ADM determine which of the RA offsets are in the positive direction
sign = [ np.sign(np.cos(np.arange(ns)*2*np.pi/ns)) for ns in Nsafe ]
#ADM determine the RA offsets with the appropriate sign and add them to the RA of each mask
offra = [ o*s for o,s in zip(offrapos,sign) ]
ra = starmask["RA"] + offra
#ADM have to turn the generated locations into 1-D arrays before returning them
return np.hstack(ra), np.hstack(dec)
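# Illustrative usage sketch; 10000 locations per sq. deg. mirrors the default that
# append_safe_targets (below) hard-codes:
#   ra_safe, dec_safe = generate_safe_locations(starmask, 10000)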
def append_safe_targets(targs,starmask,nside=None,drbricks=None):
"""Append targets at SAFE (BADSKY) locations to target list, set bits in TARGETID and LVM_TARGET
Parameters
----------
targs : :class:`~numpy.ndarray`
A recarray of targets as made by lvmtarget.cuts.select_targets
nside : :class:`integer`
The HEALPix nside used throughout the DESI data model
starmask : :class:`~numpy.ndarray`
A recarray containing a bright star mask as made by lvmtarget.brightstar.make_bright_star_mask
drbricks : :class:`~numpy.ndarray`, optional
A rec array containing at least the "release", "ra", "dec" and "nobjs" columns from a survey bricks file.
This is typically used for testing only.
Returns
-------
The original recarray of targets (targs) is returned with additional SAFE (BADSKY) targets appended to it
Notes
-----
- See the Tech Note at https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=2346 for more details
on the SAFE (BADSKY) locations
- See the Tech Note at https://desi.lbl.gov/DocDB/cgi-bin/private/RetrieveFile?docid=2348 for more details
on setting the SKY bit in TARGETID
- Currently hard-coded to create an additional 10,000 safe locations per sq. deg. of mask. What is the
correct number per sq. deg. (Npersqdeg) for DESI is an open question.
- Perhaps we should move the default nside to a config file, somewhere?
"""
#ADM Number of safe locations per sq. deg. of each mask in starmask
Npersqdeg = 10000
#ADM generate SAFE locations at the periphery of the star masks appropriate to a density of Npersqdeg
ra, dec = generate_safe_locations(starmask,Npersqdeg)
#ADM duplicate the targs rec array with a number of rows equal to the generated safe locations
nrows = len(ra)
safes = np.zeros(nrows, dtype=targs.dtype)
#ADM populate the safes recarray with the RA/Dec of the SAFE locations
safes["RA"] = ra
safes["DEC"] = dec
#ADM set the bit for SAFE locations in DESITARGET
safes["LVM_TARGET"] |= desi_mask.BADSKY
#ADM add the brick information for the SAFE/BADSKY targets
b = brick.Bricks(bricksize=0.25)
safes["BRICKID"] = b.brickid(safes["RA"],safes["DEC"])
safes["BRICKNAME"] = b.brickname(safes["RA"],safes["DEC"])
#ADM get the string version of the data release (to find directories for brick information)
drint = np.max(targs['RELEASE']//1000)
#ADM check the targets all have the same release
checker = np.min(targs['RELEASE']//1000)
if drint != checker:
raise IOError('Objects from multiple data releases in same input numpy array?!')
drstring = 'dr'+str(drint)
#ADM now add the OBJIDs, ensuring they start higher than any other OBJID in the DR
#ADM read in the Data Release bricks file
if drbricks is None:
rootdir = "/project/projectdirs/cosmo/data/legacysurvey/"+drstring.strip()+"/"
drbricks = fitsio.read(rootdir+"survey-bricks-"+drstring.strip()+".fits.gz")
#ADM the BRICK IDs that are populated for this DR
drbrickids = b.brickid(drbricks["ra"],drbricks["dec"])
#ADM the maximum possible BRICKID at bricksize=0.25
brickmax = 662174
#ADM create a histogram of how many SAFE/BADSKY objects are in each brick
hsafes = np.histogram(safes["BRICKID"],range=[0,brickmax+1],bins=brickmax+1)[0]
#ADM create a histogram of how many objects are in each brick in this DR
hnobjs = np.zeros(len(hsafes),dtype=int)
hnobjs[drbrickids] = drbricks["nobjs"]
#ADM make each OBJID for a SAFE/BADSKY +1 higher than any other OBJID in the DR
safes["BRICK_OBJID"] = hnobjs[safes["BRICKID"]] + 1
#ADM sort the safes array on BRICKID
safes = safes[safes["BRICKID"].argsort()]
#ADM remove zero entries from the histogram of BRICKIDs in safes, for speed
hsafes = hsafes[np.where(hsafes > 0)]
#ADM the count by which to augment each OBJID to make unique OBJIDs for safes
objsadd = np.hstack([ np.arange(i) for i in hsafes ])
#ADM finalize the OBJID for each SAFE target
safes["BRICK_OBJID"] += objsadd
#ADM finally, update the TARGETID with the OBJID, the BRICKID, and the fact these are skies
safes["TARGETID"] = encode_targetid(objid=safes['BRICK_OBJID'],
brickid=safes['BRICKID'],
sky=1)
#ADM return the input targs with the SAFE targets appended
return np.hstack([targs,safes])
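# Illustrative usage sketch (this is normally called for you by mask_targets, below):
#   targs_plus_safes = append_safe_targets(targs, starmask)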
def set_target_bits(targs,starmask):
"""Apply bright star mask to targets, return desi_target array
Parameters
----------
targs : :class:`recarray`
A recarray of targets as made by lvmtarget.cuts.select_targets
starmask : :class:`recarray`
A recarray containing a bright star mask as made by lvmtarget.brightstar.make_bright_star_mask
Returns
-------
an ndarray of the updated desi_target bit that includes bright star information
Notes
-----
- Sets IN_BRIGHT_OBJECT and NEAR_BRIGHT_OBJECT via coordinate matches to the mask centers and radii
- Sets BRIGHT_OBJECT via an index match on TARGETID (defined as in :mod:`lvmtarget.targets.encode_targetid`)
See :mod:`lvmtarget.targetmask` for the definition of each bit
"""
bright_object = is_bright_star(targs,starmask)
in_bright_object, near_bright_object = is_in_bright_star(targs,starmask)
desi_target = targs["LVM_TARGET"].copy()
desi_target |= bright_object * desi_mask.BRIGHT_OBJECT
desi_target |= in_bright_object * desi_mask.IN_BRIGHT_OBJECT
desi_target |= near_bright_object * desi_mask.NEAR_BRIGHT_OBJECT
return desi_target
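# Illustrative usage sketch (the returned bits are typically written back into the
# LVM_TARGET column, as mask_targets does below):
#   targs["LVM_TARGET"] = set_target_bits(targs, starmask)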
def mask_targets(targs,instarmaskfile=None,nside=None,bands="GRZ",maglim=[10,10,10],numproc=4,rootdirname='/global/project/projectdirs/cosmo/data/legacysurvey/dr3.1/sweep/3.1',outfilename=None,drbricks=None):
"""Add bits for whether objects are in a bright star mask, and SAFE (BADSKY) sky locations, to a list of targets
Parameters
----------
targs : :class:`str` or `~numpy.ndarray`
A recarray of targets created by lvmtarget.cuts.select_targets OR a filename of
a file that contains such a set of targets
instarmaskfile : :class:`str`, optional
An input bright star mask created by lvmtarget.brightstar.make_bright_star_mask
If None, defaults to making the bright star mask from scratch
The next 5 parameters are only relevant to making the bright star mask from scratch
nside : :class:`integer`
The HEALPix nside used throughout the DESI data model
bands : :class:`str`
A magnitude band from the sweeps, e.g., "G", "R", "Z".
Can pass multiple bands as string, e.g. "GRZ", in which case maglim has to be a
list of the same length as the string
maglim : :class:`float`
The upper limit in that magnitude band for which to assemble a list of bright stars.
Can pass a list of magnitude limits, in which case bands has to be a string of the
        same length (e.g., "GRZ" for [12.3,12.7,12.6])
numproc : :class:`int`, optional
Number of processes over which to parallelize
rootdirname : :class:`str`, optional, defaults to dr3
Root directory containing either sweeps or tractor files...e.g. for dr3 this might be
/global/project/projectdirs/cosmo/data/legacysurvey/dr3/sweeps/dr3.1
outfilename : :class:`str`, optional, defaults to not writing anything to file
(FITS) File name to which to write the output bright star mask ONE OF outfilename or
instarmaskfile MUST BE PASSED
drbricks : :class:`~numpy.ndarray`, optional
A rec array containing at least the "release", "ra", "dec" and "nobjs" columns from a survey bricks file
This is typically used for testing only.
Returns
-------
:class:`~numpy.ndarray`
the input targets with the LVM_TARGET column updated to reflect the BRIGHT_OBJECT bits
and SAFE (BADSKY) sky locations added around the perimeter of the bright star mask.
Notes
-----
- See the Tech Note at https://desi.lbl.gov/DocDB/cgi-bin/private/ShowDocument?docid=2346 for more details
about SAFE (BADSKY) locations
- Runs in about 10 minutes for 20M targets and 50k masks (roughly maglim=10)
- (not including 5-10 minutes to build the star mask from scratch)
"""
#ADM set up default logger
from lvmutil.log import get_logger
log = get_logger()
t0 = time()
if instarmaskfile is None and outfilename is None:
raise IOError('One of instarmaskfile or outfilename must be passed')
#ADM Check if targs is a filename or the structure itself
if isinstance(targs, str):
if not os.path.exists(targs):
raise ValueError("{} doesn't exist".format(targs))
targs = fitsio.read(targs)
#ADM check if a file for the bright star mask was passed, if not then create it
if instarmaskfile is None:
starmask = make_bright_star_mask(bands,maglim,numproc=numproc,
rootdirname=rootdirname,outfilename=outfilename)
else:
starmask = fitsio.read(instarmaskfile)
ntargsin = len(targs)
log.info('Number of targets {}...t={:.1f}s'.format(ntargsin, time()-t0))
log.info('Number of star masks {}...t={:.1f}s'.format(len(starmask), time()-t0))
#ADM generate SAFE locations and add them to the target list
targs = append_safe_targets(targs,starmask,nside=nside,drbricks=drbricks)
log.info('Generated {} SAFE (BADSKY) locations...t={:.1f}s'.format(len(targs)-ntargsin, time()-t0))
#ADM update the bits depending on whether targets are in a mask
dt = set_target_bits(targs,starmask)
done = targs.copy()
done["LVM_TARGET"] = dt
#ADM remove any SAFE locations that are in bright masks (because they aren't really safe)
w = np.where( ((done["LVM_TARGET"] & desi_mask.BADSKY) == 0) |
((done["LVM_TARGET"] & desi_mask.IN_BRIGHT_OBJECT) == 0) )
if len(w[0]) > 0:
done = done[w]
log.info("...of these, {} SAFE (BADSKY) locations aren't in masks...t={:.1f}s"
.format(len(done)-ntargsin, time()-t0))
log.info('Finishing up...t={:.1f}s'.format(time()-t0))
return done
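# Illustrative end-to-end sketch (the file names are assumptions):
#   masked = mask_targets("targets.fits", instarmaskfile="bright_star_mask.fits")
#   fitsio.write("targets-masked.fits", masked, clobber=True)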
| [
"[email protected]"
]
| |
4bc7285423b98e0b74f38de3f3b9828f8e2cf477 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-apig/huaweicloudsdkapig/v2/model/show_details_of_domain_name_certificate_v2_response.py | 0252ac99c6b536ce317ea20d030cdf72e3ff3c1b | [
"Apache-2.0"
]
| permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,067 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ShowDetailsOfDomainNameCertificateV2Response(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'common_name': 'str',
'san': 'list[str]',
'version': 'str',
'organization': 'list[str]',
'organizational_unit': 'list[str]',
'locality': 'list[str]',
'state': 'list[str]',
'country': 'list[str]',
'not_before': 'str',
'not_after': 'str',
'serial_number': 'str',
'issuer': 'list[str]',
'signature_algorithm': 'str'
}
attribute_map = {
'common_name': 'common_name',
'san': 'san',
'version': 'version',
'organization': 'organization',
'organizational_unit': 'organizational_unit',
'locality': 'locality',
'state': 'state',
'country': 'country',
'not_before': 'not_before',
'not_after': 'not_after',
'serial_number': 'serial_number',
'issuer': 'issuer',
'signature_algorithm': 'signature_algorithm'
}
def __init__(self, common_name=None, san=None, version=None, organization=None, organizational_unit=None, locality=None, state=None, country=None, not_before=None, not_after=None, serial_number=None, issuer=None, signature_algorithm=None):
"""ShowDetailsOfDomainNameCertificateV2Response - a model defined in huaweicloud sdk"""
super(ShowDetailsOfDomainNameCertificateV2Response, self).__init__()
self._common_name = None
self._san = None
self._version = None
self._organization = None
self._organizational_unit = None
self._locality = None
self._state = None
self._country = None
self._not_before = None
self._not_after = None
self._serial_number = None
self._issuer = None
self._signature_algorithm = None
self.discriminator = None
if common_name is not None:
self.common_name = common_name
if san is not None:
self.san = san
if version is not None:
self.version = version
if organization is not None:
self.organization = organization
if organizational_unit is not None:
self.organizational_unit = organizational_unit
if locality is not None:
self.locality = locality
if state is not None:
self.state = state
if country is not None:
self.country = country
if not_before is not None:
self.not_before = not_before
if not_after is not None:
self.not_after = not_after
if serial_number is not None:
self.serial_number = serial_number
if issuer is not None:
self.issuer = issuer
if signature_algorithm is not None:
self.signature_algorithm = signature_algorithm
@property
def common_name(self):
"""Gets the common_name of this ShowDetailsOfDomainNameCertificateV2Response.
        Certificate domain name
:return: The common_name of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._common_name
@common_name.setter
def common_name(self, common_name):
"""Sets the common_name of this ShowDetailsOfDomainNameCertificateV2Response.
        Certificate domain name
:param common_name: The common_name of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._common_name = common_name
@property
def san(self):
"""Gets the san of this ShowDetailsOfDomainNameCertificateV2Response.
        SAN (Subject Alternative Name) domain names
:return: The san of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._san
@san.setter
def san(self, san):
"""Sets the san of this ShowDetailsOfDomainNameCertificateV2Response.
        SAN (Subject Alternative Name) domain names
:param san: The san of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._san = san
@property
def version(self):
"""Gets the version of this ShowDetailsOfDomainNameCertificateV2Response.
        Certificate version
:return: The version of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ShowDetailsOfDomainNameCertificateV2Response.
        Certificate version
:param version: The version of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._version = version
@property
def organization(self):
"""Gets the organization of this ShowDetailsOfDomainNameCertificateV2Response.
        Company or organization
:return: The organization of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this ShowDetailsOfDomainNameCertificateV2Response.
        Company or organization
:param organization: The organization of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._organization = organization
@property
def organizational_unit(self):
"""Gets the organizational_unit of this ShowDetailsOfDomainNameCertificateV2Response.
        Department (organizational unit)
:return: The organizational_unit of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._organizational_unit
@organizational_unit.setter
def organizational_unit(self, organizational_unit):
"""Sets the organizational_unit of this ShowDetailsOfDomainNameCertificateV2Response.
        Department (organizational unit)
:param organizational_unit: The organizational_unit of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._organizational_unit = organizational_unit
@property
def locality(self):
"""Gets the locality of this ShowDetailsOfDomainNameCertificateV2Response.
        City
:return: The locality of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._locality
@locality.setter
def locality(self, locality):
"""Sets the locality of this ShowDetailsOfDomainNameCertificateV2Response.
        City
:param locality: The locality of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._locality = locality
@property
def state(self):
"""Gets the state of this ShowDetailsOfDomainNameCertificateV2Response.
        Province or state
:return: The state of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this ShowDetailsOfDomainNameCertificateV2Response.
        Province or state
:param state: The state of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._state = state
@property
def country(self):
"""Gets the country of this ShowDetailsOfDomainNameCertificateV2Response.
        Country
:return: The country of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this ShowDetailsOfDomainNameCertificateV2Response.
        Country
:param country: The country of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._country = country
@property
def not_before(self):
"""Gets the not_before of this ShowDetailsOfDomainNameCertificateV2Response.
        Start time of the certificate validity period
:return: The not_before of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._not_before
@not_before.setter
def not_before(self, not_before):
"""Sets the not_before of this ShowDetailsOfDomainNameCertificateV2Response.
        Start time of the certificate validity period
:param not_before: The not_before of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._not_before = not_before
@property
def not_after(self):
"""Gets the not_after of this ShowDetailsOfDomainNameCertificateV2Response.
        End time of the certificate validity period
:return: The not_after of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._not_after
@not_after.setter
def not_after(self, not_after):
"""Sets the not_after of this ShowDetailsOfDomainNameCertificateV2Response.
        End time of the certificate validity period
:param not_after: The not_after of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._not_after = not_after
@property
def serial_number(self):
"""Gets the serial_number of this ShowDetailsOfDomainNameCertificateV2Response.
        Serial number
:return: The serial_number of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""Sets the serial_number of this ShowDetailsOfDomainNameCertificateV2Response.
        Serial number
:param serial_number: The serial_number of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._serial_number = serial_number
@property
def issuer(self):
"""Gets the issuer of this ShowDetailsOfDomainNameCertificateV2Response.
        Issuer
:return: The issuer of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: list[str]
"""
return self._issuer
@issuer.setter
def issuer(self, issuer):
"""Sets the issuer of this ShowDetailsOfDomainNameCertificateV2Response.
        Issuer
:param issuer: The issuer of this ShowDetailsOfDomainNameCertificateV2Response.
:type: list[str]
"""
self._issuer = issuer
@property
def signature_algorithm(self):
"""Gets the signature_algorithm of this ShowDetailsOfDomainNameCertificateV2Response.
        Signature algorithm
:return: The signature_algorithm of this ShowDetailsOfDomainNameCertificateV2Response.
:rtype: str
"""
return self._signature_algorithm
@signature_algorithm.setter
def signature_algorithm(self, signature_algorithm):
"""Sets the signature_algorithm of this ShowDetailsOfDomainNameCertificateV2Response.
        Signature algorithm
:param signature_algorithm: The signature_algorithm of this ShowDetailsOfDomainNameCertificateV2Response.
:type: str
"""
self._signature_algorithm = signature_algorithm
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDetailsOfDomainNameCertificateV2Response):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
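# Illustrative usage sketch (the field values below are assumptions): build a
# response object by hand and serialize it.
#   resp = ShowDetailsOfDomainNameCertificateV2Response(common_name="example.com",
#                                                       signature_algorithm="SHA256withRSA")
#   print(resp.to_str())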
| [
"[email protected]"
]
| |
645e0aebaab4b939d2f1eaa75c24879619c3c55e | 73d5f8918e7933f31a1ead5f23000989ff8d1445 | /buildbuild/api/tests/test_api_team_list_search.py | f6b2e34370b2cc2fb16426f7cd16ea1c0462076e | [
"BSD-3-Clause"
]
| permissive | wikibootup/buildbuild | c8f1d3f241131059315ba1ca368bfa173449ee9a | 7629b3ac172803d474af312f22eb4a9631342ed3 | refs/heads/master | 2021-01-18T07:22:25.530180 | 2014-11-20T19:03:23 | 2014-11-20T19:03:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | from django.test import TestCase
from django.test.client import Client
from teams.models import Team
class TestAPITeamListSearch(TestCase):
def setUp(self):
self.test_string = "test_string"
self.team_with_test_string = Team.objects.create_team(
name="team_name_with_" + self.test_string,
# prefix 'test_' is excluded in this test case
# because of model validation ( max_length=30 on Team.name )
)
self.team_without_test_string = Team.objects.create_team(
name="team_name_without_", # + self.test_string,
)
self.client = Client()
self.response = self.client.get("/api/teams/?search=" + self.test_string)
def test_api_team_list_search_should_return_valid_result(self):
self.assertContains(self.response, self.team_with_test_string.name)
self.assertNotContains(self.response, self.team_without_test_string.name)
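# To run just this module with Django's test runner (the exact app label/path is an
# assumption about the project layout):
#   python manage.py test api.tests.test_api_team_list_search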
| [
"[email protected]"
]
| |
8542295dc0339d09a52885dcaa9e153216812568 | ffb05b145989e01da075e2a607fb291955251f46 | /mtraits/strait.py | 8a9a1b7d9c0d49bab9a31ca2e20f96ba6118265e | []
| no_license | micheles/papers | a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7 | be9070f8b7e8192b84a102444b1238266bdc55a0 | refs/heads/master | 2023-06-07T16:46:46.306040 | 2018-07-14T04:17:51 | 2018-07-14T04:17:51 | 32,264,461 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,528 | py | __version__ = '0.5.3'
__all__ = ['__version__', 'include', 'MetaTOS']
import inspect
import types
import warnings
class OverridingError(NameError):
pass
class OverridingWarning(Warning):
pass
class Super(object):
# this is needed to fix a shortcoming of unbound super objects,
# i.e. this is how the unbound version of super should work
def __init__(self, thisclass):
self.__thisclass__ = thisclass
def __get__(self, obj, objcls):
return super(self.__thisclass__, obj or objcls)
def oldstyle(*bases):
"Return True if there are no bases or all bases are old-style"
return not bases or set(map(type, bases)) == set([types.ClassType])
class Namespace(dict):
"A named dictionary containing the attribute of a class and its ancestors"
@classmethod
def from_cls(klass, cls):
if oldstyle(cls):
mro = inspect.getmro(cls)
else:
mro = cls.__mro__[:-1] # all except object
dic = merge(subc.__dict__ for subc in reversed(mro))
return klass(cls.__name__, dic)
def __init__(self, name, attrs):
self.__name__ = name
self.update(attrs)
def merge(dicts):
"""Merge a sequence of dictionaries. In case of name clashes,
the last dict in the sequence wins."""
dic = {}
for d in dicts:
dic.update(d)
return dic
class MetaTOS(type):
"The metaclass of the Trait Object System"
def __new__(mcl, name, bases, dic):
if len(bases) > 1:
raise TypeError(
'Multiple inheritance of bases %s is forbidden for TOS classes'
% str(bases))
elif oldstyle(*bases): # converts into new-style
bases += (object,)
cls = mcl.__super.__new__(mcl, name, bases, dic)
setattr(cls, '_%s__super' % name, Super(cls))
return cls
MetaTOS._MetaTOS__super = Super(MetaTOS)
def find_common_names(namespaces):
"Perform n*(n-1)/2 namespace overlapping checks on a set of n namespaces"
n = len(namespaces)
if n <= 1:
return
names = map(set, namespaces)
for i in range(0, n):
for j in range(i+1, n):
ci, cj = namespaces[i], namespaces[j]
common = names[i] & names[j]
if common:
yield common, ci, cj
def check_overridden(namespaces, exclude=frozenset(), raise_='error'):
"Raise an OverridingError for common names not in the exclude set"
for common, n1, n2 in find_common_names(namespaces):
overridden = ', '.join(common - exclude)
if overridden:
msg = '%s overrides names in %s: {%s}' % (
n1.__name__, n2.__name__, overridden)
if raise_ == 'error':
raise OverridingError(msg)
elif raise_ == 'warning':
warnings.warn(msg, OverridingWarning, stacklevel=2)
known_metas = set([MetaTOS])
def get_right_meta(metatos, bases):
# there is only one base because of the single-inheritance constraint
try:
base = bases[0]
except IndexError:
base = object
meta = type(base)
if meta in (types.ClassType, type): # is a builtin meta
return metatos
elif any(issubclass(meta, m) for m in known_metas):
return meta
# meta is independent from all known_metas, make a new one with
# __new__ method coming from MetaTOS
newmeta = type(
'_TOS' + meta.__name__, (meta,), dict(__new__=metatos.__new__))
setattr(newmeta, '_%s__super' % metatos.__name__, Super(newmeta))
known_metas.add(newmeta)
return newmeta
exclude_attrs = set('__doc__ __module__ __dict__ __weakref__'.split())
def new(metatos, name, bases, attrs, traits):
# traits as in Squeak take the precedence over the base class
# but they are overridden by attributes in the class
namespaces = map(Namespace.from_cls, traits)
check_overridden(namespaces, exclude=set(attrs) | exclude_attrs)
meta = get_right_meta(metatos, bases)
cls = meta(name, bases, merge(namespaces + [Namespace(name, attrs)]))
cls.__traits__ = traits
for t in traits:
setattr(cls, '_%s__super' % t.__name__, Super(cls))
return cls
def include(*traits, **kw):
"Returns a class factory"
metatos = kw.get('MetaTOS', MetaTOS) # other kw free for future extensions
def makecls(name, bases, dic):
return new(metatos, name, bases, dic, traits)
makecls.__name__ = 'include_%s' % '_'.join(m.__name__ for m in traits)
return makecls
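# Illustrative usage sketch (Python 2 syntax, matching this module; TraitA and TraitB
# are hypothetical trait classes, not part of strait itself):
#   class TraitA(object):
#       def greet(self): return 'hello from %s' % self.__class__.__name__
#   class TraitB(object):
#       def shout(self): return self.greet().upper()
#   class C(object):
#       __metaclass__ = include(TraitA, TraitB)
#   C().shout()  # 'HELLO FROM C'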
| [
"[email protected]"
]
| |
cad351df6be0d827b8bb24971c1b081dbad29221 | d0645bc7120a4e577c604a7cbfc568e558e7f083 | /pitchfork/load_balancers/__init__.py | d2aba35fb87ad0e7d9c3e0dad962ef44bf7e0de5 | [
"Apache-2.0"
]
| permissive | dev-ace/pitchfork | 85380f9def9cac10fa30f6b851bc6f94b8a7a265 | ba372daef78b64abffb9cc7ca51d14d8299f1ca4 | refs/heads/master | 2021-01-18T20:44:00.550156 | 2014-04-02T18:54:47 | 2014-04-02T18:54:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,002 | py | # Copyright 2014 Dave Kludt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Blueprint, current_app, render_template, g
from pitchfork.adminbp.decorators import check_perms
bp = Blueprint(
'load_balancers',
__name__,
template_folder='templates',
static_folder='static',
static_url_path='/load_balancers'
)
COLLECTION = 'load_balancers'
BLUEPRINT = 'load_balancers'
URL_LINK = 'load_balancers'
TITLE = 'Cloud Load Balancers'
import pymongo
import global_routes
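# Sketch of how this blueprint is typically registered with the main Flask app (the
# import path for the app object is an assumption, not taken from this file):
#   from pitchfork import app
#   from pitchfork.load_balancers import bp
#   app.register_blueprint(bp)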
| [
"[email protected]"
]
| |
ceee035fae6adfbdcf3ec2a990ba6b0f0ef0fa01 | 7e145d1fff87cdabf7c9ae9c08637f299fbd3849 | /8. String to Integer (atoi).py | e2c542ddf189458ca878044d1437b0bbd14f4fa0 | []
| no_license | dundunmao/LeetCode2019 | 2b39ef181a7f66efc9de7d459b11eb1a4a7f60a8 | 9b38a7742a819ac3795ea295e371e26bb5bfc28c | refs/heads/master | 2020-09-16T16:46:50.482697 | 2020-06-07T08:01:16 | 2020-06-07T08:01:16 | 223,833,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | class Solution:
# @return an integer
def myAtoi(self, str):
        str = str.strip() # strip leading/trailing whitespace
if len(str) == 0:
return 0
tmp = ""
result = 0
i = 0
sign = 1
if str[0] == "-": #处理正负号
sign = -1
i = 1
if str[0] == "+": #处理正负号
i = 1
MAX_INT = 2147483647
MIN_INT = -2147483648
        for i in range(i, len(str)): # collect consecutive digits; stop at the first non-digit
if str[i].isdigit():
tmp += str[i]
else:
break
        if len(tmp) > 0: # convert the collected digits, applying the sign
result = sign * int(tmp)
            if result > MAX_INT > 0: # clamp to the 32-bit signed integer range
return MAX_INT
elif result < MIN_INT < 0:
return MIN_INT
            else:
                return result
        return result # no digits were collected (e.g. leading non-digit): return 0
# class Solution(object):
# def myAtoi(self, str):
# """
# :type str: str
# :rtype: int
# """
# if str == '' or str is None:
# return 0
# str.strip()
# i = 0
# while str[0] == '-' or str[0] == '+' or str[i] == '0' or not str[i].isdigit():
# if str[0] == '-' or str[0] == '+' or str[i] == '0':
# i += 1
# if i > len(str)-1:
# break
# if i < len(str) and not str[i].isdigit():
# return 0
#
# le = 0
# j = i
# nums = 0
# while j < len(str):
# if str[j].isdigit():
# le += 1
# j += 1
# else:
# break
#
# for k in range(i, i + le):
# nums += int(str[k]) * 10 ** (le - 1)
# # print nums
# le -= 1
#
# if str[0] == '-':
# return -nums
# else:
# return nums
if __name__ == "__main__":
# s = "12"
s = "-3.14159"
x = Solution()
print(x.myAtoi(s))
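    # prints -3: digit collection stops at the first non-digit ('.')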
| [
"[email protected]"
]
|