| column | type | values |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | sequence | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | sequence | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |
902bff8182253dc5452804b38313cae134a0b77a | e24cdd3433911fb9e7193de488811c80d5b97746 | /ByTags/Design/362. Design Hit Counter.py | b74743e6ef81c7fa6ee85683eb7f02fb3da89e51 | [] | no_license | lynkeib/LeetCode | 753f6a07270d956ca802632edfb0480029fe6f51 | 8a82905d40b882b20a9b6f862942f8f3e4bebcf0 | refs/heads/master | 2021-06-16T11:33:17.830068 | 2021-02-18T07:17:20 | 2021-02-18T07:17:20 | 165,439,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,156 | py | class HitCounter(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.counter = [[0, i + 1] for i in range(300)]
def hit(self, timestamp):
"""
Record a hit.
@param timestamp - The current timestamp (in seconds granularity).
:type timestamp: int
:rtype: None
"""
index = (timestamp - 1) % 300
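        # if the bucket already belongs to this second, accumulate; otherwise it is stale and gets reset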
if self.counter[index][1] == timestamp:
self.counter[index][0] += 1
else:
self.counter[index][0] = 1
self.counter[index][1] = timestamp
def getHits(self, timestamp):
"""
Return the number of hits in the past 5 minutes.
@param timestamp - The current timestamp (in seconds granularity).
:type timestamp: int
:rtype: int
"""
res = 0
for x in self.counter:
hits, time = x[0], x[1]
if timestamp - time < 300:
res += hits
return res
# Your HitCounter object will be instantiated and called as such:
# obj = HitCounter()
# obj.hit(timestamp)
# param_2 = obj.getHits(timestamp) | [
"[email protected]"
] | |
360daec8504d31ef81d10f54ec8ad22ae5f25ff8 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /u9rnDxsJRDdvRmFai_5.py | 46788572927af8035136536ea75c987317eff0b3 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py |
import re
pattern = r'best\sb\w+'  # raw string: matches "best", one whitespace character, then a word starting with "b"
| [
"[email protected]"
] | |
038a2afe4dbd87146aadaa7a5e7d1b80e3d07b78 | 0306bea08e9aab18f34a799ce8a73e86921f90f7 | /medium/EvaluareReversePolishNotation.py | f4166dc5308c8152784f3331a6b0a9e6cfd0e66d | [] | no_license | GeorgianBadita/LeetCode | 78686fde88ef65b64f84fb7c2a22ba37ef21b8d9 | e3b0571182369c5308e0c29fb87106bb0b0d615a | refs/heads/master | 2022-10-21T00:23:26.479943 | 2022-10-14T20:27:27 | 2022-10-14T20:27:27 | 251,733,951 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | # https://leetcode.com/explore/interview/card/top-interview-questions-medium/114/others/823/
from typing import List
class Solution:
def __init__(self) -> None:
self.__operators = "+-*/"
def apply_operator(self, op1, op, op2):
if op == '+':
return op1 + op2
if op == '-':
return op1 - op2
if op == '/':
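            # int() truncates toward zero, unlike floor division with //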
return int(op1 / op2)
if op == '*':
return op1 * op2
def evalRPN(self, tokens: List[str]) -> int:
if not tokens:
return 0
operands_stack = []
for token in tokens:
if token in self.__operators:
op2 = operands_stack.pop()
op1 = operands_stack.pop()
operands_stack.append(self.apply_operator(op1, token, op2))
else:
operands_stack.append(int(token))
return operands_stack.pop()
print(Solution().evalRPN(["10", "6", "9", "3", "+",
"-11", "*", "/", "*", "17", "+", "5", "+"]))
| [
"[email protected]"
] | |
28140476e361402bc2865261bdff072d090b730d | b5cba88ce8c86740c8c3453134610fd5bafbb8c4 | /Leetcode/17. Letter Combinations of a Phone Number/solution.py | d023f235fd394de279c5c4e274ffb3348e3e229f | [] | no_license | EduardoSantos7/Algorithms4fun | 55fcf9d515ea3b70b93298ac96a58d2ae68dee11 | 6ff182ed596b6322322b087f29e6ad98baec3f97 | refs/heads/master | 2023-07-23T01:38:08.216313 | 2023-07-23T01:35:58 | 2023-07-23T01:35:58 | 227,448,848 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if not digits:
return []
letters = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
ans = []
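        # backtracking helper: extend the partial combination one digit at a time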
def helper(acum):
if acum and len(acum) == len(digits):
ans.append(acum)
return
for letter in letters[digits[len(acum)]]:
helper(acum + letter)
helper('')
return ans
| [
"[email protected]"
] | |
550581cdb8b24a81f41dad30bf26ba3cb86a88a4 | 505963904ce8fedd73caf562ffe993feb98e1043 | /home/urls.py | 113b829e90992f2b6b7a4471dee27cb26be8ce71 | [] | no_license | madmax330/Jobin | 05f0c3be31c1fce10d8df48047affd78c57c45ed | b06d04878ff9f4de1bf9d8cd64cd1c4322610d31 | refs/heads/Develop | 2020-12-08T22:22:44.283964 | 2018-05-30T07:23:27 | 2018-05-30T07:23:27 | 67,643,755 | 0 | 2 | null | 2018-05-30T07:12:30 | 2016-09-07T21:07:10 | JavaScript | UTF-8 | Python | false | false | 1,426 | py | from django.conf.urls import url
from . import views
app_name = 'home'
urlpatterns = [
url(r'^$', views.index_view, name='index'),
url(r'^login/student/$', views.login_student, name='student_login'),
url(r'^login/company/$', views.login_company, name='company_login'),
url(r'^send/contact/message/$', views.send_contact_message, name='send_contact_message'),
url(r'^register/student/$', views.register_student, name='register_student'),
url(r'^register/company/$', views.register_company, name='register_company'),
url(r'^verify/$', views.verify, name='verify'),
url(r'^activate/company/(?P<key>.+)/$', views.activate_company, name='activate_company'),
url(r'^activate/student/(?P<key>.+)/$', views.activate_student, name='activate_student'),
url(r'^new-activation/$', views.new_verification, name='new_activation'),
url(r'^new/password/(?P<ut>\w+)/$', views.new_password_view, name='new_password'),
url(r'^change/user/info/(?P<ut>\w+)/$', views.ChangeUserInfo.as_view(), name='change_info'),
url(r'^logout/$', views.user_logout, name='logout'),
url(r'^privacy-policy/$', views.privacy_policy, name='policy'),
url(r'^terms-and-conditions/$', views.terms_and_conditions, name='terms'),
url(r'^create/content/(?P<n>[0-9]+)/$', views.create_test_content, name='gen_content'),
url(r'^clear/content/$', views.clear_test_content, name='clear_content'),
]
| [
"[email protected]"
] | |
8bd8704b983c3d96858f8d0d288946e59f30920a | 10bfea81cdde6710c6abd4a4ef48a99112d286f8 | /crm/models.py | 7505c38e26858acf86545000549f77849846fab2 | [] | no_license | egAhmed/Django_KMA_Apps | f6b3971a5d2d08f91d7e6d2d76208db1e2877f4e | 83a7491b8c5afe6f60ab78d9bdb826b783c80d08 | refs/heads/master | 2022-12-02T10:28:09.526611 | 2020-08-18T13:24:19 | 2020-08-18T13:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | from django.db import models
from django.urls import reverse
# from django.contrib.admin import widgets
# from datetime import datetime
# from django.utils import timezone
# from django.utils.timezone import now
# Create your models here.
class Registration(models.Model):
name = models.CharField(max_length=150)
username = models.CharField(max_length=50)
email = models.EmailField(blank=True, max_length=254)
password = models.CharField(max_length=50)
bio = models.TextField(blank=True)
#this class for
# class Choices(models.Model):
# description = models.CharField(max_length=100)
class Clients(models.Model):
# record = models.ForeignKey(RecordFirm, on_delete=models.CASCADE)
name = models.CharField(max_length=200)
phone = models.CharField(max_length=20, blank=True, null=True)
mobile = models.CharField(max_length=20, blank=True, null=True)
address = models.CharField(max_length=200, blank=True, null=True)
photo = models.ImageField(upload_to='Clients_pics', blank=True)
identityNo = models.CharField(max_length=200, blank=True, null=True)
notes = models.CharField(max_length=2000, blank=True, null=True)
def __str__(self):
return self.name #+ " | " + str(self.photo)
def get_absolute_url(self):
return reverse('crm:clients_update', kwargs={'id': self.id})
def goto_home(self):
return reverse('crm:home')
class RecordFirm(models.Model):
#now = timezone.now()
Currency = (
('EGY', 'Egy Pound'),
('USD', 'US Dollar')
)
# Tax_Choice = ('taxno', 'Tax No.')
# Part_Choice=('partno', 'Part No.')
# Purchase_Choice=('purchaseno', 'Purchase No.')
client_id = models.ForeignKey(Clients,
on_delete=models.CASCADE,
default=False,
null=False)
    firm_name = models.CharField(max_length=200,
                                 blank=True,
                                 null=True,
                                 verbose_name='Company Name')
manager = models.CharField(max_length=200, blank=True, null=True)
repres_name = models.CharField(max_length=200, blank=True, null=True)
last_visit = models.DateField()
notes = models.TextField()
type = models.CharField(max_length=3, choices=Currency, null=True)
#paper = models.ManyToManyField(Choices)
tax_no = models.BooleanField(default=False)
part_no = models.BooleanField(default=False)
purchase_no = models.BooleanField(default=False)
# client_id = models.
def __str__(self):
        return str(self.client_id)
def get_url(self):
return reverse('crm:firm_update', kwargs={'id': self.id})
def go_home(self):
return reverse('crm:regdata') # , kwargs={'id': self.id}
| [
"[email protected]"
] | |
d1acfdd2bd0014aee0b1f83318cc3cd27a0d2093 | db0b0935c069a877a7e583cc6cbbe006f3ea271d | /Section2/053.py | bc374f36e5ecb74356822b407f3291a39fd70eda | [] | no_license | partrita/biopython | 9294033c8809441190ea861e484b7678dbb2909a | a488559820980bd054b67395756e01cffa738965 | refs/heads/master | 2020-05-05T13:28:12.465422 | 2019-04-08T06:03:49 | 2019-04-08T06:03:49 | 180,078,766 | 4 | 1 | null | 2019-04-08T05:51:39 | 2019-04-08T05:51:39 | null | UTF-8 | Python | false | false | 180 | py | #053.py
a = [3, 5, 2, 1, 4]
b = [8, 10, 7, 6, 9]
print("sorted(a)")
print(sorted(a, reverse=True))
print("a")
print(a)
print("")
b.sort(reverse=True)
print("b.sort()")
print(b)
| [
"[email protected]"
] | |
861c5f70e7ecb76286b4a0b2647c81904eed9c51 | 44b2743ff70ce0631e9714ce78c44720fa63a9ad | /app/config/urls.py | 00d26fff98d1af869b8364b221e013f52296ccf8 | [
"MIT"
] | permissive | hoelsner/product-database | 1b1b4db8e968f5bc149605093e4639c48a9ae1ad | c649569fb82bc4b0a5e9ef9615fff8a364ce652f | refs/heads/master | 2023-07-24T21:39:01.870692 | 2023-07-09T17:03:56 | 2023-07-09T17:03:56 | 43,767,455 | 43 | 27 | MIT | 2023-04-16T19:17:25 | 2015-10-06T17:44:50 | Python | UTF-8 | Python | false | false | 683 | py | """
Product Database Config URL configuration (namespace "productdb_config")
"""
from django.conf.urls import url
from app.config import views
# namespace: productdb_config
urlpatterns = [
# user views
url(r'^change/$', views.change_configuration, name='change_settings'),
url(r'^status/$', views.status, name='status'),
url(r'^flush_cache/$', views.flush_cache, name='flush_cache'),
url(r'^messages/$', views.server_messages_list, name='notification-list'),
url(r'^messages/add/$', views.add_notification, name='notification-add'),
url(r'^messages/(?P<message_id>\d+)/$', views.server_message_detail, name='notification-detail'),
]
app_name = "config"
| [
"[email protected]"
] | |
171a3cc32d02abfa9872ea2167a3e1182d2aae6a | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/consumption/latest/__init__.py | f3e9e2805ca06f72ddd2b242d2c80d0413d13a4a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .budget import *
from .budget_by_resource_group_name import *
from .get_budget import *
from .get_budget_by_resource_group_name import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:consumption/latest:Budget":
return Budget(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:consumption/latest:BudgetByResourceGroupName":
return BudgetByResourceGroupName(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "consumption/latest", _module_instance)
_register_module()
| [
"[email protected]"
] | |
a709e84115ad3c609b6e6e41d13916caa6b916ed | 6044b98e65c38233fb587b3fd40290a7be0b4c6d | /uni_ticket/migrations/0036_ticketreply_read.py | 592ee30f86fc243801323666f87cac4ffb684a03 | [
"Apache-2.0"
] | permissive | libremente/uniTicket | f5bef4ff85edb03a799b5e87a49050becd1822fa | 6f41f0ce9bd0f1238bffcde1c4e12a38266c781b | refs/heads/master | 2022-04-23T05:21:39.528135 | 2020-04-22T16:11:29 | 2020-04-22T16:11:29 | 257,953,451 | 0 | 0 | Apache-2.0 | 2020-04-22T16:11:01 | 2020-04-22T16:11:00 | null | UTF-8 | Python | false | false | 393 | py | # Generated by Django 2.2.3 on 2019-07-30 09:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('uni_ticket', '0035_auto_20190725_1632'),
]
operations = [
migrations.AddField(
model_name='ticketreply',
name='read',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
02ece8aff15fad1464676b60c95f4ee8493c447c | 31a928cff4960236923b6bc3b68e34bb2f46f470 | /ctc-executioner/setup.py | 768b03ff56cb33d23f6cf759647c444e12ae35e2 | [
"BSD-3-Clause"
] | permissive | webclinic017/ml_monorepo | 707df2afd2f986eb0721d26430e6135c917817c6 | 945f0a83d6b94282c547bb6f4805f3381ad9c16a | refs/heads/master | 2021-10-19T21:02:53.322944 | 2019-02-19T20:58:51 | 2019-02-23T20:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | from setuptools import setup
setup(name='gym_ctc_executioner',
packages=['gym_ctc_executioner'],
version='0.0.1',
install_requires=['gym']
)
setup(name='gym_ctc_marketmaker',
packages=['gym_ctc_marketmaker'],
version='0.0.1',
install_requires=['gym']
)
| [
"[email protected]"
] | |
e1be4b994675e0158566681530cd9169bb10ece9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startPyquil447.py | b50d0c38f7773bb40682486d38291f1927f81bbc | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,898 | py | # qubit number=2
# total number=83
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += H(1) # number=70
prog += RX(-0.09738937226128368,2) # number=2
prog += H(1) # number=33
prog += Y(2) # number=56
prog += CZ(2,1) # number=34
prog += H(1) # number=35
prog += H(1) # number=3
prog += H(0) # number=45
prog += H(1) # number=77
prog += CZ(2,1) # number=78
prog += H(1) # number=79
prog += CZ(1,0) # number=46
prog += H(0) # number=47
prog += Y(1) # number=15
prog += H(0) # number=66
prog += CZ(1,0) # number=67
prog += H(0) # number=68
prog += H(1) # number=19
prog += CZ(0,1) # number=20
prog += RX(-0.6000441968356504,1) # number=28
prog += H(1) # number=21
prog += H(1) # number=30
prog += CZ(0,1) # number=31
prog += H(1) # number=32
prog += H(1) # number=57
prog += CZ(0,1) # number=58
prog += H(1) # number=59
prog += CNOT(0,1) # number=51
prog += CNOT(0,1) # number=71
prog += X(1) # number=72
prog += CNOT(0,1) # number=73
prog += CNOT(0,1) # number=53
prog += H(1) # number=80
prog += CZ(0,1) # number=81
prog += H(1) # number=82
prog += Y(2) # number=69
prog += H(2) # number=29
prog += H(1) # number=36
prog += X(1) # number=64
prog += CZ(0,1) # number=37
prog += Y(2) # number=44
prog += H(1) # number=38
prog += Z(1) # number=55
prog += H(1) # number=61
prog += CZ(0,1) # number=62
prog += Z(2) # number=65
prog += H(1) # number=63
prog += Z(1) # number=11
prog += RX(-1.1780972450961724,2) # number=54
prog += H(1) # number=42
prog += H(0) # number=39
prog += CZ(1,0) # number=40
prog += H(0) # number=41
prog += CNOT(2,1) # number=26
prog += Y(1) # number=14
prog += CNOT(1,0) # number=5
prog += CNOT(0,1) # number=74
prog += X(1) # number=75
prog += CNOT(0,1) # number=76
prog += Z(1) # number=8
prog += X(1) # number=7
prog += H(2) # number=43
prog += RX(-2.42845112122491,1) # number=25
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil447.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
27bb58ea731197cadd74122bc2818fb29224ab4e | 90d9610ab5a878966868858b1b261cb6be5f6f97 | /test/parser_test/log_line_test.py | e817f891780930044bfe3b69da50f507a998eeec | [] | no_license | torbjoernk/pfasst_py | fd4c68cd63592feca8c811b9d994c66a470b541c | 9309734a41a17ff0e617a242d1c8ebfd75a89698 | refs/heads/master | 2021-01-10T01:55:54.256722 | 2015-11-04T13:07:22 | 2015-11-04T13:07:22 | 45,114,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,411 | py | # coding=utf-8
"""
.. moduleauthor:: Torbjörn Klatt <[email protected]>
"""
import datetime
import unittest
from pfasst_py.parser.log_line import LogLine
class LogLineTest(unittest.TestCase):
def setUp(self):
self.msg_normal = "04.11.2015 13:51:15,37 [PFASST , INFO , MPI 0] PFASST Prediction step"
self.msg_no_text = "04.11.2015 13:51:15,37 [PFASST , INFO , MPI 0] "
self.msg_no_mpi = "04.11.2015 13:51:15,37 [SDC , INFO ] PFASST Prediction step"
self.msg_no_mpi_no_text = "04.11.2015 13:51:15,37 [SDC , INFO ] "
def test_emits_a_warning_for_wrongly_formatted_log_lines(self):
with self.assertLogs('pfasst_py', level='WARNING') as cptr:
LogLine('not a log line')
self.assertRegex('\n'.join(cptr.output), "Log line could not be parsed")
def test_parse_mpi_line_with_message(self):
obj = LogLine(self.msg_normal)
self.assertEqual(obj.timestamp.value, datetime.datetime(2015, 11, 4, 13, 51, 15, 370000))
self.assertEqual(obj.logger.value, 'PFASST')
self.assertEqual(obj.level.value, 'INFO')
self.assertEqual(obj.rank.value, '0')
self.assertEqual(obj.message.value, 'PFASST Prediction step')
def test_parse_mpi_line_without_message(self):
obj = LogLine(self.msg_no_text)
self.assertEqual(obj.timestamp.value, datetime.datetime(2015, 11, 4, 13, 51, 15, 370000))
self.assertEqual(obj.logger.value, 'PFASST')
self.assertEqual(obj.level.value, 'INFO')
self.assertEqual(obj.rank.value, '0')
self.assertEqual(obj.message.value, '')
def test_parse_non_mpi_line_with_message(self):
obj = LogLine(self.msg_no_mpi)
self.assertEqual(obj.timestamp.value, datetime.datetime(2015, 11, 4, 13, 51, 15, 370000))
self.assertEqual(obj.logger.value, 'SDC')
self.assertEqual(obj.level.value, 'INFO')
self.assertIsNone(obj.rank)
self.assertEqual(obj.message.value, 'PFASST Prediction step')
def test_parse_non_mpi_line_without_message(self):
obj = LogLine(self.msg_no_mpi_no_text)
self.assertEqual(obj.timestamp.value, datetime.datetime(2015, 11, 4, 13, 51, 15, 370000))
self.assertEqual(obj.logger.value, 'SDC')
self.assertEqual(obj.level.value, 'INFO')
self.assertIsNone(obj.rank)
self.assertEqual(obj.message.value, '')
| [
"[email protected]"
] | |
5631ee24c00d9fdae2e324a445b848c7cf580bf8 | 6a898e59343d0b3ea4f9580f489ef76d888b2b7e | /ecommerce/migrations/0010_stock_first_quantity.py | 61dafb7c5d1ec221bb519674e006100650c3d362 | [] | no_license | oujri/ecommerce | 4b08b0316671e24206e810a38728d71c77fdc396 | 3fd8095dd2ed771a6951ed7fff08ca11ef0b94a1 | refs/heads/master | 2020-03-19T03:39:36.002373 | 2018-06-01T22:09:31 | 2018-06-01T22:09:31 | 135,749,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.0.4 on 2018-05-05 00:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0009_auto_20180505_0126'),
]
operations = [
migrations.AddField(
model_name='stock',
name='first_quantity',
field=models.IntegerField(default=0),
),
]
| [
"[email protected]"
] | |
ce29c2926f8ae832ab67ad03400bf494c196277d | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/semantic_segmentation/PSPNet/mmcv_replace/version.py | b67226251d80062997136a3247b76992815b9730 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 833 | py | # Copyright (c) Open-MMLab. All rights reserved.
__version__ = '1.2.7'
def parse_version_info(version_str):
"""Parse a version string into a tuple.
Args:
version_str (str): The version string.
Returns:
tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
(1, 3, 0), and "2.0.0rc1" is parsed into (2, 0, 0, 'rc1').
"""
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
version_info.append(int(patch_version[0]))
version_info.append(f'rc{patch_version[1]}')
return tuple(version_info)
version_info = parse_version_info(__version__)
__all__ = ['__version__', 'version_info', 'parse_version_info']
| [
"[email protected]"
] | |
1da3ef21d2bb5e19ae2f2df7650686cf980d2119 | bfa81e9ebd6c394fb7ff27afd063bca6e5cf48c1 | /signal_example/apps.py | daa5ee116228405237999efc5ff5f8cee079ea3a | [
"MIT"
] | permissive | bluebamus/django_miscellaneous_book | b0dea3b856323304faca1d41edb8f70f2b8c6455 | 22e0851b3a07aeef94bb723b334f036ed5c17f72 | refs/heads/main | 2023-07-09T17:15:56.206762 | 2021-08-11T10:36:47 | 2021-08-11T10:36:49 | 382,425,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | from django.apps import AppConfig
class SignalExampleConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'signal_example'
| [
"[email protected]"
] | |
a5a08d451a68ff1a255c4cf0f9c08783a001317a | 926b3c52070f6e309567c8598248fd5c57095be9 | /src/onnx/onnx/test/shape_inference_test.py | bdb1048542e988f7a7bdbed1a6310da680325c1b | [
"Apache-2.0"
] | permissive | fengbingchun/PyTorch_Test | 410f7cd2303707b0141d433fb9d144a961e1f4c8 | df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348 | refs/heads/master | 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 | C++ | UTF-8 | Python | false | false | 228,993 | py | # SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
from onnx import checker, helper, numpy_helper, TensorProto, NodeProto, GraphProto, ValueInfoProto, ModelProto, ONNX_ML, SparseTensorProto, TypeProto
from onnx.defs import ONNX_DOMAIN, ONNX_ML_DOMAIN, AI_ONNX_PREVIEW_TRAINING_DOMAIN
from onnx.helper import make_node, make_tensor, make_tensor_value_info, make_empty_tensor_value_info, make_opsetid, make_tensor_sequence_value_info
from typing import Sequence, Union, Tuple, Type, List, Any, Optional
import onnx.shape_inference
import unittest
import os
import numpy as np # type: ignore
class TestShapeInference(unittest.TestCase):
def _make_graph(self,
seed_values: Sequence[Union[str, Tuple[str, TensorProto.DataType, Any]]],
nodes: List[NodeProto],
value_info: List[ValueInfoProto],
initializer: Optional[Sequence[TensorProto]] = None
) -> GraphProto:
if initializer is None:
initializer = []
names_in_initializer = {x.name for x in initializer}
input_value_infos = []
# If the starting values are not also initializers,
# introduce the starting values as the output of reshape,
# so that the sizes are guaranteed to be unknown
for seed_value in seed_values:
if isinstance(seed_value, tuple):
seed_name, proto_type = seed_value[:2]
seed_value_info = make_tensor_value_info(*seed_value)
else:
seed_name, proto_type = seed_value, TensorProto.UNDEFINED
seed_value_info = make_empty_tensor_value_info(seed_value)
if seed_name in names_in_initializer:
input_value_infos.append(seed_value_info)
else:
value_info.append(seed_value_info)
input_value_infos.append(make_tensor_value_info('SEED_' + seed_name, proto_type, ()))
input_value_infos.append(make_tensor_value_info('UNKNOWN_SHAPE_' + seed_name, TensorProto.INT64, ()))
nodes[:0] = [make_node("Reshape", ['SEED_' + seed_name, 'UNKNOWN_SHAPE_' + seed_name], [seed_name])]
return helper.make_graph(nodes, "test", input_value_infos, [], initializer=initializer, value_info=value_info)
def _inferred(self, graph: GraphProto, **kwargs: Any) -> ModelProto:
kwargs['producer_name'] = 'onnx-test'
data_prop = kwargs.pop('data_prop', False)
orig_model = helper.make_model(graph, **kwargs)
inferred_model = onnx.shape_inference.infer_shapes(orig_model, strict_mode=True, data_prop=data_prop)
checker.check_model(inferred_model)
return inferred_model
def _assert_inferred(self, graph: GraphProto, vis: List[ValueInfoProto], **kwargs: Any) -> None:
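        # run shape inference on the graph and check that every inferred value_info matches the expected one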
names_in_vis = {x.name for x in vis}
vis = list(x for x in graph.value_info if x.name not in names_in_vis) + vis
inferred_model = self._inferred(graph, **kwargs)
inferred_vis = list(inferred_model.graph.value_info)
vis = list(sorted(vis, key=lambda x: x.name))
inferred_vis = list(sorted(inferred_vis, key=lambda x: x.name))
assert len(vis) == len(inferred_vis)
for i in range(len(vis)):
self._compare_value_infos(vis[i].type, inferred_vis[i].type)
def _compare_value_infos(self, vi_type: TypeProto, inferred_vi_type: TypeProto) -> None:
if vi_type.HasField('tensor_type'):
assert inferred_vi_type.HasField('tensor_type')
assert vi_type.tensor_type.HasField('elem_type')
assert inferred_vi_type.tensor_type.HasField('elem_type')
assert vi_type.tensor_type.elem_type == inferred_vi_type.tensor_type.elem_type
assert vi_type.tensor_type.HasField('shape') == inferred_vi_type.tensor_type.HasField('shape')
if vi_type.tensor_type.HasField('shape'):
assert len(vi_type.tensor_type.shape.dim) == len(inferred_vi_type.tensor_type.shape.dim)
for dim_i in range(len(vi_type.tensor_type.shape.dim)):
dim = vi_type.tensor_type.shape.dim[dim_i]
inferred_dim = inferred_vi_type.tensor_type.shape.dim[dim_i]
# if it is a symbolic shape, make sure the inferred symbol has generated (dim_param)
if dim.dim_param:
assert dim.dim_param == inferred_dim.dim_param, f'\n{vi_type}\n{inferred_vi_type}\n'
else:
assert dim.dim_value == inferred_dim.dim_value, f'\n{vi_type}\n{inferred_vi_type}\n'
elif vi_type.HasField('sequence_type'):
assert inferred_vi_type.HasField('sequence_type')
vi = vi_type.sequence_type.elem_type
inferred_vi = inferred_vi_type.sequence_type.elem_type
self._compare_value_infos(vi, inferred_vi)
elif vi_type.HasField('optional_type'):
assert inferred_vi_type.HasField('optional_type')
vi = vi_type.optional_type.elem_type
inferred_vi = inferred_vi_type.optional_type.elem_type
self._compare_value_infos(vi, inferred_vi)
else:
raise NotImplementedError(
"Unrecognized value info type in _compare_value_infos: ", str(vi_type))
def test_empty_graph(self) -> None:
graph = self._make_graph(
['y'],
[], [])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def _identity_prop(self, op: str, **kwargs: Any) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5))],
[make_node(op, 'x', 'y', **kwargs)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (30, 4, 5))])
def test_transpose(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_preexisting(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, None)])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_partial(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.UNDEFINED, (3, "a", "b"))]) # type: ignore
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (3, 2, 4))])
def test_transpose_preexisting_incorrect_shape(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, (5, 5, 5))])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def test_transpose_preexisting_incorrect_type(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 2])],
[make_tensor_value_info("Y", TensorProto.STRING, (3, 2, 4))])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def test_transpose_incorrect_repeated_perm(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node("Transpose", ["X"], ["Y"], perm=[1, 0, 1])],
[])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def _make_matmul_test_all_dims_known(self, shape1: Sequence[int], shape2: Sequence[int]) -> None:
expected_out_shape = np.matmul(np.arange(np.product(shape1)).reshape(shape1),
np.arange(np.product(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('x', TensorProto.FLOAT, shape1),
('y', TensorProto.FLOAT, shape2)],
[make_node('MatMul', ['x', 'y'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, expected_out_shape)])
def test_matmul_all_dims_known(self) -> None:
self._make_matmul_test_all_dims_known((2,), (2,))
self._make_matmul_test_all_dims_known((4, 2), (2, 4))
self._make_matmul_test_all_dims_known((5, 2), (2, 4))
self._make_matmul_test_all_dims_known((5, 2), (2, 1))
self._make_matmul_test_all_dims_known((1, 2), (2, 3))
self._make_matmul_test_all_dims_known((2,), (2, 3))
self._make_matmul_test_all_dims_known((4, 2), (2,))
self._make_matmul_test_all_dims_known((1, 4, 2), (3, 2, 3))
self._make_matmul_test_all_dims_known((3, 4, 2), (3, 2, 3))
self._make_matmul_test_all_dims_known((5, 1, 4, 2), (1, 3, 2, 3))
self._make_matmul_test_all_dims_known((4, 2), (3, 2, 3))
def _make_matmul_test_allow_unknown(self, shape1: Any, shape2: Any, expected_out_shape: Any) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, shape1),
('y', TensorProto.FLOAT, shape2)],
[make_node('MatMul', ['x', 'y'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, expected_out_shape)])
def test_matmul_allow_unknown(self) -> None:
self._make_matmul_test_allow_unknown((None,), (None,), ())
self._make_matmul_test_allow_unknown((3,), (None,), ())
self._make_matmul_test_allow_unknown((2,), (2, "a"), ("a",))
self._make_matmul_test_allow_unknown((4, 2), (2, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((4, None), (2, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((4, None), (None, "a"), (4, "a"))
self._make_matmul_test_allow_unknown((1, 4, 2), ("a", 2, 5), ("a", 4, 5))
self._make_matmul_test_allow_unknown((1, 3, 4, 2), ("a", 2, 5), (1, 3, 4, 5))
self._make_matmul_test_allow_unknown((3,), None, None)
self._make_matmul_test_allow_unknown(None, None, None)
def test_cast(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3))],
[make_node("Cast", ["x"], ["y"], to=TensorProto.UINT8)],
[])
self._assert_inferred(graph, [make_tensor_value_info("y", TensorProto.UINT8, (2, 4, 3))])
def test_cast_like(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3)), ("t", TensorProto.FLOAT16, ("N",))],
[make_node("CastLike", ["x", "t"], ["y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("y", TensorProto.FLOAT16, (2, 4, 3))])
def test_concat(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3)),
("y", TensorProto.FLOAT, (7, 4, 3))],
[make_node("Concat", ['x', 'y'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (9, 4, 3))])
def test_concat_missing_shape(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 4, 3)),
"y",
("z", TensorProto.FLOAT, (None, None, None))],
[make_node("Concat", ['x', 'y', 'z'], ['out'], axis=0)],
[])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def test_concat_3d_axis_2(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Concat', ['x', 'y'], ['z'], axis=2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 2, 4))])
def test_concat_param(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, ("a", 2)),
("y", TensorProto.FLOAT, ("a", 3))],
[make_node("Concat", ['x', 'y'], ['z'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ("a", 5))])
def test_concat_param_single_input(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, ("a", 2))],
[make_node("Concat", ['x'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ("a", 2))])
def test_reshape_dynamic_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.INT64, (2,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, None)])
def test_reshape_static_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.INT64, (2,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,), (3, 8))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (3, 8))])
def test_reshape_static_shape_inferred(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3)),
('shape', TensorProto.INT64, (3,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (0, 3, -1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (2, 3, 4))])
def test_reshape_static_shape_zero(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (1, 1, 1)),
('shape', TensorProto.INT64, (3,))],
[make_node("Reshape", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (0, 1, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (1, 1, 1))])
def test_reshape_static_shape_allowzero(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (1, 0, 0)),
('shape', TensorProto.INT64, (3,))],
[make_node("Reshape", ['x', 'shape'], ['y'], allowzero=1)],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (0, 1, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (0, 1, 1))])
def test_reshape_static_shape_constant(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (2, 4, 3))],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (2,), (3, 8))),
make_node("Reshape", ['x', 'shape'], ['y'])],
[])
self._assert_inferred(graph, [
make_tensor_value_info('shape', TensorProto.INT64, (2,)),
make_tensor_value_info('y', TensorProto.UINT8, (3, 8))])
def test_upsample(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Upsample", ['x', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,), (1.0, 1.1, 1.3, 1.9))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_upsample_raw_data(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Upsample", ['x', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,),
vals=np.array([1.0, 1.1, 1.3, 1.9], dtype='<f4').tobytes(), raw=True)]) # Feed raw bytes (force little endian ordering like onnx standard) for test purpose
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_upsample_raw_data_v7(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 3, 4, 5))],
[make_node("Upsample", ['x'], ['y'], scales=[2.0, 1.1, 2.3, 1.9])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 9, 9))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 7)])
def test_expand(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1)),
('shape', TensorProto.INT64, (3,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (3,), (2, 1, 6))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 6))])
def test_expand_scalar_input(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, ()),
('shape', TensorProto.INT64, (2,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,), (4, 8))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (4, 8))])
def test_expand_raw_data(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1)),
('shape', TensorProto.INT64, (2,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[make_tensor('shape', TensorProto.INT64, (2,),
vals=np.array([3, 4], dtype='<i8').tobytes(), raw=True)]) # Feed raw bytes (force little endian ordering like onnx standard) for test purpose
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (3, 4))])
def test_expand_symbolic_input(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1, 2)),
('y', TensorProto.INT32, (1, 4, 2))],
[make_node("Shape", ['y'], ['shape']),
make_node("Expand", ['x', 'shape'], ['z'])],
[])
self._assert_inferred(graph, [
make_tensor_value_info('shape', TensorProto.INT64, (3,)),
make_tensor_value_info('z', TensorProto.INT32, (3, 4, 2))],
data_prop=True)
def test_expand_dynamic_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 2, None)),
('shape', TensorProto.INT64, (3,))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (None, 2, None))])
def test_expand_symbolic_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 2, None)),
('shape', TensorProto.INT64, ('unk__0',))],
[make_node("Expand", ['x', 'shape'], ['y'])],
[],
initializer=[])
# if giving a symbolic shape, Expand should not infer any shape or rank inference
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, None)])
def test_resize_size(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,)),
('sizes', TensorProto.INT64, (4,))],
[make_node("Resize", ['x', 'roi', 'scales', 'sizes'], ['y'])],
[],
initializer=[make_tensor('sizes', TensorProto.INT64, (4,), (3, 5, 6, 7))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (3, 5, 6, 7))])
def test_resize_scale(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (2, 4, 3, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Resize", ['x', 'roi', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,), (1.0, 1.1, 1.3, 1.9))])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 4, 3, 9))])
def test_resize_scale_raw_data(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (1, 3, 4, 5)),
('roi', TensorProto.FLOAT, (8,)),
('scales', TensorProto.FLOAT, (4,))],
[make_node("Resize", ['x', 'roi', 'scales'], ['y'])],
[],
initializer=[make_tensor('scales', TensorProto.FLOAT, (4,),
vals=np.array([2.0, 1.1, 2.3, 1.9], dtype='<f4').tobytes(), raw=True)])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.INT32, (2, 3, 9, 9))])
def test_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (3,))])
def test_shape_start_1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'], start=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (2,))])
def test_shape_end_1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'], end=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1,))])
def test_shape_negative_start(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'], start=-1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1,))])
def test_shape_clip1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'], start=-5)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (3,))])
def test_shape_clip2(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Shape", ['x'], ['y'], end=10)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (3,))])
def test_size(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4, 3))],
[make_node("Size", ['x'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, ())])
def test_gather(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 3)),
('i', TensorProto.INT64, (2,))],
[make_node("Gather", ['x', 'i'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_gather_axis1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 3, 5)),
('i', TensorProto.INT64, (1, 2))],
[make_node("Gather", ['x', 'i'], ['y'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 1, 2, 5))]) # type: ignore
def test_gather_into_scalar(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3,)),
('i', TensorProto.INT64, ())],
[make_node("Gather", ['x', 'i'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ())])
def test_gather_elements(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2)),
('i', TensorProto.INT64, (2, 2))],
[make_node("GatherElements", ['x', 'i'], ['y'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))]) # type: ignore
def test_gather_elements_axis0(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3))],
[make_node("GatherElements", ['x', 'i'], ['y'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_scatter(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3)),
('u', TensorProto.FLOAT, (2, 3))],
[make_node("Scatter", ['x', 'i', 'u'], ['y'])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 3))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 10)]) # type: ignore
def test_scatter_axis1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 5)),
('i', TensorProto.INT64, (1, 2)),
('u', TensorProto.FLOAT, (1, 2))],
[make_node("Scatter", ['x', 'i', 'u'], ['y'], axis=1)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (1, 5))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 10)]) # type: ignore
def test_scatter_elements(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 3)),
('i', TensorProto.INT64, (2, 3)),
('u', TensorProto.FLOAT, (2, 3))],
[make_node("ScatterElements", ['x', 'i', 'u'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 3))]) # type: ignore
def test_scatter_elements_axis1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 5)),
('i', TensorProto.INT64, (1, 2)),
('u', TensorProto.FLOAT, (1, 2))],
[make_node("ScatterElements", ['x', 'i', 'u'], ['y'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 5))]) # type: ignore
def test_scatternd(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (3, 3, 2)),
('updates', TensorProto.FLOAT, (3, 3, 6))],
[make_node("ScatterND", ['x', 'indices', 'updates'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 5, 6))]) # type: ignore
def test_scatternd_noshape(self) -> None:
# The shape of 'x_reshaped' cannot be inferred, since it is the output of a dynamic reshape.
# Thus the shape of 'y' is also None.
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (3, 3, 2)),
('updates', TensorProto.FLOAT, (3, 3, 6)),
('shape', TensorProto.INT64, (2,))],
[make_node("Reshape", ['x', 'shape'], ['x_reshaped']),
make_node("ScatterND", ['x_reshaped', 'indices', 'updates'], ['y'])],
[])
self._assert_inferred(graph, [
make_tensor_value_info('x_reshaped', TensorProto.FLOAT, None),
make_tensor_value_info('y', TensorProto.FLOAT, None)]) # type: ignore
def test_squeeze(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 3, 1, 1, 2, 1)),
('axes', TensorProto.INT64, (4,))],
[make_node('Squeeze', ['x', 'axes'], 'y')],
[],
initializer=[make_tensor('axes', TensorProto.INT64, (4,), (0, 2, 3, 5))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 2))])
def test_unsqueeze_regular(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('axes', TensorProto.INT64, (4,))],
[make_node('Unsqueeze', ['x', 'axes'], 'y')],
[],
initializer=[make_tensor('axes', TensorProto.INT64, (4,), (0, 1, 3, 5))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 1, 3, 1, 2, 1))])
def test_unsqueeze_unsorted_axes(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('axes', TensorProto.INT64, (2,))],
[make_node('Unsqueeze', ['x', 'axes'], 'y')],
[],
initializer=[make_tensor('axes', TensorProto.INT64, (2,), (4, 0))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 3, 4, 5, 1))])
def test_unsqueeze_negative_axes(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('axes', TensorProto.INT64, (2,))],
[make_node('Unsqueeze', ['x', 'axes'], 'y')],
[],
initializer=[make_tensor('axes', TensorProto.INT64, (2,), (0, -1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 3, 4, 5, 1))])
def test_unsqueeze_scalar(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ()),
('axes', TensorProto.INT64, ())],
[make_node('Unsqueeze', ['x', 'axes'], 'y')],
[],
initializer=[make_tensor('axes', TensorProto.INT64, (), (-1,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1,))])
def test_slice_without_input_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)), ('starts', TensorProto.INT64, (1,)), ('ends', TensorProto.INT64, (1,))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, None)])
def test_slice_with_input_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)), ('starts', TensorProto.INT64, (2, )), ('ends', TensorProto.INT64, (2, ))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2, ),
vals=np.array([1, 0], dtype='<i8').tobytes(), raw=True), # Feed raw bytes (force little endian ordering like onnx standard) for test purpose
make_tensor('ends', TensorProto.INT64, (2, ), (2, 2))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2))])
def test_slice_with_input_shape_containing_dim_params(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 'a', 1)),
('starts', TensorProto.INT64, (3,)),
('ends', TensorProto.INT64, (3,))],
[make_node('Slice', ['x', 'starts', 'ends'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (3,), (0, 0, 0)),
make_tensor('ends', TensorProto.INT64, (3,), (1, 1, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, None, 1))]) # type: ignore
def test_slice_with_input_shape_steps(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7)),
('starts', TensorProto.INT64, (3,)),
('ends', TensorProto.INT64, (3,)),
('axes', TensorProto.INT64, (None)),
('steps', TensorProto.INT64, (3,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (3,), (1, 0, 0)),
make_tensor('ends', TensorProto.INT64, (3,), (2, 6, 6)),
make_tensor('steps', TensorProto.INT64, (3,), (1, 4, 3))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2, 2))])
def test_slice_with_input_shape_axes(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 6, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
('steps', TensorProto.INT64, (None))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], ['y'])],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (2, 2)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 2))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 6, 2))])
def test_slice_unsorted_axes(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (2, 2)),
make_tensor('axes', TensorProto.INT64, (2,), (1, 0))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))]) # can handle unsorted axes
def test_slice_giant_number(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, 22000)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))])
def test_slice_giant_step(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
('steps', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, 200)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('steps', TensorProto.INT64, (2,), (1, 200))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))])
def test_slice_negative_end(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT64, (2,), (200, -1)), # negative end means begin from end of a dimension (here end = 2 - 1 = 1)
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 1))]) # type: ignore
def test_slice_negative_start(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 2)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, -2)), # negative start means begin from end of a dimension (here end = 2 - 2 = 0)
make_tensor('ends', TensorProto.INT64, (2,), (200, 3)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))]) # type: ignore
def test_slice_negative_step(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4)),
('starts', TensorProto.INT64, (2,)),
('ends', TensorProto.INT64, (2,)),
('axes', TensorProto.INT64, (2,)),
('steps', TensorProto.INT64, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes', 'steps'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (2,), (1, 4)), # 4 will be clamped to 3 since we are negative stepping
make_tensor('ends', TensorProto.INT64, (2,), (200, 0)),
make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('steps', TensorProto.INT64, (2,), (1, -1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3))]) # type: ignore
def test_slice_variable_copy(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ("a", 2)),
('starts', TensorProto.INT64, (1,)),
('ends', TensorProto.INT64, (1,)),
('axes', TensorProto.INT64, (1,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT64, (1,), (1,)),
make_tensor('ends', TensorProto.INT64, (1,), (200,)),
make_tensor('axes', TensorProto.INT64, (1,), (1,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ("a", 1))]) # type: ignore
def test_slice_variable_input_types(self) -> None:
graph = self._make_graph(
[('x', TensorProto.DOUBLE, (3, 2)),
('starts', TensorProto.INT32, (2,)),
('ends', TensorProto.INT32, (2,)),
('axes', TensorProto.INT32, (2,))],
[make_node('Slice', ['x', 'starts', 'ends', 'axes'], 'y')],
[],
initializer=[make_tensor('starts', TensorProto.INT32, (2,), (1, 0)),
make_tensor('ends', TensorProto.INT32, (2,), (200, 22000)),
make_tensor('axes', TensorProto.INT32, (2,), (0, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.DOUBLE, (2, 2))])
def test_conv(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('y', TensorProto.FLOAT, (5, 4, 2, 4, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
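        # Each output spatial dim follows floor((in + pad_total - (dilation*(kernel-1) + 1)) / stride) + 1:
        # (5+0-2)/1+1 = 4, (6+1-7)/1+1 = 1, floor((7+2-5)/2)+1 = 3, hence (3, 5, 4, 1, 3).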
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (3, 5, 4, 1, 3))])
def test_conv_1d_simple(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (50, 4, 2))],
[make_node('Conv', ['x', 'y'], 'z', dilations=[1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 4))])
def test_conv_dilations(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', dilations=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 4, 2))])
def test_conv_strides(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', strides=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 3, 2))])
def test_conv_pads(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 6, 6, 6))])
def test_conv_auto_pad(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 4, 3, 2))],
[make_node('Conv', ['x', 'y'], 'z', auto_pad='SAME_UPPER')],
[])
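        # SAME_UPPER with default strides of 1 pads so the output spatial dims equal the input's (7, 6, 4).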
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 7, 6, 4))])
def test_conv_auto_pads(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 4, 3, 2))],
[make_node('Conv', ['x', 'y'], 'z', auto_pad='SAME_UPPER', strides=[2, 2, 1])],
[])
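        # With SAME_* auto padding the output spatial dims are ceil(input / stride): ceil(7/2)=4, ceil(6/2)=3, ceil(4/1)=4.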
self._assert_inferred(
graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 4, 3, 4))])
def test_conv_auto_pad_dilation(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 65, 64, 63)),
('y', TensorProto.FLOAT, (50, 4, 4, 3, 2))],
[make_node('Conv', ['x', 'y'], 'z', auto_pad='SAME_UPPER', dilations=[2, 3, 4])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 65, 64, 63))])
def test_conv_group(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 8, 8, 8)),
('y', TensorProto.FLOAT, (4, 1, 8, 8, 8))],
[make_node('Conv', ['x', 'y'], 'z', group=4)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 1, 1, 1))])
def test_conv_only_one_pos(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (50, 4, 5))],
[make_node('Conv', ['x', 'y'], 'z', strides=[2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, 1))])
def test_conv_partial_missing_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, None, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, 3, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 50, None, 6, 6))]) # type: ignore
def test_conv_partial_missing_weight_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4)),
('y', TensorProto.FLOAT, (50, 4, None, 3, 3))],
[make_node('Conv', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, None)])
def test_average_pool_auto_pads(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 7, 6, 4))],
[make_node('AveragePool', ['x'], 'z', auto_pad='SAME_UPPER', kernel_shape=[4, 3, 2], strides=[2, 2, 1])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 4, 3, 4))])
def test_relu(self) -> None:
self._identity_prop('Relu')
def test_identity(self) -> None:
self._identity_prop('Identity')
def test_identity_sequence(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 5, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('Identity', ['in_sequence'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 4)), # type: ignore
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, None, 4))]) # type: ignore
def test_identity_optional(self) -> None:
graph = self._make_graph(
[('in_tensor', TensorProto.FLOAT, (2, 3, 4))],
[make_node('Optional', ['in_tensor'], ['in_optional']),
make_node('Identity', ['in_optional'], ['output_optional'])],
[])
tensor_type_proto = helper.make_tensor_type_proto(TensorProto.FLOAT, (2, 3, 4))
optional_type_proto = helper.make_optional_type_proto(tensor_type_proto)
self._assert_inferred(
graph,
[helper.make_value_info('in_optional', optional_type_proto), # type: ignore
helper.make_value_info('output_optional', optional_type_proto)]) # type: ignore
def test_identity_optional_sequence(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 5, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('Optional', ['in_sequence'], ['in_optional']),
make_node('Identity', ['in_optional'], ['output_optional'])],
[])
tensor_type_proto = helper.make_tensor_type_proto(TensorProto.FLOAT, (2, None, 4))
sequence_type_proto = helper.make_sequence_type_proto(tensor_type_proto)
optional_type_proto = helper.make_optional_type_proto(sequence_type_proto)
self._assert_inferred(
graph,
[helper.make_value_info('in_sequence', sequence_type_proto), # type: ignore
helper.make_value_info('in_optional', optional_type_proto), # type: ignore
helper.make_value_info('output_optional', optional_type_proto)]) # type: ignore
def test_add(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Add', ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 5))])
def test_pow(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Pow', ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (30, 4, 5))])
def test_bitshift(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT32, (2, 3, 1)),
('y', TensorProto.UINT32, (2, 3, 1))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (2, 3, 1))])
def test_bitshift_broadcast_to_first(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT32, (16, 4, 1)),
('y', TensorProto.UINT32, (1,))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (16, 4, 1))])
def test_bitshift_broadcast_to_second(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT32, (1,)),
('y', TensorProto.UINT32, (2, 3, 1))],
[make_node('BitShift', ['x', 'y'], 'z', direction="RIGHT")],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.UINT32, (2, 3, 1))])
def test_sum_single(self) -> None:
self._identity_prop('Sum')
def test_sum_multi(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y', TensorProto.FLOAT, (30, 4, 5)),
('z', TensorProto.FLOAT, (30, 4, 5))],
[make_node('Sum', ['x', 'y', 'z'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (30, 4, 5))])
def test_sum_multi_broadcasting(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 1, 5)),
('y', TensorProto.FLOAT, ("a", 4, 1)),
('z', TensorProto.FLOAT, (4, "b"))],
[make_node('Sum', ['x', 'y', 'z'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (30, 4, 5))])
def test_sum_broadcasting_param(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ("a", 1, 5)),
('y', TensorProto.FLOAT, ("a", 4, 1))],
[make_node('Sum', ['x', 'y'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, ("a", 4, 5))])
def test_random_normal(self) -> None:
graph = self._make_graph(
[],
[make_node('RandomNormal', [], ['out'], dtype=TensorProto.DOUBLE, shape=(3, 4, 5))],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.DOUBLE, (3, 4, 5))])
def test_random_normal_like(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node('RandomNormalLike', ['X'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, 4))])
def test_random_normal_like_with_dtype(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (2, 3, 4))],
[make_node('RandomNormalLike', ['X'], ['out'], dtype=TensorProto.DOUBLE,)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.DOUBLE, (2, 3, 4))])
def test_bernoulli(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4))],
[make_node('Bernoulli', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4))]) # type: ignore
def test_bernoulli_with_dtype(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3, 4))],
[make_node('Bernoulli', ['x'], ['out'], dtype=TensorProto.DOUBLE,)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.DOUBLE, (2, 3, 4))]) # type: ignore
def _logical_binary_op(self, op: str, input_type: TensorProto.DataType) -> None:
graph = self._make_graph(
[('x', input_type, (30, 4, 5)),
('y', input_type, (30, 4, 5))],
[make_node(op, ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def _logical_binary_op_with_broadcasting(self, op: str, input_type: TensorProto.DataType) -> None:
graph = self._make_graph(
[('x', input_type, (1, 5)),
('y', input_type, (30, 4, 5))],
[make_node(op, ['x', 'y'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def test_logical_and(self) -> None:
self._logical_binary_op('And', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('And', TensorProto.BOOL)
def test_logical_or(self) -> None:
self._logical_binary_op('Or', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Or', TensorProto.BOOL)
def test_logical_xor(self) -> None:
self._logical_binary_op('Xor', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Xor', TensorProto.BOOL)
def test_greater(self) -> None:
self._logical_binary_op('Greater', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Greater', TensorProto.BOOL)
def test_less(self) -> None:
self._logical_binary_op('Less', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Less', TensorProto.BOOL)
def test_equal(self) -> None:
self._logical_binary_op('Equal', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('Equal', TensorProto.BOOL)
def test_logical_not(self) -> None:
graph = self._make_graph(
[('x', TensorProto.BOOL, (30, 4, 5))],
[make_node('Not', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.BOOL, (30, 4, 5))])
def test_less_or_equal(self) -> None:
self._logical_binary_op('LessOrEqual', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('LessOrEqual', TensorProto.BOOL)
def test_greater_or_equal(self) -> None:
self._logical_binary_op('GreaterOrEqual', TensorProto.BOOL)
self._logical_binary_op_with_broadcasting('GreaterOrEqual', TensorProto.BOOL)
def test_flatten(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=2)],
[])
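        # axis=2 flattens dims [0, 2) and [2, 4) separately: (2*3, 4*5) = (6, 20).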
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (6, 20))])
def test_flatten_default_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 60))])
def test_flatten_zero_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (1, 120))])
def test_flatten_unknown_dim(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'N', 4, 5))],
[make_node('Flatten', ['x'], ['z'], axis=2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, 20))]) # type: ignore
def test_space_to_depth(self) -> None:
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 100, 100))],
[make_node('SpaceToDepth', ['x'], ['z'], blocksize=b)],
[])
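        # SpaceToDepth maps (N, C, H, W) to (N, C*b*b, H/b, W/b) = (2, 300, 10, 10).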
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 300, 10, 10))])
def test_space_to_depth_unknown_dim(self) -> None:
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'N', 100, 100))],
[make_node('SpaceToDepth', ['x'], ['z'], blocksize=b)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, None, 10, 10))]) # type: ignore
def test_depth_to_space(self) -> None:
b = 10
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 300, 10, 10))],
[make_node('DepthToSpace', ['x'], ['z'], blocksize=b, mode='DCR')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 3, 100, 100))])
def _rnn_forward(self, seqlen: int, batchsize: int, inpsize: int, hiddensize: int) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (1, hiddensize, inpsize)),
('r', TensorProto.FLOAT, (1, hiddensize, hiddensize))],
[make_node('RNN', ['x', 'w', 'r'], ['all', 'last'], hidden_size=hiddensize)],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 1, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (1, batchsize, hiddensize))])
def test_rnn_forward(self) -> None:
self._rnn_forward(64, 32, 10, 4)
def _rnn_bidirectional(self, seqlen: int, batchsize: int, inpsize: int, hiddensize: int) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (2, hiddensize, inpsize)),
('r', TensorProto.FLOAT, (2, hiddensize, hiddensize))],
[make_node('RNN', ['x', 'w', 'r'], ['all', 'last'], hidden_size=hiddensize,
direction="bidirectional")],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 2, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (2, batchsize, hiddensize))])
def test_rnn_layout(self) -> None:
self._rnn_layout(64, 32, 10, 4)
self._rnn_layout(64, 32, 10, 4, 'bidirectional')
def _rnn_layout(self, seqlen: int, batchsize: int, inpsize: int, hiddensize: int, direction: str = 'forward') -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (batchsize, seqlen, inpsize)),
('w', TensorProto.FLOAT, (1, hiddensize, inpsize)),
('r', TensorProto.FLOAT, (1, hiddensize, hiddensize))],
[make_node('RNN', ['x', 'w', 'r'], ['all', 'last'], hidden_size=hiddensize,
layout=1, direction=direction)],
[])
        num_directions = 2 if direction == 'bidirectional' else 1
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (batchsize, seqlen, num_directions, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (batchsize, num_directions, hiddensize))])
def test_rnn_bidirectional(self) -> None:
self._rnn_bidirectional(64, 32, 10, 4)
def _lstm_forward(self, seqlen: int, batchsize: int, inpsize: int, hiddensize: int) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (seqlen, batchsize, inpsize)),
('w', TensorProto.FLOAT, (1, 4 * hiddensize, inpsize)),
('r', TensorProto.FLOAT, (1, 4 * hiddensize, hiddensize))],
[make_node('LSTM', ['x', 'w', 'r'], ['all', 'hidden', 'last'], hidden_size=hiddensize)],
[])
self._assert_inferred(graph, [
make_tensor_value_info('all', TensorProto.FLOAT, (seqlen, 1, batchsize, hiddensize)),
make_tensor_value_info('hidden', TensorProto.FLOAT, (1, batchsize, hiddensize)),
make_tensor_value_info('last', TensorProto.FLOAT, (1, batchsize, hiddensize))])
def test_lstm_forward(self) -> None:
self._lstm_forward(64, 32, 10, 4)
def test_topk_default_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'])],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,), (2,))])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 5, 2)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 5, 2))])
def test_topk(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,), (2,))])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 2, 10)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 2, 10))])
def test_topk_raw_data(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (1,),
vals=np.array([3], dtype='<i8').tobytes(), raw=True)]) # Feed raw bytes (force little endian ordering like onnx standard) for test purpose
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 3, 10)),
make_tensor_value_info('z', TensorProto.INT64, (3, 4, 3, 10))])
def test_topk_missing_k_value_output_rank_check(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 10)),
('k', TensorProto.INT64, (1,))],
[make_node('TopK', ['x', 'k'], ['y', 'z'], axis=2)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (None, None, None, None)), # type: ignore
make_tensor_value_info('z', TensorProto.INT64, (None, None, None, None))]) # type: ignore
def test_gemm(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (7, 5)),
('y', TensorProto.FLOAT, (5, 11)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transA(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 7)),
('y', TensorProto.FLOAT, (5, 11)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transA=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transB(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (7, 5)),
('y', TensorProto.FLOAT, (11, 5)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transB=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_transA_and_transB(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 7)),
('y', TensorProto.FLOAT, (11, 5)),
('z', TensorProto.FLOAT, None)],
[make_node('Gemm', ['x', 'y', 'z'], ['out'], transA=1, transB=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (7, 11))])
def test_gemm_no_bias(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (13, 7)),
('y', TensorProto.FLOAT, (7, 17))],
[make_node('Gemm', ['x', 'y'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (13, 17))])
def test_reduce_op_shape_2_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(1, 2), keepdims=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24,))])
def test_reduce_op_shape_keep_dims(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(1, 2), keepdims=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24, 1, 1))])
def test_reduce_op_shape_default_value(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y')],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 1, 1))])
def test_reduce_op_shape_no_axes_do_not_keep_dims(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', keepdims=0)],
[])
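        # With no axes given, every dimension is reduced; keepdims=0 therefore yields a scalar (rank-0) output.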
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, tuple())])
def test_reduce_op_shape_negative_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ReduceL1', 'x', 'y', axes=(-1, -2))],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (24, 1, 1))])
def test_argmax_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=1, keepdims=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (24, 1, 11))])
def test_argmax_shape_keepdims(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=0, keepdims=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (4, 11))])
def test_argmax_shape_default_value(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y')],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1, 4, 11))])
def test_argmax_shape_negative_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (24, 4, 11))],
[make_node('ArgMax', 'x', 'y', axis=-2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (24, 1, 11))])
def test_dropout(self) -> None:
graph = self._make_graph(
[('data', TensorProto.FLOAT, (3, 4, 5,)),
('ratio', TensorProto.FLOAT, ())],
[make_node('Dropout', ['data', 'ratio'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5,))])
def test_LRN(self) -> None:
self._identity_prop('LRN', alpha=0.5, beta=0.5, size=1)
def test_batch_norm(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,)),
('mean', TensorProto.FLOAT, (4,)),
('var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'mean', 'var'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_batch_norm_rank1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (128,)), # 1-dimensional permitted
('scale', TensorProto.FLOAT, (1,)),
('b', TensorProto.FLOAT, (1,)),
('mean', TensorProto.FLOAT, (1,)),
('var', TensorProto.FLOAT, (1,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'mean', 'var'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (128,))])
def test_batch_norm_invalid(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (128,)),
('scale', TensorProto.FLOAT, (1, 2)), # invalid rank
('b', TensorProto.FLOAT, (1,)),
('mean', TensorProto.FLOAT, (1,)),
('var', TensorProto.FLOAT, (1,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'mean', 'var'], ['out'])],
[])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
def test_split_negative_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4))],
[make_node('Split', ['x'], ['y', 'z'], axis=-1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2)),
make_tensor_value_info('z', TensorProto.FLOAT, (2, 2))])
def test_split_with_split_attribute(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 4)),
('split', TensorProto.INT64, (2,))],
[make_node('Split', ['x', 'split'], ['y', 'z'], axis=1)],
[],
initializer=[make_tensor('split', TensorProto.INT64, (2,), (3, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3)),
make_tensor_value_info('z', TensorProto.FLOAT, (2, 1))])
def test_split_with_split_attribute_unknown_split_dim(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 'a', 'b')),
('split', TensorProto.INT64, (2,))],
[make_node('Split', ['x', 'split'], ['y', 'z'], axis=1)],
[],
initializer=[make_tensor('split', TensorProto.INT64, (2,), (3, 1))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, None, 'b')), # type: ignore
make_tensor_value_info('z', TensorProto.FLOAT, (2, None, 'b'))]) # type: ignore
def test_split_from_GLU(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1)],
[])
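        # With no 'split' input, axis 1 (size 6) is divided evenly across the two outputs: 3 each.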
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7))])
def test_GLU_partial(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1),
make_node('Sigmoid', ['z'], ['a'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('a', TensorProto.FLOAT, (5, 3, 7))])
def test_GLU(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7))],
[make_node('Split', ['x'], ['y', 'z'], axis=1),
make_node('Sigmoid', ['z'], ['a']),
make_node('Mul', ['y', 'a'], ['b'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('z', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('a', TensorProto.FLOAT, (5, 3, 7)),
make_tensor_value_info('b', TensorProto.FLOAT, (5, 3, 7))])
def test_softmax_2d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('Softmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_softmax_3d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('Softmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_hardmax_2d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('Hardmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_hardmax_3d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('Hardmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_logsoftmax_2d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('LogSoftmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5))])
def test_logsoftmax_3d(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('LogSoftmax', ['x'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_logsoftmax_3d_negative_axis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6))],
[make_node('LogSoftmax', ['x'], 'z', axis=-1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (4, 5, 6))])
def test_maxpool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_maxpool_with_indices(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y", "Z"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3)),
make_tensor_value_info("Z", TensorProto.INT64, (5, 3, 3, 3))])
def test_maxpool_3D(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_maxpool_with_padding(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_maxpool_with_padding_and_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_maxpool_with_floor_mode(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (32, 288, 35, 35))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], strides=[2, 2], ceil_mode=False)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (32, 288, 17, 17))])
def test_maxpool_with_ceil_mode(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (32, 288, 35, 35))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], strides=[2, 2], ceil_mode=True)],
[])
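        # ceil_mode rounds the output size up: ceil((35 - 2) / 2) + 1 = 18 per spatial dim.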
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (32, 288, 18, 18))])
def test_maxpool_ceil(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (1, 1, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (1, 1, 2, 2))])
def test_maxpool_with_dilations(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], kernel_shape=[2, 2], dilations=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride_and_dilation(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[2, 2], dilations=[2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 2, 2))])
def test_maxpool_with_same_upper_padding_and_stride_one(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_UPPER", kernel_shape=[2, 2], strides=[1, 1])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 4, 4))])
def test_maxpool_with_same_lower_padding_and_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 9, 9))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 5, 5))])
def test_maxpool_with_same_lower_padding_and_stride_and_dilation(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 9, 9))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[2, 2], dilations=[2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 5, 5))])
def test_maxpool_with_same_lower_padding_and_big_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("MaxPool", ["X"], ["Y"], auto_pad="SAME_LOWER", kernel_shape=[2, 2], strides=[4, 4])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_averagepool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_averagepool_3D(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_averagepool_with_padding(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_averagepool_with_padding_and_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_averagepool_ceil(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (1, 1, 4, 4))],
[make_node("AveragePool", ["X"], ["Y"], kernel_shape=[3, 3], strides=[2, 2], ceil_mode=True)],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (1, 1, 2, 2))])
def test_lppool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_lppool_3D(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3, 3))])
def test_lppool_with_padding(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 6, 6))])
def test_lppool_with_padding_and_stride(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("LpPool", ["X"], ["Y"], kernel_shape=[2, 2], pads=[1, 1, 2, 2], strides=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 3, 3))])
def test_roipool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4)),
("rois", TensorProto.INT64, (2, 5))],
[make_node("MaxRoiPool", ["X", "rois"], ["Y"], pooled_shape=[2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3, 2, 2))])
def test_lp_norm(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7))],
[make_node('LpNormalization', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_instance_norm(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,))],
[make_node('InstanceNormalization', ['x', 'scale', 'b'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))])
def test_global_maxpool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalMaxPool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_global_averagepool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalAveragePool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_global_lppool(self) -> None:
graph = self._make_graph(
[("X", TensorProto.FLOAT, (5, 3, 4, 4))],
[make_node("GlobalLpPool", ["X"], ["Y"])],
[])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, (5, 3, 1, 1))])
def test_conv_transpose(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2])],
[])
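        # ConvTranspose output spatial size is stride*(in - 1) + kernel - pad_total = 2*15 + 3 - 0 = 33.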
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 33, 33))])
def test_conv_transpose_with_pads(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 30, 30))])
def test_conv_transpose_with_output_shape(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], output_shape=[36, 36])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 36, 36))])
def test_conv_transpose_with_kernel_shape(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, None, None))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', kernel_shape=[3, 3], strides=[2, 2], pads=[1, 1, 2, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 30, 30))])
def test_conv_transpose_with_dilations(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], dilations=[3, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 34, 34))])
def test_conv_transpose_with_group(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], group=2)],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 64, 30, 30))])
def test_conv_transpose_with_group_and_output_shape(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', strides=[2, 2], pads=[1, 1, 2, 2], group=2, output_shape=[36, 36])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 64, 36, 36))])
def test_conv_transpose_with_pads_and_auto_pads(self) -> None:
# This test should fail because pads cannot be used simultaneously with auto_pad
graph = self._make_graph(
[('X', TensorProto.FLOAT, (1, 1, 2, 2)),
('W', TensorProto.FLOAT, (1, 1, 3, 3)),
('B', TensorProto.FLOAT, (1, ))],
[make_node('ConvTranspose', ['X', 'W', 'B'], 'Y', auto_pad="SAME_UPPER", strides=[1, 1], pads=[0, 1, 1, 0])],
[])
self.assertRaises(onnx.shape_inference.InferenceError, onnx.shape_inference.infer_shapes, helper.make_model(graph), strict_mode=True)
def test_conv_transpose_auto_pads(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16)),
('W', TensorProto.FLOAT, (48, 32, 3, 3))],
[make_node('ConvTranspose', ['X', 'W'], 'Y', auto_pad="SAME_UPPER", strides=[2, 2])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('Y', TensorProto.FLOAT, (25, 32, 32, 32))])
def test_mvn_function_output_shape(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16))],
[make_node('MeanVarianceNormalization', 'X', 'Y', axes=[0, 2, 3])],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 48, 16, 16))])
def test_scan(self) -> None:
batch_size = 1
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
        # Can't use self._make_graph for the subgraph as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the number of inputs passed from Scan to match
        # the GraphProto, but Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (batch_size, loop_state_size)),
('scan_input', TensorProto.FLOAT, (batch_size, seq_len, input_size))],
[make_node('Scan', ['', 'loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (batch_size, loop_state_size)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (batch_size, seq_len, input_size))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 8)])
def test_scan_opset9(self) -> None:
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
        # Can't use self._make_graph for the subgraph as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the number of inputs passed from Scan to match
        # the GraphProto, but Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (seq_len, input_size))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_scan_opset9_axes(self) -> None:
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
        # Can't use self._make_graph for the subgraph as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the number of inputs passed from Scan to match
        # the GraphProto, but Scan knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[1])],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (seq_len, axis_0_len, input_size))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_scan_opset9_output_axes(self) -> None:
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[1], scan_output_axes=[1])],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_scan_opset9_negative_axes(self) -> None:
axis_0_len = 'axis0'
seq_len = 'sequence'
input_size = 2
loop_state_size = 3
input_value_infos = [make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('input', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.UNDEFINED, None)]
subgraph = helper.make_graph(
[make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('loop_state_orig', TensorProto.FLOAT, (loop_state_size,)),
('scan_input', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
[make_node('Scan', ['loop_state_orig', 'scan_input'], ['loop_state_final', 'scan_output'],
num_scan_inputs=1, body=subgraph, scan_input_axes=[-2], scan_output_axes=[-2])],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, (loop_state_size,)),
make_tensor_value_info('scan_output', TensorProto.FLOAT, (axis_0_len, seq_len, input_size))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 9)])
def test_if_ver1(self) -> None:
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # Can't use self._make_graph for the subgraphs as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the subgraphs to have zero inputs.
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, None)],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, None)],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (1,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('if_output', TensorProto.FLOAT, (1,))],
opset_imports=[make_opsetid(ONNX_DOMAIN, 10)])
def test_if(self) -> None:
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # Can't use self._make_graph for the subgraphs as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the subgraphs to have zero inputs.
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, None)],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, None)],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (1,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('if_output', TensorProto.FLOAT, (1,))])
def test_if_with_different_shapes_in_then_else_branches(self) -> None:
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # Can't use self._make_graph for the subgraphs as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the subgraphs to have zero inputs.
then_subgraph = helper.make_graph(
[make_node('Add', ['current_value', 'add_value'], ['then_output'])],
"then_subgraph",
[], # no inputs
[make_tensor_value_info('then_output', TensorProto.UNDEFINED, (1,))],
)
else_subgraph = helper.make_graph(
[make_node('Sub', ['current_value', 'sub_value'], ['else_output'])],
"else_subgraph",
[], # no inputs
[make_tensor_value_info('else_output', TensorProto.UNDEFINED, (5,))],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('current_value', TensorProto.FLOAT, (1,)),
('add_value', TensorProto.FLOAT, (1,)),
('sub_value', TensorProto.FLOAT, (5,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('if_output', TensorProto.FLOAT, (None,))]) # type: ignore
def test_if_with_different_optional_shapes_in_then_else_branches(self) -> None:
# Create a simple If node where the 'then' subgraph adds to the current value, and the 'else' subgraph
# subtracts.
        # Can't use self._make_graph for the subgraphs as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the subgraphs to have zero inputs.
then_tensor_proto = helper.make_tensor_type_proto(elem_type=TensorProto.UNDEFINED, shape=[1, ])
then_optional_type_proto = helper.make_optional_type_proto(then_tensor_proto)
then_optional_vi = helper.make_value_info('then_optional_output', then_optional_type_proto)
then_subgraph = helper.make_graph(
[make_node('Optional', ['then_tensor_value'], ['then_optional_output'])],
"then_subgraph",
[], # no inputs
[then_optional_vi],
)
else_tensor_proto = helper.make_tensor_type_proto(elem_type=TensorProto.UNDEFINED, shape=[5, ])
else_optional_type_proto = helper.make_optional_type_proto(else_tensor_proto)
else_optional_vi = helper.make_value_info('else_optional_output', else_optional_type_proto)
else_subgraph = helper.make_graph(
[make_node('Optional', ['else_tensor_value'], ['else_optional_output'])],
"else_subgraph",
[], # no inputs
[else_optional_vi],
)
graph = self._make_graph(
[('cond', TensorProto.BOOL, (1,)),
('then_tensor_value', TensorProto.FLOAT, (1,)),
('else_tensor_value', TensorProto.FLOAT, (5,))],
[make_node('If', ['cond'], ['if_output'],
then_branch=then_subgraph, else_branch=else_subgraph)],
[]
)
output_tensor_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=(None, ))
output_optional_type_proto = helper.make_optional_type_proto(output_tensor_proto)
output_optional_vi = helper.make_value_info('if_output', output_optional_type_proto)
self._assert_inferred(graph, [output_optional_vi]) # type: ignore
def test_maxunpool_shape_without_output_shape(self) -> None:
graph = self._make_graph(
[('xT', TensorProto.FLOAT, (1, 1, 2, 2)),
('xI', TensorProto.FLOAT, (1, 1, 2, 2))],
[make_node('MaxUnpool', ['xT', 'xI'], 'Y', kernel_shape=[2, 2], strides=[2, 2])],
[])
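        # Without an explicit output_shape input, MaxUnpool infers (in - 1)*stride + kernel = (2 - 1)*2 + 2 = 4 per spatial dim.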
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (1, 1, 4, 4))])
def test_maxunpool_shape_with_output_shape(self) -> None:
graph = self._make_graph(
[('xT', TensorProto.FLOAT, (1, 1, 2, 2)),
('xI', TensorProto.FLOAT, (1, 1, 2, 2)),
('output_shape', TensorProto.FLOAT, (4, ))],
[make_node('MaxUnpool', ['xT', 'xI', 'output_shape'], 'Y', kernel_shape=[2, 2], strides=[2, 2])],
[make_tensor_value_info("Y", TensorProto.FLOAT, None)])
self._assert_inferred(graph, [make_tensor_value_info("Y", TensorProto.FLOAT, None)])
def test_onehot_without_axis(self) -> None:
graph = self._make_graph(
[('indices', TensorProto.INT64, (2, 2)),
('depth', TensorProto.INT64, ()),
('values', TensorProto.FLOAT, (2, ))],
[make_node('OneHot', ['indices', 'depth', 'values'], 'Y')],
[])
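        # The new one-hot axis is appended last by default; its size is unknown because 'depth' is not a constant here.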
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, 2, None))]) # type: ignore
def test_onehot_with_axis(self) -> None:
graph = self._make_graph(
[('indices', TensorProto.INT64, (2, 3, 5)),
('depth', TensorProto.INT64, (1, )),
('values', TensorProto.FLOAT, (2, ))],
[make_node('OneHot', ['indices', 'depth', 'values'], 'Y', axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, None, 3, 5))]) # type: ignore
def test_loop(self) -> None:
        # Can't use self._make_graph for the subgraph as it adds more inputs for the Reshape operations it inserts.
        # This breaks subgraph inference, which expects the number of inputs passed from Loop to match
        # the GraphProto, but Loop knows nothing about the additional inputs.
input_value_infos = [make_tensor_value_info('iter_num_in', TensorProto.INT64, (1,)),
make_tensor_value_info('cond_in', TensorProto.UNDEFINED, None),
make_tensor_value_info('loop_state_in', TensorProto.UNDEFINED, ())]
output_value_infos = [make_tensor_value_info('cond_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('loop_state_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.FLOAT, (3,))]
subgraph = helper.make_graph(
[make_node('Identity', ['cond_in'], ['cond_out']),
make_node('Identity', ['loop_state_in'], ['loop_state_out']),
make_node('Identity', ['outer_scope_input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('max_trip_count', TensorProto.INT64, (1,)),
('cond_orig', TensorProto.FLOAT, (1,)),
('loop_state_orig', TensorProto.FLOAT, (2,)),
('outer_scope_input', TensorProto.FLOAT, (3,))],
[make_node('Loop', ['max_trip_count', 'cond_orig', 'loop_state_orig'], ['loop_state_final', 'loop_output'],
body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_state_final', TensorProto.FLOAT, None), # shape may change between iterations
make_tensor_value_info('loop_output', TensorProto.FLOAT, (None, 3))]) # type: ignore
def test_loop_no_state(self) -> None:
input_value_infos = [make_tensor_value_info('iter_num_in', TensorProto.INT64, (1,)),
make_tensor_value_info('cond_in', TensorProto.UNDEFINED, None)]
output_value_infos = [make_tensor_value_info('cond_out', TensorProto.UNDEFINED, None),
make_tensor_value_info('output', TensorProto.FLOAT, (3,))]
subgraph = helper.make_graph(
[make_node('Identity', ['cond_in'], ['cond_out']),
make_node('Identity', ['outer_scope_input'], ['output'])],
"subgraph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('max_trip_count', TensorProto.INT64, (1,)),
('cond_orig', TensorProto.FLOAT, (1,)),
('outer_scope_input', TensorProto.FLOAT, (3,))],
[make_node('Loop', ['max_trip_count', 'cond_orig'], ['loop_output'],
body=subgraph)],
[]
)
self._assert_inferred(
graph,
[make_tensor_value_info('loop_output', TensorProto.FLOAT, (None, 3))]) # type: ignore
def test_constantofshape_with_input_shape(self) -> None:
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (3,), (3, 4, 5))),
make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.INT32, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, (3,)),
make_tensor_value_info('y', TensorProto.INT32, (3, 4, 5))]) # type: ignore
def test_constantofshape_without_input_shape(self) -> None:
graph = self._make_graph([('shape', TensorProto.INT64, (3, ))],
[make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.UINT8, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.UINT8, (None, None, None))]) # type: ignore
def test_constantofshape_with_symbolic_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5))],
[make_node("Shape", ['x'], ['shape']),
make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.INT32, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, (3,)),
make_tensor_value_info('y', TensorProto.INT32, (3, 4, 5))], data_prop=True) # type: ignore
def test_constantofshape_without_input_shape_scalar(self) -> None:
graph = self._make_graph([('shape', TensorProto.INT64, (0, ))],
[make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.UINT8, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('y', TensorProto.UINT8, ())]) # type: ignore
def test_constantofshape_with_shape_zero(self) -> None:
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (1,), (0,))),
make_node("ConstantOfShape", ['shape'], ['y'], value=make_tensor('value', TensorProto.INT32, (1, ), (2, )))],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, (1,)),
make_tensor_value_info('y', TensorProto.INT32, (0,))]) # type: ignore
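        # Note on the two cases above: an *empty* shape input (a tensor of shape (0,)) makes
        # ConstantOfShape produce a scalar (), whereas a shape input *containing* the value 0
        # produces a 1-D tensor with zero elements, i.e. (0,).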
def test_convinteger(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (3, 4, 5, 6, 7)),
('y', TensorProto.UINT8, (5, 4, 2, 4, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (3, 5, 4, 1, 3))])
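        # Sketch of the arithmetic behind the expected spatial dims (standard conv formula,
        # out = floor((in + pad_begin + pad_end - dilation * (kernel - 1) - 1) / stride) + 1):
        #   dim 0: (5 + 0 + 0 - 1*1 - 1) / 1 + 1 = 4
        #   dim 1: (6 + 1 + 0 - 2*3 - 1) / 1 + 1 = 1
        #   dim 2: (7 + 1 + 1 - 2*2 - 1) / 2 + 1 = 3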
    def test_convinteger_dilations(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', dilations=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 4, 2))])
def test_convinteger_strides(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', strides=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 3, 2))])
    def test_convinteger_pads(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('y', TensorProto.INT8, (50, 4, 3, 3, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, 6, 6, 6))])
    def test_convinteger_group(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('y', TensorProto.INT8, (4, 1, 8, 8, 8))],
[make_node('ConvInteger', ['x', 'y'], 'z', group=4)],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 4, 1, 1, 1))])
    def test_convinteger_partial_missing_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, None, 6, 4)),
('y', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('x_zero_point', TensorProto.UINT8, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('ConvInteger', ['x', 'y', 'x_zero_point', 'y_zero_point'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, (30, 50, None, 6, 6))]) # type: ignore
    def test_convinteger_partial_missing_weight_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('y', TensorProto.UINT8, (50, 4, None, 3, 3))],
[make_node('ConvInteger', ['x', 'y'], 'z', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.INT32, None)])
def test_qlinearconv(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (3, 4, 5, 6, 7)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (5, 4, 2, 4, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[0, 1, 1, 0, 0, 1], dilations=[1, 2, 2], strides=[1, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (3, 5, 4, 1, 3))])
def test_qlinearconv_dilations(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', dilations=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, 6, 4, 2))])
def test_qlinearconv_strides(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.INT8, ()),
('w', TensorProto.INT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.INT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', strides=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT8, (30, 50, 6, 3, 2))])
def test_qlinearconv_pads(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.INT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, 6, 6, 6))])
def test_qlinearconv_group(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT8, (30, 4, 8, 8, 8)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.INT8, ()),
('w', TensorProto.INT8, (4, 1, 8, 8, 8)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.INT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.INT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', group=4)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT8, (30, 4, 1, 1, 1))])
def test_qlinearconv_partial_missing_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, None, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, 3, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 50, None, 6, 6))]) # type: ignore
def test_qlinearconv_partial_missing_weight_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 7, 6, 4)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ()),
('w', TensorProto.UINT8, (50, 4, None, 3, 3)),
('w_scale', TensorProto.FLOAT, ()),
('w_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearConv', ['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'], 'y', pads=[1, 1, 2, 0, 1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, None)])
def _make_qlinearmatmul_test(self, shape1: Sequence[int], shape2: Sequence[int]) -> None:
        expected_out_shape = np.matmul(np.arange(np.prod(shape1)).reshape(shape1),
                                       np.arange(np.prod(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('a', TensorProto.UINT8, shape1),
('a_scale', TensorProto.FLOAT, ()),
('a_zero_point', TensorProto.UINT8, ()),
('b', TensorProto.UINT8, shape2),
('b_scale', TensorProto.FLOAT, ()),
('b_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearMatMul', ['a', 'a_scale', 'a_zero_point', 'b', 'b_scale', 'b_zero_point', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, expected_out_shape)])
def test_qlinearmatmul(self) -> None:
self._make_qlinearmatmul_test((3,), (3,))
self._make_qlinearmatmul_test((4, 2), (2, 4))
self._make_qlinearmatmul_test((2,), (2, 3))
self._make_qlinearmatmul_test((4, 2), (2,))
self._make_qlinearmatmul_test((5, 1, 4, 2), (1, 3, 2, 3))
self._make_qlinearmatmul_test((4, 2), (3, 2, 3))
def _make_qlinearmatmul_test_allow_unknown(self, shape1: Any, shape2: Any, expected_out_shape: Any) -> None:
graph = self._make_graph(
[('a', TensorProto.UINT8, shape1),
('a_scale', TensorProto.FLOAT, ()),
('a_zero_point', TensorProto.UINT8, ()),
('b', TensorProto.UINT8, shape2),
('b_scale', TensorProto.FLOAT, ()),
('b_zero_point', TensorProto.UINT8, ()),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QLinearMatMul', ['a', 'a_scale', 'a_zero_point', 'b', 'b_scale', 'b_zero_point', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, expected_out_shape)])
def test_qlinearmatmul_allow_unknown(self) -> None:
self._make_qlinearmatmul_test_allow_unknown((None,), (None,), ())
self._make_qlinearmatmul_test_allow_unknown((3,), (None,), ())
self._make_qlinearmatmul_test_allow_unknown((2,), (2, "a"), ("a",))
self._make_qlinearmatmul_test_allow_unknown((4, 2), (2, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((4, None), (2, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((4, None), (None, "a"), (4, "a"))
self._make_qlinearmatmul_test_allow_unknown((1, 4, 2), ("a", 2, 5), ("a", 4, 5))
self._make_qlinearmatmul_test_allow_unknown((1, 3, 4, 2), ("a", 2, 5), (1, 3, 4, 5))
self._make_qlinearmatmul_test_allow_unknown(None, ("a", 2, 5), None)
self._make_qlinearmatmul_test_allow_unknown(None, None, None)
def _make_matmulinteger_test(self, shape1: Sequence[int], shape2: Sequence[int]) -> None:
        expected_out_shape = np.matmul(np.arange(np.prod(shape1)).reshape(shape1),
                                       np.arange(np.prod(shape2)).reshape(shape2)).shape
graph = self._make_graph(
[('A', TensorProto.UINT8, shape1),
('B', TensorProto.UINT8, shape2),
('a_zero_point', TensorProto.UINT8, ()),
('b_zero_point', TensorProto.UINT8, ())],
[make_node('MatMulInteger', ['A', 'B', 'a_zero_point', 'b_zero_point'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.INT32, expected_out_shape)])
def test_matmulinteger(self) -> None:
self._make_matmulinteger_test((2,), (2,))
self._make_matmulinteger_test((1, 2), (2, 3))
self._make_matmulinteger_test((2,), (2, 3))
self._make_matmulinteger_test((4, 2), (2,))
self._make_matmulinteger_test((5, 1, 4, 2), (1, 3, 2, 3))
self._make_matmulinteger_test((4, 2), (3, 2, 3))
def test_quantizelinear(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y_scale', TensorProto.FLOAT, ()),
('y_zero_point', TensorProto.UINT8, ())],
[make_node('QuantizeLinear', ['x', 'y_scale', 'y_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 4, 5))])
def test_quantizelinear_default_zp(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y_scale', TensorProto.FLOAT, ())],
[make_node('QuantizeLinear', ['x', 'y_scale'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 4, 5))])
def test_quantizelinear_optional_input(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5)),
('y_scale', TensorProto.FLOAT, ())],
[make_node('QuantizeLinear', ['x', 'y_scale', ''], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 4, 5))])
def test_dequantizelinear(self) -> None:
graph = self._make_graph(
[('x', TensorProto.UINT8, (30, 4, 5)),
('x_scale', TensorProto.FLOAT, ()),
('x_zero_point', TensorProto.UINT8, ())],
[make_node('DequantizeLinear', ['x', 'x_scale', 'x_zero_point'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (30, 4, 5))])
def test_dynamicquantizelinear(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (30, 4, 5))],
[make_node('DynamicQuantizeLinear', ['x'], ['y', 'y_scale', 'y_zero_point'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.UINT8, (30, 4, 5)),
make_tensor_value_info('y_scale', TensorProto.FLOAT, ()),
make_tensor_value_info('y_zero_point', TensorProto.UINT8, ())])
def test_reversesequence(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('sequence_lens', TensorProto.INT64, (5,))],
[make_node('ReverseSequence', ['x', 'sequence_lens'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 5, 6))])
def test_unique_without_axis(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (2, 4, 2))],
[make_node('Unique', ['X'], ['Y', 'indices', 'inverse_indices', 'counts'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (None,)), # type: ignore
make_tensor_value_info('indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('inverse_indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('counts', TensorProto.INT64, (None,))]) # type: ignore
def test_unique_with_axis(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (2, 4, 2))],
[make_node('Unique', ['X'], ['Y', 'indices', 'inverse_indices', 'counts'], axis=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (2, None, 2)), # type: ignore
make_tensor_value_info('indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('inverse_indices', TensorProto.INT64, (None,)), # type: ignore
make_tensor_value_info('counts', TensorProto.INT64, (None,))]) # type: ignore
def test_det(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (3, 3))],
[make_node('Det', ['X'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, ())])
graph = self._make_graph(
[('X', TensorProto.FLOAT, (4, 5, 6, 7, 7))],
[make_node('Det', ['X'], ['Y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (4, 5, 6))])
def test_tile(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[],
initializer=[make_tensor('repeats', TensorProto.INT64, (3,), (1, 2, 3))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 10, 18))])
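        # Note (informational): Tile scales each dimension by the matching repeat,
        # so (4*1, 5*2, 6*3) = (4, 10, 18).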
def test_tile_raw_input_data(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[],
initializer=[make_tensor('repeats', TensorProto.INT64, (3,),
                                     vals=np.array([1, 2, 3], dtype='<i8').tobytes(), raw=True)])  # Feed raw bytes (forcing little-endian ordering, per the ONNX standard) for test purposes
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (4, 10, 18))])
def test_tile_rank_inference(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('repeats', TensorProto.INT64, (3,))],
[make_node('Tile', ['x', 'repeats'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_linearclassifier_1D_input(self) -> None:
if ONNX_ML:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5,))],
[make_node('LinearClassifier', ['x'], ['y', 'z'], domain=ONNX_ML_DOMAIN, coefficients=[0.0008, -0.0008], intercepts=[2.0, 2.0], classlabels_ints=[1, 2])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1,)),
make_tensor_value_info('z', TensorProto.FLOAT, (1, 2))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 1), make_opsetid(ONNX_DOMAIN, 11)])
def test_linearclassifier_2D_input(self) -> None:
if ONNX_ML:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5))],
[make_node('LinearClassifier', ['x'], ['y', 'z'], domain=ONNX_ML_DOMAIN, coefficients=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6], intercepts=[2.0, 2.0, 3.0], classlabels_ints=[1, 2, 3])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (4,)),
make_tensor_value_info('z', TensorProto.FLOAT, (4, 3))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 1), make_opsetid(ONNX_DOMAIN, 11)])
def test_roialign_symbolic(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, ('num_rois',))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'], output_height=10, output_width=5)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ('num_rois', 'C', 10, 5))]) # type: ignore
def test_roialign_symbolic_defaults(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, ('num_rois',))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, ('num_rois', 'C', 1, 1))]) # type: ignore
def test_roialign_num_rois(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('rois', TensorProto.FLOAT, ('num_rois', 4)),
('batch_indices', TensorProto.INT64, (15,))],
[make_node('RoiAlign', ['x', 'rois', 'batch_indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (15, 'C', 1, 1))]) # type: ignore
def test_label_encoder_string_int64(self) -> None:
if ONNX_ML:
string_list = ['A', 'm', 'y']
float_list = [94.17, 36.00]
int64_list = [12, 28, 86]
graph = self._make_graph(
[('x', TensorProto.STRING, (6, 1))],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_strings=string_list, values_int64s=int64_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (6, 1))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
graph = self._make_graph(
[('x', TensorProto.INT64, (2, 3))],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_int64s=int64_list, values_strings=string_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, (2, 3))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2,))],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_floats=float_list, values_int64s=int64_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (2,))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
graph = self._make_graph(
[('x', TensorProto.INT64, (8,))],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_int64s=int64_list, values_floats=float_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (8,))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
graph = self._make_graph(
[('x', TensorProto.FLOAT, ())],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_floats=float_list, values_strings=string_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, ())],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
graph = self._make_graph(
[('x', TensorProto.STRING, (1, 2))],
[make_node('LabelEncoder', ['x'], ['y'], domain=ONNX_ML_DOMAIN,
keys_strings=string_list, values_floats=float_list)], [])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (1, 2))],
opset_imports=[make_opsetid(ONNX_ML_DOMAIN, 2), make_opsetid(ONNX_DOMAIN, 11)])
def make_sparse(self,
shape: Sequence[int],
values: Sequence[int],
indices_shape: Sequence[int],
indices: Sequence[int]
) -> SparseTensorProto:
sparse = SparseTensorProto()
sparse.dims.extend(shape)
nnz = len(values)
sparse.values.CopyFrom(helper.make_tensor('spval', TensorProto.INT64, (nnz,), values))
sparse.indices.CopyFrom(helper.make_tensor('spind', TensorProto.INT64, indices_shape, indices))
return sparse
def test_constant_sparse(self) -> None:
y_shape = [100]
y_value = self.make_sparse(y_shape, [13, 17, 19], [3], [9, 27, 81])
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], sparse_value=y_value)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, y_shape)]) # type: ignore
def test_constant_value_int(self) -> None:
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_int=42)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, [])])
def test_constant_value_ints(self) -> None:
value_ints = [1, 2, 3]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_ints=value_ints)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, [len(value_ints)])])
def test_constant_value_float(self) -> None:
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_float=1.42)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, [])])
def test_constant_value_floats(self) -> None:
value_floats = [1.0, 1.1, 1.2]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_floats=value_floats)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, [len(value_floats)])])
def test_constant_value_string(self) -> None:
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_string="String value")],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, [])])
def test_constant_value_strings(self) -> None:
value_strings = ["o", "n", "n", "x"]
graph = self._make_graph(
[],
[make_node('Constant', [], ['y'], value_strings=value_strings)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.STRING, [len(value_strings)])])
def test_range(self) -> None:
graph = self._make_graph(
[('start', TensorProto.FLOAT, ()),
('limit', TensorProto.FLOAT, ()),
('delta', TensorProto.FLOAT, ())],
[make_node('Range', ['start', 'limit', 'delta'], ['output'])],
[],
initializer=[make_tensor('start', TensorProto.FLOAT, (), (1,)),
make_tensor('limit', TensorProto.FLOAT, (), (5,)),
make_tensor('delta', TensorProto.FLOAT, (), (2,))])
self._assert_inferred(graph, [make_tensor_value_info('output', TensorProto.FLOAT, (2,))])
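        # Note (informational): Range produces max(ceil((limit - start) / delta), 0) elements,
        # here ceil((5 - 1) / 2) = 2, hence the expected shape (2,).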
def test_range_rank_inference(self) -> None:
graph = self._make_graph(
[('start', TensorProto.INT32, ()),
('limit', TensorProto.INT32, ()),
('delta', TensorProto.INT32, ())],
[make_node('Range', ['start', 'limit', 'delta'], ['output'])],
[],
initializer=[make_tensor('start', TensorProto.INT32, (), (1,)),
make_tensor('limit', TensorProto.INT32, (), (5,))]) # Missing 'delta' initializer
self._assert_inferred(graph, [make_tensor_value_info('output', TensorProto.INT32, (None,))]) # type: ignore
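        # Note (informational): with the 'delta' value unknown, the element count cannot be
        # computed, so only the 1-D rank is inferred, hence (None,).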
def test_gathernd(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (4, 5, 6)),
('indices', TensorProto.INT64, (2,))],
[make_node('GatherND', ['x', 'indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (6,))])
def test_gathernd_batchdim_1(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('indices', TensorProto.INT64, (2, 1))],
[make_node('GatherND', ['x', 'indices'], ['y'], batch_dims=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 2))])
def test_cumsum(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3)),
('axis', TensorProto.FLOAT, (1,))],
[make_node('CumSum', ['x', 'axis'], 'z')],
[])
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 3))])
def test_nonmaxsuppression(self) -> None:
graph = self._make_graph(
[('boxes', TensorProto.FLOAT, (1, 3, 4)),
('scores', TensorProto.FLOAT, (1, 5, 3))],
[make_node('NonMaxSuppression', ['boxes', 'scores'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (None, 3))]) # type: ignore
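        # Note (informational): NonMaxSuppression keeps a data-dependent number of boxes; each
        # selected entry is [batch_index, class_index, box_index], so only the trailing dim (3)
        # is static and the leading dim stays unknown.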
def test_sequence_empty(self) -> None:
graph = self._make_graph(
[],
[make_node('SequenceEmpty', [], ['output'])],
[])
self._assert_inferred(graph, [make_tensor_sequence_value_info('output', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_construct(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_construct_one_input(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_construct_diff_rank(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_construct_diff_dim_size(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 5)),
('input3', TensorProto.FLOAT, (2, 3, 6))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['output_sequence'])],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, None))]) # type: ignore
def test_sequence_insert(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('input4', TensorProto.FLOAT, (2, 3, 4))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_insert_diff_rank(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('input4', TensorProto.FLOAT, (2, 3))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_insert_diff_shape(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 5, 4)),
('input4', TensorProto.FLOAT, (2, 5, 2))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceInsert', ['in_sequence', 'input4'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 4)), # type: ignore
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, None, None))]) # type: ignore
def test_sequence_at(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_at_unknown_shape(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, None),
make_tensor_value_info('output', TensorProto.FLOAT, None)]) # type: ignore
def test_sequence_at_unknown_dim_size(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 5)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceAt', ['in_sequence', 'ind'], ['output'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, None)), # type: ignore
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, None))]) # type: ignore
def test_sequence_erase(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4)),
('input2', TensorProto.FLOAT, (2, 3, 4)),
('input3', TensorProto.FLOAT, (2, 3, 4)),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceErase', ['in_sequence', 'ind'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 4)),
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 3, 4))]) # type: ignore
def test_sequence_erase_diff_dim_size(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 5, 'x')),
('ind', TensorProto.INT64, ())],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceErase', ['in_sequence', 'ind'], ['output_sequence'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, None, 'x'))]) # type: ignore
def test_sequence_length(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceLength', ['in_sequence'], ['len'])],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('len', TensorProto.INT64, ())]) # type: ignore
def test_split_to_sequence(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (2,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (2,), (3, 3))])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (3, 4))]) # type: ignore
def test_split_to_sequence_scalar(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, ())],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (), (2, ))])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (2, 4))]) # type: ignore
def test_split_to_sequence_keepdims(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], keepdims=1)],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (1, 4))]) # type: ignore
def test_split_to_sequence_not_keepdims(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], keepdims=0)],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (4, ))]) # type: ignore
def test_split_to_sequence_ignore_keepdims(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (2,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'], keepdims=0)],
[],
initializer=[make_tensor('split', TensorProto.INT32, (2,), (3, 3))])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (3, 4))]) # type: ignore
def test_split_to_sequence_axis(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], axis=1)],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (6, 1))]) # type: ignore
def test_split_to_sequence_neg_axis(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4))],
[make_node('SplitToSequence', ['input'], ['output_sequence'], axis=-2)],
[])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (1, 4))]) # type: ignore
def test_split_to_sequence_split_sizes(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, (3,))],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (3,), (2, 1, 3))])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (None, 4))]) # type: ignore
def test_split_to_sequence_non_divisible(self) -> None:
graph = self._make_graph(
[('input', TensorProto.FLOAT, (6, 4)),
('split', TensorProto.INT32, ())],
[make_node('SplitToSequence', ['input', 'split'], ['output_sequence'])],
[],
initializer=[make_tensor('split', TensorProto.INT32, (), (4, ))])
self._assert_inferred(graph,
[make_tensor_sequence_value_info('output_sequence', TensorProto.FLOAT, (None, 4))]) # type: ignore
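        # Note (informational): 6 rows split into chunks of 4 would give chunks of 4 and 2, so the
        # tensors in the sequence differ along the split axis and that dim is reported as unknown,
        # hence (None, 4).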
def test_concat_from_sequence(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (None, 3, 'x'))]) # type: ignore
def test_concat_from_sequence_unknown_shape(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3)),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, None),
make_tensor_value_info('out', TensorProto.FLOAT, None)]) # type: ignore
def test_concat_from_sequence_unknown_dim_size(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=0)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (None, None, 'x'))]) # type: ignore
def test_concat_from_sequence_axis(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=2)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (2, None, None))]) # type: ignore
def test_concat_from_sequence_neg_axis(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 4, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=-3)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, None, 'x')), # type: ignore
make_tensor_value_info('out', TensorProto.FLOAT, (None, None, 'x'))]) # type: ignore
def test_concat_from_sequence_new_axis(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=2, new_axis=1)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, None, 'x'))]) # type: ignore
def test_concat_from_sequence_neg_new_axis(self) -> None:
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 'x')),
('input2', TensorProto.FLOAT, (2, 3, 'x')),
('input3', TensorProto.FLOAT, (2, 3, 'x'))],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('ConcatFromSequence', ['in_sequence'], ['out'], axis=-1, new_axis=1)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (2, 3, 'x')),
make_tensor_value_info('out', TensorProto.FLOAT, (2, 3, 'x', None))]) # type: ignore
def test_adagrad(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('H', TensorProto.FLOAT, (1, 2))],
[make_node('Adagrad', ['R', 'T', 'X', 'G', 'H'], ['X_new', 'H_new'],
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H_new', TensorProto.FLOAT, (1, 2))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 12), helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])
def test_adagrad_multiple(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('H1', TensorProto.FLOAT, (1, 2)),
('H2', TensorProto.FLOAT, (3, 4))],
[make_node('Adagrad', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'H1', 'H2'],
['X1_new', 'X2_new', 'H1_new', 'H2_new'],
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('H1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H2_new', TensorProto.FLOAT, (3, 4))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 12), helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])
def test_momentum(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('V', TensorProto.FLOAT, (1, 2))],
[make_node('Momentum', ['R', 'T', 'X', 'G', 'V'], ['X_new', 'V_new'],
alpha=0.9, beta=1.0, norm_coefficient=0.02, mode='standard',
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V_new', TensorProto.FLOAT, (1, 2))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 12), helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])
def test_momentum_multiple(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('V1', TensorProto.FLOAT, (1, 2)),
('V2', TensorProto.FLOAT, (3, 4))],
[make_node('Momentum', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'V1', 'V2'],
['X1_new', 'X2_new', 'V1_new', 'V2_new'],
alpha=0.9, beta=1.0, norm_coefficient=0.02, mode='nesterov',
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('V1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V2_new', TensorProto.FLOAT, (3, 4))],
opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 12), helper.make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1)])
def test_adam(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X', TensorProto.FLOAT, (1, 2)),
('G', TensorProto.FLOAT, (1, 2)),
('V', TensorProto.FLOAT, (1, 2)),
('H', TensorProto.FLOAT, (1, 2))],
[make_node('Adam', ['R', 'T', 'X', 'G', 'V', 'H'], ['X_new', 'V_new', 'H_new'],
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
alpha=0.9, beta=1.0, norm_coefficient=0.02)],
[])
infos = [make_tensor_value_info('X_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H_new', TensorProto.FLOAT, (1, 2))]
self._assert_inferred(
graph,
infos,
opset_imports=[make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1), make_opsetid(ONNX_DOMAIN, 12)])
def test_adam_multiple(self) -> None:
graph = self._make_graph(
[('R', TensorProto.FLOAT, ()), # scalar's shape is ()
('T', TensorProto.INT64, ()), # scalar's shape is ()
('X1', TensorProto.FLOAT, (1, 2)),
('X2', TensorProto.FLOAT, (3, 4)),
('G1', TensorProto.FLOAT, (1, 2)),
('G2', TensorProto.FLOAT, (3, 4)),
('V1', TensorProto.FLOAT, (1, 2)),
('V2', TensorProto.FLOAT, (3, 4)),
('H1', TensorProto.FLOAT, (1, 2)),
('H2', TensorProto.FLOAT, (3, 4))],
[make_node('Adam', ['R', 'T', 'X1', 'X2', 'G1', 'G2', 'V1', 'V2', 'H1', 'H2'],
['X1_new', 'X2_new', 'V1_new', 'V2_new', 'H1_new', 'H2_new'],
domain=AI_ONNX_PREVIEW_TRAINING_DOMAIN,
alpha=0.9, beta=1.0, norm_coefficient=0.02)],
[])
infos = [make_tensor_value_info('X1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('X2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('V1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('V2_new', TensorProto.FLOAT, (3, 4)),
make_tensor_value_info('H1_new', TensorProto.FLOAT, (1, 2)),
make_tensor_value_info('H2_new', TensorProto.FLOAT, (3, 4))]
self._assert_inferred(
graph,
infos,
opset_imports=[make_opsetid(AI_ONNX_PREVIEW_TRAINING_DOMAIN, 1), make_opsetid(ONNX_DOMAIN, 12)])
def test_pad_opset10(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, None, 2))],
            [make_node('Pad', ['x'], ['y'], pads=[1, 3, 1, 1, 0, 1])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, None, 4))], opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 10)]) # type: ignore
def test_constant_pad_2d_opset10(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3, 4, 4))],
            [make_node('Pad', ['x'], ['y'], pads=[0, 0, 3, 1, 0, 0, 4, 2], mode="constant", value=2.0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2, 3, 11, 7))], opset_imports=[helper.make_opsetid(ONNX_DOMAIN, 10)])
def test_pad(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, None, 2)),
('pads', TensorProto.INT64, (6,))],
[make_node('Pad', ['x', 'pads'], 'y')],
[],
initializer=[make_tensor('pads', TensorProto.INT64, (6,), (1, 3, 1, 1, 0, 1,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, None, 4))]) # type: ignore
def test_gatherelements_basic(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (6,)),
('indices', TensorProto.INT64, (2,))],
[make_node('GatherElements', ['x', 'indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (2,))])
def test_gatherelements_indices_missing_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (6,)),
('indices', TensorProto.INT64, None)], # type: ignore
[make_node('GatherElements', ['x', 'indices'], ['y'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, None)]) # type: ignore
def test_einsum_transpose(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x'], ['y'], equation='ij->ji')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_dot(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1,)),
('y', TensorProto.FLOAT, (1,))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='i,i->')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_einsum_scalar(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ()),
('y', TensorProto.FLOAT, ())],
[make_node('Einsum', ['x', 'y'], ['z'], equation=',->')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_einsum_outer_prod(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 5)),
('y', TensorProto.FLOAT, (7, 9))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ij,ab->ijab')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None))]) # type: ignore
def test_einsum_sum_along_dim(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x'], ['y'], equation='i j->i ')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, ))]) # type: ignore
def test_einsum_ellipsis(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 4))],
[make_node('Einsum', ['x'], ['y'], equation='... ii ->... i')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_ellipsis_2(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='...ij,...jk->...ik')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_ellipsis_3(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 2, 2)),
('y', TensorProto.FLOAT, (2, 2, 2))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='...ij,...jk')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_contraction(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 6, 7, 8)),
('y', TensorProto.FLOAT, (8, 9, 10))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='abcd,dfg->abcfg')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None, None))]) # type: ignore
def test_einsum_contraction_2(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('y', TensorProto.FLOAT, (3, 5))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ijk,ik->jk')],
[], )
self._assert_inferred(graph,
[make_tensor_value_info('z', TensorProto.FLOAT, (None, None))]) # type: ignore
def test_einsum_batch_matmul(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (5, 2, 3)),
('y', TensorProto.FLOAT, (5, 3, 4))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='bij , b jk-> bik')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None))]) # type: ignore
def test_einsum_left_hand_eqn(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (2, 3)),
('y', TensorProto.FLOAT, (3, 4))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='ij,kl')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (None, None, None, None))]) # type: ignore
def test_einsum_incorrect_num_inputs(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2, 3)),
("z", TensorProto.FLOAT, (2, 3))],
[make_node('Einsum', ['x', 'y'], ['z'], equation='i,...j, k, l-> i')],
[])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
    def test_negative_log_likelihood_shape_is_NC(self) -> None:
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, ))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_with_weight(self) -> None:
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,)),
('weight', TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, ))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_reduction_mean(self) -> None:
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NC_with_weight_reduction_mean(self) -> None:
N, C = 3, 4
graph = self._make_graph(
[('input', TensorProto.FLOAT, (N, C)),
('target', TensorProto.INT64, (N,)),
('weight', TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, d1, d2))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_with_weight(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, (N, d1, d2))]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_reduction_sum(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target'], ['loss'], reduction='sum')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_shape_is_NCd1d2_with_weight_reduction_mean(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C,))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self._assert_inferred(graph, [make_tensor_value_info('loss', TensorProto.FLOAT, ())]) # type: ignore
    def test_negative_log_likelihood_input_target_shape_mismatch(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, d1, d2)),
("target", TensorProto.INT64, (N, d1 + 1, d2)),
("weight", TensorProto.FLOAT, (C,)),
("loss", TensorProto.FLOAT, ())],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='mean')],
[])
self.assertRaises(onnx.shape_inference.InferenceError, self._inferred, graph)
    def test_negative_log_likelihood_input_weight_shape_mismatch(self) -> None:
N, C, d1, d2 = 3, 4, 5, 6
graph = self._make_graph(
[("input", TensorProto.FLOAT, (N, C, d1, d2)),
("target", TensorProto.INT64, (N, d1, d2)),
("weight", TensorProto.FLOAT, (C + 1,)),
("loss", TensorProto.FLOAT, (N, d1, d2))],
[make_node('NegativeLogLikelihoodLoss', ['input', 'target', 'weight'], ['loss'], reduction='none')],
[])
self.assertRaises(checker.ValidationError, self._inferred, graph)
def test_softmax_cross_entropy_none(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2,))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='none')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2,))]) # type: ignore
def test_softmax_cross_entropy_mean(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3)),
("y", TensorProto.FLOAT, (2,))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='mean')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_softmax_cross_entropy_none_NCD1D2(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3, 5, 8)),
("y", TensorProto.FLOAT, (2, 5, 8))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='none')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, (2, 5, 8))]) # type: ignore
def test_softmax_cross_entropy_mean_NCD1D2(self) -> None:
graph = self._make_graph(
[("x", TensorProto.FLOAT, (2, 3, 4, 5)),
("y", TensorProto.FLOAT, (2, 4, 5))],
[make_node('SoftmaxCrossEntropyLoss', ['x', 'y'], ['z'], reduction='mean')],
[],)
self._assert_inferred(graph, [make_tensor_value_info('z', TensorProto.FLOAT, ())]) # type: ignore
def test_celu_function_output_shape(self) -> None:
graph = self._make_graph(
[('X', TensorProto.FLOAT, (25, 48, 16, 16))],
[make_node('Celu', ['X'], ['Y'], alpha=2.0)],
[]
)
self._assert_inferred(graph, [make_tensor_value_info('Y', TensorProto.FLOAT, (25, 48, 16, 16))])
def prepare_input_initializer_tensors(self, initializer_shape, input_shape): # type: ignore
nodes = [make_node('Add', ['x', 'y'], 'z')]
if initializer_shape is None:
initializer = [] # type: ignore
else:
size = 1
for d in initializer_shape:
size = size * d
vals = [0.0 for i in range(size)]
initializer = [make_tensor("x", TensorProto.FLOAT, initializer_shape, vals), # type: ignore
make_tensor("y", TensorProto.FLOAT, initializer_shape, vals)]
if input_shape is None:
inputs = [] # type: ignore
else:
inputs = [helper.make_tensor_value_info('x', TensorProto.FLOAT, input_shape), # type: ignore
helper.make_tensor_value_info('y', TensorProto.FLOAT, input_shape)]
graph = helper.make_graph(nodes, "test", inputs=inputs, outputs=[], initializer=initializer, value_info=[])
return helper.make_model(graph)
def test_infer_with_initializer_without_input_above_ir4(self) -> None:
# This is for testing IR>=4: some tensors can only exist in initializer and not in input
# So shape_inference should make use of initializer shapes
initializer_shape = (8, 7)
original_model = self.prepare_input_initializer_tensors(initializer_shape, None)
inferred_model = onnx.shape_inference.infer_shapes(original_model, strict_mode=True)
# If shape inference fails, it will throw IndexError
        z_tensor = inferred_model.graph.value_info.pop()
        z_shape = (z_tensor.type.tensor_type.shape.dim[0].dim_value, z_tensor.type.tensor_type.shape.dim[1].dim_value)
assert z_shape == initializer_shape
def test_infer_with_initializer_without_input_below_ir4(self) -> None:
# This is for testing IR<4: tensors must exist both in initializer and input
# So shape_inference should not make use of initializer shapes
# Use (None, None) as empty input
initializer_shape = (8, 7)
input_shape = (None, None)
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
original_model.ir_version = 3 # test ir_version < 4
inferred_model = onnx.shape_inference.infer_shapes(original_model, strict_mode=True)
        z_tensor = inferred_model.graph.value_info.pop()
        z_shape = (z_tensor.type.tensor_type.shape.dim[0].dim_value, z_tensor.type.tensor_type.shape.dim[1].dim_value)
# If the input is not updated by the initializer, the output shape will keep empty (0, 0)
assert z_shape == (0, 0)
def test_infer_initializer_input_mismatch(self) -> None:
# Catch error if initializer and input mismatch
initializer_shape = (8, 7)
input_shape = (4, 3)
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
# Inferred shape and existing shape differ in dimension 0
self.assertRaises(onnx.shape_inference.InferenceError, onnx.shape_inference.infer_shapes, original_model, strict_mode=True)
def test_infer_initializer_input_consistency_all_none(self) -> None:
initializer_shape = (8, 7)
        input_shape = (None, None)  # acceptable
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
onnx.shape_inference.infer_shapes(original_model, strict_mode=True)
def test_infer_initializer_input_consistency_single_none(self) -> None:
initializer_shape = (8, 7)
        input_shape = (None, 7)  # acceptable
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
onnx.shape_inference.infer_shapes(original_model, strict_mode=True)
    def test_infer_initializer_input_consistency_different_rank(self) -> None:
initializer_shape = (8, 7, 9)
        input_shape = (None, 7)  # acceptable
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
# Inferred shape and existing shape differ in rank: (3) vs (2)
self.assertRaises(onnx.shape_inference.InferenceError, onnx.shape_inference.infer_shapes, original_model, strict_mode=True)
def test_infer_initializer_input_consistency_all_none_serialized(self) -> None:
# Reuse test_infer_initializer_input_consistency_all_none test case and check with
# Serialized model
initializer_shape = (8, 7)
        input_shape = (None, None)  # acceptable
original_model = self.prepare_input_initializer_tensors(initializer_shape, input_shape)
onnx.shape_inference.infer_shapes(original_model.SerializeToString(), strict_mode=True)
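    # Added illustration (not one of the original test cases): a compact recap
    # of the strict_mode behaviour exercised by the initializer tests above.
    # Default inference is lenient, while strict_mode=True validates shapes and
    # types and raises onnx.shape_inference.InferenceError on conflicts. The
    # leading underscore keeps this sketch out of test collection.
    def _sketch_strict_mode_usage(self) -> None:
        consistent = self.prepare_input_initializer_tensors((8, 7), (None, 7))
        conflicting = self.prepare_input_initializer_tensors((8, 7), (4, 3))
        # A compatible initializer/input pair passes both inference modes.
        onnx.shape_inference.infer_shapes(consistent)
        onnx.shape_inference.infer_shapes(consistent, strict_mode=True)
        # A conflicting pair is rejected by strict shape inference.
        self.assertRaises(onnx.shape_inference.InferenceError,
                          onnx.shape_inference.infer_shapes, conflicting, strict_mode=True)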
def test_trilu_upper(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('k', TensorProto.INT64, ())],
[make_node('Trilu', ['x', 'k'], ['y'])],
[],
initializer=[make_tensor('k', TensorProto.INT64, (), (2,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 5))]) # type: ignore
def test_trilu_lower(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5)),
('k', TensorProto.INT64, ())],
[make_node('Trilu', ['x', 'k'], ['y'], upper=0)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (), (10,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.FLOAT, (3, 4, 5))]) # type: ignore
def test_trilu_upper_zero(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT64, (0, 5)),
('k', TensorProto.INT64, ())],
[make_node('Trilu', ['x', 'k'], ['y'], upper=1)],
[],
initializer=[make_tensor('k', TensorProto.INT64, (), (5,))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (0, 5))]) # type: ignore
def test_trilu_lower_one(self) -> None:
graph = self._make_graph(
[('x', TensorProto.INT32, (3, 1, 5))],
[make_node('Trilu', ['x'], ['y'], upper=0)],
[],)
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT32, (3, 1, 5))]) # type: ignore
def test_batch_norm_train(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,)),
('input_mean', TensorProto.FLOAT, (4,)),
('input_var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out', 'output_mean', 'output_var'], training_mode=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7)), # type: ignore
make_tensor_value_info('output_mean', TensorProto.FLOAT, (4,)), # type: ignore
make_tensor_value_info('output_var', TensorProto.FLOAT, (4,)), # type: ignore
])
def test_batch_norm_train_dim_param(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 'C', 5, 6, 7)),
('scale', TensorProto.FLOAT, ('C',)),
('b', TensorProto.FLOAT, ('C',)),
('input_mean', TensorProto.FLOAT, ('C',)),
('input_var', TensorProto.FLOAT, ('C',))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out', 'output_mean', 'output_var'], training_mode=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 'C', 5, 6, 7)), # type: ignore
make_tensor_value_info('output_mean', TensorProto.FLOAT, ('C',)), # type: ignore
make_tensor_value_info('output_var', TensorProto.FLOAT, ('C',)), # type: ignore
])
def test_batch_norm_train_with_diff_type(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT16, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT16, (4,)),
('b', TensorProto.FLOAT16, (4,)),
('input_mean', TensorProto.FLOAT, (4,)),
('input_var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out', 'output_mean', 'output_var'], training_mode=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT16, (3, 4, 5, 6, 7)), # type: ignore
make_tensor_value_info('output_mean', TensorProto.FLOAT, (4,)), # type: ignore
make_tensor_value_info('output_var', TensorProto.FLOAT, (4,)), # type: ignore
])
def test_batch_norm_test(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, 5, 6, 7)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,)),
('input_mean', TensorProto.FLOAT, (4,)),
('input_var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out'], training_mode=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, 5, 6, 7))]) # type: ignore
def test_batch_norm_test_no_dim(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3, 4, None, None, None)),
('scale', TensorProto.FLOAT, (4,)),
('b', TensorProto.FLOAT, (4,)),
('input_mean', TensorProto.FLOAT, (None,)),
('input_var', TensorProto.FLOAT, (4,))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out'], training_mode=0)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, (3, 4, None, None, None))]) # type: ignore
def test_batch_norm_train_no_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, None),
('scale', TensorProto.FLOAT, None),
('b', TensorProto.FLOAT, None),
('input_mean', TensorProto.FLOAT, ('C',)),
('input_var', TensorProto.FLOAT, ('C',))],
[make_node('BatchNormalization', ['x', 'scale', 'b', 'input_mean', 'input_var'],
['out', 'running_mean', 'running_var'], training_mode=1)],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.FLOAT, None), # type: ignore
make_tensor_value_info('running_mean', TensorProto.FLOAT, ('C',)), # type: ignore
make_tensor_value_info('running_var', TensorProto.FLOAT, ('C',)), # type: ignore
])
def test_nonzero(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (None,))],
[make_node('NonZero', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT64, (1, None))]) # type: ignore
def test_nonzero_no_shape(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, None)],
[make_node('NonZero', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT64, (None, None))]) # type: ignore
def test_nonzero_existing_dim_param(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (3,))],
[make_node('NonZero', ['x'], ['y'])],
[make_tensor_value_info('y', TensorProto.INT64, (None, 'NZ'))])
self._assert_inferred(graph, [make_tensor_value_info('y', TensorProto.INT64, (1, 'NZ'))]) # type: ignore
def test_nonzero_scalar(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ())],
[make_node('NonZero', ['x'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.INT64, (0, None))]) # type: ignore
def test_optional_construct_empty_tensor(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=[1, 2, 3])
optional_type_proto = helper.make_optional_type_proto(tensor_type_proto)
optional_val_info = helper.make_value_info(
name='output',
type_proto=optional_type_proto)
graph = self._make_graph(
[],
[make_node('Optional', [], ['output'], type=tensor_type_proto)],
[])
self._assert_inferred(graph, [optional_val_info]) # type: ignore
def test_optional_construct_empty_sequence(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.INT32, shape=[1, 2, 3])
sequence_type_proto = helper.make_sequence_type_proto(tensor_type_proto)
optional_type_proto = helper.make_optional_type_proto(sequence_type_proto)
optional_val_info = helper.make_value_info(
name='output_sequence',
type_proto=optional_type_proto)
graph = self._make_graph(
[],
[make_node('Optional', [], ['output_sequence'], type=sequence_type_proto)],
[])
self._assert_inferred(graph, [optional_val_info]) # type: ignore
def test_optional_construct_tensor(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=[2, 3, 4])
optional_type_proto = helper.make_optional_type_proto(tensor_type_proto)
optional_val_info = helper.make_value_info(
name='output',
type_proto=optional_type_proto)
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4))],
[make_node('Optional', ['input1'], ['output'])],
[])
self._assert_inferred(graph, [optional_val_info]) # type: ignore
def test_optional_construct_sequence(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.INT64, shape=[2, 3, 0])
sequence_type_proto = helper.make_sequence_type_proto(tensor_type_proto)
sequence_val_info = helper.make_value_info(
name='input_sequence',
type_proto=sequence_type_proto)
optional_type_proto = helper.make_optional_type_proto(sequence_type_proto)
optional_val_info = helper.make_value_info(
name='output_sequence',
type_proto=optional_type_proto)
graph = self._make_graph(
[('input1', TensorProto.INT64, (2, 3, 0))],
[make_node('SequenceConstruct', ['input1'], ['input_sequence']),
make_node('Optional', ['input_sequence'], ['output_sequence'])],
[])
self._assert_inferred(graph, [sequence_val_info, optional_val_info]) # type: ignore
def test_optional_tensor_has_element(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=[2, 3, 4])
optional_type_proto = helper.make_optional_type_proto(tensor_type_proto)
optional_val_info = helper.make_value_info(
name='sequence',
type_proto=optional_type_proto)
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (2, 3, 4))],
[make_node('Optional', ['input1'], ['sequence']),
make_node('OptionalHasElement', ['sequence'], ['output'])],
[])
self._assert_inferred(graph, [optional_val_info,
make_tensor_value_info('output', TensorProto.BOOL, ())]) # type: ignore
def test_optional_sequence_has_element(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.FLOAT, shape=[0, 3, 4])
sequence_type_proto = helper.make_sequence_type_proto(tensor_type_proto)
sequence_val_info = helper.make_value_info(
name='sequence',
type_proto=sequence_type_proto)
optional_type_proto = helper.make_optional_type_proto(sequence_type_proto)
optional_val_info = helper.make_value_info(
name='optional',
type_proto=optional_type_proto)
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (0, 3, 4))],
[make_node('SequenceConstruct', ['input1'], ['sequence']),
make_node('Optional', ['sequence'], ['optional']),
make_node('OptionalHasElement', ['optional'], ['output'])],
[])
self._assert_inferred(graph, [sequence_val_info, optional_val_info,
make_tensor_value_info('output', TensorProto.BOOL, ())]) # type: ignore
def test_optional_tensor_get_element(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.DOUBLE, shape=[2, 1, 4])
tensor_val_into = helper.make_value_info(
name='output',
type_proto=tensor_type_proto)
optional_type_proto = helper.make_optional_type_proto(tensor_type_proto)
optional_val_info = helper.make_value_info(
name='optional',
type_proto=optional_type_proto)
graph = self._make_graph(
[('input1', TensorProto.DOUBLE, (2, 1, 4))],
[make_node('Optional', ['input1'], ['optional']),
make_node('OptionalGetElement', ['optional'], ['output'])],
[])
self._assert_inferred(graph, [optional_val_info, tensor_val_into]) # type: ignore
def test_optional_sequence_get_element(self) -> None:
tensor_type_proto = helper.make_tensor_type_proto(elem_type=TensorProto.INT32, shape=[2, 0, 4])
sequence_type_proto = helper.make_sequence_type_proto(tensor_type_proto)
sequence_val_into = helper.make_value_info(
name='sequence',
type_proto=sequence_type_proto)
optional_type_proto = helper.make_optional_type_proto(sequence_type_proto)
optional_val_info = helper.make_value_info(
name='optional',
type_proto=optional_type_proto)
output_val_into = helper.make_value_info(
name='output',
type_proto=sequence_type_proto)
graph = self._make_graph(
[('input1', TensorProto.INT32, (2, 0, 4))],
[make_node('SequenceConstruct', ['input1'], ['sequence']),
make_node('Optional', ['sequence'], ['optional']),
make_node('OptionalGetElement', ['optional'], ['output'])],
[])
self._assert_inferred(graph, [optional_val_info, sequence_val_into, output_val_into]) # type: ignore
def test_where_bfloat(self) -> None:
graph = self._make_graph(
[('cond', TensorProto.BOOL, (10,)), ('x', TensorProto.BFLOAT16, (10,)), ('y', TensorProto.BFLOAT16, (10,))],
[make_node('Where', ['cond', 'x', 'y'], ['out'])],
[])
self._assert_inferred(graph, [make_tensor_value_info('out', TensorProto.BFLOAT16, (10,))]) # type: ignore
def test_parse_data_with_unsupported_tensor_type(self) -> None:
model = helper.make_model(
graph=helper.make_graph(
name='graph_with_unsupported_type',
inputs=[],
outputs=[helper.make_tensor_value_info('y', TensorProto.FLOAT, shape=None)],
nodes=[make_node('ConstantOfShape', ['x'], ['y'])],
# ConstantOfShape only accepts np.int64 instead of np.int32
initializer=[numpy_helper.from_array(np.array([4, 3], dtype=np.int32), name='x')]))
# Strict shape inference should catch this invalid type error (int32 is not supported)
self.assertRaises(onnx.shape_inference.InferenceError,
onnx.shape_inference.infer_shapes, model, strict_mode=True)
        # Even normal shape inference should not produce any invalid shape due to wrong type for ParseData
inferred_model = onnx.shape_inference.infer_shapes(model)
self.assertFalse(inferred_model.graph.output[0].type.tensor_type.HasField('shape'))
def test_parse_data_with_undefined_tensor_type(self) -> None:
model = helper.make_model(
graph=helper.make_graph(
name='graph_with_undefined_type',
inputs=[],
outputs=[helper.make_tensor_value_info('y', TensorProto.FLOAT, shape=None)],
nodes=[make_node('ConstantOfShape', ['x'], ['y'])],
initializer=[numpy_helper.from_array(np.array([4, 3], dtype=np.int64), name='x')]))
# Hardcode the tensor type as UNDEFINED to test catching undefined type error
model.graph.initializer[0].data_type = TensorProto.UNDEFINED
# Strict shape inference should catch this undefined type error
self.assertRaises(onnx.shape_inference.InferenceError,
onnx.shape_inference.infer_shapes, model, strict_mode=True)
        # Even normal shape inference should not produce any invalid shape due to undefined type for ParseData
inferred_model = onnx.shape_inference.infer_shapes(model)
self.assertFalse(inferred_model.graph.output[0].type.tensor_type.HasField('shape'))
def test_gridsample(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, (1, 1, 3, 3)),
('grid', TensorProto.INT64, (1, 3, 3, 2))],
[make_node("GridSample", ['x', 'grid'], ['y'], mode='nearest', padding_mode='border', align_corners=1)],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, (1, 1, 3, 3))]) # type: ignore
def test_gridsample_defaults(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', 'H', 'W')),
('grid', TensorProto.FLOAT, ('N', 'H_out', 'W_out', 2))],
[make_node("GridSample", ['x', 'grid'], ['y'])],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, ('N', 'C', 'H_out', 'W_out'))]) # type: ignore
def test_gridsample_no_dim(self) -> None:
graph = self._make_graph(
[('x', TensorProto.FLOAT, ('N', 'C', None, None)),
('grid', TensorProto.FLOAT, ('N', None, None, 2))],
[make_node("GridSample", ['x', 'grid'], ['y'], mode='bilinear', padding_mode='border')],
[])
self._assert_inferred(
graph,
[make_tensor_value_info('y', TensorProto.FLOAT, ('N', 'C', None, None))]) # type: ignore
def test_sequence_map_identity_known_dims(self): # type: () -> None
input_value_infos = [make_tensor_value_info(
'input', TensorProto.FLOAT, (220, 220, 3))]
output_value_infos = [make_tensor_value_info(
'output', TensorProto.FLOAT, (220, 220, 3))]
body_graph = helper.make_graph(
[make_node('Identity', ['input'], ['output'])],
"body_graph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (220, 220, 3)),
('input2', TensorProto.FLOAT, (220, 220, 3)),
('input3', TensorProto.FLOAT, (220, 220, 3)),
],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceMap', ['in_sequence'], ['out_sequence'], body=body_graph)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (220, 220, 3)),
make_tensor_sequence_value_info('out_sequence', TensorProto.FLOAT, (220, 220, 3))]
) # type: ignore
def test_sequence_map_identity_unknown_dims(self): # type: () -> None
input_value_infos = [make_tensor_value_info(
'input', TensorProto.FLOAT, ('H', 'W', 3))]
output_value_infos = [make_tensor_value_info(
'output', TensorProto.FLOAT, ('H', 'W', 3))]
body_graph = helper.make_graph(
[make_node('Identity', ['input'], ['output'])],
"body_graph",
input_value_infos,
output_value_infos
)
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (200, 300, 3)),
('input2', TensorProto.FLOAT, (100, 200, 3)),
('input3', TensorProto.FLOAT, (5, 1, 3)),
],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceMap', ['in_sequence'], ['out_sequence'], body=body_graph)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (None, None, 3)),
make_tensor_sequence_value_info('out_sequence', TensorProto.FLOAT, (None, None, 3))]) # type: ignore
def test_sequence_map_slice_outs_known_dims(self): # type: () -> None
body_graph = helper.make_graph(
nodes=[make_node('Slice', ['x', 'starts1', 'ends1', 'axes', ''], ['y1']),
make_node('Slice', ['x', 'starts2', 'ends2', 'axes', ''], ['y2'])],
name='body_graph',
inputs=[
onnx.helper.make_tensor_value_info(
'x', onnx.TensorProto.FLOAT, ('H', 'W', 3))
],
outputs=[
onnx.helper.make_tensor_value_info(
'y1', onnx.TensorProto.FLOAT, (10, 20, 3)),
onnx.helper.make_tensor_value_info(
'y2', onnx.TensorProto.FLOAT, (30, 40, 3)),
],
initializer=[make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('starts1', TensorProto.INT64,
(2,), (0, 0)),
make_tensor('ends1', TensorProto.INT64,
(2,), (10, 20)),
make_tensor('starts2', TensorProto.INT64,
(2,), (0, 0)),
make_tensor('ends2', TensorProto.INT64,
(2,), (30, 40)),
]
) # type: ignore
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (220, 310, 3)),
('input2', TensorProto.FLOAT, (110, 210, 3)),
('input3', TensorProto.FLOAT, (90, 110, 3)),
],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceMap', ['in_sequence'], ['out_sequence1', 'out_sequence2'], body=body_graph)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (None, None, 3)),
make_tensor_sequence_value_info(
'out_sequence1', TensorProto.FLOAT, (10, 20, 3)),
make_tensor_sequence_value_info(
'out_sequence2', TensorProto.FLOAT, (30, 40, 3)),
]) # type: ignore
def test_sequence_map_slice_outs_unknown_dims(self): # type: () -> None
body_graph = helper.make_graph(
nodes=[make_node('Slice', ['x', 'starts1', 'ends1', 'axes', ''], ['y1']),
make_node('Slice', ['x', 'starts2', 'ends2', 'axes', ''], ['y2'])],
name='body_graph',
inputs=[
onnx.helper.make_tensor_value_info(
'x', onnx.TensorProto.FLOAT, ('H', 'W', 3))
],
outputs=[
onnx.helper.make_tensor_value_info(
'y1', onnx.TensorProto.FLOAT, ('H1', 'W1', 3)),
onnx.helper.make_tensor_value_info(
'y2', onnx.TensorProto.FLOAT, ('H2', 'W2', 3)),
],
initializer=[make_tensor('axes', TensorProto.INT64, (2,), (0, 1)),
make_tensor('starts1', TensorProto.INT64,
(2,), (0, 0)),
make_tensor('ends1', TensorProto.INT64,
(2,), (10, 20)),
make_tensor('starts2', TensorProto.INT64,
(2,), (0, 0)),
make_tensor('ends2', TensorProto.INT64,
(2,), (30, 40)),
]
) # type: ignore
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (220, 310, 3)),
('input2', TensorProto.FLOAT, (110, 210, 3)),
('input3', TensorProto.FLOAT, (90, 110, 3)),
],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceMap', ['in_sequence'], ['out_sequence1', 'out_sequence2'], body=body_graph)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (None, None, 3)),
make_tensor_sequence_value_info(
'out_sequence1', TensorProto.FLOAT, (None, None, 3)),
make_tensor_sequence_value_info(
'out_sequence2', TensorProto.FLOAT, (None, None, 3)),
]) # type: ignore
def test_sequence_map_different_tensor_type(self): # type: () -> None
body_graph = helper.make_graph(
nodes=[make_node('Shape', ['x'], ['shape'])],
name='body_graph',
inputs=[
onnx.helper.make_tensor_value_info(
'x', onnx.TensorProto.FLOAT, ('H', 'W', 'C'))
],
outputs=[
onnx.helper.make_tensor_value_info(
'shape', onnx.TensorProto.INT64, (3,))
],
) # type: ignore
graph = self._make_graph(
[('input1', TensorProto.FLOAT, (220, 310, 3)),
('input2', TensorProto.FLOAT, (110, 210, 3)),
('input3', TensorProto.FLOAT, (90, 110, 3)),
],
[make_node('SequenceConstruct', ['input1', 'input2', 'input3'], ['in_sequence']),
make_node('SequenceMap', ['in_sequence'], ['shapes'], body=body_graph)],
[])
self._assert_inferred(
graph,
[make_tensor_sequence_value_info('in_sequence', TensorProto.FLOAT, (None, None, 3)),
make_tensor_sequence_value_info('shapes', TensorProto.INT64, (3,)),
]) # type: ignore
def test_hammingwindow(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("HammingWindow", ['shape'], ['y'])],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("HammingWindow", ['shape'], ['y'], periodic=0)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
def test_hannwindow(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("HannWindow", ['shape'], ['y'])],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("HannWindow", ['shape'], ['y'], periodic=0)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
def test_blackmanwindow(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("BlackmanWindow", ['shape'], ['y'])],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['shape'],
value=make_tensor('shape', TensorProto.INT64, (), (10,))),
make_node("BlackmanWindow", ['shape'], ['y'], periodic=0)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.INT64, ()),
make_tensor_value_info('y', TensorProto.FLOAT, (10,))]) # type: ignore
def test_dft_reals(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'])],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 5, 2))]) # type: ignore
def test_dft_reals2(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (1, 5, 10, 1,), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4))),
make_node("DFT", ['input', ''], ['output'], axis=1, onesided=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (1, 5, 10, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (1, 3, 10, 2))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (1, 5, 10, 1,), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4))),
make_node("DFT", ['input', ''], ['output'], axis=2, onesided=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (1, 5, 10, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (1, 5, 6, 2))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (1, 5, 10, 1,), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4))),
make_node("DFT", ['input', ''], ['output'], axis=1, onesided=0)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (1, 5, 10, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (1, 5, 10, 2))]) # type: ignore
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (1, 5, 10, 1,), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4))),
make_node("DFT", ['input', ''], ['output'], axis=2, onesided=0)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (1, 5, 10, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (1, 5, 10, 2))]) # type: ignore
def test_dft_complex(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 2), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'])],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 2)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 5, 2))]) # type: ignore
def test_dft_reals_onesided(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'], onesided=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 3, 2))]) # type: ignore
def test_dft_complex_onesided(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 2), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'], onesided=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 2)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 3, 2))]) # type: ignore
def test_dft_reals_inverse(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'], inverse=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 1)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 5, 2))]) # type: ignore
def test_dft_complex_inverse(self): # type: () -> None
graph = self._make_graph([],
[make_node("Constant", [], ['input'],
value=make_tensor('input', TensorProto.FLOAT, (2, 5, 2), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, ))),
make_node("DFT", ['input', ''], ['output'], inverse=1)],
[])
self._assert_inferred(graph,
[make_tensor_value_info('shape', TensorProto.FLOAT, (2, 5, 2)),
make_tensor_value_info('y', TensorProto.FLOAT, (2, 5, 2))]) # type: ignore
def test_stft_reals(self): # type: () -> None
graph = self._make_graph(
[],
[
make_node("Constant", [], ['signal'], value=make_tensor('signal', TensorProto.FLOAT, (2, 10, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3))),
make_node("Constant", [], ['frame_step'], value=make_tensor('frame_step', TensorProto.INT64, (), (2, ))),
make_node("Constant", [], ['window'], value=make_tensor('window', TensorProto.INT64, (5, ), (1, 2, 3, 4, 5))),
make_node("STFT", ['signal', 'frame_step', 'window'], ['output']),
],
[])
self._assert_inferred(graph,
[
make_tensor_value_info('signal', TensorProto.FLOAT, (2, 10, 1)),
make_tensor_value_info('frame_step', TensorProto.INT64, ()),
make_tensor_value_info('window', TensorProto.INT64, (5, )),
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, 5, 2))
]
) # type: ignore
graph = self._make_graph(
[],
[
make_node("Constant", [], ['signal'], value=make_tensor('signal', TensorProto.FLOAT, (2, 10, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3))),
make_node("Constant", [], ['frame_step'], value=make_tensor('frame_step', TensorProto.INT64, (), (2, ))),
make_node("Constant", [], ['window'], value=make_tensor('window', TensorProto.INT64, (5, ), (1, 2, 3, 4, 5))),
make_node("Constant", [], ['frame_length'], value=make_tensor('frame_length', TensorProto.INT64, (), (5, ))),
make_node("STFT", ['signal', 'frame_step', 'window'], ['output']),
],
[])
self._assert_inferred(graph,
[
make_tensor_value_info('signal', TensorProto.FLOAT, (2, 10, 1)),
make_tensor_value_info('frame_step', TensorProto.INT64, ()),
make_tensor_value_info('window', TensorProto.INT64, (5, )),
make_tensor_value_info('frame_length', TensorProto.INT64, ()),
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, 5, 2))
]
) # type: ignore
graph = self._make_graph(
[],
[
make_node("Constant", [], ['signal'], value=make_tensor('signal', TensorProto.FLOAT, (2, 10, 1), (0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3))),
make_node("Constant", [], ['frame_step'], value=make_tensor('frame_step', TensorProto.INT64, (), (2, ))),
make_node("Constant", [], ['frame_length'], value=make_tensor('frame_length', TensorProto.INT64, (), (5, ))),
make_node("STFT", ['signal', 'frame_step', '', 'frame_length'], ['output']),
],
[])
self._assert_inferred(graph,
[
make_tensor_value_info('signal', TensorProto.FLOAT, (2, 10, 1)),
make_tensor_value_info('frame_step', TensorProto.INT64, ()),
make_tensor_value_info('frame_length', TensorProto.INT64, ()),
make_tensor_value_info('output', TensorProto.FLOAT, (2, 3, 5, 2))
]
) # type: ignore
def test_melweightmatrix(self): # type: () -> None
graph = self._make_graph([],
[
make_node("Constant", [], ['num_mel_bins'], value=make_tensor('num_mel_bins', TensorProto.INT64, (), (10,))),
make_node("Constant", [], ['dft_length'], value=make_tensor('dft_length', TensorProto.INT64, (), (128,))),
make_node("Constant", [], ['sample_rate'], value=make_tensor('sample_rate', TensorProto.INT64, (), (10,))),
make_node("Constant", [], ['lower_edge_hertz'], value=make_tensor('lower_edge_hertz', TensorProto.FLOAT, (), (10.,))),
make_node("Constant", [], ['upper_edge_hertz'], value=make_tensor('upper_edge_hertz', TensorProto.FLOAT, (), (100.,))),
make_node("MelWeightMatrix", ['num_mel_bins', 'dft_length', 'sample_rate', 'lower_edge_hertz', 'upper_edge_hertz'], ['output'])],
[])
self._assert_inferred(graph,
[
make_tensor_value_info('num_mel_bins', TensorProto.INT64, ()),
make_tensor_value_info('dft_length', TensorProto.INT64, ()),
make_tensor_value_info('sample_rate', TensorProto.INT64, ()),
make_tensor_value_info('lower_edge_hertz', TensorProto.FLOAT, ()),
make_tensor_value_info('upper_edge_hertz', TensorProto.FLOAT, ()),
make_tensor_value_info('output', TensorProto.FLOAT, (65, 10))
]) # type: ignore
def test_melweightmatrix_with_output_datatype(self): # type: () -> None
graph = self._make_graph([],
[
make_node("Constant", [], ['num_mel_bins'], value=make_tensor('num_mel_bins', TensorProto.INT64, (), (10,))),
make_node("Constant", [], ['dft_length'], value=make_tensor('dft_length', TensorProto.INT64, (), (128,))),
make_node("Constant", [], ['sample_rate'], value=make_tensor('sample_rate', TensorProto.INT64, (), (10,))),
make_node("Constant", [], ['lower_edge_hertz'], value=make_tensor('lower_edge_hertz', TensorProto.FLOAT, (), (10.,))),
make_node("Constant", [], ['upper_edge_hertz'], value=make_tensor('upper_edge_hertz', TensorProto.FLOAT, (), (100.,))),
make_node("MelWeightMatrix", ['num_mel_bins', 'dft_length', 'sample_rate', 'lower_edge_hertz', 'upper_edge_hertz'], ['output'], output_datatype=TensorProto.DOUBLE)],
[])
self._assert_inferred(graph,
[
make_tensor_value_info('num_mel_bins', TensorProto.INT64, ()),
make_tensor_value_info('dft_length', TensorProto.INT64, ()),
make_tensor_value_info('sample_rate', TensorProto.INT64, ()),
make_tensor_value_info('lower_edge_hertz', TensorProto.FLOAT, ()),
make_tensor_value_info('upper_edge_hertz', TensorProto.FLOAT, ()),
make_tensor_value_info('output', TensorProto.DOUBLE, (65, 10))
]) # type: ignore
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ee2c193886ab22514a4005b23ceb595186d0e4bc | c68ca71a3ac9f62063e866ad3fe31be9e265835f | /Pycharm Lab04/grammar.py | fddb105697c460343abfa84e8a7495296a7b1c14 | [] | no_license | Victor-Alexandru/Formal-Languages-and-Compiler-Design | 971453887c2a83da434f7d622867fd6fd0615592 | d36eaf96d200183165c3ebd3a2240e34fcf39e7d | refs/heads/master | 2020-08-08T04:54:25.659844 | 2020-01-13T13:57:05 | 2020-01-13T13:57:05 | 213,719,498 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | import re
import tokenize
#
# A grammar is defined as G = (N, E, P, S) where:
#
# N = set of non-terminals
# E = set of terminals
# P = set of productions
# S = starting symbol
class Grammar:
@staticmethod
def parseLine(line):
equalPos = line.index('=')
rhs = line[equalPos + 1:].strip('\n').strip(' ')[1:-1]
return [symbol.strip() for symbol in rhs.split(',')]
@staticmethod
def fromFile(fileName):
with open(fileName) as file:
N = Grammar.parseLine(file.readline())
E = Grammar.parseLine(file.readline())
S = file.readline().split('=')[1].strip()
P = Grammar.parseRules([line.strip('\n').strip(' ').strip(',') for line in file][1:-1])
return Grammar(N, E, P, S)
@staticmethod
def parseRules(rules):
result = []
for rule in rules:
lhs, rhs = rule.split('->')
lhs = lhs.strip()
rhs = [value.strip() for value in rhs.split('|')]
for value in rhs:
result.append((lhs, value.split()))
return result
def __init__(self, N, E, P, S):
self.N = N
self.E = E
self.P = P
self.S = S
def isNonTerminal(self, value):
return value in self.N
def isTerminal(self, value):
return value in self.E
def isRegular(self):
usedInRhs = dict()
notAllowedInRhs = list()
for rule in self.P:
lhs, rhs = rule
hasTerminal = False
hasNonTerminal = False
for char in rhs:
if self.isNonTerminal(char):
usedInRhs[char] = True
hasNonTerminal = True
elif self.isTerminal(char):
if hasNonTerminal:
return False
hasTerminal = True
if char == 'E':
notAllowedInRhs.append(lhs)
if hasNonTerminal and not hasTerminal:
return False
for char in notAllowedInRhs:
if char in usedInRhs:
return False
return True
def getProductionsFor(self, nonTerminal):
if not self.isNonTerminal(nonTerminal):
raise Exception('Can only show productions for non-terminals')
return [prod for prod in self.P if prod[0] == nonTerminal]
def showProductionsFor(self, nonTerminal):
productions = self.getProductionsFor(nonTerminal)
print(', '.join([' -> '.join(prod) for prod in productions]))
def __str__(self):
return 'N = { ' + ', '.join(self.N) + ' }\n' \
+ 'E = { ' + ', '.join(self.E) + ' }\n' \
+ 'P = { ' + ', '.join([' -> '.join([prod[0], ' '.join(prod[1])]) for prod in self.P]) + ' }\n' \
+ 'S = ' + str(self.S) + '\n'
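# ---------------------------------------------------------------------------
# Minimal usage sketch (added illustration, not part of the original lab code).
# The file name 'g1.txt' is an assumption; it is expected to follow the format
# parsed by Grammar.fromFile above, for example:
#
#   N = { S, A }
#   E = { a, b }
#   S = S
#   P = {
#       S -> a A,
#       A -> b A | E
#   }
if __name__ == '__main__':
    grammar = Grammar.fromFile('g1.txt')
    print(grammar)
    print('Regular:', grammar.isRegular())
    grammar.showProductionsFor(grammar.S)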
| [
"[email protected]"
] | |
43504cf4ff2a416c1a81d72cfea563b923656089 | 792cceb9b573fdfff969404ded00448549e4aee7 | /overextends/templatetags/overextends_tags.py | 590b9e38862dbb754b27974876a34a6e959eeb85 | [
"BSD-2-Clause"
] | permissive | stefanw/django-overextends | bdc604bbe84664a844f76d10818630d554925834 | be53aaab7d7f9260c58ab22bcba1cf782c6224f3 | refs/heads/master | 2020-12-30T19:11:22.963138 | 2012-09-01T04:39:32 | 2012-09-01T04:39:32 | 12,434,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,730 | py |
import os
from django import template
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.template.loader_tags import ExtendsNode
from django.template.loader import find_template_loader
register = template.Library()
class OverExtendsNode(ExtendsNode):
"""
Allows the template ``foo/bar.html`` to extend ``foo/bar.html``,
given that there is another version of it that can be loaded. This
allows templates to be created in a project that extend their app
template counterparts, or even app templates that extend other app
templates with the same relative name/path.
We use our own version of ``find_template``, that uses an explict
list of template directories to search for the template, based on
the directories that the known template loaders
(``app_directories`` and ``filesystem``) use. This list gets stored
in the template context, and each time a template is found, its
absolute path gets removed from the list, so that subsequent
searches for the same relative name/path can find parent templates
in other directories, which allows circular inheritance to occur.
Django's ``app_directories``, ``filesystem``, and ``cached``
loaders are supported. The ``eggs`` loader, and any loader that
implements ``load_template_source`` with a source string returned,
should also theoretically work.
"""
def find_template(self, name, context, peeking=False):
"""
Replacement for Django's ``find_template`` that uses the current
template context to keep track of which template directories it
has used when finding a template. This allows multiple templates
with the same relative name/path to be discovered, so that
circular template inheritance can occur.
"""
# These imports want settings, which aren't available when this
# module is imported to ``add_to_builtins``, so do them here.
from django.template.loaders.app_directories import app_template_dirs
from django.conf import settings
# Store a dictionary in the template context mapping template
# names to the lists of template directories available to
# search for that template. Each time a template is loaded, its
# origin directory is removed from its directories list.
context_name = "OVEREXTENDS_DIRS"
if context_name not in context:
context[context_name] = {}
if name not in context[context_name]:
all_dirs = list(settings.TEMPLATE_DIRS) + list(app_template_dirs)
# os.path.abspath is needed under uWSGI, and also ensures we
# have consistent path separators across different OSes.
            # Materialise as a list so directories can be removed from it below.
            context[context_name][name] = list(map(os.path.abspath, all_dirs))
# Build a list of template loaders to use. For loaders that wrap
# other loaders like the ``cached`` template loader, unwind its
# internal loaders and add those instead.
loaders = []
for loader_name in settings.TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
loaders.extend(getattr(loader, "loaders", [loader]))
# Go through the loaders and try to find the template. When
# found, removed its absolute path from the context dict so
# that it won't be used again when the same relative name/path
# is requested.
for loader in loaders:
dirs = context[context_name][name]
try:
source, path = loader.load_template_source(name, dirs)
except TemplateDoesNotExist:
pass
else:
# Only remove the absolute path for the initial call in
# get_parent, and not when we're peeking during the
# second call.
if not peeking:
remove_path = os.path.abspath(path[:-len(name) - 1])
context[context_name][name].remove(remove_path)
return Template(source)
raise TemplateDoesNotExist(name)
def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
if (isinstance(template.nodelist[0], ExtendsNode) and
template.nodelist[0].parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template
@register.tag
def overextends(parser, token):
"""
Extended version of Django's ``extends`` tag that allows circular
inheritance to occur, eg a template can both be overridden and
extended at once.
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once "
"in the same template" % bits[0])
return OverExtendsNode(nodelist, parent_name, None)
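# ---------------------------------------------------------------------------
# Usage sketch (added illustration; the template names below are assumptions,
# not part of this module). With this tag library available to the template
# engine (for example loaded through ``add_to_builtins`` or an explicit
# ``{% load overextends_tags %}``), a project-level ``foo/bar.html`` can both
# override and extend the app-level template of the same relative path:
#
#     {% overextends "foo/bar.html" %}
#     {% block content %}
#         Project-specific additions here.
#         {{ block.super }}
#     {% endblock %}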
| [
"[email protected]"
] | |
ef3750605dc1ceb4f6039d5fb4f9de3f419c88ba | 58119a43f98d2d6387af04521ea6e577a12c3aed | /core/admin.py | 66d903c333e89d7d2c21040e1fc567afddea8c5e | [] | no_license | raysandeep/handly-backend | 21178b9580c3592d98c4b80bac7cbdcf704d6dba | 512767881ad0f04fb7870b8fa31241817aab1fe2 | refs/heads/master | 2022-12-24T22:34:59.807195 | 2020-09-29T22:17:43 | 2020-09-29T22:17:43 | 267,828,315 | 0 | 0 | null | 2020-05-29T10:16:06 | 2020-05-29T10:16:04 | null | UTF-8 | Python | false | false | 302 | py | from django.contrib import admin
from .models import (
Collections,
OutputFiles,
HandwritingInputLogger,
InputFile
)
# Register your models here.
admin.site.register(Collections)
admin.site.register(OutputFiles)
admin.site.register(HandwritingInputLogger)
admin.site.register(InputFile) | [
"[email protected]"
] | |
1682bd0197cf6a3cb8c7dbd041d629157afe7f2d | 317d199d36556ecf5da06c660cb5cb655a86ea09 | /Challenges/lisas_workbook/test_lisas_worbook.py | cc41dc87422cb6364030a186007bf9a595fbeac5 | [] | no_license | baubrun/Challenges-PY | e109126a64a20128202e03c2ed359c179f523dcd | e2ca45cbca264f5790ce303807e25810a5d8d977 | refs/heads/master | 2022-12-17T03:24:43.308680 | 2020-09-14T12:37:24 | 2020-09-14T12:37:24 | 262,485,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | import pytest
from lisas_workbook import workbook
@pytest.mark.parametrize("n, k, arr, result",
[
(5, 3, [4,2,6,1,10], 4)
]
)
def test_workbook(n, k, arr, result):
assert workbook(n, k, arr) == result | [
"[email protected]"
] | |
5180516dba72562eedf269560a7aef3602e6f7d9 | 0bf93a74ce5676e978f3ee79a98a1be90b0e20a5 | /htdocs/plotting/auto/scripts/__init__.py | 4a38e1e34555f0e92f3c8388885cd10dac45522d | [
"MIT"
] | permissive | claudemp/iem | 3c926361c55fde3265157e15bc5119d64dbf2418 | 557deb8c46342aa9a18ac56cba59345c072cf225 | refs/heads/master | 2021-04-06T13:39:08.352676 | 2018-03-12T20:58:14 | 2018-03-12T20:58:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,127 | py | """
Examples of widget types
dict(type='date', name='date2', default='2012/03/15', label='Bogus2:',
min="1893/01/01"), # Comes back to python as yyyy-mm-dd
"""
# Association of plots
data = {'plots': [
{'label': 'Daily', 'options': [
{'id': "108", 'mw': True,
"label": "Accumulated Station Departures of Precipitation/GDD/SDD"},
{'id': "172", 'mw': True,
"label": "Accumulated Year to Date Precipitation"},
{'id': "149", 'mw': True,
"label": "Arridity Index (High Temperature minus Precip Departures)"},
{'id': "11", 'label': "ASOS/AWOS Daily Min/Max Dew Point for a Year"},
{'id': "94",
"label": "Bias of 24 Hour High+Low Computation by Hour"},
{'id': "96",
"label": "Bias of 24 Hour Precipitation Computation by Hour"},
{'id': "82",
'label': "Calendar of Daily Observations from Automated Stations"},
{'id': "180", 'label': "Daily Temperature Climatology",
'mw': True},
{'id': "32", 'label': "Daily Temperature Departures for One Year",
'mw': True},
{'id': "21",
'label': "Change in NCDC 81 Daily Climatology over X Days"},
{'id': "174",
'label': "Compare Daily High/Low Temps for ASOS Stations"},
{'id': "91", 'mw': True,
"label": "Consecutative Day Statistics of High+Low Temps / Precip"},
{'id': "66", 'mw': True,
"label": ("Consecutative Days with High/Low Temp "
"Above/Below Threshold")},
{'id': "176", 'mw': True,
'label': "Daily Records Beat Margin"},
{'id': "5", 'mw': True,
'label': "Daily Records for each month of year"},
{'id': "31", 'mw': True,
'label': "Extreme Jumps or Dips in High Temperature over X days"},
{'id': "147", 'mw': True,
'label': "Frequency of One Station Warmer/Wetter than Another"},
{'id': "7", 'mw': True,
'label': "Growing Degree Day Periods for One Year by Planting Date"},
{'id': "61",
'label': ("High/Low Temp above/below avg OR dry streaks "
"by NWS CLI Sites")},
{'id': "19", 'mw': True,
'label': "Histogram of Daily High/Low Temperatures"},
{'id': "35", 'label': "Histogram of X Hour Temperature Changes"},
{'id': "60", 'label': ("Hourly Temperature/Dew Point Frequencies "
"Above/Below Threshold")},
{'id': "86", 'mw': True, 'label': "IEM Daily Reanalysis Plots"},
{'id': "139",
'label': "Largest Local Calendar Day Temperature Differences"},
{'id': "168", 'mw': True,
"label": "Latest Date of Year for High Temperature"},
{'id': "97", 'mw': True,
"label": "Map of Departures/Stats over One Period of Days"},
{'id': "34", 'mw': True,
'label': "Max Stretch of Days with High/Low Above/Below Threshold"},
{'id': "26",
'label': "Min Daily Low after 1 July / Max Daily High before 1 July"},
{'id': "126",
'label': ("Mixing Ratio / Vapor Pressure Deficit Climatology "
"and Yearly Timeseries Plot")},
{'id': "84", 'mw': True,
'label': ("MRMS Q3 / PRISM Estimated Precipitation "
"(multiday summaries/departures too)")},
{'id': "185", 'mw': True,
'label': ("Number of Days to Accumulate an Amount of Precipitation"
" (MRMS)")},
{'id': "164",
'label': ("Percentage of NWS CLI Sites Reporting Daily Above/Below "
"Temps or Precip/Snow")},
{'id': "22", 'mw': True,
'label': ("Percentage of Years within Temperature Range "
"from Averages")},
{'id': "83", 'mw': True,
'label': ("Period Averages or Totals of X days around a "
"given day of the year")},
{'id': "140",
'label': ("Period Statistics of Temp/Precip/Wind for a date period "
"each year [ASOS/Automated Stations]")},
{'id': "107", 'mw': True,
'label': ("Period Statistics of Temp/Precip for a date period "
"each year [COOP/Climate Sites]")},
{'id': "182", 'mw': True,
'label': "Precipitation (MRMS) Coverage Efficiency by State"},
{'id': "43",
'label': "Recent (Past 2-3 Days) Timeseries (Meteogram)"},
{'id': "157",
'label': "Relative Humidity Max/Min/Avg by Day of Year"},
{'id': "62", 'mw': True,
'label': "Snow Depth"},
{'id': "38", 'mw': True,
'label': "Solar Radiation Estimates from NARR"},
{'id': "25", 'mw': True,
'label': "Spread of Daily High and Low Temperatures"},
{'id': "137", 'mw': True,
'label': "Start Date of Spring/Fall with Statistics"},
{'id': "4", 'mw': True,
'label': "State Areal Coverage of Precip Intensity over X Days"},
{'id': "89", 'mw': True,
'label': "State Areal Coverage/Efficiency of Precipitation"},
{'id': "81", 'mw': True,
'label': "Standard Deviation of Daily Temperatures"},
{'id': "28", 'mw': True,
'label': "Trailing Number of Days Precipitation Total Rank"},
{'id': "142", 'mw': True,
'label': "Trailing X Number of Days Temp/Precipitation Departures"},
{'id': "132", 'mw': True,
'label': "Top 10 Precip/Temperature Values by Month/Season"},
{'id': "190", 'mw': True,
'label': "Year of Daily High/Low Temperature Record"},
]},
{'label': 'Monthly', 'options': [
{'id': "130", 'mw': True,
'label': "Average High/Low Temperature with/without Snowcover"},
{'id': "125", 'mw': True,
'label': "Climatological Maps of Annual/Monthly Averages"},
{'id': "1", 'mw': True,
'label': "Comparison of Multi-Month Totals/Averages"},
{'id': "55", 'label': "Daily Climatology Comparison"},
{'id': "17", 'label': "Daily High/Low Temps with Climatology"},
{'id': "129", 'mw': True,
'label': "Daily Observation Percentiles/Frequencies by Month"},
{'id': "15", 'mw': True,
'label': "Daily Temperature Change Frequencies by Month"},
{'id': "98", 'mw': True,
'label': "Day of Month Frequency of meeting temp/precip threshold"},
{'id': '65', 'mw': True,
'label': 'Day of the Month with the coldest/warmest temperature'},
{'id': '161',
'label': 'Days per month/season above/below some threshold'},
{'id': "29",
'label': "Frequency of Hourly Temperature within Range by Month"},
{'id': "9", 'mw': True, 'label': ("Growing Degree Day Climatology "
"and Daily Values for one Year")},
{'id': "42",
'label': ("Hourly Temperature/Dew Point "
"Streaks Above/Below Threshold")},
{'id': "154",
'label': "Hourly Temperature Averages by Month"},
{'id': "85",
'label': "Hourly Temperature Frequencies by Month"},
{'id': "177", 'mw': True,
'label': "ISU Soil Moisture Network Timeseries Plots"},
{'id': "2", 'mw': True,
'label': "Month Precipitation v Month Growing Degree Day Departures"},
{'id': "57", 'mw': True,
'label': "Monthly Precipitation/Temperature Records"},
{'id': "95", 'mw': True,
'label': "Monthly Precipitation/Temperature with El Nino Indices"},
{'id': "24", 'mw': True,
'label': ("Monthly Precipitation/Temperature "
"Climate District Ranks/Arridity")},
{'id': "3", 'mw': True,
'label': "Monthly Precipitation/Temperature Statistics by Year"},
{'id': "6", 'mw': True,
'label': "Monthly Precipitation/Temperature Distributions"},
{'id': "8", 'mw': True,
'label': "Monthly Precipitation Reliability"},
{'id': "23", 'mw': True,
'label': "Monthly Station Departures + El Nino 3.4 Index"},
{'id': "36", 'mw': True,
'label': "Month warmer than other Month for Year"},
{'id': "58", 'mw': True,
'label': ("One Day's Precipitation Greater than X percentage "
"of Monthly Total")},
{'id': "41", 'mw': True,
'label': ("Quantile / Quantile Plot of Daily Temperatures "
"for Two Months/Periods")},
{'id': "20", 'label': "Hours of Precipitation by Month"},
{'id': "47", 'mw': True,
'label': "Snowfall vs Precipitation Total for a Month"},
{'id': "39", 'mw': True,
'label': "Scenarios for this month besting some previous month"},
{'id': "71",
'label': "Wind Speed and Wind Direction Daily Averages for Month"},
{'id': "138",
'label': "Wind Speed and Wind Direction Monthly Climatology"},
{'id': "173",
'label': "Wind Speed Hourly Climatology by Month or Period"},
]},
{'label': 'Yearly', 'options': [
{'id': "135", 'mw': True,
'label': "Accumulated Days with High/Low Above/Below Threshold"},
{'id': "76",
'label': "Avg Dew Point / Vapor Pressure Deficit by Year or Season"},
{'id': "125", 'mw': True,
'label': "Climatological Maps of Annual/Monthly Averages"},
{'id': "151", 'mw': True,
'label': ("Difference between two periods or "
"single period of years [map]")},
{'id': "128", 'mw': True,
'label': "Comparison of Yearly Summaries between two stations"},
{'id': "99", 'label': "Daily High + Low Temperatures with Departures",
'mw': True},
{'id': "12", 'mw': True,
'label': ("Days per year and first/latest date "
"above/below given threshold")},
{'id': "184", 'mw': True,
'label': ("Days per year with High Temperature "
"above temperature thresholds")},
{'id': "74", 'mw': True,
'label': ("Days per year by season or year with temperature "
"above/below threshold")},
{'id': "181", 'mw': True,
'label': ("Days per year with temp/precip/snowfall "
"within ranges")},
{'id': "13", 'mw': True,
'label': "End/Start Date of Summer (warmest 91 day period) per Year"},
{'id': "27", 'mw': True,
'label': "First Fall Temp Below Threshold (First Freeze/Frost)"},
{'id': "165", 'mw': True,
'label': "First Fall/Last Spring Temp Below Threshold [map]"},
{'id': "119",
'label': "Frequency of First Fall Low Temperature by Day of Year"},
{'id': "189", 'mw': True,
'label': ("General yearly totals with trend line fitted")},
{'id': "179", 'mw': True,
'label': ("Growing Degree Day Scenarios For This Year")},
{'id': "152", 'mw': True,
'label': ("Growing Season Differences Map between "
"Two Periods")},
{'id': "148", 'mw': True,
'label': "Holiday or Same Day Daily Weather Observations each year"},
{'id': "53", 'label': ("Hourly Frequency of Temperature within "
"Certain Ranges")},
{'id': "10", 'mw': True,
'label': ("Last Spring and First Fall Date "
"above/below given threshold")},
{'id': '64', 'mw': True,
'label': 'Last or First Snowfall of Each Winter Season'},
{'id': "33", 'mw': True, 'label': "Maximum Low Temperature Drop"},
{'id': "188", 'mw': True,
'label': ("Max/Min High/Low after first "
"temperature exceedence of season")},
{'id': "105", 'mw': True,
'label': "Maximum Period between Precipitation Amounts"},
{'id': "46", 'label': "Minimum Wind Chill Temperature"},
{'id': "30", 'mw': True, 'label': "Monthly Temperature Range"},
{'id': "44", 'label': "NWS Office Accumulated SVR+TOR Warnings"},
{'id': "69", 'mw': True,
'label': "Percentage of Days each Year Above Average"},
{'id': "77", 'mw': True,
'label': "Period between Last and First High Temperature for Year"},
{'id': "134", 'mw': True,
'label': "Period each year that was warmest/coldest/wettest"},
{'id': "75", 'mw': True,
'label': "Precipitation Totals by Season/Year"},
{'id': "63", 'mw': True,
'label': "Records Set by Year (Max High / Min Low)"},
{'id': "144", 'mw': True,
'label': "Soil Temperature Periods Above/Below Threshold in Spring"},
{'id': "145", 'mw': True,
'label': "Soil Temperature Daily Time Series by Year"},
{'id': "175", 'mw': True,
'label': "Snow Coverage Percentage for State For One Winter"},
{'id': "133", 'mw': True,
'label': "Snowfall Season Totals Split by Date within Season"},
{'id': "103", 'mw': True,
'label': "Step Ups in High Temp / Step Downs in Low Temp by Year"},
{'id': "100", 'mw': True,
'label': "Temperature / Precipitation Statistics by Year"},
{'id': "136",
'label': "Time per Winter Season below Wind Chill Threshold"},
{'id': "104", 'mw': True,
'label': "Trailing X day temp/precip departures (weather cycling)"},
{'id': "14", 'mw': True,
'label': "Yearly Precipitation Contributions by Daily Totals"},
]},
{'label': 'Hydrology Plots', 'options': [
{'id': "160",
'label': ("River Guage Obs and Forecasts from HML Products")},
{'id': "178",
'label': ("NWS RFC Flash Flood Guidance Plots")},
{'id': "183",
'label': ("US Drought Monitor Areal Coverage by State")},
{'id': "186",
'label': ("US Drought Monitor Change in Areal Coverage by State")},
]},
{'label': 'METAR ASOS Special Plots', 'options': [
{'id': "78",
'label': ("Average Dew Point/RH% by Air Temperature "
"by Month or Season or Year")},
{'id': "79",
'label': ("Average Dew Point by Wind Direction "
"by Month or Season or Year")},
{'id': "40",
'label': "Cloud Amount and Level Timeseries for One Month"},
{'id': "88",
'label': "Cloudiness Impact on Hourly Temperatures"},
{'id': "59",
'label': "Daily u and v Wind Component Climatologies"},
{'id': "54",
'label': ("Difference between morning low "
"or afternoon high temperature between two sites")},
{'id': "167",
'label': ("Flight / Aviation Condition (VFR, MVFR, IFR, LIFR) "
"hourly for one month")},
{'id': "87",
'label': ("Frequency of METAR Code (Thunder, etc) "
"by week by hour")},
{'id': "131",
'label': ("Frequency of Overcast Clouds by Air Temperature "
"by month/season")},
{'id': "93", 'label': ("Heat Index / Temperature / Dew Point / "
"Wind Chill Hourly Histogram")},
{'id': "153",
'label': "Hourly Temp/Dew Point Extremes by Month/Season/Year"},
{'id': "159",
'label': "Hourly Temp/Dew Point Frequency by-year by-hour-of-day"},
{'id': "106",
'label': "Hourly temp distributions on days exceeding temperature"},
{'id': "169",
'label': "Largest Rise/Drop in Temperature over X Hours"},
{'id': "18", 'label': "Long term observation time series"},
{'id': "45", 'label': "Monthly Frequency of Overcast Conditions"},
{'id': "170",
'label': "Monthly Frequency of Present Weather Code in METAR Report"},
{'id': "67",
'label': "Monthly Frequency of Wind Speeds by Air Temperature"},
{'id': "37",
'label': "MOS Forecasted Temperature Ranges + Observations"},
{'id': "162",
'label': "Overcast Sky Condition 2D Histogram (Level by Week)"},
{'id': "146",
'label': "Temperature Frequency by Week During Precipitation"},
{'id': "155",
'label': "Top Ten Hourly Precipitation Reports"},
{'id': "16", 'label': "Wind Rose when specified criterion is meet"},
]},
{'label': 'NASS Quickstats (USDA Crop Statistics)', 'options': [
{'id': "156",
'label': ("Crop Condition by Year for Six States")},
{'id': "127",
'label': ("Crop Progress by Year")},
]},
{'label': 'NWS Warning Plots', 'options': [
{'id': "191",
'label': "Calendar Plot of Watch/Warn/Adv Daily Counts by WFO"},
{'id': "92",
'label': "Days since Last Watch/Warning/Advisory by WFO"},
{'id': "72",
'label': "Frequency of Issuance time for Watch/Warning/Advisories"},
{'id': "52",
'label': "Gaant Chart of WFO Issued Watch/Warning/Advisories"},
{'id': "163",
'label': "Local Storm Reports Issued by WFO [map]"},
{'id': "102",
'label': "Local Storm Report Source Type Ranks by Year"},
{'id': "44",
'label': "NWS Office Accumulated Warning/Warning/Advisories by Year"},
{'id': "68",
'label': "Number of Distinct Phenomena/Significance VTEC per Year"},
{'id': "73",
'label': "Number of Watch/Warning/Advisories Issued per Year"},
{'id': "171",
'label': ("Number of Watch/Warning/Advisories Issued "
"per Year per Month")},
{'id': "70",
'label': "Period between First and Last VTEC Product Each Year"},
{'id': "166",
'label': "Storm Prediction Center Watches per Year for a State"},
{'id': "48", 'label': "Time of Day Frequency for Given Warning / UGC"},
{'id': "80",
'label': "Time Duration of a Watch/Warning/Advisory for a UGC"},
{'id': "101",
'label': "Top 25 Most Frequent VTEC Products by Office/NWS"},
{'id': "56", 'label': "Weekly Frequency of a Watch/Warning/Advisory"},
{'id': "109",
'label': "WFO VTEC Event Counts for a Given Period (map)"},
{'id': "90",
'label': ("UGC or Polygon SBW Statistics for "
"Watch/Warning/Advisory by state/wfo")},
]},
{'label': 'Sustainable Corn Project Plots', 'options': [
{'id': "49", 'mw': True,
'label': "Two Day Precipitation Total Frequencies"},
{'id': "50", 'mw': True,
'label': "Frequency of Measurable Daily Precipitation"},
{'id': "51", 'mw': True,
'label': "Frequency of No Daily Precipitation over 7 Days"},
]},
{'label': 'Tall Towers Plots', 'options': [
{'id': "158", 'mw': True,
'label': "1 Second Interval Time Series "},
]},
{'label': 'Upper Air / RAOB Sounding Plots', 'options': [
{'id': "150",
'label': ("Single Sounding Mandatory Level Percentile Ranks")},
]},
]}
| [
"[email protected]"
] | |
4c7cc1da3d3db48d74d8801ee92a4e8e292c1dc1 | 538ca338d88598c1c671c502d03b94781fbebdff | /thriftworker/utils/imports.py | 09ebd16a067c8e7e23e570bf62ed703035ec4809 | [] | no_license | gdeetotdom/thriftworker | e12c94ac05eebe1b28f157415d3d6072ecf77876 | c9b7554845a7f76de2e2076568e2fb779cacdfff | refs/heads/master | 2021-01-10T19:22:53.608418 | 2013-11-11T08:18:59 | 2013-11-11T08:18:59 | 5,068,927 | 1 | 1 | null | 2013-11-11T08:19:00 | 2012-07-16T15:03:37 | C | UTF-8 | Python | false | false | 3,550 | py | from __future__ import absolute_import
import os
import sys
import imp
import importlib
from contextlib import contextmanager
import six
def symbol_by_name(name, aliases={}, imp=None, package=None,
sep='.', default=None, **kwargs):
"""Get symbol by qualified name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
or using ':' to separate module and symbol::
celery.concurrency.processes:TaskPool
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> symbol_by_name("celery.concurrency.processes.TaskPool")
<class 'celery.concurrency.processes.TaskPool'>
>>> symbol_by_name("default", {
... "default": "celery.concurrency.processes.TaskPool"})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> symbol_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = importlib.import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
sep = ':' if ':' in name else sep
module_name, _, cls_name = name.rpartition(sep)
if not module_name:
cls_name, module_name = None, package if package else cls_name
try:
try:
module = imp(module_name, package=package, **kwargs)
except ValueError, exc:
exc = ValueError("Couldn't import %r: %s" % (name, exc))
six.reraise(ValueError, exc, sys.exc_info()[2])
return getattr(module, cls_name) if cls_name else module
except (ImportError, AttributeError):
if default is None:
raise
return default
def instantiate(name, *args, **kwargs):
"""Instantiate class by name.
See :func:`symbol_by_name`.
"""
return symbol_by_name(name)(*args, **kwargs)
def qualname(obj):
if isinstance(obj, basestring):
return obj
if not hasattr(obj, '__name__') and hasattr(obj, '__class__'):
return qualname(obj.__class__)
return '.'.join([obj.__module__, obj.__name__])
def get_real_module(name):
"""Get the real Python module, regardless of any monkeypatching"""
fp, pathname, description = imp.find_module(name)
imp.acquire_lock()
try:
_realmodule = imp.load_module('_real_{0}'.format(name), fp, pathname,
description)
return _realmodule
finally:
imp.release_lock()
if fp:
fp.close()
@contextmanager
def cwd_in_path():
cwd = os.getcwd()
if cwd in sys.path:
yield
else:
sys.path.insert(0, cwd)
try:
yield cwd
finally:
try:
sys.path.remove(cwd)
except ValueError: # pragma: no cover
pass
def import_from_cwd(module, imp=None, package=None):
"""Import module, but make sure it finds modules
located in the current directory.
Modules located in the current directory has
precedence over modules located in `sys.path`.
"""
if imp is None:
imp = importlib.import_module
with cwd_in_path():
return imp(module, package=package)
| [
"[email protected]"
] | |
52e850e88c68b715b9fff51b8ed2477e68d341f2 | 79140b67cac1f5c8e3eb3ab3e7ad65a3a98866e8 | /test/dnacode.py | a712a7e8dceed824582a9d407ad1ef22a93bd53f | [] | no_license | dlovemore/bible | 63c1eceed4a919f7a6d2dfb76b6b084d05c49612 | 2594a2414a66c0abedd1278fef805415a8793f28 | refs/heads/master | 2021-01-03T07:17:45.527017 | 2020-05-16T17:54:18 | 2020-05-16T17:54:18 | 239,975,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,549 | py | # >>> from dna import *
# >>> (Genesis[1]-Genesis[4]).midv()
# Genesis 2:22 And the rib, which the LORD God had taken from man, made he a woman, and brought her unto the man.
# Genesis 2:23 And Adam said, This is now bone of my bones, and flesh of my flesh: she shall be called Woman, because she was taken out of Man.
# >>>
# >>> b.book(5)
# Deuteronomy 1:1-34:12 (959 verses)
# >>> b.book(5)[5].vn()
# 5055
# >>> tell(ssum,osum,'אלהימ')
# א ל ה י מ
# 1+30+5+10+40=86
# 1+12+5+10+13=41
# >>> tell(ssum,osum,'King')
# K i n g
# 20+9+50+7=86
# 11+9+14+7=41
# >>> tell(osum,ssum,'ברא')
# ב ר א
# 2+ 20+1= 23
# 2+200+1=203
# >>> tell('cre ate d')
# cre ate d
# 26+ 26+4=56
# >>> tell('God')
# G o d
# 7+15+4=26
# >>> tell(ssum,'LORD JEHOVAH')
# LORD JEHOVAH
# 184 + 492 =676
# >>> osum('God')**2
# 676
# >>> AY=AV+W+X+Y
# >>> tell(ssum,'את')
# א ת
# 1+400=401
# >>> tell(ssum,'King')
# K i n g
# 20+9+50+7=86
# >>> AY
# 3088286401
# >>> bin(AY)
# '0b10111000000100111000001011000001'
# >>> 23<<27|39<<15|11<<6|1
# 3088286401
# >>> tell('Ch ri st')
# Ch ri st
# 11+27+39=77
# >>> Isaiah[41:4]
# Isaiah 41:4 Who hath wrought and done it, calling the generations from the beginning? I the LORD, the first, and with the last; I am he.
# >>> b/'was'/'and is'/'to come'
# 2 Samuel 7:19 And this was yet a small thing in thy sight, O Lord GOD; but thou hast spoken also of thy servant's house for a great while to come. And is this the manner of man, O Lord GOD?
# Revelation 4:8 And the four beasts had each of them six wings about him; and they were full of eyes within: and they rest not day and night, saying, Holy, holy, holy, LORD God Almighty, which was, and is, and is to come.
# >>> bin(975)
# '0b1111001111'
# >>> b/'ladder'
# Genesis 28:12 And he dreamed, and behold a ladder set up on the earth, and the top of it reached to heaven: and behold the angels of God ascending and descending on it.
# >>> _.tell()
# And he dreamed, and behold a ladder set up on the earth, and the top of it reached to heaven: and behold the angels of God ascending and descending on it.
# 19+13+ 50 + 19+ 46 +1+ 44 + 44+37+29+ 33+ 52 + 19+ 33+ 51+21+29+ 44 +35+ 55 + 19+ 46 + 33+ 58 +21+ 26+ 76 + 19+ 84 +29+ 29=1114
# >>> base(22,AV+W+X+Y)
# [1, 5, 5, 5, 8, 9, 0, 13]
# >>> int('1555890d',22)
# 3088286401
# >>> base(12,AV+W+X+Y)
# [7, 2, 2, 3, 1, 6, 9, 4, 1]
# >>> base(23,AV+W+X+Y)
# [20, 19, 18, 19, 18, 11, 18]
# >>> int('KJIJIBI',23)
# 3088286401
# >>>
# >>> Ecclesiastes[7:27]
# Ecclesiastes 7:27 Behold, this have I found, saith the preacher, counting one by one, to find out the account:
# >>> Genesis/'divide'/'light'
# Genesis 1:4,14,18 (3 verses)
# >>> p(_)
# Genesis 1
# 4 And God saw the light, that it was good: and God divided the light from the darkness.
# 14 And God said, Let there be lights in the firmament of the heaven to divide the day from the night; and let them be for signs, and for seasons, and for days, and years:
# 18 And to rule over the day and over the night, and to divide the light from the darkness: and God saw that it was good.
# >>>
# >>> AY-2**32
# -1206680895
# >>> AX=AV+W+X
# >>> AX
# 3031058986
# >>> 55055**2
# 3031053025
# >>> AX-55055**2
# 5961
# >>> pf(_)
# Counter({3: 1, 1987: 1})
# >>> math.sqrt(.05414)
# 0.23268003782017915
# >>> nF(414)
# (14, 377, -37, 414, 196, 610, 15)
# >>>
### >>> b/40000
### Joshua 4:13;Judges 5:8;2 Samuel 10:18;1 Kings 4:26;1 Chronicles 12:36;19:18 (6 verses)
### >>> p(_)
### Joshua 4:13 About forty thousand prepared for war passed over before the LORD unto battle, to the plains of Jericho.
### Judges 5:8 They chose new gods; then was war in the gates: was there a shield or spear seen among forty thousand in Israel?
### 2 Samuel 10:18 And the Syrians fled before Israel; and David slew the men of seven hundred chariots of the Syrians, and forty thousand horsemen, and smote Shobach the captain of their host, who died there.
### 1 Kings 4:26 And Solomon had forty thousand stalls of horses for his chariots, and twelve thousand horsemen.
### 1 Chronicles 12:36 And of Asher, such as went forth to battle, expert in war, forty thousand.
### 1 Chronicles 19:18 But the Syrians fled before Israel; and David slew of the Syrians seven thousand men which fought in chariots, and forty thousand footmen, and killed Shophach the captain of the host.
# >>> math.sqrt(40)
# 6.324555320336759
# >>> math.sqrt(22)
# 4.69041575982343
# >>> math.sqrt(14)
# 3.7416573867739413
# >>> math.sqrt(2)
# 1.4142135623730951
# >>>
| [
"[email protected]"
] | |
d0dc44857bee01a251fbea954848bda531caf3e6 | 857da13a653162cc8c83a7d53a254e2caa97836d | /tensorpac/io.py | 2e92c2212e409ac58f510be095da81bbd48b6f8a | [
"BSD-3-Clause"
] | permissive | EtienneCmb/tensorpac | 18a5e844826b7f63796276ec435d9dc43c440e40 | ac9058fd375d423757943810f613d63785fab85f | refs/heads/master | 2023-05-22T16:54:09.656836 | 2023-02-08T09:42:45 | 2023-02-08T09:42:45 | 93,316,276 | 70 | 18 | BSD-3-Clause | 2023-03-08T22:12:03 | 2017-06-04T13:47:18 | Python | UTF-8 | Python | false | false | 5,546 | py | """Define tensorpac logger.
See :
https://stackoverflow.com/questions/384076/how-can-i-color-python-logging-output
"""
import logging
import sys
import re
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'DEBUG': GREEN,
'PROFILER': MAGENTA,
'INFO': WHITE,
'WARNING': YELLOW,
'ERROR': RED,
'CRITICAL': RED,
}
FORMAT = {'compact': "$BOLD%(levelname)s | %(message)s",
'spacy': "$BOLD%(levelname)-19s$RESET | %(message)s",
'tensorpac': "$BOLD%(name)s-%(levelname)-19s$RESET | %(message)s",
'print': "%(message)s",
}
def formatter_message(message, use_color=True):
"""Format the message."""
return message.replace("$RESET", RESET_SEQ).replace("$BOLD", BOLD_SEQ)
class _Formatter(logging.Formatter):
"""Formatter."""
def __init__(self, format_type='compact'):
logging.Formatter.__init__(self, FORMAT[format_type])
self._format_type = format_type
def format(self, record):
name = record.levelname
msg = record.getMessage()
# If * in msg, set it in RED :
if '*' in msg:
regexp = '\*.*?\*'
re_search = re.search(regexp, msg).group()
to_color = COLOR_SEQ % (30 + RED) + re_search + COLOR_SEQ % (
30 + WHITE) + RESET_SEQ
msg_color = re.sub(regexp, to_color, msg)
msg_color += RESET_SEQ
record.msg = msg_color
# Set level color :
levelname_color = COLOR_SEQ % (30 + COLORS[name]) + name + RESET_SEQ
record.levelname = levelname_color
if record.levelno == 20:
logging.Formatter.__init__(self, FORMAT['print'])
else:
logging.Formatter.__init__(self, FORMAT[self._format_type])
return formatter_message(logging.Formatter.format(self, record))
class _StreamHandler(logging.StreamHandler):
"""Stream handler allowing matching and recording."""
def __init__(self):
logging.StreamHandler.__init__(self, sys.stderr)
self.setFormatter(_lf)
self._str_pattern = None
self.emit = self._tensorpac_emit
def _tensorpac_emit(self, record, *args):
msg = record.getMessage()
test = self._match_pattern(record, msg)
if test:
record.msg = test
return logging.StreamHandler.emit(self, record)
else:
return
def _match_pattern(self, record, message):
if isinstance(self._str_pattern, str):
if re.search(self._str_pattern, message):
sub = '*{}*'.format(self._str_pattern)
return re.sub(self._str_pattern, sub, message)
else:
return ''
else:
return message
logger = logging.getLogger('tensorpac')
_lf = _Formatter()
_lh = _StreamHandler() # needs _lf to exist
logger.addHandler(_lh)
PROFILER_LEVEL_NUM = 1
logging.addLevelName(PROFILER_LEVEL_NUM, "PROFILER")
def profiler_fcn(self, message, *args, **kws):
# Yes, logger takes its '*args' as 'args'.
if self.isEnabledFor(PROFILER_LEVEL_NUM):
self._log(PROFILER_LEVEL_NUM, message, args, **kws)
logging.Logger.profiler = profiler_fcn
LOGGING_TYPES = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL, PROFILER=PROFILER_LEVEL_NUM)
def set_log_level(verbose=None, match=None):
"""Convenience function for setting the logging level.
This function comes from the PySurfer package. See :
https://github.com/nipy/PySurfer/blob/master/surfer/utils.py
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either
PROFILER, DEBUG, INFO, WARNING, ERROR, or CRITICAL.
match : string | None
Filter logs using a string pattern.
"""
# if verbose is None:
# verbose = "INFO"
logger = logging.getLogger('tensorpac')
if isinstance(verbose, bool):
verbose = 'INFO' if verbose else 'WARNING'
if isinstance(verbose, str):
if (verbose.upper() in LOGGING_TYPES):
verbose = verbose.upper()
verbose = LOGGING_TYPES[verbose]
logger.setLevel(verbose)
else:
raise ValueError("verbose must be in "
"%s" % ', '.join(LOGGING_TYPES))
if isinstance(match, str):
_lh._str_pattern = match
def progress_bar(value, endvalue, bar_length=20, pre_st=None):
"""Progress bar."""
percent = float(value) / endvalue
arrow = '-' * int(round(percent * bar_length) - 1) + '>'
spaces = ' ' * (bar_length - len(arrow))
pre_st = '' if not isinstance(pre_st, str) else pre_st
sys.stdout.write("\r{0} [{1}] {2}%".format(pre_st, arrow + spaces,
int(round(percent * 100))))
sys.stdout.flush()
def is_pandas_installed():
"""Test if pandas is installed."""
try:
import pandas # noqa
except:
raise IOError("pandas not installed. See https://pandas.pydata.org/"
"pandas-docs/stable/install.html")
def is_statsmodels_installed():
"""Test if statsmodels is installed."""
try:
import statsmodels # noqa
except:
raise IOError("statsmodels not installed. See http://www.statsmodels."
"org/stable/install.html")
| [
"[email protected]"
] | |
13cfc887befe8ef7649877ee07af76ae36b68717 | 86da8c4d616a78afc7cd09711b0151e5f852a8b8 | /pythonprograms/practice/set.py | ff360377c4e4240ec2d4af0caab010bd53b6dd66 | [] | no_license | sharijamusthafa/luminarpython | d1d3274d23d93af2c5e4db7d2652e8cb46b133aa | 8ebd75ea5f734e5061a7138153a2c6b1cd43a738 | refs/heads/master | 2022-12-23T22:45:40.194242 | 2020-10-07T16:40:09 | 2020-10-07T16:40:09 | 290,109,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | st1={1,2,3,4,5}
st2={3,4,6,7,8}
st3=st1.difference(st2)
print(st3) | [
"[email protected]"
] | |
0d3b33c17b727ca246bacc1905cedb882f75f976 | ddf2274de833cbaa3422a0b4e1a6c70e98f9188f | /tensorflow/python/training/checkpointable/data_structures.py | c46af68155b596199a38969471c19fc697c90cab | [
"Apache-2.0"
] | permissive | TanguyUrvoy/tensorflow | 98b4534a3ecc2ce0738fc864386c9594148f4755 | ad683f866b465fa753731be283515bb0a67ad078 | refs/heads/master | 2020-04-16T14:49:40.864312 | 2019-01-14T14:51:47 | 2019-01-14T14:57:33 | 165,680,898 | 1 | 0 | Apache-2.0 | 2019-01-14T15:02:37 | 2019-01-14T15:02:36 | null | UTF-8 | Python | false | false | 28,048 | py | """Checkpointable data structures."""
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import operator
import six
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import revived_types
from tensorflow.python.training.checkpointable import base
from tensorflow.python.training.checkpointable import layer_utils
class NoDependency(object):
"""Allows attribute assignment to `Checkpointable` objects with no dependency.
Example usage:
```python
obj = Checkpointable()
obj.has_dependency = tf.Variable(0., name="dep")
obj.no_dependency = NoDependency(tf.Variable(1., name="nodep"))
assert obj.no_dependency.name == "nodep:0"
```
`obj` in this example has a dependency on the variable "dep", and both
attributes contain un-wrapped `Variable` objects.
`NoDependency` also works with `tf.keras.Model`, but only for checkpoint
dependencies: wrapping a `Layer` in `NoDependency` will assign the (unwrapped)
`Layer` to the attribute without a checkpoint dependency, but the `Model` will
still track the `Layer` (so it will appear in `Model.layers`, and its
variables will appear in `Model.variables`).
"""
def __init__(self, value):
self.value = value
def _wrap_or_unwrap(value):
"""Wraps basic data structures, unwraps NoDependency objects."""
if isinstance(value, NoDependency):
return value.value
if isinstance(value, base.CheckpointableBase):
return value # Skip conversion for already checkpointable objects.
elif isinstance(value, dict):
return _DictWrapper(value)
elif isinstance(value, list):
return _ListWrapper(value)
else:
return value
# TODO(allenl): Handle other common data structures. Tuples will require
# special casing (tuple subclasses are not weak referenceable, so replacement
# with a wrapper that subclasses tuple on attribute assignment works poorly,
# and replacement with a wrapper that isn't a tuple is also problematic),
# probably a tree traversal where the leaves are non-tuples(/namedtuples) to
# come up with names. Dictionaries should look like lists.
def sticky_attribute_assignment(checkpointable, name, value):
"""Adds dependencies, generally called from __setattr__.
This behavior is shared between Checkpointable and Model.
Respects NoDependency indicators, but otherwise makes checkpointable objects
out of common data structures and tracks objects by their attribute names.
Args:
checkpointable: The object to add dependencies to (generally the one having
an attribute assigned).
name: The attribute name being assigned.
value: The value being assigned. Not necessarily a checkpointable object.
Returns:
The value which should be stored in the attribute (unwrapped from a
NoDependency object if necessary).
"""
if isinstance(value, NoDependency):
add_dependency = False
else:
add_dependency = True
value = _wrap_or_unwrap(value)
if not add_dependency:
return value
if isinstance(value, base.CheckpointableBase):
checkpointable._track_checkpointable( # pylint: disable=protected-access
value, name=name,
# Allow the user to switch the Checkpointable which is tracked by this
# name, since assigning a new variable to an attribute has
# historically been fine (e.g. Adam did this).
overwrite=True)
return value
class CheckpointableDataStructure(base.CheckpointableBase):
"""Base class for data structures which contain checkpointable objects."""
def __init__(self):
self.trainable = True
self._extra_variables = []
def _track_value(self, value, name):
"""Add a dependency on `value`."""
value = sticky_attribute_assignment(
checkpointable=self, value=value, name=name)
if isinstance(value, variables.Variable):
self._extra_variables.append(value)
if not isinstance(value, base.CheckpointableBase):
raise ValueError(
("Only checkpointable objects (such as Layers or Optimizers) may be "
"stored in a List object. Got %s, which does not inherit from "
"CheckpointableBase.") % (value,))
if hasattr(value, "_use_resource_variables"):
# In subclassed models, legacy layers (tf.layers) must always use
# resource variables.
value._use_resource_variables = True # pylint: disable=protected-access
return value
@property
def _values(self):
"""An iterable/sequence which may contain checkpointable objects."""
raise NotImplementedError("Abstract method")
@property
def _layers(self):
"""All Layers and Layer containers, including empty containers."""
# Filter objects on demand so that wrapper objects use values from the thing
# they're wrapping if out of sync.
collected = []
for obj in self._values:
if (isinstance(obj, CheckpointableDataStructure)
or layer_utils.is_layer(obj)
or layer_utils.has_weights(obj)):
collected.append(obj)
return collected
@property
def layers(self):
return layer_utils.filter_empty_layer_containers(self._layers)
@property
def trainable_weights(self):
return layer_utils.gather_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._extra_variables)
@property
def non_trainable_weights(self):
return layer_utils.gather_non_trainable_weights(
trainable=self.trainable,
sub_layers=self._layers,
extra_variables=self._extra_variables)
@property
def weights(self):
return self.trainable_weights + self.non_trainable_weights
@property
def trainable_variables(self):
return self.trainable_weights
@property
def non_trainable_variables(self):
return self.non_trainable_weights
@property
def variables(self):
return self.weights
@property
def updates(self):
"""Aggregate updates from any `Layer` instances."""
# Updates and conditional losses are forwarded as-is rather than being
# filtered based on inputs, since this is just a container and won't ever
# have any inputs.
aggregated = []
for layer in self.layers:
if hasattr(layer, "updates"):
aggregated += layer.updates
return aggregated
@property
def losses(self):
"""Aggregate losses from any `Layer` instances."""
aggregated = []
for layer in self.layers:
if hasattr(layer, "losses"):
aggregated += layer.losses
return aggregated
def __hash__(self):
# Support object-identity hashing, so these structures can be used as keys
# in sets/dicts.
return id(self)
def __eq__(self, other):
# Similar to Tensors, checkpointable data structures use object-identity
# equality to support set/dict membership.
return self is other
class List(CheckpointableDataStructure, collections.Sequence):
"""An append-only sequence type which is checkpointable.
Maintains checkpoint dependencies on its contents (which must also be
checkpointable), and forwards any `Layer` metadata such as updates and losses.
Note that `List` is purely a container. It lets a `tf.keras.Model` or
other checkpointable object know about its contents, but does not call any
`Layer` instances which are added to it. To indicate a sequence of `Layer`
instances which should be called sequentially, use `tf.keras.Sequential`.
Example usage:
```python
class HasList(tf.keras.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = tf.contrib.checkpoint.List([layers.Dense(3)])
self.layer_list.append(layers.Dense(4))
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += tf.reduce_sum(x)
return aggregation
```
This kind of wrapping is necessary because `Checkpointable` objects do not
(yet) deeply inspect regular Python data structures, so for example assigning
a regular list (`self.layer_list = [layers.Dense(3)]`) does not create a
checkpoint dependency and does not add the `Layer` instance's weights to its
parent `Model`.
"""
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `list()`."""
super(List, self).__init__()
self._storage = self._make_storage(*args, **kwargs)
for index, element in enumerate(self._storage):
self._storage[index] = self._track_value(
element, name=self._name_element(index))
def __copy__(self):
return type(self)(copy.copy(self._storage))
def __deepcopy__(self, memo):
return type(self)(copy.deepcopy(self._storage, memo))
def _make_storage(self, *args, **kwargs):
"""Determines the backing storage (overridden in subclasses)."""
return list(*args, **kwargs)
def _name_element(self, index):
return "%d" % (index,)
@property
def _values(self):
return self
def append(self, value):
"""Add a new checkpointable value."""
value = self._track_value(value, self._name_element(len(self._storage)))
self._storage.append(value)
def extend(self, values):
"""Add a sequence of checkpointable values."""
for value in values:
self._storage.append(self._track_value(
value, name=self._name_element(len(self._storage))))
def __iadd__(self, values):
self.extend(values)
return self
def __add__(self, other):
if isinstance(other, List):
return self.__class__(self._storage + other._storage) # pylint: disable=protected-access
else:
return self.__class__(self._storage + other)
def __radd__(self, other):
return self + other
def __getitem__(self, key):
return self._storage[key]
def __len__(self):
return len(self._storage)
def __repr__(self):
return "List(%s)" % (repr(self._storage),)
class _ListWrapper(List, collections.MutableSequence,
# Shadowed, but there for isinstance checks.
list):
"""Wraps the built-in `list` to support restore-on-create for variables.
Unlike `List`, this sequence type is mutable in the same ways built-in lists
are. Instead of throwing an error immediately like `List`, it records
problematic mutations (e.g. assigning a new element to a position already
occupied, meaning both elements get the same names at different times) and
refuses to save.
On assignment to an attribute of a Model or Checkpointable object, Python
lists are replaced with _ListWrapper. Wrapping a list in a
`tf.contrib.checkpoint.NoDependency` object prevents this.
"""
def __init__(self, wrapped_list):
"""Construct a new list wrapper.
Args:
wrapped_list: The initial value of the data structure. A shallow copy may
be maintained for error checking. `wrapped_list` itself should not be
modified directly after constructing the `_ListWrapper`, and if changes
are detected the `_ListWrapper` will throw an exception on save.
"""
# Monotonic flags which indicate this object would not be restored properly,
# and therefore should throw an error on save to avoid giving the impression
# that restoring it will work.
self._non_append_mutation = False
self._external_modification = False
super(_ListWrapper, self).__init__(wrapped_list)
self._last_wrapped_list_snapshot = list(self._storage)
# pylint: disable=protected-access
def __copy__(self):
copied = super(_ListWrapper, self).__copy__()
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
return copied
def __deepcopy__(self, memo):
copied = super(_ListWrapper, self).__deepcopy__(memo)
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
return copied
# pylint: enable=protected-access
def _make_storage(self, wrapped_list):
"""Use the user's original list for storage."""
return wrapped_list
def _check_external_modification(self):
"""Checks for any changes to the wrapped list not through the wrapper."""
if self._external_modification or self._non_append_mutation:
return
if self._storage != self._last_wrapped_list_snapshot:
self._external_modification = True
self._last_wrapped_list_snapshot = None
def _update_snapshot(self):
"""Acknowledges tracked changes to the wrapped list."""
if self._external_modification or self._non_append_mutation:
return
self._last_wrapped_list_snapshot = list(self._storage)
@property
def _checkpoint_dependencies(self):
self._check_external_modification()
if self._non_append_mutation:
raise ValueError(
("Unable to save the object %s (a list wrapper constructed to track "
"checkpointable TensorFlow objects). A list element was replaced "
"(__setitem__), deleted, or inserted. In order to support "
"restoration on object creation, tracking is exclusively for "
"append-only data structures.\n\nIf you don't need this list "
"checkpointed, wrap it in a tf.contrib.checkpoint.NoDependency "
"object; it will be automatically un-wrapped and subsequently "
"ignored." % (self,)))
if self._external_modification:
raise ValueError(
("Unable to save the object %s (a list wrapper constructed to track "
"checkpointable TensorFlow objects). The wrapped list was modified "
"outside the wrapper (its final value was %s, its value when a "
"checkpoint dependency was added was %s), which breaks restoration "
"on object creation.\n\nIf you don't need this list checkpointed, "
"wrap it in a tf.contrib.checkpoint.NoDependency object; it will be "
"automatically un-wrapped and subsequently ignored." % (
self, self._storage, self._last_wrapped_list_snapshot)))
return super(_ListWrapper, self)._checkpoint_dependencies
def __delitem__(self, key):
self._non_append_mutation = True
del self._storage[key]
def __setitem__(self, key, value):
self._non_append_mutation = True
self._storage[key] = value
def append(self, value):
"""Add a new checkpointable value."""
self._check_external_modification()
super(_ListWrapper, self).append(value)
self._update_snapshot()
def extend(self, values):
"""Add a sequence of checkpointable values."""
self._check_external_modification()
super(_ListWrapper, self).extend(values)
self._update_snapshot()
def __eq__(self, other):
return self._storage == getattr(other, "_storage", other)
def __ne__(self, other):
return self._storage != getattr(other, "_storage", other)
def __lt__(self, other):
return self._storage < getattr(other, "_storage", other)
def __le__(self, other):
return self._storage <= getattr(other, "_storage", other)
def __gt__(self, other):
return self._storage > getattr(other, "_storage", other)
def __ge__(self, other):
return self._storage >= getattr(other, "_storage", other)
def __hash__(self):
# List wrappers need to compare like regular lists, and so like regular
# lists they don't belong in hash tables.
raise TypeError("unhashable type: 'ListWrapper'")
def insert(self, index, obj):
self._non_append_mutation = True
self._storage.insert(index, obj)
def _track_value(self, value, name):
"""Allows storage of non-checkpointable objects."""
try:
value = super(_ListWrapper, self)._track_value(value=value, name=name)
except ValueError:
# Even if this value isn't checkpointable, we need to make sure
# NoDependency objects get unwrapped.
value = sticky_attribute_assignment(
checkpointable=self, value=value, name=name)
return value
def __repr__(self):
return "ListWrapper(%s)" % (repr(self._storage),)
class Mapping(CheckpointableDataStructure, collections.Mapping):
"""An append-only checkpointable mapping data structure with string keys.
Maintains checkpoint dependencies on its contents (which must also be
checkpointable), named based on its keys.
Note that once a key has been added, it may not be deleted or replaced. If
names may not be unique, see `tf.contrib.checkpoint.UniqueNameTracker`.
"""
def __init__(self, *args, **kwargs):
"""Construct a new sequence. Arguments are passed to `dict()`."""
super(Mapping, self).__init__()
self._storage = self._make_storage(*args, **kwargs)
self._storage.update(
{key: self._track_value(
value, name=self._name_element(key))
for key, value in self._storage.items()})
def __copy__(self):
return type(self)(copy.copy(self._storage))
def __deepcopy__(self, memo):
return type(self)(copy.deepcopy(self._storage, memo))
def _make_storage(self, *args, **kwargs):
return dict(*args, **kwargs)
@property
def _values(self):
# Sort items deterministically by key
ordered = list(zip(*sorted(self.items(), key=lambda it: it[0])))
if ordered:
return ordered[1]
return []
def _name_element(self, key):
if not isinstance(key, six.string_types):
raise TypeError(
"Mapping accepts only string keys, but got a key %s."
% repr(key))
return str(key)
def __setitem__(self, key, value):
name = self._name_element(key)
value = self._track_value(value, name=name)
current_value = self._storage.setdefault(key, value)
if current_value is not value:
raise ValueError(
("Mappings are an append-only data structure. Tried to overwrite the "
"key '%s' with value %s, but it already contains %s")
% (key, value, current_value))
def update(self, *args, **kwargs):
for key, value in dict(*args, **kwargs).items():
self[key] = value
def __getitem__(self, key):
return self._storage[key]
def __len__(self):
return len(self._storage)
def __repr__(self):
return "Mapping(%s)" % (repr(self._storage),)
def __iter__(self):
return iter(self._storage)
# Unlike _ListWrapper, having _DictWrapper inherit from dict and pass isinstance
# checks seems infeasible. CPython will not call Python methods/properties on
# dictionary subclasses when running e.g. {}.update(dict_subclass), and instead
# collects elements directly from dict_subclass's C structs. So subclassing dict
# implies that the storage has to be "self" (i.e. the C structs for the object
# must be updated correctly), but we also need that storage to be the wrapped
# dictionary to avoid synchronization bugs (un-tracked external modifications
# should still show up when the dict is accessed through the wrapper). Monkey
# patching all of the "wrapped" dict's methods instead of creating a wrapper
# object is an option, but not a very attractive one (replacing methods without
# creating reference cycles is difficult, and then dicts would need to be
# special cased everywhere as being checkpointable).
class _DictWrapper(Mapping, collections.MutableMapping):
"""Wraps built-in dicts to support restore-on-create for variables.
_DictWrapper is to Mapping as _ListWrapper is to List. Unlike Mapping,
_DictWrapper allows non-string keys and values and arbitrary mutations (delete
keys, reassign values). Like _ListWrapper, these mutations mean that
_DictWrapper will raise an exception on save.
"""
def __new__(cls, *args):
if len(args) == 1 and isinstance(args[0], dict):
return super(_DictWrapper, cls).__new__(cls)
else:
# Allow construction from a sequence, e.g. for nest.pack_sequence_as. In
# this case there's nothing to wrap, so we make a normal dictionary. Also
# allows constructing empty instances of the _DictWrapper type, as Session
# is wont to do (and again there's nothing to wrap, so a normal dictionary
# makes more sense).
return dict(*args)
def __init__(self, wrapped_dict):
self._non_string_key = False
self._non_append_mutation = False
self._external_modification = False
super(_DictWrapper, self).__init__(wrapped_dict)
self._update_snapshot()
# pylint: disable=protected-access
def __copy__(self):
copied = super(_DictWrapper, self).__copy__()
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
copied._non_string_key = self._non_string_key
return copied
def __deepcopy__(self, memo):
copied = super(_DictWrapper, self).__deepcopy__(memo)
copied._non_append_mutation = self._non_append_mutation
copied._external_modification = self._external_modification
copied._non_string_key = self._non_string_key
return copied
# pylint: enable=protected-access
def _make_storage(self, wrapped_dict):
"""Re-use the wrapped dict for storage (to force them to be in sync)."""
return wrapped_dict
@property
def _checkpoint_dependencies(self):
"""Check that the object is saveable before listing its dependencies."""
self._check_external_modification()
if self._non_string_key:
raise ValueError(
"Unable to save the object %s (a dictionary wrapper constructed "
"automatically on attribute assignment). The wrapped dictionary "
"contains a non-string key which maps to a checkpointable object or "
"mutable data structure.\n\nIf you don't need this dictionary "
"checkpointed, wrap it in a tf.contrib.checkpoint.NoDependency "
"object; it will be automatically un-wrapped and subsequently "
"ignored." % (self,))
if self._non_append_mutation:
raise ValueError(
"Unable to save the object %s (a dictionary wrapper constructed "
"automatically on attribute assignment). A key mapping to a "
"checkpointable object was overwritten or deleted, which would "
"cause problems for restoration.\n\nIf you don't need this "
"dictionary checkpointed, wrap it in a "
"tf.contrib.checkpoint.NoDependency object; it will be automatically "
"un-wrapped and subsequently ignored." % (self,))
if self._external_modification:
raise ValueError(
"Unable to save the object %s (a dictionary wrapper constructed "
"automatically on attribute assignment). The wrapped dictionary was "
"modified outside the wrapper (its final value was %s, its value "
"when a checkpoint dependency was added was %s), which breaks "
"restoration on object creation.\n\nIf you don't need this "
"dictionary checkpointed, wrap it in a "
"tf.contrib.checkpoint.NoDependency object; it will be automatically "
"un-wrapped and subsequently ignored." % (
self, self, self._last_wrapped_dict_snapshot))
assert not self._dirty # Any reason for dirtiness should have an exception.
return super(_DictWrapper, self)._checkpoint_dependencies
@property
def _dirty(self):
"""Check if there has already been a mutation which prevents saving."""
return (self._external_modification
or self._non_append_mutation
or self._non_string_key)
def _check_external_modification(self):
"""Checks for any changes to the wrapped dict not through the wrapper."""
if self._dirty:
return
if self != self._last_wrapped_dict_snapshot:
self._external_modification = True
self._last_wrapped_dict_snapshot = None
def _update_snapshot(self):
"""Acknowledges tracked changes to the wrapped dict."""
if self._dirty:
return
self._last_wrapped_dict_snapshot = dict(self)
def _track_value(self, value, name):
"""Allows storage of non-checkpointable objects."""
if isinstance(name, six.string_types):
string_key = True
else:
name = "-non_string_key"
string_key = False
try:
no_dependency = isinstance(value, NoDependency)
value = super(_DictWrapper, self)._track_value(value=value, name=name)
if not (string_key or no_dependency):
# A non-string key maps to a checkpointable value. This data structure
# is not saveable.
self._non_string_key = True
return value
except ValueError:
# Even if this value isn't checkpointable, we need to make sure
# NoDependency objects get unwrapped.
return sticky_attribute_assignment(
checkpointable=self, value=value, name=name)
def _name_element(self, key):
"""Don't throw errors for non-string keys."""
if isinstance(key, six.string_types):
return super(_DictWrapper, self)._name_element(key)
else:
return key
def __setitem__(self, key, value):
"""Allow any modifications, but possibly mark the wrapper as unsaveable."""
self._check_external_modification()
no_dep = isinstance(value, NoDependency)
if isinstance(key, six.string_types):
existing_dependency = self._lookup_dependency(key)
value = self._track_value(value, name=key)
else:
value = _wrap_or_unwrap(value)
existing_dependency = None
if not no_dep and isinstance(value, base.CheckpointableBase):
# Non-string keys are OK as long as we have no reason to add a
# dependency on the value (either because the value is not
# checkpointable, or because it was wrapped in a NoDependency object).
self._non_string_key = True
current_value = self._storage.setdefault(key, value)
if current_value is not value:
if ((not no_dep and isinstance(value, base.CheckpointableBase))
# We don't want to just check that the existing object is
# checkpointable, since it may have been wrapped in a NoDependency
# object.
or existing_dependency is not None):
# A checkpointable object was replaced under the same key; this means
# that restoring would be error-prone, so we'll throw an exception on
# save.
self._non_append_mutation = True
self._storage[key] = value
self._update_snapshot()
def __delitem__(self, key):
self._check_external_modification()
existing_value = self[key]
if isinstance(existing_value, base.CheckpointableBase):
# Deleting tracked checkpointable values means restoring is problematic,
# so we'll throw an exception on save.
self._non_append_mutation = True
del self._storage[key]
self._update_snapshot()
def __repr__(self):
return "DictWrapper(%s)" % (repr(self._storage),)
def __hash__(self):
raise TypeError("unhashable type: 'DictWrapper'")
def __eq__(self, other):
return self._storage == getattr(other, "_storage", other)
def update(self, *args, **kwargs):
for key, value in dict(*args, **kwargs).items():
self[key] = value
revived_types.register_revived_type(
"checkpointable_dict_wrapper",
lambda obj: isinstance(obj, _DictWrapper),
versions=[revived_types.VersionedTypeRegistration(
object_factory=lambda _: _DictWrapper({}),
version=1,
min_producer_version=1,
min_consumer_version=1,
setter=operator.setitem)])
| [
"[email protected]"
] | |
a5aaebd396700872fe251036dd8234a37d473ff0 | c2b777fdd5b92aa4cbd25594b1ea877d6b280fc7 | /Max_number_of_zeroes.py | 19b4743d441dd8a5da8e493cf03a6223269ea584 | [] | no_license | pasbahar/python-practice | 2baa09c474fa5744a11dabcc75507f03cd75c6a5 | 23bcd774becaa720588feb7ba3cf6ea04aafcf86 | refs/heads/master | 2020-12-04T05:50:40.382790 | 2020-02-27T17:25:23 | 2020-02-27T17:25:23 | 231,641,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,119 | py | '''Given an array of N values. Print the number which has maximum number of zeroes. If there are no zeroes then print -1.
Note: If there are multiple numbers with same (max) number of zeroes then print the Maximum number among them.
Input:
The first line of input contains an integer T denoting the number of test cases. T testcases follow. Each testcase contains two lines of input. The first line consists of an integer N. The next line consists of N spaced integers.
Output:
For each testcase, print the number with maximum number of zeroes.
Constraints:
1<=T<=100
1<=N<=107
1<=A[i]<=10100
Example:
Input:
1
5
10 20 3000 9999 200
Output:
3000
Explanation:
Testcase1: 3000 hsa maximum number of zeroes so we print it.'''
for i in range(int(input())):
n=int(input())
l=list(map(str,input().split()))
max_c=0
res='-1'
for x in l:
count=0
for j in x:
if j=='0':
count+=1
if max_c<count:
max_c=count
res=x
elif max_c==count and max_c!=0:
if int(x)>int(res):
res=x
print(res)
| [
"[email protected]"
] | |
231e25b593f6a5a2a5edfe24195d3197dd43078b | affdd053d94ec566c783eafabfc2483e77cf9fa8 | /performer/fast_self_attention/fast_self_attention.py | 41096dc71ddc7d9f6a2799990e6d747bd6196e94 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | bobisai/google-research | 6cbf0fea8f2c6bd09f9e9db44ca981b9bf234535 | 8ee84eaf7afca5ef42c381d86fac3ca44b5922d2 | refs/heads/master | 2022-12-28T05:57:37.631395 | 2020-10-14T19:15:45 | 2020-10-14T19:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,090 | py | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Fast Attention Module for Flax.
Implementation of the approximate fast softmax and generalized
attention mechanisms leveraging structured random feature map (RFM) techniques
and a low-rank decomposition of the attention matrix.
"""
# pylint: disable=invalid-name, missing-function-docstring
import abc
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
from absl import logging
import gin
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as onp
# Nonlinear mappings encoding different attention kernels.
gin.external_configurable(jnp.cos, 'jcos')
gin.external_configurable(jnp.sin, 'jsin')
gin.external_configurable(jnp.tanh, 'jtanh')
gin.external_configurable(jax.nn.sigmoid, 'jsigmoid')
gin.external_configurable(jax.nn.relu, 'jrelu')
gin.external_configurable(lambda x: x * x * (x > 0.0), 'jrequ')
gin.external_configurable(jax.nn.gelu, 'jgelu')
gin.external_configurable(jnp.exp, 'jexp')
gin.external_configurable(lambda x: x, 'jidentity')
def nonnegative_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True,
eps=0.0001):
"""Constructs nonnegative kernel features for fast softmax attention.
Args:
    data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
is_query: predicate indicating whether input data corresponds to queries or
keys
    normalize_data: predicate indicating whether data should be normalized.
eps: numerical stabilizer.
Returns:
Random features for fast softmax attention.
"""
del attention_dims_t
if normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
if is_query:
last_dims_t = (len(data_dash.shape) - 1,)
data_dash = ratio * (
jnp.exp(data_dash - diag_data -
jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps)
else:
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps)
return data_dash
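# Illustrative sketch (added example, not part of the original API): calling
# the feature creator above directly on toy [batch, length, dim] tensors with
# batch_dims_t=(0,). The shapes and the helper name are assumptions made only
# for this example. Up to positive per-row constants that cancel once the
# attention weights are renormalized, query_prime @ key_prime^T approximates
# exp(q k^T / sqrt(dim)) as a Monte Carlo average over the random features.
def _example_nonnegative_softmax_features():
  batch, length, dim, nb_features = 2, 8, 16, 64
  q_rng, k_rng, proj_rng = random.split(random.PRNGKey(0), 3)
  query = random.normal(q_rng, (batch, length, dim))
  key = random.normal(k_rng, (batch, length, dim))
  projection = GaussianOrthogonalRandomMatrix(
      nb_features, dim, proj_rng).get_2d_array()
  # Queries and keys share the projection but use different stabilization.
  query_prime = nonnegative_softmax_kernel_feature_creator(
      query, projection, (1,), (0,), None, is_query=True)
  key_prime = nonnegative_softmax_kernel_feature_creator(
      key, projection, (1,), (0,), None, is_query=False)
  return query_prime, key_prime  # Both of shape [batch, length, nb_features].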
def sincos_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
normalize_data=True):
"""Constructs kernel sin-cos features for fast softmax attention.
Args:
data: input for which features are computes
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
    # We have: exp(qk^T/sqrt(d)) = exp(|q|^2/(2*sqrt(d))) * exp(|k|^2/(2*sqrt(d)))
    # * exp(-|q*c - k*c|^2/2), where c = 1.0 / sqrt(sqrt(d)).
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_dash_cos = ratio * jnp.cos(data_dash)
data_dash_sin = ratio * jnp.sin(data_dash)
data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)
# Constructing D_data and data^{'}
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
# Additional renormalization for numerical stability
data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
diag_data -= data_renormalizer
diag_data = jnp.exp(diag_data)
data_prime = data_dash * diag_data
return data_prime
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t,
precision, kernel_fn, kernel_epsilon,
normalize_data):
"""Constructs kernel features for fast generalized attention.
Args:
    data: input for which features are computed
projection_matrix: matrix used to compute features
batch_dims_t: tuple of batch dimensions
precision: precision parameter
kernel_fn: kernel function used
kernel_epsilon: additive positive term added to every feature for numerical
stability
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast generalized attention.
"""
if normalize_data:
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
else:
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime
@gin.configurable
def make_fast_softmax_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.000001,
nb_features=256,
ortho_features=True,
ortho_scaling=0.0,
redraw_features=True,
unidirectional=False,
nonnegative_features=True,
lax_scan_unroll=1):
"""Construct a fast softmax attention method."""
logging.info(
'Fast softmax attention: %s features and orthogonal=%s, renormalize=%s',
nb_features, ortho_features, renormalize_attention)
if ortho_features:
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix,
nb_features,
qkv_dim,
scaling=ortho_scaling)
else:
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
if nonnegative_features:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
return nonnegative_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision,
is_query, normalize_data, numerical_stabilizer)
else:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
del is_query
return sincos_softmax_kernel_feature_creator(data, projection_matrix,
attention_dims_t,
batch_dims_t, precision,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
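# Illustrative usage (sketch, not part of the original module; shapes below are
# assumptions for demonstration only):
#
#   attention_fn = make_fast_softmax_attention(qkv_dim=64, nb_features=256)
#   q = k = v = jnp.ones((2, 128, 4, 64))  # [batch, length, num_heads, qkv_dim]
#   out = attention_fn(q, k, v)            # same shape as the inputs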
@gin.configurable
def make_fast_generalized_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.0,
nb_features=256,
features_type='deterministic',
kernel_fn=jax.nn.relu,
kernel_epsilon=0.001,
redraw_features=False,
unidirectional=False,
lax_scan_unroll=1):
"""Construct a fast generalized attention menthod."""
logging.info('Fast generalized attention.: %s features and renormalize=%s',
nb_features, renormalize_attention)
if features_type == 'ortho':
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
elif features_type == 'iid':
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
elif features_type == 'deterministic':
matrix_creator = None
else:
raise ValueError('Unknown feature value type')
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=False):
del attention_dims_t
del is_query
return generalized_kernel_feature_creator(data, projection_matrix,
batch_dims_t, precision,
kernel_fn, kernel_epsilon,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
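# Illustrative usage (sketch, not part of the original module): generalized
# attention with a ReLU kernel and deterministic (projection-free) features.
#
#   attention_fn = make_fast_generalized_attention(
#       qkv_dim=64, features_type='deterministic', kernel_fn=jax.nn.relu)
#   out = attention_fn(q, k, v)  # q, k, v shaped as in the softmax example above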
class RandomMatrix(object):
r"""Abstract class providing a method for constructing 2D random arrays.
Class is responsible for constructing 2D random arrays.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_2d_array(self):
raise NotImplementedError('Abstract method')
class GaussianUnstructuredRandomMatrix(RandomMatrix):
def __init__(self, nb_rows, nb_columns, key):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
def get_2d_array(self):
return random.normal(self.key, (self.nb_rows, self.nb_columns))
class GaussianOrthogonalRandomMatrix(RandomMatrix):
r"""Class providing a method to create Gaussian orthogonal matrix.
Class is responsible for constructing 2D Gaussian orthogonal arrays.
"""
def __init__(self, nb_rows, nb_columns, key, scaling=0):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
self.scaling = scaling
def get_2d_array(self):
nb_full_blocks = int(self.nb_rows / self.nb_columns)
block_list = []
rng = self.key
for _ in range(nb_full_blocks):
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
if remaining_rows > 0:
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if self.scaling == 0:
multiplier = jnp.linalg.norm(
random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
elif self.scaling == 1:
multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
else:
      raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)
return jnp.matmul(jnp.diag(multiplier), final_matrix)
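# Illustrative usage (sketch, not part of the original module): draw a
# 256 x 64 block-orthogonal Gaussian projection matrix.
#
#   key = random.PRNGKey(0)
#   proj = GaussianOrthogonalRandomMatrix(256, 64, key, scaling=0).get_2d_array()
#   proj.shape  # (256, 64)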
class FastAttention(object):
r"""Abstract class providing a method for fast attention.
Class is responsible for providing a method <dot_product_attention> for fast
approximate attention.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying fast approximate dot-product
attention. It calculates the attention weights given query and key and
combines the values using the attention weights. This function supports
multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of [batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels].
key: keys for calculating attention with shape of [batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels].
value: values to be used in attention with shape of [batch_size, dim1,
dim2,..., dimN, num_heads, value_channels].
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
      axis: axes over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout.
dropout_rate: dropout rate.
deterministic: bool, deterministic or not (to apply dropout).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
      Output of shape [bs, dim1, dim2, ..., dimN, num_heads, value_channels].
"""
raise NotImplementedError('Abstract method')
def _numerator_fwd(z_slice_shape, precision, qs, ks, vs):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum('...m,...d->...md', k, v, precision=precision)
X_slice = jnp.einsum('...m,...md->...d', q, p, precision=precision)
return p, X_slice
init_value = jnp.zeros(z_slice_shape)
p, W = lax.scan(body, init_value, (qs, ks, vs))
return W, (p, qs, ks, vs)
def _numerator_bwd(z_slice_shape, precision, pqkv, W_ct):
del z_slice_shape
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum('...d,...md->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...d,...m->...md', x_ct, q, precision=precision)
k_ct = jnp.einsum('...md,...d->...m', p_ct, v, precision=precision)
v_ct = jnp.einsum('...md,...m->...d', p_ct, k, precision=precision)
p -= jnp.einsum('...m,...d->...md', k, v, precision=precision)
return (p, p_ct), (q_ct, k_ct, v_ct)
p, qs, ks, vs = pqkv
_, (qs_ct, ks_ct, vs_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True)
return qs_ct, ks_ct, vs_ct
@functools.partial(jax.custom_vjp, nondiff_argnums=(0, 1))
def _numerator(z_slice_shape, precision, qs, ks, vs):
W, _ = _numerator_fwd(z_slice_shape, precision, qs, ks, vs)
return W
_numerator.defvjp(_numerator_fwd, _numerator_bwd)
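# Added note: `_numerator` computes, via a lax.scan prefix sum over the sequence
# axis, W_i = q_i^T * (sum_{j <= i} k_j v_j^T) -- the causal (unidirectional)
# numerator of the low-rank attention, without materializing the L x L matrix.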
def _denominator_fwd(t_slice_shape, precision, qs, ks):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum('...m,...m->...', q, p, precision=precision)
return p, x
p = jnp.zeros(t_slice_shape)
p, R = lax.scan(body, p, (qs, ks))
return R, (qs, ks, p)
def _denominator_bwd(_t_slice_shape, precision, qkp, R_ct):
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
qs, ks, p = qkp
_, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)),
(qs, ks, R_ct), reverse=True)
return (qs_ct, ks_ct)
@functools.partial(jax.custom_vjp, nondiff_argnums=(0, 1))
def _denominator(t_slice_shape, precision, qs, ks):
R, _ = _denominator_fwd(t_slice_shape, precision, qs, ks)
return R
_denominator.defvjp(_denominator_fwd, _denominator_bwd)
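# Added note: `_denominator` is the matching prefix sum for the normalizer,
# R_i = q_i^T * (sum_{j <= i} k_j), used to renormalize unidirectional attention.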
class FastAttentionviaLowRankDecomposition(FastAttention):
r"""Class providing a method for fast attention via low rank decomposition.
Class is responsible for providing a method <dot_product_attention> for fast
dot-product attention with the use of low rank decomposition (e.g. with
random feature maps).
"""
def __init__(self,
matrix_creator,
kernel_feature_creator,
renormalize_attention,
numerical_stabilizer,
redraw_features,
unidirectional,
lax_scan_unroll=1): # For optimal GPU performance, set to 16.
rng = random.PRNGKey(0)
self.matrix_creator = matrix_creator
self.projection_matrix = self.draw_weights(rng)
self.kernel_feature_creator = kernel_feature_creator
self.renormalize_attention = renormalize_attention
self.numerical_stabilizer = numerical_stabilizer
self.redraw_features = redraw_features
self.unidirectional = unidirectional
self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
# TODO(kchoro): Get rid of the constant below.
query_seed = lax.convert_element_type(
jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(onp.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(query, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, True)
key_prime = self.kernel_feature_creator(key, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, False)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],) + (value.shape[-1],)
W = _numerator(z_slice_shape, precision,
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0),
jnp.moveaxis(value, index, 0))
# Constructing W = (Q^{'}(K^{'})^{T})_{masked}V
W = jnp.moveaxis(W, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],)
R = _denominator(t_slice_shape, precision,
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0))
R = jnp.moveaxis(R, 0, index)
else:
contract_query = tuple(
range(len(batch_dims) + len(axis),
len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing Z = (K^{'})^{T}V
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
Z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision)
# Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
W = lax.dot_general(
query_prime,
Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)),
precision=precision)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
contract_key = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(
range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct T = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
T = lax.dot_general(
key_prime,
thick_all_ones, ((contract_key, contract_thick_all_ones),
(batch_dims_t, batch_dims_t)),
precision=precision)
# Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# T (bs, <non-attention dims>, num_heads, channels_m)
R = lax.dot_general(
query_prime,
T, (((query_prime.ndim - 1,), (T.ndim - 1,)),
(batch_dims_t, range(0,
len(T.shape) - 1))),
precision=precision)
R = R + 2 * self.numerical_stabilizer * (
jnp.abs(R) <= self.numerical_stabilizer)
R = jnp.reciprocal(R)
R = jnp.expand_dims(R, len(R.shape))
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = W * R
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
| [
"[email protected]"
] | |
3f3511e74c158e797b7e8a76947e005431c1e9d0 | 4bed85e00fb4031d3f35051f645b224ec471d56a | /shadertoy/tests/__init__.py | 3382ad5c3787e64f9f27c303cc44f571f61d61ae | [] | no_license | defgsus/shadertoz | 04a8d56881eef5ccaca347993bb46d3b21f4016a | 77d419063d590b9a1b271d09203226a8a8527efa | refs/heads/master | 2020-04-12T00:44:35.049566 | 2019-06-23T20:20:35 | 2019-06-23T20:20:35 | 162,210,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | from .test_shadertoy_crawler_api import *
| [
"[email protected]"
] | |
72f8e492f29ea42a868c04f17d46f40e698fae2b | b0c8e0cafa4a8916faab3cce65756ae91426c43f | /study/Python/Week3/BOJ_4811_강의현.py | 19de46f493609fe6e4c68e9e3a1f45dc0ea0fe68 | [] | no_license | Rurril/IT-DA-3rd | b3e3ec3c2a5efbc75b76b84e9002c27a0ba4a1c4 | 9985e237cb1b90e9609656d534e0ed164723e281 | refs/heads/master | 2022-07-22T15:26:39.085369 | 2021-11-23T13:30:06 | 2021-11-23T13:30:06 | 288,980,334 | 3 | 29 | null | 2020-11-05T10:25:30 | 2020-08-20T10:49:17 | Java | UTF-8 | Python | false | false | 451 | py | #알약
import sys
def pill(W,H):
if dp[W][H]>0:
return dp[W][H]
if W==0:
return 1
if W>0 and H==0:
dp[W][H]+=pill(W-1,H+1)
elif W>0 and H>0:
dp[W][H]+=pill(W-1,H+1)
dp[W][H]+=pill(W,H-1)
return dp[W][H]
while True:
n=int(sys.stdin.readline())
dp=[[0 for _ in range(31)] for _ in range(31)]
if n==0:
break
else:
print(pill(n,0))
| [
"[email protected]"
] | |
f997ff5c8dc2cf41a469ffa1bd14cc67aa74d335 | 7c5fb33929116bb77b438de3ead93b3978b5af71 | /alf/examples/mbrl_pendulum.py | 068e609c739320a78caa5b8f2e8628c80e621db8 | [
"Apache-2.0"
] | permissive | HorizonRobotics/alf | d6dac891322a81ccb7e2a9749139627b1eda28cb | b00ff2fa5e660de31020338ba340263183fbeaa4 | refs/heads/pytorch | 2023-08-21T18:51:41.370566 | 2023-08-16T00:07:22 | 2023-08-16T00:07:22 | 178,459,453 | 288 | 57 | Apache-2.0 | 2023-09-14T20:40:20 | 2019-03-29T18:44:07 | Python | UTF-8 | Python | false | false | 1,653 | py | # Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import alf
# implement the respective reward functions for desired environments here
@alf.configurable
def reward_function_for_pendulum(obs, action):
"""Function for computing reward for gym Pendulum environment. It takes
as input:
(1) observation (Tensor of shape [batch_size, observation_dim])
(2) action (Tensor of shape [batch_size, num_actions])
and returns a reward Tensor of shape [batch_size].
"""
def _observation_cost(obs):
c_theta, s_theta, d_theta = obs[..., :1], obs[..., 1:2], obs[..., 2:3]
theta = torch.atan2(s_theta, c_theta)
cost = theta**2 + 0.1 * d_theta**2
cost = torch.sum(cost, dim=1)
cost = torch.where(
torch.isnan(cost), 1e6 * torch.ones_like(cost), cost)
return cost
def _action_cost(action):
return 0.001 * torch.sum(action**2, dim=-1)
cost = _observation_cost(obs) + _action_cost(action)
# negative cost as reward
reward = -cost
return reward
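# Illustrative usage (sketch, not part of the original file; shapes are assumptions):
#
#   obs = torch.zeros(32, 3)     # [cos(theta), sin(theta), theta_dot] per sample
#   action = torch.zeros(32, 1)  # torque
#   reward = reward_function_for_pendulum(obs, action)  # shape [32]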
| [
"[email protected]"
] | |
7e580cd084da27021bf20c0df03fca7023304f10 | 768058e7f347231e06a28879922690c0b6870ed4 | /venv/lib/python3.7/site-packages/numba/core/errors.py | f7bd215c811424896b1da3733c44869197e78dd1 | [] | no_license | jciech/HeisenbergSpinChains | 58b4238281d8c158b11c6c22dd0da82025fd7284 | e43942bbd09f6675e7e2ff277f8930dc0518d08e | refs/heads/master | 2022-12-18T08:04:08.052966 | 2020-09-29T12:55:00 | 2020-09-29T12:55:00 | 258,476,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,528 | py | """
Numba-specific errors and warnings.
"""
import abc
import contextlib
import os
import sys
import warnings
import numba.core.config
import numpy as np
from collections import defaultdict
from numba.core.utils import add_metaclass, reraise, chain_exception
from functools import wraps
from abc import abstractmethod
from importlib import import_module
from types import ModuleType
# Filled at the end
__all__ = []
class NumbaWarning(Warning):
"""
Base category for all Numba compiler warnings.
"""
def __init__(
self, msg, loc=None, highlighting=True,
):
self.msg = msg
self.loc = loc
if highlighting:
highlight = termcolor().errmsg
else:
def highlight(x):
return x
if loc:
super(NumbaWarning, self).__init__(
highlight("%s\n%s\n" % (msg, loc.strformat()))
)
else:
super(NumbaWarning, self).__init__(highlight("%s" % (msg,)))
class NumbaPerformanceWarning(NumbaWarning):
"""
Warning category for when an operation might not be
as fast as expected.
"""
class NumbaDeprecationWarning(NumbaWarning):
"""
Warning category for use of a deprecated feature.
"""
class NumbaPendingDeprecationWarning(NumbaWarning):
"""
Warning category for use of a feature that is pending deprecation.
"""
class NumbaParallelSafetyWarning(NumbaWarning):
"""
Warning category for when an operation in a prange
might not have parallel semantics.
"""
class NumbaTypeSafetyWarning(NumbaWarning):
"""
Warning category for unsafe casting operations.
"""
class NumbaExperimentalFeatureWarning(NumbaWarning):
"""
Warning category for using an experimental feature.
"""
# These are needed in the color formatting of errors setup
@add_metaclass(abc.ABCMeta)
class _ColorScheme(object):
@abstractmethod
def code(self, msg):
pass
@abstractmethod
def errmsg(self, msg):
pass
@abstractmethod
def filename(self, msg):
pass
@abstractmethod
def indicate(self, msg):
pass
@abstractmethod
def highlight(self, msg):
pass
class _DummyColorScheme(_ColorScheme):
def __init__(self, theme=None):
pass
def code(self, msg):
pass
def errmsg(self, msg):
pass
def filename(self, msg):
pass
def indicate(self, msg):
pass
def highlight(self, msg):
pass
# holds reference to the instance of the terminal color scheme in use
_termcolor_inst = None
try:
import colorama
# If the colorama version is < 0.3.9 it can break stdout/stderr in some
# situations, as a result if this condition is met colorama is disabled and
# the user is warned. Note that early versions did not have a __version__.
colorama_version = getattr(colorama, "__version__", "0.0.0")
if tuple([int(x) for x in colorama_version.split(".")]) < (0, 3, 9):
msg = (
"Insufficiently recent colorama version found. "
"Numba requires colorama >= 0.3.9"
)
# warn the user
warnings.warn(msg)
# trip the exception to disable color errors
raise ImportError
# If Numba is running in testsuite mode then do not use error message
# coloring so CI system output is consistently readable without having
# to read between shell escape characters.
if os.environ.get("NUMBA_DISABLE_ERROR_MESSAGE_HIGHLIGHTING", None):
raise ImportError # just to trigger the exception handler below
except ImportError:
class NOPColorScheme(_DummyColorScheme):
def __init__(self, theme=None):
if theme is not None:
raise ValueError("specifying a theme has no effect")
_DummyColorScheme.__init__(self, theme=theme)
def code(self, msg):
return msg
def errmsg(self, msg):
return msg
def filename(self, msg):
return msg
def indicate(self, msg):
return msg
def highlight(self, msg):
return msg
def termcolor():
global _termcolor_inst
if _termcolor_inst is None:
_termcolor_inst = NOPColorScheme()
return _termcolor_inst
else:
from colorama import init, reinit, deinit, Fore, Style
class ColorShell(object):
_has_initialized = False
def __init__(self):
init()
self._has_initialized = True
def __enter__(self):
if self._has_initialized:
reinit()
def __exit__(self, *exc_detail):
Style.RESET_ALL
deinit()
class reset_terminal(object):
def __init__(self):
self._buf = bytearray(b"")
def __enter__(self):
return self._buf
def __exit__(self, *exc_detail):
self._buf += bytearray(Style.RESET_ALL.encode("utf-8"))
# define some default themes, if more are added, update the envvars docs!
themes = {}
# No color added, just bold weighting
themes["no_color"] = {
"code": None,
"errmsg": None,
"filename": None,
"indicate": None,
"highlight": None,
}
# suitable for terminals with a dark background
themes["dark_bg"] = {
"code": Fore.BLUE,
"errmsg": Fore.YELLOW,
"filename": Fore.WHITE,
"indicate": Fore.GREEN,
"highlight": Fore.RED,
}
# suitable for terminals with a light background
themes["light_bg"] = {
"code": Fore.BLUE,
"errmsg": Fore.BLACK,
"filename": Fore.MAGENTA,
"indicate": Fore.BLACK,
"highlight": Fore.RED,
}
# suitable for terminals with a blue background
themes["blue_bg"] = {
"code": Fore.WHITE,
"errmsg": Fore.YELLOW,
"filename": Fore.MAGENTA,
"indicate": Fore.CYAN,
"highlight": Fore.RED,
}
# suitable for use in jupyter notebooks
themes["jupyter_nb"] = {
"code": Fore.BLACK,
"errmsg": Fore.BLACK,
"filename": Fore.GREEN,
"indicate": Fore.CYAN,
"highlight": Fore.RED,
}
default_theme = themes["no_color"]
class HighlightColorScheme(_DummyColorScheme):
def __init__(self, theme=default_theme):
self._code = theme["code"]
self._errmsg = theme["errmsg"]
self._filename = theme["filename"]
self._indicate = theme["indicate"]
self._highlight = theme["highlight"]
_DummyColorScheme.__init__(self, theme=theme)
def _markup(self, msg, color=None, style=Style.BRIGHT):
features = ""
if color:
features += color
if style:
features += style
with ColorShell():
with reset_terminal() as mu:
mu += features.encode("utf-8")
mu += (msg).encode("utf-8")
return mu.decode("utf-8")
def code(self, msg):
return self._markup(msg, self._code)
def errmsg(self, msg):
return self._markup(msg, self._errmsg)
def filename(self, msg):
return self._markup(msg, self._filename)
def indicate(self, msg):
return self._markup(msg, self._indicate)
def highlight(self, msg):
return self._markup(msg, self._highlight)
def termcolor():
global _termcolor_inst
if _termcolor_inst is None:
scheme = themes[numba.core.config.COLOR_SCHEME]
_termcolor_inst = HighlightColorScheme(scheme)
return _termcolor_inst
feedback_details = """
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
"""
unsupported_error_info = """
Unsupported functionality was found in the code Numba was trying to compile.
If this functionality is important to you please file a feature request at:
https://github.com/numba/numba/issues/new
"""
interpreter_error_info = """
Unsupported Python functionality was found in the code Numba was trying to
compile. This error could be due to invalid code, does the code work
without Numba? (To temporarily disable Numba JIT, set the `NUMBA_DISABLE_JIT`
environment variable to non-zero, and then rerun the code).
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
To see Python/NumPy features supported by the latest release of Numba visit:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
"""
constant_inference_info = (
"""
Numba could not make a constant out of something that it decided should be
a constant. This could well be a current limitation in Numba's internals,
however please first check that your code is valid for compilation,
particularly with respect to string interpolation (not supported!) and
the requirement of compile time constants as arguments to exceptions:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html?highlight=exceptions#constructs
If the code is valid and the unsupported functionality is important to you
please file a feature request at: https://github.com/numba/numba/issues/new
If you think your code should work with Numba. %s
"""
% feedback_details
)
typing_error_info = """
This is not usually a problem with Numba itself but instead often caused by
the use of unsupported features or an issue in resolving types.
To see Python/NumPy features supported by the latest release of Numba visit:
http://numba.pydata.org/numba-doc/latest/reference/pysupported.html
and
http://numba.pydata.org/numba-doc/latest/reference/numpysupported.html
For more information about typing errors and how to debug them visit:
http://numba.pydata.org/numba-doc/latest/user/troubleshoot.html#my-code-doesn-t-compile
If you think your code should work with Numba, please report the error message
and traceback, along with a minimal reproducer at:
https://github.com/numba/numba/issues/new
"""
reportable_issue_info = """
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
You are currently using Numba version %s.
%s
""" % (
numba.__version__,
feedback_details,
)
error_extras = dict()
error_extras["unsupported_error"] = unsupported_error_info
error_extras["typing"] = typing_error_info
error_extras["reportable"] = reportable_issue_info
error_extras["interpreter"] = interpreter_error_info
error_extras["constant_inference"] = constant_inference_info
def deprecated(arg):
"""Define a deprecation decorator.
An optional string should refer to the new API to be used instead.
Example:
@deprecated
def old_func(): ...
@deprecated('new_func')
def old_func(): ..."""
subst = arg if isinstance(arg, str) else None
def decorator(func):
def wrapper(*args, **kwargs):
msg = 'Call to deprecated function "{}".'
if subst:
msg += '\n Use "{}" instead.'
warnings.warn(
msg.format(func.__name__, subst),
category=DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wraps(func)(wrapper)
if not subst:
return decorator(arg)
else:
return decorator
_moved_msg1 = (
"An import was requested from a module that has moved location."
"\nImport requested from: '{}', please update to use "
"'{}' or pin to Numba version 0.48.0. This alias will not be "
"present in Numba version 0.50.0."
)
_moved_msg2 = (
"An import was requested from a module that has moved location"
".\nImport of '{}' requested from: '{}', please update to use "
"'{}' or pin to Numba version 0.48.0. This alias will not be "
"present in Numba version 0.50.0."
)
_moved_no_replacement = (
"No direct replacement for '{}' available. Visit "
"https://gitter.im/numba/numba-dev to request help. "
"Thanks!"
)
def deprecate_moved_module(old_module, new_module, stacklevel=2):
"""Warn about a module level location move of some part of Numba's
internals. stacklevel is 3 by default as most warning locations are
from `numba.XYZ` shims.
"""
if new_module is None:
msg = _moved_no_replacement.format(old_module)
else:
msg = _moved_msg1.format(old_module, new_module)
warnings.warn(msg, category=NumbaDeprecationWarning, stacklevel=stacklevel + 1)
class _MovedModule(ModuleType):
def __init__(self, old_module_locals, new_module):
old_module = old_module_locals["__name__"]
super().__init__(old_module)
# copy across dunders so that package imports work too
for attr, value in old_module_locals.items():
if attr.startswith("__") and attr.endswith("__"):
setattr(self, attr, value)
self.__new_module = new_module
deprecate_moved_module(old_module, new_module, stacklevel=3)
def __getattr__(self, attr):
""" warn users above modules moving locations """
try:
# import from the moved module
if self.__new_module is not None:
mod = import_module(self.__new_module)
ret_attr = getattr(mod, attr)
msg = _moved_msg2.format(attr, self.__name__, self.__new_module)
warnings.warn(msg, category=NumbaDeprecationWarning, stacklevel=2)
return ret_attr
else:
# produce the usual error
return super().__getattribute__(attr)
except AttributeError:
# not a package, so no submodules to attempt to import.
# can't use hasattr here because that would recurse.
if "__path__" not in self.__dict__:
raise
# perhaps this is a submodule name that was previous importer, but
# is no longer
try:
return import_module("." + attr, package=self.__name__)
except ModuleNotFoundError:
raise AttributeError(
"Moved module {!r} has no attribute or submodule {!r}".format(
self.__name__, attr
)
)
class WarningsFixer(object):
"""
An object "fixing" warnings of a given category caught during
certain phases. The warnings can have their filename and lineno fixed,
and they are deduplicated as well.
"""
def __init__(self, category):
self._category = category
# {(filename, lineno, category) -> messages}
self._warnings = defaultdict(set)
@contextlib.contextmanager
def catch_warnings(self, filename=None, lineno=None):
"""
Store warnings and optionally fix their filename and lineno.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter("always", self._category)
yield
for w in wlist:
msg = str(w.message)
if issubclass(w.category, self._category):
# Store warnings of this category for deduplication
filename = filename or w.filename
lineno = lineno or w.lineno
self._warnings[filename, lineno, w.category].add(msg)
else:
# Simply emit other warnings again
warnings.warn_explicit(msg, w.category, w.filename, w.lineno)
def flush(self):
"""
Emit all stored warnings.
"""
def key(arg):
# It is possible through codegen to create entirely identical
# warnings, this leads to comparing types when sorting which breaks
# on Python 3. Key as str() and if the worse happens then `id`
# creates some uniqueness
return str(arg) + str(id(arg))
for (filename, lineno, category), messages in sorted(
self._warnings.items(), key=key
):
for msg in sorted(messages):
warnings.warn_explicit(msg, category, filename, lineno)
self._warnings.clear()
class NumbaError(Exception):
def __init__(self, msg, loc=None, highlighting=True):
self.msg = msg
self.loc = loc
if highlighting:
highlight = termcolor().errmsg
else:
def highlight(x):
return x
if loc:
super(NumbaError, self).__init__(
highlight("%s\n%s\n" % (msg, loc.strformat()))
)
else:
super(NumbaError, self).__init__(highlight("%s" % (msg,)))
@property
def contexts(self):
try:
return self._contexts
except AttributeError:
self._contexts = lst = []
return lst
def add_context(self, msg):
"""
Add contextual info. The exception message is expanded with the new
contextual information.
"""
self.contexts.append(msg)
f = termcolor().errmsg("{0}\n") + termcolor().filename("[{1}] During: {2}")
newmsg = f.format(self, len(self.contexts), msg)
self.args = (newmsg,)
return self
def patch_message(self, new_message):
"""
Change the error message to the given new message.
"""
self.args = (new_message,) + self.args[1:]
class UnsupportedError(NumbaError):
"""
Numba does not have an implementation for this functionality.
"""
pass
class UnsupportedRewriteError(UnsupportedError):
"""UnsupportedError from rewrite passes"""
pass
class IRError(NumbaError):
"""
An error occurred during Numba IR generation.
"""
pass
class RedefinedError(IRError):
"""
An error occurred during interpretation of IR due to variable redefinition.
"""
pass
class NotDefinedError(IRError):
"""
An undefined variable is encountered during interpretation of IR.
"""
def __init__(self, name, loc=None):
self.name = name
msg = "Variable '%s' is not defined." % name
super(NotDefinedError, self).__init__(msg, loc=loc)
class VerificationError(IRError):
"""
An error occurred during IR verification. Once Numba's internal
representation (IR) is constructed it is then verified to ensure that
terminators are both present and in the correct places within the IR. If
it is the case that this condition is not met, a VerificationError is
raised.
"""
pass
class MacroError(NumbaError):
"""
An error occurred during macro expansion.
"""
pass
class DeprecationError(NumbaError):
"""
Functionality is deprecated.
"""
pass
class LoweringError(NumbaError):
"""
An error occurred during lowering.
"""
def __init__(self, msg, loc=None):
super(LoweringError, self).__init__(msg, loc=loc)
class UnsupportedParforsError(NumbaError):
"""
    An error occurred because parfors is not supported on the platform.
"""
pass
class ForbiddenConstruct(LoweringError):
"""
A forbidden Python construct was encountered (e.g. use of locals()).
"""
pass
class TypingError(NumbaError):
"""
A type inference failure.
"""
pass
class UntypedAttributeError(TypingError):
def __init__(self, value, attr, loc=None):
module = getattr(value, "pymod", None)
if module is not None and module == np:
# unsupported numpy feature.
msg = (
"Use of unsupported NumPy function 'numpy.%s' "
"or unsupported use of the function."
% attr
)
else:
msg = "Unknown attribute '{attr}' of type {type}"
msg = msg.format(type=value, attr=attr)
super(UntypedAttributeError, self).__init__(msg, loc=loc)
class ByteCodeSupportError(NumbaError):
"""
Failure to extract the bytecode of the user's function.
"""
def __init__(self, msg, loc=None):
super(ByteCodeSupportError, self).__init__(msg, loc=loc)
class CompilerError(NumbaError):
"""
Some high-level error in the compiler.
"""
pass
class ConstantInferenceError(NumbaError):
"""
Failure during constant inference.
"""
def __init__(self, value, loc=None):
super(ConstantInferenceError, self).__init__(value, loc=loc)
class InternalError(NumbaError):
"""
    For wrapping an internal error that occurred within the compiler
"""
def __init__(self, exception):
super(InternalError, self).__init__(str(exception))
self.old_exception = exception
class RequireLiteralValue(TypingError):
"""
For signalling that a function's typing requires a constant value for
some of its arguments.
"""
pass
class ForceLiteralArg(NumbaError):
"""A Pseudo-exception to signal the dispatcher to type an argument literally
Attributes
----------
requested_args : frozenset[int]
requested positions of the arguments.
"""
def __init__(self, arg_indices, fold_arguments=None, loc=None):
"""
Parameters
----------
arg_indices : Sequence[int]
requested positions of the arguments.
fold_arguments: callable
A function ``(tuple, dict) -> tuple`` that binds and flattens
the ``args`` and ``kwargs``.
loc : numba.ir.Loc or None
"""
super(ForceLiteralArg, self).__init__(
"Pseudo-exception to force literal arguments in the dispatcher", loc=loc,
)
self.requested_args = frozenset(arg_indices)
self.fold_arguments = fold_arguments
def bind_fold_arguments(self, fold_arguments):
"""Bind the fold_arguments function"""
e = ForceLiteralArg(self.requested_args, fold_arguments, loc=self.loc)
return chain_exception(e, self)
def combine(self, other):
"""Returns a new instance by or'ing the requested_args."""
if not isinstance(other, ForceLiteralArg):
m = "*other* must be a {} but got a {} instead"
raise TypeError(m.format(ForceLiteralArg, type(other)))
return ForceLiteralArg(self.requested_args | other.requested_args)
def __or__(self, other):
"""Same as self.combine(other)"""
return self.combine(other)
class LiteralTypingError(TypingError):
"""
Failure in typing a Literal type
"""
pass
def _format_msg(fmt, args, kwargs):
return fmt.format(*args, **kwargs)
_numba_path = os.path.dirname(__file__)
loc_info = {}
@contextlib.contextmanager
def new_error_context(fmt_, *args, **kwargs):
"""
    A contextmanager that prepends contextual information to any exception
    raised within. If the exception type is not an instance of NumbaError,
    it will be wrapped into an InternalError. The exception class can be
    changed by providing an "errcls_" keyword argument with the exception
    constructor.
The first argument is a message that describes the context. It can be a
format string. If there are additional arguments, it will be used as
``fmt_.format(*args, **kwargs)`` to produce the final message string.
"""
errcls = kwargs.pop("errcls_", InternalError)
loc = kwargs.get("loc", None)
if loc is not None and not loc.filename.startswith(_numba_path):
loc_info.update(kwargs)
try:
yield
except NumbaError as e:
e.add_context(_format_msg(fmt_, args, kwargs))
raise
except Exception as e:
newerr = errcls(e).add_context(_format_msg(fmt_, args, kwargs))
tb = sys.exc_info()[2] if numba.core.config.FULL_TRACEBACKS else None
reraise(type(newerr), newerr, tb)
__all__ += [
name
for (name, value) in globals().items()
if not name.startswith("_")
and isinstance(value, type)
and issubclass(value, (Exception, Warning))
]
| [
"[email protected]"
] | |
673f2d850b90b20a67a00755f8a9e59b28f7324f | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /sagemaker_write_2/human-task-ui_create.py | eebb4a0b6530ac76c1e2b8f1be0de382851e94ff | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,166 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_two_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/create-human-task-ui.html
if __name__ == '__main__':
"""
delete-human-task-ui : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/delete-human-task-ui.html
describe-human-task-ui : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/describe-human-task-ui.html
list-human-task-uis : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sagemaker/list-human-task-uis.html
"""
parameter_display_string = """
# human-task-ui-name : The name of the user interface you are creating.
# ui-template :
"""
add_option_dict = {}
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_two_parameter("sagemaker", "create-human-task-ui", "human-task-ui-name", "ui-template", add_option_dict)
| [
"[email protected]"
] | |
c78f086df8711a95739b5f59b62912beb82f0281 | 8574853abe4cfe95b5e03e0b41cf23d1ed865509 | /pyqtgraph/ui_mainWindow.py | b5cadc315fd621cf5e8b4cdbaa0e70b6d0df7e37 | [] | no_license | neutrons/GUI_Tools | 7ae8b90aad2cc1dc129d75618fc820c1c362dcda | 34932a86545b9d52b2fa63f01c7950aebde54d78 | refs/heads/master | 2021-01-17T04:48:06.202029 | 2016-11-09T18:31:57 | 2016-11-09T18:31:57 | 28,240,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,394 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_mainWindow.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1535, 845)
MainWindow.setMinimumSize(QtCore.QSize(300, 0))
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(0, 0))
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.widget_1 = QtGui.QWidget(self.centralwidget)
self.widget_1.setObjectName(_fromUtf8("widget_1"))
self.verticalLayout.addWidget(self.widget_1)
self.widget_2 = QtGui.QWidget(self.centralwidget)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.verticalLayout.addWidget(self.widget_2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1535, 22))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.menubar.sizePolicy().hasHeightForWidth())
self.menubar.setSizePolicy(sizePolicy)
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.action1_Data = QtGui.QAction(MainWindow)
self.action1_Data.setObjectName(_fromUtf8("action1_Data"))
self.action2_Normalization = QtGui.QAction(MainWindow)
self.action2_Normalization.setEnabled(True)
self.action2_Normalization.setObjectName(_fromUtf8("action2_Normalization"))
self.action3_Binning = QtGui.QAction(MainWindow)
self.action3_Binning.setObjectName(_fromUtf8("action3_Binning"))
self.action4_Fitting = QtGui.QAction(MainWindow)
self.action4_Fitting.setObjectName(_fromUtf8("action4_Fitting"))
self.action5_Results = QtGui.QAction(MainWindow)
self.action5_Results.setObjectName(_fromUtf8("action5_Results"))
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName(_fromUtf8("actionAbout"))
self.action1_Raw_Data = QtGui.QAction(MainWindow)
self.action1_Raw_Data.setObjectName(_fromUtf8("action1_Raw_Data"))
self.action2_Normalization_2 = QtGui.QAction(MainWindow)
self.action2_Normalization_2.setObjectName(_fromUtf8("action2_Normalization_2"))
self.action3_Normalized_Data = QtGui.QAction(MainWindow)
self.action3_Normalized_Data.setObjectName(_fromUtf8("action3_Normalized_Data"))
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow", None))
self.action1_Data.setText(_translate("MainWindow", "1. Data", None))
self.action2_Normalization.setText(_translate("MainWindow", "2. Normalization", None))
self.action3_Binning.setText(_translate("MainWindow", "4. Binning", None))
self.action4_Fitting.setText(_translate("MainWindow", "5. Fitting", None))
self.action5_Results.setText(_translate("MainWindow", "6. Strain Mapping", None))
self.actionAbout.setText(_translate("MainWindow", "About ...", None))
self.action1_Raw_Data.setText(_translate("MainWindow", "1. Raw Data", None))
self.action2_Normalization_2.setText(_translate("MainWindow", "2. Normalization", None))
self.action3_Normalized_Data.setText(_translate("MainWindow", "3. Normalized Data", None))
| [
"[email protected]"
] | |
c4c4ad976f5a937b2721e07f86ceec98b86e7c4c | d82d8bce58a64e579e8a5e5d9e3fbc2b5274ea0a | /code/compile_list.py | ad8e5e99242fcfbfab31f7d23f459ca752478fb6 | [] | no_license | annayqho/IcBL-master-list | 2f58ad8fa092296d0c999dcca8b7700cb8f41ef7 | c9d3884411fbdcb6f607ac76af72e5625a4266ba | refs/heads/master | 2021-07-19T19:22:06.205001 | 2020-05-14T23:50:26 | 2020-05-14T23:50:26 | 163,516,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,091 | py | """ Compile a list of Ic-BL SNe """
import numpy as np
import requests
from astropy.table import Table
from astropy.time import Time
from astropy.coordinates import SkyCoord,Distance
from astropy.cosmology import Planck15
from astropy.io import ascii
DATA_DIR = "/Users/annaho/Dropbox/Projects/Research/IcBL/data"
def todeg(ra, dec):
""" convert XX:XX:XX to decimal degrees """
radeg = []
decdeg = []
for ii,raval in enumerate(ra):
hh = raval.split(":")[0]
mm = raval.split(":")[1]
ss = raval.split(":")[2]
radegval = hh+"h"+mm+"m"+ss+"s"
dd = dec[ii].split(":")[0]
mm = dec[ii].split(":")[1]
ss = dec[ii].split(":")[2]
decdegval = dd+"d"+mm+"m"+ss+"s"
c = SkyCoord(radegval, decdegval, frame='icrs')
radeg.append(c.ra.deg)
decdeg.append(c.dec.deg)
return np.array(radeg), np.array(decdeg)
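# Example (added for illustration, values are approximate and hypothetical):
#   todeg(["12:30:49.4"], ["+12:23:28"]) -> (array([~187.7]), array([~12.39]))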
def opensn():
"""
Automatically grab all of the Ic-BL SNe from the open SN catalog """
print("Connecting to the open SN catalog...")
server = "https://api.sne.space/catalog"
r = requests.get(server, params={'claimedtype': 'Ic BL', 'format': 'json'})
dat = r.json()
# Retrieve the data you want
nsn = len(dat.keys())
print("Found %s claimed Ic-BL SNe on the open SN catalog" %nsn)
return dat
def tns():
""" Run this to automatically grab all of the Ic-BL SNe from TNS """
print("Connecting to TNS server...")
server = "https://wis-tns.weizmann.ac.il/search"
r = requests.get(server, params={'objtype': 7, 'format': 'csv'})
alldat = r.text.split('\n')
# Header
header = np.array(alldat[0].split('","'))
# Data
dat = alldat[1:]
# According to the formatting, you want to group things that live together
# in double quotation marks. So, the real split between items is ",", not ,
for ii,row in enumerate(dat):
dat[ii] = np.array(dat[ii].split('","'))
dat = np.array(dat)
# Retrieve the data you want
nsn = dat.shape[0]
print("Found %s Ic-BL SNe on TNS" %nsn)
name = dat[:,np.where(header=='Name')[0][0]]
ra = dat[:,np.where(header=='RA')[0][0]]
dec = dat[:,np.where(header=='DEC')[0][0]]
radeg, decdeg = todeg(ra,dec)
z = dat[:,np.where(header=='Redshift')[0][0]]
date = dat[:,np.where(header=='Discovery Date (UT)')[0][0]]
ref = ['TNS'] * nsn
return name, date, radeg, decdeg, z, ref
def ptf():
""" the PTF/iPTF sample of 34 Ic-BL SNe
I copied the table directly from the .tex file downloaded from the arXiv,
then ran the following two commands
%s/\\//g
%s/ //g
%s/\*//g
%s/xx//g
I also removed the commented-out lines
In this paper, they give estimated explosion epochs (with a typical
uncertainty of 2 days) for all of the SNe observed before
and after r maximum brightness.
A lot of them don't have an estimated explosion epoch, though.
So what I should do is use the estimate for the ones that have it,
and for the ones that don't have it, just report discovery date
as I found it on the marshal.
"""
# Discovery dates on the Marshal, for the ones that aren't in Table 2
# 27 out of 34 leaves 7
disc = {}
disc['PTF09sk'] = 2455002.74571
disc['PTF10cs'] = 2455203.74537
disc['PTF12grr'] = 2456117.84878
disc['iPTF14bfu'] = Time('2014-06-06T03:11:51.86').jd
disc['iPTF15dld'] = 2457318.82184
disc['iPTF16coi'] = 2457625.72566
disc['iPTF17axg'] = 2457784.97286
dat = Table.read(
"%s/taddia2018.dat" %DATA_DIR,
delimiter='&', format='ascii.fast_no_header')
# file with explosion epochs
dat_expl = Table.read(
"%s/taddia2018_t2.dat" %DATA_DIR,
delimiter='&', format='ascii.fast_no_header')
name_expl = dat_expl['col1']
    texpl_tab = dat_expl['col8']
name = dat['col1']
texpl = []
for n in name:
try:
ind = np.where(name_expl==n)[0][0]
            texpl.append(texpl_tab[ind])
except:
texpl.append(disc[n])
ra = dat['col2']
dec = dat['col3']
radeg, decdeg = todeg(ra, dec)
z = dat['col5']
ref = ['T18']*len(name)
return list(name), texpl, list(radeg), list(decdeg), list(z), ref
def ztf():
""" The list of Ic-BL discovered in ZTF """
dat = Table.read(
"%s/ztf.dat" %DATA_DIR,
delimiter='&', format='ascii.fast_no_header')
name = dat['col1']
date = dat['col3']
ra = dat['col5']
dec = dat['col6']
radeg, decdeg = todeg(ra, dec)
z = dat['col7']
ref = ['ZTF']*len(name)
return list(name), list(date), list(radeg), list(decdeg), list(z), ref
def add(name, disc, ra, dec, redshift, ref, n, di, r, d, z, re):
c = SkyCoord(ra, dec, unit='deg')
cadd = SkyCoord(r, d, unit='deg')
nadd = 0
for ii,val in enumerate(cadd):
dist = c.separation(val).arcsec
nopos = False
noname = False
# Is the position in there already?
if sum(dist <= 2) == 0:
nopos = True
# Is the name in there already?
if n[ii] not in name:
noname = True
if np.logical_and(nopos, noname):
name.append(n[ii])
disc.append(di[ii])
ra.append(r[ii])
dec.append(d[ii])
redshift.append(z[ii])
ref.append(re[ii])
nadd += 1
else:
print("%s is a duplicate, not adding" %n[ii])
print("added %s events" %str(nadd))
return name, disc, ra, dec, redshift, ref
if __name__=="__main__":
dat = opensn()
names = np.array(list(dat.keys()))
nsn = len(names)
ra = []
dec = []
for key,val in dat.items():
if len(val['ra']) > 0:
ra.append(val['ra'][0]['value'])
dec.append(val['dec'][0]['value'])
ra,dec = todeg(ra,dec)
opensnpos = SkyCoord(ra, dec, unit='deg')
# Question 1: are there any Ic-BL on TNS that are not on openSN?
name, date, radeg, decdeg, z, ref = tns()
name = np.array([val.replace(" ", "") for val in name])
missing = np.setdiff1d(name,names)
if len(missing) > 0:
print("There are TNS Ic-BL SNe missing from OpenSN")
print(missing)
else:
print("All TNS Ic-BL SNe are on OpenSN")
# Question 2: are there any Ic-BL from other papers that are not on openSN?
# Yes, a whole bunch from PTF and ZTF.
name, date, radeg, decdeg, z, ref = ztf()
name = np.array(name)
print(np.setdiff1d(name,names))
# compare positions, since some of these only have ZTF names...
ptfpos = SkyCoord(radeg, decdeg, unit='deg')
for ii,val in enumerate(ptfpos):
if min(val.separation(opensnpos).arcsec) < 1:
print("%s already in openSN" %name[ii])
else:
print("%s not in openSN" %name[ii])
# # Name, Expl./Disc. Date, RA, Dec, Redshift, Reference
# ascii.write(
# [names], 'all_icbl.html', names=['Name'], delimiter=',',
# overwrite=True, format='html')
| [
"[email protected]"
] | |
c36c23782d5de4c8d32ff18fa5c495be5c8bbb9e | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/eventhub/azure-mgmt-eventhub/generated_samples/private_link_resources_get.py | 09d9cb10ccc2b1df74331b0170522555e172219b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,568 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.eventhub import EventHubManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-eventhub
# USAGE
python private_link_resources_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = EventHubManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subID",
)
response = client.private_link_resources.get(
resource_group_name="ArunMonocle",
namespace_name="sdk-Namespace-2924",
)
print(response)
# x-ms-original-file: specification/eventhub/resource-manager/Microsoft.EventHub/stable/2021-11-01/examples/NameSpaces/PrivateLinkResourcesGet.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c3f9cd65dcb33f02e8541e83d77676f71f7713de | 764e24bf9e8a4d68b3f06eb0e0b9e6c9140e72ba | /Deportes/Evento/form.py | c244a680e74e5c3e074d3387faf9f55f1eb7b198 | [] | no_license | efnaranjo6/Deportes | 27200d30f1f86364ed2d37f76342f63b657a5788 | 71f22790a8d059516aa35ac325cc9b284979712e | refs/heads/master | 2021-03-28T00:09:42.513669 | 2020-04-27T16:45:41 | 2020-04-27T16:45:41 | 247,819,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from django import forms
from Evento.models import Evento
class Eventoform(forms.ModelForm):
class Meta:
model = Evento
fields = ['nombre']
labels = {'nombre ': 'ingrese el nombre'
}
widget={'nombre' : forms.TextInput(),
}
def __init__(self,*args, **kwargs):
super().__init__(*args, **kwargs)
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class':'form-control'})
| [
"[email protected]"
] | |
0fe25c0230c5cbc00c32eedb12b54bfabfb330ae | 1b87d5f7cba7e068f7b2ea902bba494599d20a78 | /experimental/mt_media/drivers/directsound/__init__.py | 17aadeb2836801d257ee41453f288279e24e81a4 | [
"BSD-3-Clause"
] | permissive | jpaalasm/pyglet | 906d03fe53160885665beaed20314b5909903cc9 | bf1d1f209ca3e702fd4b6611377257f0e2767282 | refs/heads/master | 2021-01-25T03:27:08.941964 | 2014-01-25T17:50:57 | 2014-01-25T17:50:57 | 16,236,090 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,147 | py | #!/usr/bin/python
# $Id:$
import ctypes
import math
import sys
import threading
import time
import pyglet
_debug = pyglet.options['debug_media']
import mt_media
import lib_dsound as lib
from pyglet.window.win32 import _user32, _kernel32
class DirectSoundException(mt_media.MediaException):
pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return max(-10000, min(int(1000 * math.log(min(gain, 1))), 0))
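# Added note: _db(1.0) -> 0 and _db(0) -> -10000 (DirectSound's silence floor);
# intermediate gains map to negative values clamped to that range.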
class DirectSoundWorker(mt_media.MediaThread):
_min_write_size = 9600
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super(DirectSoundWorker, self).__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
if _debug:
print 'DirectSoundWorker run attempt acquire'
self.condition.acquire()
if _debug:
print 'DirectSoundWorker run acquire'
if self.stopped:
self.condition.release()
break
sleep_time = -1
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if _debug:
print 'DirectSoundWorker run release'
if sleep_time != -1:
self.sleep(sleep_time)
if _debug:
print 'DirectSoundWorker exiting'
def add(self, player):
if _debug:
print 'DirectSoundWorker add', player
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker add', player
def remove(self, player):
if _debug:
print 'DirectSoundWorker remove', player
self.condition.acquire()
try:
self.players.remove(player)
except KeyError:
pass
self.condition.notify()
self.condition.release()
if _debug:
print 'return DirectSoundWorker remove', player
class DirectSoundAudioPlayer(mt_media.AbstractAudioPlayer):
# How many bytes the ring buffer should be
_buffer_size = 44800 * 1
# Need to cache these because pyglet API allows update separately, but
# DSound requires both to be set at once.
_cone_inner_angle = 360
_cone_outer_angle = 360
def __init__(self, source_group, player):
super(DirectSoundAudioPlayer, self).__init__(source_group, player)
# Locking strategy:
# All DirectSound calls should be locked. All instance vars relating
# to buffering/filling/time/events should be locked (used by both
# application and worker thread). Other instance vars (consts and
# 3d vars) do not need to be locked.
self._lock = threading.RLock()
# Desired play state (may be actually paused due to underrun -- not
# implemented yet).
self._playing = False
# Up to one audio data may be buffered if too much data was received
# from the source that could not be written immediately into the
# buffer. See refill().
self._next_audio_data = None
# Theoretical write and play cursors for an infinite buffer. play
# cursor is always <= write cursor (when equal, underrun is
# happening).
self._write_cursor = 0
self._play_cursor = 0
# Cursor position of end of data. Silence is written after
# eos for one buffer size.
self._eos_cursor = None
# Indexes into DSound circular buffer. Complications ensue wrt each
# other to avoid writing over the play cursor. See get_write_size and
# write().
self._play_cursor_ring = 0
self._write_cursor_ring = 0
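        # (Note, added for clarity: the *_ring values are offsets into the
        # circular DSound buffer, i.e. the logical cursors above reduced modulo
        # _buffer_size, while the logical cursors themselves never decrease.)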
# List of (play_cursor, MediaEvent), in sort order
self._events = []
# List of (cursor, timestamp), in sort order (cursor gives expiry
# place of the timestamp)
self._timestamps = []
audio_format = source_group.audio_format
wfx = lib.WAVEFORMATEX()
wfx.wFormatTag = lib.WAVE_FORMAT_PCM
wfx.nChannels = audio_format.channels
wfx.nSamplesPerSec = audio_format.sample_rate
wfx.wBitsPerSample = audio_format.sample_size
wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign
dsbdesc = lib.DSBUFFERDESC()
dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
lib.DSBCAPS_GETCURRENTPOSITION2 |
lib.DSBCAPS_CTRLFREQUENCY |
lib.DSBCAPS_CTRLVOLUME)
if audio_format.channels == 1:
dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
dsbdesc.dwBufferBytes = self._buffer_size
dsbdesc.lpwfxFormat = ctypes.pointer(wfx)
# DSound buffer
self._buffer = lib.IDirectSoundBuffer()
driver._dsound.CreateSoundBuffer(dsbdesc,
ctypes.byref(self._buffer),
None)
if audio_format.channels == 1:
self._buffer3d = lib.IDirectSound3DBuffer()
self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
ctypes.byref(self._buffer3d))
else:
self._buffer3d = None
self._buffer.SetCurrentPosition(0)
self.refill(self._buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if driver and driver.worker:
driver.worker.remove(self)
self.lock()
self._buffer.Stop()
self._buffer.Release()
self._buffer = None
if self._buffer3d:
self._buffer3d.Release()
self._buffer3d = None
self.unlock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def play(self):
if _debug:
print 'DirectSound play'
driver.worker.add(self)
self.lock()
if not self._playing:
self._playing = True
self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
self.unlock()
if _debug:
print 'return DirectSound play'
def stop(self):
if _debug:
print 'DirectSound stop'
driver.worker.remove(self)
self.lock()
if self._playing:
self._playing = False
self._buffer.Stop()
self.unlock()
if _debug:
print 'return DirectSound stop'
def clear(self):
if _debug:
print 'DirectSound clear'
self.lock()
self._buffer.SetCurrentPosition(0)
self._play_cursor_ring = self._write_cursor_ring = 0
self._play_cursor = self._write_cursor
self._eos_cursor = None
self._next_audio_data = None
del self._events[:]
del self._timestamps[:]
self.unlock()
def refill(self, write_size):
self.lock()
while write_size > 0:
if _debug:
print 'refill, write_size =', write_size
# Get next audio packet (or remains of last one)
if self._next_audio_data:
audio_data = self._next_audio_data
self._next_audio_data = None
else:
audio_data = self.source_group.get_audio_data(write_size)
# Write it, or silence if there are no more packets
if audio_data:
# Add events
for event in audio_data.events:
event_cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((event_cursor, event))
# Add timestamp (at end of this data packet)
ts_cursor = self._write_cursor + audio_data.length
self._timestamps.append(
(ts_cursor, audio_data.timestamp + audio_data.duration))
# Write data
if _debug:
print 'write', audio_data.length
length = min(write_size, audio_data.length)
self.write(audio_data, length)
if audio_data.length:
self._next_audio_data = audio_data
write_size -= length
else:
# Write silence
if self._eos_cursor is None:
self._eos_cursor = self._write_cursor
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_eos')))
self._events.append(
(self._eos_cursor,
mt_media.MediaEvent(0, 'on_source_group_eos')))
self._events.sort()
if self._write_cursor > self._eos_cursor + self._buffer_size:
self.stop()
else:
self.write(None, write_size)
write_size = 0
self.unlock()
def update_play_cursor(self):
self.lock()
play_cursor_ring = lib.DWORD()
self._buffer.GetCurrentPosition(play_cursor_ring, None)
if play_cursor_ring.value < self._play_cursor_ring:
# Wrapped around
self._play_cursor += self._buffer_size - self._play_cursor_ring
self._play_cursor_ring = 0
self._play_cursor += play_cursor_ring.value - self._play_cursor_ring
self._play_cursor_ring = play_cursor_ring.value
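        # (Worked example, for illustration only: with _buffer_size = 100, a
        # hardware cursor that moves from ring offset 90 to 10 advances the
        # logical _play_cursor by (100 - 90) + 10 = 20 bytes.)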
# Dispatch pending events
pending_events = []
while self._events and self._events[0][0] <= self._play_cursor:
_, event = self._events.pop(0)
pending_events.append(event)
if _debug:
print 'Dispatching pending events:', pending_events
print 'Remaining events:', self._events
# Remove expired timestamps
while self._timestamps and self._timestamps[0][0] < self._play_cursor:
del self._timestamps[0]
self.unlock()
for event in pending_events:
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
self.update_play_cursor()
self.lock()
play_cursor = self._play_cursor
write_cursor = self._write_cursor
self.unlock()
return self._buffer_size - (write_cursor - play_cursor)
def write(self, audio_data, length):
# Pass audio_data=None to write silence
if length == 0:
return 0
self.lock()
p1 = ctypes.c_void_p()
l1 = lib.DWORD()
p2 = ctypes.c_void_p()
l2 = lib.DWORD()
self._buffer.Lock(self._write_cursor_ring, length,
ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
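        # (IDirectSoundBuffer::Lock may return two regions: p1/l1 runs to the
        # end of the circular buffer and p2/l2 is the part that wraps back to
        # the start, which is why both regions are filled or zeroed below.)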
assert length == l1.value + l2.value
if audio_data:
ctypes.memmove(p1, audio_data.data, l1.value)
audio_data.consume(l1.value, self.source_group.audio_format)
if l2.value:
ctypes.memmove(p2, audio_data.data, l2.value)
audio_data.consume(l2.value, self.source_group.audio_format)
else:
ctypes.memset(p1, 0, l1.value)
if l2.value:
ctypes.memset(p2, 0, l2.value)
self._buffer.Unlock(p1, l1, p2, l2)
self._write_cursor += length
self._write_cursor_ring += length
self._write_cursor_ring %= self._buffer_size
self.unlock()
def get_time(self):
self.lock()
if self._timestamps:
cursor, ts = self._timestamps[0]
result = ts + (self._play_cursor - cursor) / \
float(self.source_group.audio_format.bytes_per_second)
else:
result = None
self.unlock()
return result
def set_volume(self, volume):
volume = _db(volume)
self.lock()
self._buffer.SetVolume(volume)
self.unlock()
def set_position(self, position):
if self._buffer3d:
x, y, z = position
self.lock()
self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_min_distance(self, min_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_max_distance(self, max_distance):
if self._buffer3d:
self.lock()
self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)
self.unlock()
def set_pitch(self, pitch):
frequency = int(pitch * self.audio_format.sample_rate)
self.lock()
self._buffer.SetFrequency(frequency)
self.unlock()
def set_cone_orientation(self, cone_orientation):
if self._buffer3d:
x, y, z = cone_orientation
self.lock()
self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_inner_angle(self, cone_inner_angle):
if self._buffer3d:
self._cone_inner_angle = int(cone_inner_angle)
self._set_cone_angles()
def set_cone_outer_angle(self, cone_outer_angle):
if self._buffer3d:
self._cone_outer_angle = int(cone_outer_angle)
self._set_cone_angles()
def _set_cone_angles(self):
inner = min(self._cone_inner_angle, self._cone_outer_angle)
outer = max(self._cone_inner_angle, self._cone_outer_angle)
self.lock()
self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)
self.unlock()
def set_cone_outer_gain(self, cone_outer_gain):
if self._buffer3d:
volume = _db(cone_outer_gain)
self.lock()
self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
self.unlock()
class DirectSoundDriver(mt_media.AbstractAudioDriver):
def __init__(self):
self._dsound = lib.IDirectSound()
lib.DirectSoundCreate(None, ctypes.byref(self._dsound), None)
        # A trick used by mplayer: use the desktop as the window handle, since it
# would be complex to use pyglet window handles (and what to do when
# application is audio only?).
hwnd = _user32.GetDesktopWindow()
self._dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)
# Create primary buffer with 3D and volume capabilities
self._buffer = lib.IDirectSoundBuffer()
dsbd = lib.DSBUFFERDESC()
dsbd.dwSize = ctypes.sizeof(dsbd)
dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
lib.DSBCAPS_CTRLVOLUME |
lib.DSBCAPS_PRIMARYBUFFER)
self._dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)
# Create listener
self._listener = lib.IDirectSound3DListener()
self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
ctypes.byref(self._listener))
# Create worker thread
self.worker = DirectSoundWorker()
self.worker.start()
def __del__(self):
try:
if self._buffer:
self.delete()
except:
pass
def create_audio_player(self, source_group, player):
return DirectSoundAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
self._buffer.Release()
self._buffer = None
self._listener.Release()
self._listener = None
# Listener API
def _set_volume(self, volume):
self._volume = volume
self._buffer.SetVolume(_db(volume))
def _set_position(self, position):
self._position = position
x, y, z = position
self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)
def _set_forward_orientation(self, orientation):
self._forward_orientation = orientation
self._set_orientation()
def _set_up_orientation(self, orientation):
self._up_orientation = orientation
self._set_orientation()
def _set_orientation(self):
x, y, z = self._forward_orientation
ux, uy, uz = self._up_orientation
self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
def create_audio_driver():
global driver
driver = DirectSoundDriver()
return driver
# Global driver needed for access to worker thread and _dsound
driver = None
| [
"[email protected]"
] | |
46b2e0d46c51fbbd820fc3acb913dca4423c3fe0 | 3d2fa23d6df4f7e830ec0845f0f07c237eeed303 | /tests/test_noise.py | 1a82ff8603fe537f7a1fe92e5775c3f632700f6d | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | hbrunie/GalSim | 4e2595c5a8d65df3022acb9667ef20e50b737c33 | 332334a3e7593095779b22c789d7ad0c184d5261 | refs/heads/master | 2021-02-14T03:05:02.131834 | 2020-02-11T21:27:04 | 2020-02-11T21:27:04 | 244,760,764 | 0 | 0 | NOASSERTION | 2020-03-03T23:09:33 | 2020-03-03T23:09:32 | null | UTF-8 | Python | false | false | 35,521 | py | # Copyright (c) 2012-2019 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
from __future__ import print_function
import numpy as np
import os
import sys
import galsim
from galsim_test_helpers import *
testseed = 1000
precision = 10
# decimal point at which agreement is required for all double precision tests
precisionD = precision
precisionF = 5 # precision=10 does not make sense at single precision
precisionS = 1 # "precision" also a silly concept for ints, but allows all 4 tests to run in one go
precisionI = 1
@timer
def test_deviate_noise():
"""Test basic functionality of the DeviateNoise class
"""
u = galsim.UniformDeviate(testseed)
uResult = np.empty((10,10))
u.generate(uResult)
noise = galsim.DeviateNoise(galsim.UniformDeviate(testseed))
# Test filling an image with random values
testimage = galsim.ImageD(10,10)
testimage.addNoise(noise)
np.testing.assert_array_almost_equal(
testimage.array, uResult, precision,
err_msg='Wrong uniform random number sequence generated when applied to image.')
# Test filling a single-precision image
noise.rng.seed(testseed)
testimage = galsim.ImageF(10,10)
testimage.addNoise(noise)
np.testing.assert_array_almost_equal(
testimage.array, uResult, precisionF,
err_msg='Wrong uniform random number sequence generated when applied to ImageF.')
# Test filling an image with Fortran ordering
noise.rng.seed(testseed)
testimage = galsim.ImageD(np.zeros((10,10)).T)
testimage.addNoise(noise)
np.testing.assert_array_almost_equal(
testimage.array, uResult, precision,
err_msg="Wrong uniform randoms generated for Fortran-ordered Image")
# Check picklability
do_pickle(noise, drawNoise)
do_pickle(noise)
# Check copy, eq and ne
noise2 = galsim.DeviateNoise(noise.rng.duplicate()) # Separate but equivalent rng chain.
noise3 = noise.copy() # Always has exactly the same rng as noise.
noise4 = noise.copy(rng=galsim.BaseDeviate(11)) # Always has a different rng than noise
assert noise == noise2
assert noise == noise3
assert noise != noise4
assert noise.rng() == noise2.rng()
assert noise == noise2 # Still equal because both chains incremented one place.
assert noise == noise3 # Still equal because noise 3's rng is always equal to noise's rng.
noise.rng()
assert noise2 != noise3 # This is no longer equal, since only noise.rng is incremented.
assert noise == noise3
assert_raises(TypeError, galsim.DeviateNoise, 53)
assert_raises(NotImplementedError, galsim.BaseNoise().getVariance)
assert_raises(NotImplementedError, galsim.BaseNoise().withVariance, 23)
assert_raises(NotImplementedError, galsim.BaseNoise().withScaledVariance, 23)
assert_raises(TypeError, noise.applyTo, 23)
assert_raises(NotImplementedError, galsim.BaseNoise().applyTo, testimage)
assert_raises(galsim.GalSimError, noise.getVariance)
assert_raises(galsim.GalSimError, noise.withVariance, 23)
assert_raises(galsim.GalSimError, noise.withScaledVariance, 23)
@timer
def test_gaussian_noise():
"""Test Gaussian random number generator
"""
gSigma = 17.23
g = galsim.GaussianDeviate(testseed, sigma=gSigma)
gResult = np.empty((10,10))
g.generate(gResult)
noise = galsim.DeviateNoise(g)
# Test filling an image
testimage = galsim.ImageD(10,10)
noise.rng.seed(testseed)
testimage.addNoise(noise)
np.testing.assert_array_almost_equal(
testimage.array, gResult, precision,
err_msg='Wrong Gaussian random number sequence generated when applied to image.')
# Test filling a single-precision image
noise.rng.seed(testseed)
testimage = galsim.ImageF(10,10)
testimage.addNoise(noise)
np.testing.assert_array_almost_equal(
testimage.array, gResult, precisionF,
err_msg='Wrong Gaussian random number sequence generated when applied to ImageF.')
# GaussianNoise is equivalent, but no mean allowed.
gn = galsim.GaussianNoise(galsim.BaseDeviate(testseed), sigma=gSigma)
testimage = galsim.ImageD(10,10)
testimage.addNoise(gn)
np.testing.assert_array_almost_equal(
testimage.array, gResult, precision,
err_msg="GaussianNoise applied to Images does not reproduce expected sequence")
# Test filling an image with Fortran ordering
gn.rng.seed(testseed)
testimage = galsim.ImageD(np.zeros((10,10)).T)
testimage.addNoise(gn)
np.testing.assert_array_almost_equal(
testimage.array, gResult, precision,
err_msg="Wrong Gaussian noise generated for Fortran-ordered Image")
# Check GaussianNoise variance:
np.testing.assert_almost_equal(
gn.getVariance(), gSigma**2, precision,
err_msg="GaussianNoise getVariance returns wrong variance")
np.testing.assert_almost_equal(
gn.sigma, gSigma, precision,
err_msg="GaussianNoise sigma returns wrong value")
# Check that the noise model really does produce this variance.
big_im = galsim.Image(2048,2048,dtype=float)
gn.rng.seed(testseed)
big_im.addNoise(gn)
var = np.var(big_im.array)
print('variance = ',var)
print('getVar = ',gn.getVariance())
np.testing.assert_almost_equal(
var, gn.getVariance(), 1,
err_msg='Realized variance for GaussianNoise did not match getVariance()')
# Check that GaussianNoise adds to the image, not overwrites the image.
gal = galsim.Exponential(half_light_radius=2.3, flux=1.e4)
gal.drawImage(image=big_im)
gn.rng.seed(testseed)
big_im.addNoise(gn)
gal.withFlux(-1.e4).drawImage(image=big_im, add_to_image=True)
var = np.var(big_im.array)
np.testing.assert_almost_equal(
var, gn.getVariance(), 1,
err_msg='GaussianNoise wrong when already an object drawn on the image')
# Check that DeviateNoise adds to the image, not overwrites the image.
gal.drawImage(image=big_im)
gn.rng.seed(testseed)
big_im.addNoise(gn)
gal.withFlux(-1.e4).drawImage(image=big_im, add_to_image=True)
var = np.var(big_im.array)
np.testing.assert_almost_equal(
var, gn.getVariance(), 1,
err_msg='DeviateNoise wrong when already an object drawn on the image')
# Check withVariance
gn = gn.withVariance(9.)
np.testing.assert_almost_equal(
gn.getVariance(), 9, precision,
err_msg="GaussianNoise withVariance results in wrong variance")
np.testing.assert_almost_equal(
gn.sigma, 3., precision,
err_msg="GaussianNoise withVariance results in wrong sigma")
# Check withScaledVariance
gn = gn.withScaledVariance(4.)
np.testing.assert_almost_equal(
gn.getVariance(), 36., precision,
err_msg="GaussianNoise withScaledVariance results in wrong variance")
np.testing.assert_almost_equal(
gn.sigma, 6., precision,
err_msg="GaussianNoise withScaledVariance results in wrong sigma")
# Check arithmetic
gn = gn.withVariance(0.5)
gn2 = gn * 3
np.testing.assert_almost_equal(
gn2.getVariance(), 1.5, precision,
err_msg="GaussianNoise gn*3 results in wrong variance")
np.testing.assert_almost_equal(
gn.getVariance(), 0.5, precision,
err_msg="GaussianNoise gn*3 results in wrong variance for original gn")
gn2 = 5 * gn
np.testing.assert_almost_equal(
gn2.getVariance(), 2.5, precision,
err_msg="GaussianNoise 5*gn results in wrong variance")
np.testing.assert_almost_equal(
gn.getVariance(), 0.5, precision,
err_msg="GaussianNoise 5*gn results in wrong variance for original gn")
gn2 = gn/2
np.testing.assert_almost_equal(
gn2.getVariance(), 0.25, precision,
err_msg="GaussianNoise gn/2 results in wrong variance")
np.testing.assert_almost_equal(
gn.getVariance(), 0.5, precision,
err_msg="GaussianNoise 5*gn results in wrong variance for original gn")
gn *= 3
np.testing.assert_almost_equal(
gn.getVariance(), 1.5, precision,
err_msg="GaussianNoise gn*=3 results in wrong variance")
gn /= 2
np.testing.assert_almost_equal(
gn.getVariance(), 0.75, precision,
err_msg="GaussianNoise gn/=2 results in wrong variance")
# Check starting with GaussianNoise()
gn2 = galsim.GaussianNoise()
gn2 = gn2.withVariance(9.)
np.testing.assert_almost_equal(
gn2.getVariance(), 9, precision,
err_msg="GaussianNoise().withVariance results in wrong variance")
np.testing.assert_almost_equal(
gn2.sigma, 3., precision,
err_msg="GaussianNoise().withVariance results in wrong sigma")
gn2 = galsim.GaussianNoise()
gn2 = gn2.withScaledVariance(4.)
np.testing.assert_almost_equal(
gn2.getVariance(), 4., precision,
err_msg="GaussianNoise().withScaledVariance results in wrong variance")
np.testing.assert_almost_equal(
gn2.sigma, 2., precision,
err_msg="GaussianNoise().withScaledVariance results in wrong sigma")
# Check picklability
do_pickle(gn, lambda x: (x.rng.serialize(), x.sigma))
do_pickle(gn, drawNoise)
do_pickle(gn)
# Check copy, eq and ne
gn = gn.withVariance(gSigma**2)
gn2 = galsim.GaussianNoise(gn.rng.duplicate(), gSigma)
gn3 = gn.copy()
gn4 = gn.copy(rng=galsim.BaseDeviate(11))
gn5 = galsim.GaussianNoise(gn.rng, 2.*gSigma)
assert gn == gn2
assert gn == gn3
assert gn != gn4
assert gn != gn5
assert gn.rng.raw() == gn2.rng.raw()
assert gn == gn2
assert gn == gn3
gn.rng.raw()
assert gn != gn2
assert gn == gn3
@timer
def test_variable_gaussian_noise():
"""Test VariableGaussian random number generator
"""
# Make a checkerboard image with two values for the variance
gSigma1 = 17.23
gSigma2 = 28.55
var_image = galsim.ImageD(galsim.BoundsI(0,9,0,9))
coords = np.ogrid[0:10, 0:10]
var_image.array[ (coords[0] + coords[1]) % 2 == 1 ] = gSigma1**2
var_image.array[ (coords[0] + coords[1]) % 2 == 0 ] = gSigma2**2
print('var_image.array = ',var_image.array)
g = galsim.GaussianDeviate(testseed, sigma=1.)
vgResult = np.empty((10,10))
g.generate(vgResult)
vgResult *= np.sqrt(var_image.array)
# Test filling an image
vgn = galsim.VariableGaussianNoise(galsim.BaseDeviate(testseed), var_image)
testimage = galsim.ImageD(10,10)
testimage.addNoise(vgn)
np.testing.assert_array_almost_equal(
testimage.array, vgResult, precision,
err_msg="VariableGaussianNoise applied to Images does not reproduce expected sequence")
# Test filling an image with Fortran ordering
vgn.rng.seed(testseed)
testimage = galsim.ImageD(np.zeros((10,10)).T)
testimage.addNoise(vgn)
np.testing.assert_array_almost_equal(
testimage.array, vgResult, precision,
err_msg="Wrong VariableGaussian noise generated for Fortran-ordered Image")
# Check var_image property
np.testing.assert_almost_equal(
vgn.var_image.array, var_image.array, precision,
err_msg="VariableGaussianNoise var_image returns wrong var_image")
# Check that the noise model really does produce this variance.
big_var_image = galsim.ImageD(galsim.BoundsI(0,2047,0,2047))
big_coords = np.ogrid[0:2048, 0:2048]
mask1 = (big_coords[0] + big_coords[1]) % 2 == 0
mask2 = (big_coords[0] + big_coords[1]) % 2 == 1
big_var_image.array[mask1] = gSigma1**2
big_var_image.array[mask2] = gSigma2**2
big_vgn = galsim.VariableGaussianNoise(galsim.BaseDeviate(testseed), big_var_image)
big_im = galsim.Image(2048,2048,dtype=float)
big_im.addNoise(big_vgn)
var = np.var(big_im.array)
print('variance = ',var)
print('getVar = ',big_vgn.var_image.array.mean())
np.testing.assert_almost_equal(
var, big_vgn.var_image.array.mean(), 1,
err_msg='Realized variance for VariableGaussianNoise did not match var_image')
# Check realized variance in each mask
print('rms1 = ',np.std(big_im.array[mask1]))
print('rms2 = ',np.std(big_im.array[mask2]))
np.testing.assert_almost_equal(np.std(big_im.array[mask1]), gSigma1, decimal=1)
np.testing.assert_almost_equal(np.std(big_im.array[mask2]), gSigma2, decimal=1)
# Check that VariableGaussianNoise adds to the image, not overwrites the image.
gal = galsim.Exponential(half_light_radius=2.3, flux=1.e4)
gal.drawImage(image=big_im)
big_vgn.rng.seed(testseed)
big_im.addNoise(big_vgn)
gal.withFlux(-1.e4).drawImage(image=big_im, add_to_image=True)
var = np.var(big_im.array)
np.testing.assert_almost_equal(
var, big_vgn.var_image.array.mean(), 1,
err_msg='VariableGaussianNoise wrong when already an object drawn on the image')
# Check picklability
do_pickle(vgn, lambda x: (x.rng.serialize(), x.var_image))
do_pickle(vgn, drawNoise)
do_pickle(vgn)
# Check copy, eq and ne
vgn2 = galsim.VariableGaussianNoise(vgn.rng.duplicate(), var_image)
vgn3 = vgn.copy()
vgn4 = vgn.copy(rng=galsim.BaseDeviate(11))
vgn5 = galsim.VariableGaussianNoise(vgn.rng, 2.*var_image)
assert vgn == vgn2
assert vgn == vgn3
assert vgn != vgn4
assert vgn != vgn5
assert vgn.rng.raw() == vgn2.rng.raw()
assert vgn == vgn2
assert vgn == vgn3
vgn.rng.raw()
assert vgn != vgn2
assert vgn == vgn3
assert_raises(TypeError, vgn.applyTo, 23)
assert_raises(ValueError, vgn.applyTo, galsim.ImageF(3,3))
assert_raises(galsim.GalSimError, vgn.getVariance)
assert_raises(galsim.GalSimError, vgn.withVariance, 23)
assert_raises(galsim.GalSimError, vgn.withScaledVariance, 23)
@timer
def test_poisson_noise():
"""Test Poisson random number generator
"""
pMean = 17
p = galsim.PoissonDeviate(testseed, mean=pMean)
pResult = np.empty((10,10))
p.generate(pResult)
noise = galsim.DeviateNoise(p)
# Test filling an image
noise.rng.seed(testseed)
testimage = galsim.ImageI(10, 10)
testimage.addNoise(galsim.DeviateNoise(p))
np.testing.assert_array_equal(
testimage.array, pResult,
err_msg='Wrong poisson random number sequence generated when applied to image.')
# The PoissonNoise version also subtracts off the mean value
pn = galsim.PoissonNoise(galsim.BaseDeviate(testseed), sky_level=pMean)
testimage.fill(0)
testimage.addNoise(pn)
np.testing.assert_array_equal(
testimage.array, pResult-pMean,
err_msg='Wrong poisson random number sequence generated using PoissonNoise')
# Test filling a single-precision image
pn.rng.seed(testseed)
testimage = galsim.ImageF(10,10)
testimage.addNoise(pn)
np.testing.assert_array_almost_equal(
testimage.array, pResult-pMean, precisionF,
err_msg='Wrong Poisson random number sequence generated when applied to ImageF.')
# Test filling an image with Fortran ordering
pn.rng.seed(testseed)
testimage = galsim.ImageD(10,10)
testimage.addNoise(pn)
np.testing.assert_array_almost_equal(
testimage.array, pResult-pMean,
err_msg="Wrong Poisson noise generated for Fortran-ordered Image")
# Check PoissonNoise variance:
np.testing.assert_almost_equal(
pn.getVariance(), pMean, precision,
err_msg="PoissonNoise getVariance returns wrong variance")
np.testing.assert_almost_equal(
pn.sky_level, pMean, precision,
err_msg="PoissonNoise sky_level returns wrong value")
# Check that the noise model really does produce this variance.
big_im = galsim.Image(2048,2048,dtype=float)
big_im.addNoise(pn)
var = np.var(big_im.array)
print('variance = ',var)
print('getVar = ',pn.getVariance())
np.testing.assert_almost_equal(
var, pn.getVariance(), 1,
err_msg='Realized variance for PoissonNoise did not match getVariance()')
# Check that PoissonNoise adds to the image, not overwrites the image.
gal = galsim.Exponential(half_light_radius=2.3, flux=0.3)
# Note: in this case, flux/size^2 needs to be << sky_level or it will mess up the statistics.
gal.drawImage(image=big_im)
big_im.addNoise(pn)
gal.withFlux(-0.3).drawImage(image=big_im, add_to_image=True)
var = np.var(big_im.array)
np.testing.assert_almost_equal(
var, pn.getVariance(), 1,
err_msg='PoissonNoise wrong when already an object drawn on the image')
# Check withVariance
pn = pn.withVariance(9.)
np.testing.assert_almost_equal(
pn.getVariance(), 9., precision,
err_msg="PoissonNoise withVariance results in wrong variance")
np.testing.assert_almost_equal(
pn.sky_level, 9., precision,
err_msg="PoissonNoise withVariance results in wrong sky_level")
# Check withScaledVariance
pn = pn.withScaledVariance(4.)
np.testing.assert_almost_equal(
pn.getVariance(), 36, precision,
err_msg="PoissonNoise withScaledVariance results in wrong variance")
np.testing.assert_almost_equal(
pn.sky_level, 36., precision,
err_msg="PoissonNoise withScaledVariance results in wrong sky_level")
# Check arithmetic
pn = pn.withVariance(0.5)
pn2 = pn * 3
np.testing.assert_almost_equal(
pn2.getVariance(), 1.5, precision,
err_msg="PoissonNoise pn*3 results in wrong variance")
np.testing.assert_almost_equal(
pn.getVariance(), 0.5, precision,
err_msg="PoissonNoise pn*3 results in wrong variance for original pn")
pn2 = 5 * pn
np.testing.assert_almost_equal(
pn2.getVariance(), 2.5, precision,
err_msg="PoissonNoise 5*pn results in wrong variance")
np.testing.assert_almost_equal(
pn.getVariance(), 0.5, precision,
err_msg="PoissonNoise 5*pn results in wrong variance for original pn")
pn2 = pn/2
np.testing.assert_almost_equal(
pn2.getVariance(), 0.25, precision,
err_msg="PoissonNoise pn/2 results in wrong variance")
np.testing.assert_almost_equal(
pn.getVariance(), 0.5, precision,
err_msg="PoissonNoise 5*pn results in wrong variance for original pn")
pn *= 3
np.testing.assert_almost_equal(
pn.getVariance(), 1.5, precision,
err_msg="PoissonNoise pn*=3 results in wrong variance")
pn /= 2
np.testing.assert_almost_equal(
pn.getVariance(), 0.75, precision,
err_msg="PoissonNoise pn/=2 results in wrong variance")
# Check starting with PoissonNoise()
pn = galsim.PoissonNoise()
pn = pn.withVariance(9.)
np.testing.assert_almost_equal(
pn.getVariance(), 9., precision,
err_msg="PoissonNoise().withVariance results in wrong variance")
np.testing.assert_almost_equal(
pn.sky_level, 9., precision,
err_msg="PoissonNoise().withVariance results in wrong sky_level")
pn = pn.withScaledVariance(4.)
np.testing.assert_almost_equal(
pn.getVariance(), 36, precision,
err_msg="PoissonNoise().withScaledVariance results in wrong variance")
np.testing.assert_almost_equal(
pn.sky_level, 36., precision,
err_msg="PoissonNoise().withScaledVariance results in wrong sky_level")
# Check picklability
do_pickle(pn, lambda x: (x.rng.serialize(), x.sky_level))
do_pickle(pn, drawNoise)
do_pickle(pn)
# Check copy, eq and ne
pn = pn.withVariance(pMean)
pn2 = galsim.PoissonNoise(pn.rng.duplicate(), pMean)
pn3 = pn.copy()
pn4 = pn.copy(rng=galsim.BaseDeviate(11))
pn5 = galsim.PoissonNoise(pn.rng, 2*pMean)
assert pn == pn2
assert pn == pn3
assert pn != pn4
assert pn != pn5
assert pn.rng.raw() == pn2.rng.raw()
assert pn == pn2
assert pn == pn3
pn.rng.raw()
assert pn != pn2
assert pn == pn3
@timer
def test_ccdnoise():
"""Test CCD Noise generator
"""
# Start with some regression tests where we have known values that we expect to generate:
types = (np.int16, np.int32, np.float32, np.float64)
typestrings = ("S", "I", "F", "D")
testseed = 1000
gain = 3.
read_noise = 5.
sky = 50
# Tabulated results for the above settings and testseed value.
cResultS = np.array([[44, 47], [50, 49]], dtype=np.int16)
cResultI = np.array([[44, 47], [50, 49]], dtype=np.int32)
cResultF = np.array([[44.45332718, 47.79725266], [50.67744064, 49.58272934]], dtype=np.float32)
cResultD = np.array([[44.453328440057618, 47.797254142519577],
[50.677442088335162, 49.582730949808081]],dtype=np.float64)
for i in range(4):
prec = eval("precision"+typestrings[i])
cResult = eval("cResult"+typestrings[i])
rng = galsim.BaseDeviate(testseed)
ccdnoise = galsim.CCDNoise(rng, gain=gain, read_noise=read_noise)
testImage = galsim.Image((np.zeros((2, 2))+sky).astype(types[i]))
ccdnoise.applyTo(testImage)
np.testing.assert_array_almost_equal(
testImage.array, cResult, prec,
err_msg="Wrong CCD noise random sequence generated for Image"+typestrings[i]+".")
# Check that reseeding the rng reseeds the internal deviate in CCDNoise
rng.seed(testseed)
testImage.fill(sky)
ccdnoise.applyTo(testImage)
np.testing.assert_array_almost_equal(
testImage.array, cResult, prec,
err_msg="Wrong CCD noise random sequence generated for Image"+typestrings[i]+
" after seed")
# Check using addNoise
rng.seed(testseed)
testImage.fill(sky)
testImage.addNoise(ccdnoise)
np.testing.assert_array_almost_equal(
testImage.array, cResult, prec,
err_msg="Wrong CCD noise random sequence generated for Image"+typestrings[i]+
" using addNoise")
# Test filling an image with Fortran ordering
rng.seed(testseed)
testImageF = galsim.Image(np.zeros((2, 2)).T, dtype=types[i])
testImageF.fill(sky)
testImageF.addNoise(ccdnoise)
np.testing.assert_array_almost_equal(
testImageF.array, cResult, prec,
err_msg="Wrong CCD noise generated for Fortran-ordered Image"+typestrings[i])
# Now include sky_level in ccdnoise
rng.seed(testseed)
ccdnoise = galsim.CCDNoise(rng, sky_level=sky, gain=gain, read_noise=read_noise)
testImage.fill(0)
ccdnoise.applyTo(testImage)
np.testing.assert_array_almost_equal(
testImage.array, cResult-sky, prec,
err_msg="Wrong CCD noise random sequence generated for Image"+typestrings[i]+
" with sky_level included in noise")
rng.seed(testseed)
testImage.fill(0)
testImage.addNoise(ccdnoise)
np.testing.assert_array_almost_equal(
testImage.array, cResult-sky, prec,
err_msg="Wrong CCD noise random sequence generated for Image"+typestrings[i]+
" using addNoise with sky_level included in noise")
# Check CCDNoise variance:
var1 = sky/gain + (read_noise/gain)**2
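    # (Illustrative derivation, assuming sky is quoted in ADU and read_noise in
    # electrons: the sky contributes sky*gain electrons of Poisson variance,
    # i.e. sky/gain ADU^2, and the read noise contributes (read_noise/gain)^2
    # ADU^2 -- hence var1.)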
np.testing.assert_almost_equal(
ccdnoise.getVariance(), var1, precision,
err_msg="CCDNoise getVariance returns wrong variance")
np.testing.assert_almost_equal(
ccdnoise.sky_level, sky, precision,
err_msg="CCDNoise sky_level returns wrong value")
np.testing.assert_almost_equal(
ccdnoise.gain, gain, precision,
err_msg="CCDNoise gain returns wrong value")
np.testing.assert_almost_equal(
ccdnoise.read_noise, read_noise, precision,
err_msg="CCDNoise read_noise returns wrong value")
# Check that the noise model really does produce this variance.
# NB. If default float32 is used here, older versions of numpy will compute the variance
# in single precision, and with 2048^2 values, the final answer comes out significantly
# wrong (19.33 instead of 19.42, which gets compared to the nominal value of 19.44).
big_im = galsim.Image(2048,2048,dtype=float)
big_im.addNoise(ccdnoise)
var = np.var(big_im.array)
print('variance = ',var)
print('getVar = ',ccdnoise.getVariance())
np.testing.assert_almost_equal(
var, ccdnoise.getVariance(), 1,
err_msg='Realized variance for CCDNoise did not match getVariance()')
# Check that CCDNoise adds to the image, not overwrites the image.
gal = galsim.Exponential(half_light_radius=2.3, flux=0.3)
# Note: again, flux/size^2 needs to be << sky_level or it will mess up the statistics.
gal.drawImage(image=big_im)
big_im.addNoise(ccdnoise)
gal.withFlux(-0.3).drawImage(image=big_im, add_to_image=True)
var = np.var(big_im.array)
np.testing.assert_almost_equal(
var, ccdnoise.getVariance(), 1,
err_msg='CCDNoise wrong when already an object drawn on the image')
# Check using a non-integer sky level which does some slightly different calculations.
rng.seed(testseed)
big_im_int = galsim.Image(2048,2048,dtype=int)
ccdnoise = galsim.CCDNoise(rng, sky_level=34.42, gain=1.6, read_noise=11.2)
big_im_int.fill(0)
big_im_int.addNoise(ccdnoise)
var = np.var(big_im_int.array)
np.testing.assert_almost_equal(var/ccdnoise.getVariance(), 1., decimal=2,
err_msg='CCDNoise wrong when sky_level is not an integer')
# Using gain=0 means the read_noise is in ADU, not e-
rng.seed(testseed)
ccdnoise = galsim.CCDNoise(rng, gain=0., read_noise=read_noise)
var2 = read_noise**2
np.testing.assert_almost_equal(
ccdnoise.getVariance(), var2, precision,
err_msg="CCDNoise getVariance returns wrong variance with gain=0")
np.testing.assert_almost_equal(
ccdnoise.sky_level, 0., precision,
err_msg="CCDNoise sky_level returns wrong value with gain=0")
np.testing.assert_almost_equal(
ccdnoise.gain, 0., precision,
err_msg="CCDNoise gain returns wrong value with gain=0")
np.testing.assert_almost_equal(
ccdnoise.read_noise, read_noise, precision,
err_msg="CCDNoise read_noise returns wrong value with gain=0")
big_im.fill(0)
big_im.addNoise(ccdnoise)
var = np.var(big_im.array)
np.testing.assert_almost_equal(var, ccdnoise.getVariance(), 1,
err_msg='CCDNoise wrong when gain=0')
# Check withVariance
ccdnoise = galsim.CCDNoise(rng, sky_level=sky, gain=gain, read_noise=read_noise)
ccdnoise = ccdnoise.withVariance(9.)
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 9., precision,
err_msg="CCDNoise withVariance results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.sky_level, (9./var1)*sky, precision,
err_msg="CCDNoise withVariance results in wrong sky_level")
np.testing.assert_almost_equal(
ccdnoise.gain, gain, precision,
err_msg="CCDNoise withVariance results in wrong gain")
np.testing.assert_almost_equal(
ccdnoise.read_noise, np.sqrt(9./var1) * read_noise, precision,
err_msg="CCDNoise withVariance results in wrong ReadNoise")
# Check withScaledVariance
ccdnoise = ccdnoise.withScaledVariance(4.)
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 36., precision,
err_msg="CCDNoise withVariance results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.sky_level, (36./var1)*sky, precision,
err_msg="CCDNoise withVariance results in wrong sky_level")
np.testing.assert_almost_equal(
ccdnoise.gain, gain, precision,
err_msg="CCDNoise withVariance results in wrong gain")
np.testing.assert_almost_equal(
ccdnoise.read_noise, np.sqrt(36./var1) * read_noise, precision,
err_msg="CCDNoise withVariance results in wrong ReadNoise")
# Check arithmetic
ccdnoise = ccdnoise.withVariance(0.5)
ccdnoise2 = ccdnoise * 3
np.testing.assert_almost_equal(
ccdnoise2.getVariance(), 1.5, precision,
err_msg="CCDNoise ccdnoise*3 results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 0.5, precision,
err_msg="CCDNoise ccdnoise*3 results in wrong variance for original ccdnoise")
ccdnoise2 = 5 * ccdnoise
np.testing.assert_almost_equal(
ccdnoise2.getVariance(), 2.5, precision,
err_msg="CCDNoise 5*ccdnoise results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 0.5, precision,
err_msg="CCDNoise 5*ccdnoise results in wrong variance for original ccdnoise")
ccdnoise2 = ccdnoise/2
np.testing.assert_almost_equal(
ccdnoise2.getVariance(), 0.25, precision,
err_msg="CCDNoise ccdnoise/2 results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 0.5, precision,
err_msg="CCDNoise 5*ccdnoise results in wrong variance for original ccdnoise")
ccdnoise *= 3
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 1.5, precision,
err_msg="CCDNoise ccdnoise*=3 results in wrong variance")
ccdnoise /= 2
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 0.75, precision,
err_msg="CCDNoise ccdnoise/=2 results in wrong variance")
# Check starting with CCDNoise()
ccdnoise = galsim.CCDNoise()
ccdnoise = ccdnoise.withVariance(9.)
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 9., precision,
err_msg="CCDNoise().withVariance results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.sky_level, 9., precision,
err_msg="CCDNoise().withVariance results in wrong sky_level")
np.testing.assert_almost_equal(
ccdnoise.gain, 1., precision,
err_msg="CCDNoise().withVariance results in wrong gain")
np.testing.assert_almost_equal(
ccdnoise.read_noise, 0., precision,
err_msg="CCDNoise().withVariance results in wrong ReadNoise")
ccdnoise = ccdnoise.withScaledVariance(4.)
np.testing.assert_almost_equal(
ccdnoise.getVariance(), 36., precision,
err_msg="CCDNoise().withScaledVariance results in wrong variance")
np.testing.assert_almost_equal(
ccdnoise.sky_level, 36., precision,
err_msg="CCDNoise().withScaledVariance results in wrong sky_level")
np.testing.assert_almost_equal(
ccdnoise.gain, 1., precision,
err_msg="CCDNoise().withScaledVariance results in wrong gain")
np.testing.assert_almost_equal(
ccdnoise.read_noise, 0., precision,
err_msg="CCDNoise().withScaledVariance results in wrong ReadNoise")
# Check picklability
do_pickle(ccdnoise, lambda x: (x.rng.serialize(), x.sky_level, x.gain, x.read_noise))
do_pickle(ccdnoise, drawNoise)
do_pickle(ccdnoise)
# Check copy, eq and ne
ccdnoise = galsim.CCDNoise(rng, sky, gain, read_noise)
ccdnoise2 = galsim.CCDNoise(ccdnoise.rng.duplicate(), gain=gain, read_noise=read_noise,
sky_level=sky)
ccdnoise3 = ccdnoise.copy()
ccdnoise4 = ccdnoise.copy(rng=galsim.BaseDeviate(11))
ccdnoise5 = galsim.CCDNoise(ccdnoise.rng, gain=2*gain, read_noise=read_noise, sky_level=sky)
ccdnoise6 = galsim.CCDNoise(ccdnoise.rng, gain=gain, read_noise=2*read_noise, sky_level=sky)
ccdnoise7 = galsim.CCDNoise(ccdnoise.rng, gain=gain, read_noise=read_noise, sky_level=2*sky)
assert ccdnoise == ccdnoise2
assert ccdnoise == ccdnoise3
assert ccdnoise != ccdnoise4
assert ccdnoise != ccdnoise5
assert ccdnoise != ccdnoise6
assert ccdnoise != ccdnoise7
assert ccdnoise.rng.raw() == ccdnoise2.rng.raw()
assert ccdnoise == ccdnoise2
assert ccdnoise == ccdnoise3
ccdnoise.rng.raw()
assert ccdnoise != ccdnoise2
assert ccdnoise == ccdnoise3
@timer
def test_addnoisesnr():
"""Test that addNoiseSNR is behaving sensibly.
"""
# Rather than reproducing the S/N calculation in addNoiseSNR(), we'll just check for
# self-consistency of the behavior with / without flux preservation.
# Begin by making some object that we draw into an Image.
gal_sigma = 3.7
pix_scale = 0.6
test_snr = 73.
gauss = galsim.Gaussian(sigma=gal_sigma)
im = gauss.drawImage(scale=pix_scale, dtype=np.float64)
# Now make the noise object to use.
# Use a default-constructed rng (i.e. rng=None) since we had initially had trouble
# with that. And use the duplicate feature to get a second copy of this rng.
gn = galsim.GaussianNoise()
rng2 = gn.rng.duplicate()
# Try addNoiseSNR with preserve_flux=True, so the RNG needs a different variance.
# Check what variance was added for this SNR, and that the RNG still has its original variance
# after this call.
var_out = im.addNoiseSNR(gn, test_snr, preserve_flux=True)
assert gn.getVariance()==1.0
max_val = im.array.max()
# Now apply addNoiseSNR to another (clean) image with preserve_flux=False, so we use the noise
# variance in the original RNG, i.e., 1. Check that the returned variance is 1, and that the
# value of the maximum pixel (presumably the peak of the galaxy light profile) is scaled as we
# expect for this SNR.
im2 = gauss.drawImage(scale=pix_scale, dtype=np.float64)
gn2 = galsim.GaussianNoise(rng=rng2)
var_out2 = im2.addNoiseSNR(gn2, test_snr, preserve_flux=False)
assert var_out2==1.0
expect_max_val2 = max_val*np.sqrt(var_out2/var_out)
np.testing.assert_almost_equal(
im2.array.max(), expect_max_val2, decimal=8,
err_msg='addNoiseSNR with preserve_flux = True and False give inconsistent results')
if __name__ == "__main__":
test_deviate_noise()
test_gaussian_noise()
test_variable_gaussian_noise()
test_poisson_noise()
test_ccdnoise()
test_addnoisesnr()
| [
"[email protected]"
] | |
17568aee9cbf26702497d31f6ef8bf6bdd3b0a1c | cf197880ad947b1706ae2ca19fa7010cc2dd12b8 | /Algorithms/Chapter2_Fundamentals2/04_Character_Art.py | b26daab9076f8155875eeae2d5c2f620deedb91e | [] | no_license | KevinMichaelCamp/Python-HardWay | 9b8adb5be31729da8f52c956b4d0638a79715013 | 25f21f4fb8934edb13649fea3d5d15033332a7eb | refs/heads/master | 2020-03-26T12:59:15.938322 | 2020-01-02T01:27:37 | 2020-01-02T01:27:37 | 144,917,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # From star art - do the same but allow for character to be passed in as argument
def drawLeftStars(num, char):
text = ""
text += char * num
text += " " * (75 - num)
return text
def drawRightStars(num, char):
text = ""
text += " " * (75 - num)
text += char * num
return text
def drawCenteredStars(num, char):
text = ""
text += " " * ((75 - num)//2)
text += char * num
text += " " * ((75 - num)//2)
return text
# Test Cases
print(drawLeftStars(35, "%"))
print(drawRightStars(35, "@"))
print(drawCenteredStars(35, "!"))
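# (Expected shapes, for illustration: each call returns a 75-character line --
# 35 '%' then 40 spaces; 40 spaces then 35 '@'; and 20 spaces, 35 '!', 20 spaces.)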
| [
"[email protected]"
] | |
61a8c8e94ab7bb78ec496570096aab6ab53d2709 | 78ef0d7736075ee33ac4230f47c078bbf2b0e014 | /joboffers/tests/test_models.py | dd0be5b07a29f6f9da0a8f9e15f4c7823f5fd09b | [
"Apache-2.0"
] | permissive | PyAr/pyarweb | e22e9350bf107329e5a79c2368fb182958a134d2 | 5f88d1ea0cea9bd67547b70dc2c8bbaa3b8b9d03 | refs/heads/master | 2023-08-31T10:24:53.220031 | 2023-08-29T16:21:57 | 2023-08-29T16:21:57 | 17,032,696 | 64 | 108 | Apache-2.0 | 2023-09-07T04:02:53 | 2014-02-20T19:28:31 | Python | UTF-8 | Python | false | false | 17,539 | py | import factory
import pytest
from datetime import date
from unittest.mock import patch
from django.contrib.contenttypes.models import ContentType
from django.contrib.sessions.backends.db import SessionStore
from django.db.utils import IntegrityError
from django.shortcuts import reverse
from django.utils.text import slugify
from easyaudit.models import CRUDEvent
from factory import Faker
from pycompanies.tests.factories import UserCompanyProfileFactory
from pycompanies.tests.fixtures import create_user_company_profile # noqa
from ..constants import STATE_LABEL_CLASSES
from ..models import (EventType, JobOffer, JobOfferHistory, JobOfferAccessLog, OfferState,
Remoteness)
from .factories import JobOfferAccessLogFactory, JobOfferCommentFactory, JobOfferFactory
from .joboffers_descriptions import (LONG_JOBOFFER_DESCRIPTION,
SHORT_JOBOFFER_DESCRIPTION,
STRIPPED_LONG_JOBOFFER_DESCRIPTION,
STRIPPED_SHORT_JOBOFFER_DESCRIPTION)
@pytest.mark.django_db
def test_assert_joboffer_when_remoteness_is_remote_location_can_be_null():
"""
Assert that a joboffer can be created with a null location when remoteness is Remote.
"""
JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
location=None,
contact_mail=Faker('email')
)
assert JobOffer.objects.all().count() == 1
@pytest.mark.django_db
def test_assert_joboffer_when_remoteness_is_office_location_cannot_be_null():
"""
Assert that a joboffer cannot be created with a null location when remoteness is in office.
"""
with pytest.raises(IntegrityError):
JobOfferFactory.create(
remoteness=Remoteness.OFFICE,
location=None,
contact_mail=Faker('email')
)
@pytest.mark.django_db
def test_assert_joboffer_when_remoteness_is_hybrid_location_cannot_be_null():
"""
    Assert that a joboffer cannot be created with a null location when remoteness is hybrid.
"""
with pytest.raises(IntegrityError):
JobOfferFactory.create(
remoteness=Remoteness.HYBRID,
location=None,
contact_mail=Faker('email')
)
@pytest.mark.django_db
def test_assert_constraint_contact_info_not_null():
"""
Check constraint that assures that at least mail phone or url contact info is present.
"""
with pytest.raises(IntegrityError):
JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
location=None,
contact_mail=None,
contact_phone=None,
contact_url=None,
)
@pytest.mark.django_db
def test_assert_joboffer_ok_when_just_one_contact_info_is_present():
"""
Assert that a joboffer can be created with just one contact info.
"""
joboffer_1 = JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
location=None,
contact_mail=Faker('email'),
contact_phone=None,
contact_url=None
)
company = joboffer_1.company
JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
company=company,
location=None,
contact_mail=None,
contact_phone=Faker('pyint', min_value=11111111111, max_value=99999999999),
contact_url=None
)
JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
company=company,
location=None,
contact_mail=None,
contact_phone=None,
contact_url=Faker('url')
)
assert JobOffer.objects.all().count() == 3
@pytest.mark.django_db
def test_get_joboffer_history_for_given_joboffer(user_company_profile, settings):
"""
    Test that the manager retrieves only the changes of the specified joboffer
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
    # same transaction and then we can test for it.
data = factory.build(
dict,
company=user_company_profile.company,
created_by=user_company_profile.user,
modified_by=user_company_profile.user,
FACTORY_CLASS=JobOfferFactory
)
joboffer = JobOffer(**data)
joboffer.save()
joboffer.state = OfferState.MODERATION
joboffer.save()
comment = JobOfferCommentFactory.create(
joboffer=joboffer, created_by=user_company_profile.user
)
JobOfferCommentFactory(created_by=user_company_profile.user)
changes = JobOfferHistory.objects.for_offer(joboffer)
actual_history = list(changes.values('event_type', 'content_type', 'object_id'))
offer_ctype = ContentType.objects.get(app_label='joboffers', model='joboffer')
offer_comment_ctype = ContentType.objects.get(
app_label='joboffers', model='joboffercomment'
)
expected_history = [
{
'event_type': CRUDEvent.CREATE,
'content_type': offer_comment_ctype.id,
'object_id': str(comment.id)
},
{
'event_type': CRUDEvent.UPDATE,
'content_type': offer_ctype.id,
'object_id': str(joboffer.id)
},
{
'event_type': CRUDEvent.CREATE,
'content_type': offer_ctype.id,
'object_id': str(joboffer.id)
}
]
assert actual_history == expected_history
@pytest.mark.django_db
def test_JobOfferHistory_joboffer_comment_with_wrong_model_object(settings):
"""
Test that calling comment_fields on JobOfferHistory object raises exceptions when it is called
with an object different that JobOfferComment
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
    # same transaction and then we can test for it.
JobOfferFactory.create()
history = JobOfferHistory.objects.first()
assert history.content_type.model == 'joboffer'
with pytest.raises(ValueError):
history.joboffer_comment
@pytest.mark.django_db
def test_JobOfferHistory_works_with_a_JobOfferComment_model(settings):
"""
Test that a JobOfferHistory returns the related JobOfferComment correctly
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
# same trasnaction and then we can test for it.
comment = JobOfferCommentFactory.create()
history = JobOfferHistory.objects.first()
assert history.content_type.model == 'joboffercomment'
obtained_comment = history.joboffer_comment
assert comment == obtained_comment
@pytest.mark.django_db
def test_JobOfferHistory_changes(settings):
"""
    Test that JobOfferHistory.changes returns the recorded field changes for a joboffer
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
# same trasnaction and then we can test for it.
joboffer = JobOfferFactory.create(state=OfferState.DEACTIVATED)
joboffer.state = OfferState.ACTIVE
joboffer.save()
history = JobOfferHistory.objects.filter(event_type=JobOfferHistory.UPDATE).first()
assert history.content_type.model == 'joboffer'
changes = history.changes
assert changes['state'] == [OfferState.DEACTIVATED, OfferState.ACTIVE]
@pytest.mark.django_db
def test_JobOfferHistory_fields(settings):
"""
Test that JobOfferHistory.fields returns the serialized fields for a joboffer
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
# same trasnaction and then we can test for it.
joboffer = JobOfferFactory.create()
history = JobOfferHistory.objects.first()
assert history.content_type.model == 'joboffer'
fields = history.fields
assert joboffer.title == fields['title']
@pytest.mark.django_db
def test_JobOfferHistory_state_label(settings):
"""
    Test that JobOfferHistory.state_label returns the state label correctly.
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
# same trasnaction and then we can test for it.
joboffer = JobOfferFactory.create()
history = JobOfferHistory.objects.first()
assert history.content_type.model == 'joboffer'
state_label = history.state_label
assert joboffer.state.label == state_label
@pytest.mark.django_db
def test_JobOfferHistory_state_label_class(settings):
"""
    Test that state_label_class returns the state label CSS class for a joboffer JobOfferHistory
"""
settings.TEST = True
# ^ This is needed so django-easyaudit creates the CRUDEvent objects in the
# same trasnaction and then we can test for it.
JobOfferFactory.create(state=OfferState.MODERATION)
history = JobOfferHistory.objects.first()
assert history.content_type.model == 'joboffer'
state_label_class = history.state_label_class
assert state_label_class == STATE_LABEL_CLASSES[OfferState.MODERATION]
@pytest.mark.django_db
def test_assert_slug_is_updated_on_title_change():
"""
Assert that a joboffer updates the slug after title update.
"""
UPDATED_TITLE = 'Job Offer Updated'
joboffer = JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
title='Job Offer',
location=None,
contact_mail=Faker('email'),
contact_phone=None,
contact_url=None
)
joboffer.title = UPDATED_TITLE
joboffer.save()
assert slugify(UPDATED_TITLE) == joboffer.slug
@pytest.mark.django_db
def test_assert_short_description_is_set_with_stripped_description():
"""
Assert that a joboffer short description is created with the stripped description
if there is no short description given.
"""
joboffer = JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
title='Job Offer',
location=None,
contact_mail=Faker('email'),
contact_phone=None,
contact_url=None,
description=SHORT_JOBOFFER_DESCRIPTION,
short_description='',
)
assert STRIPPED_SHORT_JOBOFFER_DESCRIPTION == joboffer.short_description
@pytest.mark.django_db
def test_assert_short_description_is_set_with_the_given_short_description():
"""
Assert that the joboffer doesn't update the short_description if it is provided in the model.
"""
short_description = 'short description'
joboffer = JobOfferFactory.create(
remoteness=Remoteness.REMOTE,
title='Job Offer',
location=None,
contact_mail=Faker('email'),
contact_phone=None,
contact_url=None,
description=SHORT_JOBOFFER_DESCRIPTION,
short_description=short_description,
)
assert short_description == joboffer.short_description
@pytest.mark.django_db
def test_assert_get_short_description_strip_the_description():
"""
    Assert that the get_short_description method strips the description correctly.
"""
short_description = JobOffer.get_short_description(SHORT_JOBOFFER_DESCRIPTION)
assert STRIPPED_SHORT_JOBOFFER_DESCRIPTION == short_description
@pytest.mark.django_db
def test_assert_get_short_description_strip_the_long_description():
"""
    Assert that the get_short_description method strips the description and limits it to 512 chars.
"""
short_description = JobOffer.get_short_description(LONG_JOBOFFER_DESCRIPTION)
assert 512 == len(short_description)
assert STRIPPED_LONG_JOBOFFER_DESCRIPTION == short_description
@pytest.mark.django_db
def test_joboffer_last_comment():
"""
Test the joboffer.last_comment property
"""
joboffer = JobOfferFactory.create(state=OfferState.MODERATION)
JobOfferCommentFactory.create(joboffer=joboffer)
expected_comment = JobOfferCommentFactory.create(joboffer=joboffer)
assert joboffer.last_comment.text == expected_comment.text
@pytest.mark.django_db
def test_joboffer_track_visualization_with_empty_session():
"""
Test calling joboffer.track_visualization with an empty session
"""
joboffer = JobOfferFactory.create()
session = SessionStore()
track_record, created = joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert created is True
assert track_record.event_type == EventType.DETAIL_VIEW
assert track_record.joboffer == joboffer
assert JobOfferAccessLog.objects.count() == 1
@pytest.mark.django_db
def test_joboffer_track_visualization_with_initiated_session():
"""
    Test calling joboffer.track_visualization with an initiated session
"""
joboffer = JobOfferFactory.create()
session = SessionStore()
session.create()
track_record, created = joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert created is True
assert track_record.event_type == EventType.DETAIL_VIEW
assert track_record.joboffer == joboffer
assert JobOfferAccessLog.objects.count() == 1
@pytest.mark.django_db
def test_joboffer_track_visualization_should_not_repeat_multiple_hits():
"""
    Test that calling joboffer.track_visualization multiple times with the same session doesn't count
additional views
"""
joboffer = JobOfferFactory.create()
session = SessionStore()
session.create()
track_record, created = joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert created is True
for i in range(10):
joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert JobOfferAccessLog.objects.count() == 1
@pytest.mark.django_db
def test_joboffer_track_visualization_should_count_different_sessiones_on_the_same_day():
"""
    Test that calling joboffer.track_visualization with different sessions on the same day counts each one
"""
joboffer = JobOfferFactory.create()
EXPECTED_VISUALIZATIONS = 10
for i in range(EXPECTED_VISUALIZATIONS):
session = SessionStore()
session.create()
joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert JobOfferAccessLog.objects.count() == EXPECTED_VISUALIZATIONS
@pytest.mark.django_db
def test_joboffer_track_visualization_should_count_different_sessiones_on_different_months():
"""
Test that calling joboffer.track_visualization counts two hits from today and from a previous
month (same session).
"""
joboffer = JobOfferFactory.create()
EXPECTED_VISUALIZATIONS = 2
session = SessionStore()
session.create()
previous_date = date(2022, 2, 1)
with patch('joboffers.models.date') as mocked_date:
mocked_date.today.return_value = previous_date
# Previous month's hit
joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
# Today's hit
joboffer.track_visualization(session, event_type=EventType.DETAIL_VIEW)
assert JobOfferAccessLog.objects.count() == EXPECTED_VISUALIZATIONS
@pytest.mark.django_db
def test_joboffer_get_publisher_mail_addresses_with_multiple_users():
profile1 = UserCompanyProfileFactory.create()
company = profile1.company
profile2 = UserCompanyProfileFactory.create(company=company)
joboffer = JobOfferFactory.create(company=company)
EXPECTED_MAILS = {profile1.user.email, profile2.user.email}
mails = joboffer.get_publisher_mail_addresses()
assert mails == EXPECTED_MAILS
@pytest.mark.django_db
def test_joboffer_get_publisher_mail_addresses_without_users():
joboffer = JobOfferFactory.create()
EXPECTED_MAILS = set()
mails = joboffer.get_publisher_mail_addresses()
assert mails == EXPECTED_MAILS
def test_joboffer_get_full_url(settings):
"""Test that the url being crafted has the correct BASE_URL and the right format."""
dummy_url = 'example.com'
dummy_job_slug = 'python-job'
settings.BASE_URL = dummy_url
joboffer_url = reverse('joboffers:view', kwargs={'slug': dummy_job_slug})
expected_url = "".join(('https://example.com', joboffer_url))
joboffer = JobOffer(slug=dummy_job_slug)
result = joboffer.get_full_url()
assert expected_url == result
@pytest.mark.django_db
def test_joboffer_get_visualizations_full():
"""
Test get_visualizations with all the event types
"""
joboffer = JobOfferFactory.create()
JobOfferAccessLogFactory.create_batch(
size=1, event_type=EventType.LISTING_VIEW, joboffer=joboffer
)
JobOfferAccessLogFactory.create_batch(
size=2, event_type=EventType.DETAIL_VIEW, joboffer=joboffer
)
JobOfferAccessLogFactory.create_batch(
size=3, event_type=EventType.CONTACT_INFO_VIEW, joboffer=joboffer
)
visualizations = joboffer.get_visualizations_count()
assert visualizations[EventType.LISTING_VIEW] == 1
assert visualizations[EventType.DETAIL_VIEW] == 2
assert visualizations[EventType.CONTACT_INFO_VIEW] == 3
@pytest.mark.django_db
def test_joboffer_get_visualizations_some():
"""
    Test get_visualizations with only the listing view type
"""
joboffer = JobOfferFactory.create()
JobOfferAccessLogFactory.create_batch(
size=1, event_type=EventType.LISTING_VIEW, joboffer=joboffer
)
visualizations = joboffer.get_visualizations_count()
assert visualizations == {EventType.LISTING_VIEW: 1}
@pytest.mark.django_db
def test_joboffer_get_visualizations_empty():
"""
Test get_visualizations without access log
"""
joboffer = JobOfferFactory.create()
visualizations = joboffer.get_visualizations_count()
assert visualizations == {}
| [
"[email protected]"
] | |
291b9974fed522b446ecddbc35e473c3b12704c1 | 498474967e1480acf5cc0f25756e1d748c677195 | /mmdetection3d/mmdet3d/datasets/pipelines/data_augment_utils.py | 23b6aada74f4a7d27894228be2a3813e21b073ab | [
"MIT",
"Apache-2.0"
] | permissive | hustvl/MapTR | adc37f78cbae9d8c909dd8648088a4930bf55377 | feb0664e64684d3207859279f047fa54a1a806f6 | refs/heads/main | 2023-08-25T17:44:47.672149 | 2023-08-14T13:31:17 | 2023-08-14T13:31:17 | 518,672,305 | 643 | 95 | MIT | 2023-09-14T03:30:23 | 2022-07-28T02:20:43 | Python | UTF-8 | Python | false | false | 17,021 | py | # Copyright (c) OpenMMLab. All rights reserved.
import numba
import numpy as np
import warnings
from numba.errors import NumbaPerformanceWarning
from mmdet3d.core.bbox import box_np_ops
warnings.filterwarnings('ignore', category=NumbaPerformanceWarning)
@numba.njit
def _rotation_box2d_jit_(corners, angle, rot_mat_T):
"""Rotate 2D boxes.
Args:
corners (np.ndarray): Corners of boxes.
angle (float): Rotation angle.
rot_mat_T (np.ndarray): Transposed rotation matrix.
"""
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
corners[:] = corners @ rot_mat_T
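# Illustrative call (editor's sketch, not part of the original module): rotate a
# unit square's corners in place, reusing a preallocated 2x2 buffer as required
# by the jitted signature.
#   corners = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
#   buf = np.zeros((2, 2))
#   _rotation_box2d_jit_(corners, np.pi / 2, buf)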
@numba.jit(nopython=True)
def box_collision_test(boxes, qboxes, clockwise=True):
"""Box collision test.
Args:
boxes (np.ndarray): Corners of current boxes.
        qboxes (np.ndarray): Boxes to avoid colliding with.
clockwise (bool): Whether the corners are in clockwise order.
Default: True.
"""
N = boxes.shape[0]
K = qboxes.shape[0]
ret = np.zeros((N, K), dtype=np.bool_)
slices = np.array([1, 2, 3, 0])
lines_boxes = np.stack((boxes, boxes[:, slices, :]),
axis=2) # [N, 4, 2(line), 2(xy)]
lines_qboxes = np.stack((qboxes, qboxes[:, slices, :]), axis=2)
# vec = np.zeros((2,), dtype=boxes.dtype)
boxes_standup = box_np_ops.corner_to_standup_nd_jit(boxes)
qboxes_standup = box_np_ops.corner_to_standup_nd_jit(qboxes)
for i in range(N):
for j in range(K):
# calculate standup first
iw = (
min(boxes_standup[i, 2], qboxes_standup[j, 2]) -
max(boxes_standup[i, 0], qboxes_standup[j, 0]))
if iw > 0:
ih = (
min(boxes_standup[i, 3], qboxes_standup[j, 3]) -
max(boxes_standup[i, 1], qboxes_standup[j, 1]))
if ih > 0:
for k in range(4):
for box_l in range(4):
A = lines_boxes[i, k, 0]
B = lines_boxes[i, k, 1]
C = lines_qboxes[j, box_l, 0]
D = lines_qboxes[j, box_l, 1]
acd = (D[1] - A[1]) * (C[0] -
A[0]) > (C[1] - A[1]) * (
D[0] - A[0])
bcd = (D[1] - B[1]) * (C[0] -
B[0]) > (C[1] - B[1]) * (
D[0] - B[0])
if acd != bcd:
abc = (C[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (
C[0] - A[0])
abd = (D[1] - A[1]) * (B[0] - A[0]) > (
B[1] - A[1]) * (
D[0] - A[0])
if abc != abd:
ret[i, j] = True # collision.
break
if ret[i, j] is True:
break
if ret[i, j] is False:
# now check complete overlap.
# box overlap qbox:
box_overlap_qbox = True
for box_l in range(4): # point l in qboxes
for k in range(4): # corner k in boxes
vec = boxes[i, k] - boxes[i, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
boxes[i, k, 0] - qboxes[j, box_l, 0])
cross -= vec[0] * (
boxes[i, k, 1] - qboxes[j, box_l, 1])
if cross >= 0:
box_overlap_qbox = False
break
if box_overlap_qbox is False:
break
if box_overlap_qbox is False:
qbox_overlap_box = True
for box_l in range(4): # point box_l in boxes
for k in range(4): # corner k in qboxes
vec = qboxes[j, k] - qboxes[j, (k + 1) % 4]
if clockwise:
vec = -vec
cross = vec[1] * (
qboxes[j, k, 0] - boxes[i, box_l, 0])
cross -= vec[0] * (
qboxes[j, k, 1] - boxes[i, box_l, 1])
if cross >= 0: #
qbox_overlap_box = False
break
if qbox_overlap_box is False:
break
if qbox_overlap_box:
ret[i, j] = True # collision.
else:
ret[i, j] = True # collision.
return ret
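# Illustrative call (editor's sketch, not part of the original module): two
# overlapping boxes given as (N, 4, 2) corner arrays in clockwise order should
# report a collision; the corner layout below is an assumption for illustration.
#   boxes = np.array([[[0., 0.], [0., 1.], [1., 1.], [1., 0.]]])
#   qboxes = np.array([[[.5, .5], [.5, 1.5], [1.5, 1.5], [1.5, .5]]])
#   box_collision_test(boxes, qboxes)  # expected: array([[ True]])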
@numba.njit
def noise_per_box(boxes, valid_mask, loc_noises, rot_noises):
"""Add noise to every box (only on the horizontal plane).
Args:
boxes (np.ndarray): Input boxes with shape (N, 5).
valid_mask (np.ndarray): Mask to indicate which boxes are valid
with shape (N).
loc_noises (np.ndarray): Location noises with shape (N, M, 3).
rot_noises (np.ndarray): Rotation noises with shape (N, M).
Returns:
np.ndarray: Mask to indicate whether the noise is
added successfully (pass the collision test).
"""
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
# print(valid_mask)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_corners[:] = box_corners[i]
current_corners -= boxes[i, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j],
rot_mat_T)
current_corners += boxes[i, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
# print(coll_mat)
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
break
return success_mask
@numba.njit
def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
global_rot_noises):
"""Add noise to every box (only on the horizontal plane). Version 2 used
when enable global rotations.
Args:
boxes (np.ndarray): Input boxes with shape (N, 5).
valid_mask (np.ndarray): Mask to indicate which boxes are valid
with shape (N).
loc_noises (np.ndarray): Location noises with shape (N, M, 3).
rot_noises (np.ndarray): Rotation noises with shape (N, M).
Returns:
np.ndarray: Mask to indicate whether the noise is
added successfully (pass the collision test).
"""
num_boxes = boxes.shape[0]
num_tests = loc_noises.shape[1]
box_corners = box_np_ops.box2d_to_corner_jit(boxes)
current_corners = np.zeros((4, 2), dtype=boxes.dtype)
current_box = np.zeros((1, 5), dtype=boxes.dtype)
rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)
dst_pos = np.zeros((2, ), dtype=boxes.dtype)
success_mask = -np.ones((num_boxes, ), dtype=np.int64)
corners_norm = np.zeros((4, 2), dtype=boxes.dtype)
corners_norm[1, 1] = 1.0
corners_norm[2] = 1.0
corners_norm[3, 0] = 1.0
corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)
corners_norm = corners_norm.reshape(4, 2)
for i in range(num_boxes):
if valid_mask[i]:
for j in range(num_tests):
current_box[0, :] = boxes[i]
current_radius = np.sqrt(boxes[i, 0]**2 + boxes[i, 1]**2)
current_grot = np.arctan2(boxes[i, 0], boxes[i, 1])
dst_grot = current_grot + global_rot_noises[i, j]
dst_pos[0] = current_radius * np.sin(dst_grot)
dst_pos[1] = current_radius * np.cos(dst_grot)
current_box[0, :2] = dst_pos
current_box[0, -1] += (dst_grot - current_grot)
rot_sin = np.sin(current_box[0, -1])
rot_cos = np.cos(current_box[0, -1])
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
current_corners[:] = current_box[
0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]
current_corners -= current_box[0, :2]
_rotation_box2d_jit_(current_corners, rot_noises[i, j],
rot_mat_T)
current_corners += current_box[0, :2] + loc_noises[i, j, :2]
coll_mat = box_collision_test(
current_corners.reshape(1, 4, 2), box_corners)
coll_mat[0, i] = False
if not coll_mat.any():
success_mask[i] = j
box_corners[i] = current_corners
loc_noises[i, j, :2] += (dst_pos - boxes[i, :2])
rot_noises[i, j] += (dst_grot - current_grot)
break
return success_mask
def _select_transform(transform, indices):
"""Select transform.
Args:
transform (np.ndarray): Transforms to select from.
indices (np.ndarray): Mask to indicate which transform to select.
Returns:
np.ndarray: Selected transforms.
"""
result = np.zeros((transform.shape[0], *transform.shape[2:]),
dtype=transform.dtype)
for i in range(transform.shape[0]):
if indices[i] != -1:
result[i] = transform[i, indices[i]]
return result
@numba.njit
def _rotation_matrix_3d_(rot_mat_T, angle, axis):
"""Get the 3D rotation matrix.
Args:
rot_mat_T (np.ndarray): Transposed rotation matrix.
angle (float): Rotation angle.
axis (int): Rotation axis.
"""
rot_sin = np.sin(angle)
rot_cos = np.cos(angle)
rot_mat_T[:] = np.eye(3)
if axis == 1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 2] = -rot_sin
rot_mat_T[2, 0] = rot_sin
rot_mat_T[2, 2] = rot_cos
elif axis == 2 or axis == -1:
rot_mat_T[0, 0] = rot_cos
rot_mat_T[0, 1] = -rot_sin
rot_mat_T[1, 0] = rot_sin
rot_mat_T[1, 1] = rot_cos
elif axis == 0:
rot_mat_T[1, 1] = rot_cos
rot_mat_T[1, 2] = -rot_sin
rot_mat_T[2, 1] = rot_sin
rot_mat_T[2, 2] = rot_cos
@numba.njit
def points_transform_(points, centers, point_masks, loc_transform,
rot_transform, valid_mask):
"""Apply transforms to points and box centers.
Args:
points (np.ndarray): Input points.
centers (np.ndarray): Input box centers.
point_masks (np.ndarray): Mask to indicate which points need
to be transformed.
loc_transform (np.ndarray): Location transform to be applied.
rot_transform (np.ndarray): Rotation transform to be applied.
valid_mask (np.ndarray): Mask to indicate which boxes are valid.
"""
num_box = centers.shape[0]
num_points = points.shape[0]
rot_mat_T = np.zeros((num_box, 3, 3), dtype=points.dtype)
for i in range(num_box):
_rotation_matrix_3d_(rot_mat_T[i], rot_transform[i], 2)
for i in range(num_points):
for j in range(num_box):
if valid_mask[j]:
if point_masks[i, j] == 1:
points[i, :3] -= centers[j, :3]
points[i:i + 1, :3] = points[i:i + 1, :3] @ rot_mat_T[j]
points[i, :3] += centers[j, :3]
points[i, :3] += loc_transform[j]
break # only apply first box's transform
@numba.njit
def box3d_transform_(boxes, loc_transform, rot_transform, valid_mask):
"""Transform 3D boxes.
Args:
boxes (np.ndarray): 3D boxes to be transformed.
loc_transform (np.ndarray): Location transform to be applied.
rot_transform (np.ndarray): Rotation transform to be applied.
valid_mask (np.ndarray | None): Mask to indicate which boxes are valid.
"""
num_box = boxes.shape[0]
for i in range(num_box):
if valid_mask[i]:
boxes[i, :3] += loc_transform[i]
boxes[i, 6] += rot_transform[i]
def noise_per_object_v3_(gt_boxes,
points=None,
valid_mask=None,
rotation_perturb=np.pi / 4,
center_noise_std=1.0,
global_random_rot_range=np.pi / 4,
num_try=100):
"""Random rotate or remove each groundtruth independently. use kitti viewer
to test this function points_transform_
Args:
gt_boxes (np.ndarray): Ground truth boxes with shape (N, 7).
points (np.ndarray | None): Input point cloud with shape (M, 4).
Default: None.
valid_mask (np.ndarray | None): Mask to indicate which boxes are valid.
Default: None.
rotation_perturb (float): Rotation perturbation. Default: pi / 4.
center_noise_std (float): Center noise standard deviation.
Default: 1.0.
global_random_rot_range (float): Global random rotation range.
Default: pi/4.
num_try (int): Number of try. Default: 100.
"""
num_boxes = gt_boxes.shape[0]
if not isinstance(rotation_perturb, (list, tuple, np.ndarray)):
rotation_perturb = [-rotation_perturb, rotation_perturb]
if not isinstance(global_random_rot_range, (list, tuple, np.ndarray)):
global_random_rot_range = [
-global_random_rot_range, global_random_rot_range
]
enable_grot = np.abs(global_random_rot_range[0] -
global_random_rot_range[1]) >= 1e-3
if not isinstance(center_noise_std, (list, tuple, np.ndarray)):
center_noise_std = [
center_noise_std, center_noise_std, center_noise_std
]
if valid_mask is None:
valid_mask = np.ones((num_boxes, ), dtype=np.bool_)
center_noise_std = np.array(center_noise_std, dtype=gt_boxes.dtype)
loc_noises = np.random.normal(
scale=center_noise_std, size=[num_boxes, num_try, 3])
rot_noises = np.random.uniform(
rotation_perturb[0], rotation_perturb[1], size=[num_boxes, num_try])
gt_grots = np.arctan2(gt_boxes[:, 0], gt_boxes[:, 1])
grot_lowers = global_random_rot_range[0] - gt_grots
grot_uppers = global_random_rot_range[1] - gt_grots
global_rot_noises = np.random.uniform(
grot_lowers[..., np.newaxis],
grot_uppers[..., np.newaxis],
size=[num_boxes, num_try])
origin = (0.5, 0.5, 0)
gt_box_corners = box_np_ops.center_to_corner_box3d(
gt_boxes[:, :3],
gt_boxes[:, 3:6],
gt_boxes[:, 6],
origin=origin,
axis=2)
# TODO: rewrite this noise box function?
if not enable_grot:
selected_noise = noise_per_box(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises)
else:
selected_noise = noise_per_box_v2_(gt_boxes[:, [0, 1, 3, 4, 6]],
valid_mask, loc_noises, rot_noises,
global_rot_noises)
loc_transforms = _select_transform(loc_noises, selected_noise)
rot_transforms = _select_transform(rot_noises, selected_noise)
surfaces = box_np_ops.corner_to_surfaces_3d_jit(gt_box_corners)
if points is not None:
# TODO: replace this points_in_convex function by my tools?
point_masks = box_np_ops.points_in_convex_polygon_3d_jit(
points[:, :3], surfaces)
points_transform_(points, gt_boxes[:, :3], point_masks, loc_transforms,
rot_transforms, valid_mask)
box3d_transform_(gt_boxes, loc_transforms, rot_transforms, valid_mask)
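if __name__ == '__main__':
    # Minimal smoke test added for illustration only (editor's sketch, not part
    # of the mmdet3d pipeline; assumes box_np_ops imports cleanly): jitter two
    # dummy ground-truth boxes (x, y, z, dx, dy, dz, yaw) together with a random
    # point cloud and check that both arrays keep their shapes.
    demo_gt_boxes = np.array([[0., 0., 0., 2., 2., 2., 0.],
                              [10., 10., 0., 2., 2., 2., 0.]])
    demo_points = np.random.rand(200, 4) * 20.0
    noise_per_object_v3_(demo_gt_boxes, demo_points, num_try=10)
    print(demo_gt_boxes.shape, demo_points.shape)  # (2, 7) (200, 4)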
| [
"[email protected]"
] | |
59f7e0dc619c13fdb4d809743f30f6d88e72df7c | 78d35bb7876a3460d4398e1cb3554b06e36c720a | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_express_route_ports_locations_operations.py | 4f8fd8e9b5f9c43e377a4b1f5bd034b2cdb2c574 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | catchsrinivas/azure-sdk-for-python | e35f59b60318a31b3c940a7a3a07b61b28118aa5 | 596227a7738a5342274486e30489239d539b11d1 | refs/heads/main | 2023-08-27T09:08:07.986249 | 2021-11-11T11:13:35 | 2021-11-11T11:13:35 | 427,045,896 | 0 | 0 | MIT | 2021-11-11T15:14:31 | 2021-11-11T15:14:31 | null | UTF-8 | Python | false | false | 7,901 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRoutePortsLocationsOperations:
"""ExpressRoutePortsLocationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRoutePortsLocationListResult"]:
"""Retrieves all ExpressRoutePort peering locations. Does not return available bandwidths for each
location. Available bandwidths can only be obtained when retrieving a specific peering
location.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRoutePortsLocationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ExpressRoutePortsLocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRoutePortsLocationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations'} # type: ignore
async def get(
self,
location_name: str,
**kwargs: Any
) -> "_models.ExpressRoutePortsLocation":
"""Retrieves a single ExpressRoutePort peering location, including the list of available
bandwidths available at said peering location.
:param location_name: Name of the requested ExpressRoutePort peering location.
:type location_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRoutePortsLocation, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.ExpressRoutePortsLocation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRoutePortsLocation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRoutePortsLocation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ExpressRoutePortsLocations/{locationName}'} # type: ignore
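# Usage sketch (editorial assumption, not generated code): these operations are
# normally reached through an authenticated NetworkManagementClient, e.g.
#   locations = [loc async for loc in client.express_route_ports_locations.list()]
#   location = await client.express_route_ports_locations.get("location-name")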
| [
"[email protected]"
] | |
dca9a6c7ed4c4e8ab5c5da27e4c28c00529a590e | 0111fa900ae348d9f06a4976567c7f3ab8b82be3 | /python/arbolBin.py | fbb13627c7728861f95e3ab72b6201555ab997d2 | [] | no_license | apdaza/universidad-ejercicios | 4e790f84637f8753a166a5318d21217161cbe0bc | 64a02fd7b63e2cd899a91297f652e2351ccbce2f | refs/heads/master | 2021-09-11T04:59:17.252072 | 2021-09-04T13:56:04 | 2021-09-04T13:56:04 | 86,726,523 | 6 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,849 | py | class Nodo():
def __init__(self,val,izq=None,der=None):
self.valor = val
self.izquierda = izq
self.derecha = der
def inorden(arbol):
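    # In-order traversal returned as a list: left subtree, root value, right subtree.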
if arbol == None:
return []
else:
return inorden(arbol.izquierda)+[arbol.valor]+inorden(arbol.derecha)
def preorden(arbol):
if arbol == None:
return []
else:
return [arbol.valor]+preorden(arbol.izquierda)+preorden(arbol.derecha)
def postorden(arbol):
if arbol == None:
return []
else:
return postorden(arbol.izquierda)+postorden(arbol.derecha)+[arbol.valor]
def evaluar(arbol):
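    # Evaluates an expression tree whose internal nodes are the operators
    # '+', '-', '*' or '/' and whose leaves are integer literals.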
if arbol.valor == '+':
return evaluar(arbol.izquierda) + evaluar(arbol.derecha)
elif arbol.valor == '-':
return evaluar(arbol.izquierda) - evaluar(arbol.derecha)
elif arbol.valor == '*':
return evaluar(arbol.izquierda) * evaluar(arbol.derecha)
elif arbol.valor == '/':
return evaluar(arbol.izquierda) / evaluar(arbol.derecha)
else:
return int(arbol.valor)
def suma(arbol):
if arbol == None:
return 0
else:
return suma(arbol.izquierda)+suma(arbol.derecha)+arbol.valor
def insertar(arbol, valor):
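    # Binary-search-tree insert returning a new tree: values greater than or equal
    # to the current node go to the right subtree, smaller values to the left.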
if arbol == None:
return Nodo(valor)
else:
if valor >= arbol.valor:
return Nodo(arbol.valor, arbol.izquierda, insertar(arbol.derecha, valor))
else:
return Nodo(arbol.valor, insertar(arbol.izquierda, valor), arbol.derecha)
def insertarLista(arbol, lista):
if lista==[]:
return arbol
else:
if arbol == None:
return insertarLista(Nodo(lista[0]), lista[1:])
else:
return insertarLista(insertar(arbol, lista[0]), lista[1:])
def imprimeArbolSangrado(arbol, nivel=0):
if arbol == None:
return
else:
imprimeArbolSangrado(arbol.izquierda, nivel+1)
print ' '*nivel + str(arbol.valor)
imprimeArbolSangrado(arbol.derecha, nivel+1)
def buscarEnArbol(valor, arbol):
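    # Standard binary-search-tree lookup: returns True when the value is present.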
if arbol == None:
return False
elif arbol.valor == valor:
return True
elif valor < arbol.valor:
return buscarEnArbol(valor, arbol.izquierda)
else:
return buscarEnArbol(valor, arbol.derecha)
def contarElementos(arbol):
if arbol == None:
return 0
else:
return 1 + contarElementos(arbol.derecha) + contarElementos(arbol.izquierda)
a = Nodo(15, Nodo(10, Nodo(4)), Nodo(25))
b = Nodo('+', Nodo('-', Nodo('14'),Nodo('4')), Nodo('25'))
print inorden(a)
print preorden(a)
print postorden(a)
print suma(a)
print inorden(insertar(a,67))
print inorden(insertarLista(a,[23,2,17,20]))
imprimeArbolSangrado(a,0)
print inorden(b)
print preorden(b)
print postorden(b)
print evaluar(b)
print buscarEnArbol(10, a)
print buscarEnArbol(110, a)
print contarElementos(a)
| [
"[email protected]"
] | |
e329ad301e71e357fe4efbf573cac5c78ea6436d | 83c92bdbab514a9630d74d24f91cbd77ec7e33f6 | /{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/settings/production.py | f8de424d0459cfbb0f1e441344801a9075bcb14b | [] | no_license | dnmellen/cookiecutter-simple-django-sqlite | 2874f890858aac2bac923d22b03c702c93a5ee0f | 51b5128b6335acc464b572fec34f5c75427d97d0 | refs/heads/master | 2021-01-23T23:51:56.699451 | 2013-09-26T10:34:46 | 2013-09-26T10:34:46 | 13,118,893 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | from .base import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('{{cookiecutter.author_name}}', '{{cookiecutter.email}}'),
)
MANAGERS = ADMINS
| [
"[email protected]"
] | |
900763796b0f0666b1b4561b359127ee4227b144 | 799f7938856a320423625c6a6a3881eacdd0e039 | /lldb/test/API/lang/cpp/class-template-type-parameter-pack/TestClassTemplateTypeParameterPack.py | 88beac18e891abbb7f08e342f38989c7c35f5851 | [
"NCSA",
"Apache-2.0",
"LLVM-exception"
] | permissive | shabalind/llvm-project | 3b90d1d8f140efe1b4f32390f68218c02c95d474 | d06e94031bcdfa43512bf7b0cdfd4b4bad3ca4e1 | refs/heads/main | 2022-10-18T04:13:17.818838 | 2021-02-04T13:06:43 | 2021-02-04T14:23:33 | 237,532,515 | 0 | 0 | Apache-2.0 | 2020-01-31T23:17:24 | 2020-01-31T23:17:23 | null | UTF-8 | Python | false | false | 4,146 | py | import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCaseClassTemplateTypeParameterPack(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=["windows"]) # Fails to read memory from target.
@no_debug_info_test
def test(self):
self.build()
self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.expect_expr("emptyTypePack", result_type="TypePack<>",
result_children=[ValueCheck(name="a", type="int")])
self.expect_expr("oneElemTypePack", result_type="TypePack<int>",
result_children=[ValueCheck(name="a", type="int")])
self.expect_expr("twoElemTypePack", result_type="TypePack<int, float>",
result_children=[ValueCheck(name="a", type="int")])
self.expect_expr("emptyAnonTypePack", result_type="AnonTypePack<>",
result_children=[ValueCheck(name="b", type="int")])
self.expect_expr("oneElemAnonTypePack", result_type="AnonTypePack<int>",
result_children=[ValueCheck(name="b", type="int")])
self.expect_expr("twoElemAnonTypePack", result_type="AnonTypePack<int, float>",
result_children=[ValueCheck(name="b", type="int")])
self.expect_expr("emptyAnonTypePackAfterTypeParam", result_type="AnonTypePackAfterTypeParam<int>",
result_children=[ValueCheck(name="c", type="int")])
self.expect_expr("oneElemAnonTypePackAfterTypeParam", result_type="AnonTypePackAfterTypeParam<int, float>",
result_children=[ValueCheck(name="c", type="int")])
self.expect_expr("emptyAnonTypePackAfterAnonTypeParam", result_type="AnonTypePackAfterAnonTypeParam<int>",
result_children=[ValueCheck(name="d", type="float")])
self.expect_expr("oneElemAnonTypePackAfterAnonTypeParam", result_type="AnonTypePackAfterAnonTypeParam<int, float>",
result_children=[ValueCheck(name="d", type="float")])
self.expect_expr("emptyTypePackAfterAnonTypeParam", result_type="TypePackAfterAnonTypeParam<int>",
result_children=[ValueCheck(name="e", type="int")])
self.expect_expr("oneElemTypePackAfterAnonTypeParam", result_type="TypePackAfterAnonTypeParam<int, float>",
result_children=[ValueCheck(name="e", type="int")])
self.expect_expr("emptyTypePackAfterTypeParam", result_type="TypePackAfterTypeParam<int>",
result_children=[ValueCheck(name="f", type="int")])
self.expect_expr("oneElemTypePackAfterTypeParam", result_type="TypePackAfterTypeParam<int, float>",
result_children=[ValueCheck(name="f", type="int")])
self.expect_expr("emptyAnonTypePackAfterNonTypeParam", result_type="AnonTypePackAfterNonTypeParam<1>",
result_children=[ValueCheck(name="g", type="int")])
self.expect_expr("oneElemAnonTypePackAfterNonTypeParam", result_type="AnonTypePackAfterNonTypeParam<1, int>",
result_children=[ValueCheck(name="g", type="int")])
self.expect_expr("emptyAnonTypePackAfterAnonNonTypeParam", result_type="AnonTypePackAfterAnonNonTypeParam<1>",
result_children=[ValueCheck(name="h", type="float")])
self.expect_expr("oneElemAnonTypePackAfterAnonNonTypeParam", result_type="AnonTypePackAfterAnonNonTypeParam<1, int>",
result_children=[ValueCheck(name="h", type="float")])
self.expect_expr("emptyTypePackAfterAnonNonTypeParam", result_type="TypePackAfterAnonNonTypeParam<1>",
result_children=[ValueCheck(name="i", type="int")])
self.expect_expr("oneElemTypePackAfterAnonNonTypeParam", result_type="TypePackAfterAnonNonTypeParam<1, int>",
result_children=[ValueCheck(name="i", type="int")])
self.expect_expr("emptyTypePackAfterNonTypeParam", result_type="TypePackAfterNonTypeParam<1>",
result_children=[ValueCheck(name="j", type="int")])
self.expect_expr("oneElemTypePackAfterNonTypeParam", result_type="TypePackAfterNonTypeParam<1, int>",
result_children=[ValueCheck(name="j", type="int")])
| [
"[email protected]"
] | |
dc244dc98111c4d6ce264ef5fb440adca03a7d4b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/3413.py | 2e81c2d19b8f1f7dccafdf9a831f4d28676309e6 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py |
import sys
def switcher(string , num):
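    # Flips the first `num` signs of the string ('+' <-> '-') and returns the
    # result; returns False when the string is shorter than `num`.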
final_string = ''
if len(string) < num:
return False
for element in string:
if element == '-' and num > 0:
final_string = final_string + '+'
elif element == '+' and num > 0:
final_string = final_string + '-'
else:
final_string = final_string + element
num = num -1
print(final_string)
return final_string
def plus_detonation(string):
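    # Strips the leading run of '+' characters and returns what remains.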
return_string = string
print (string)
for element in string:
if element == '+':
string = string [1:]
print (string)
return_string = string
elif element == '-':
return return_string
break
return return_string
print (plus_detonation('+---'))
def solver(string, num):
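    # Greedy flip count: repeatedly drop leading '+' signs, then flip the first
    # `num` signs; returns 'IMPOSSIBLE' when a required flip no longer fits.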
temp_string = string
print (string)
temp_string = plus_detonation(temp_string)
counter = 0
print (temp_string)
if temp_string == '':
return counter
while temp_string != '':
temp_string = switcher(temp_string, num)
counter += 1
if temp_string == False:
return 'IMPOSSIBLE'
break
else:
temp_string = plus_detonation(temp_string)
return counter
input_file = sys.argv[1] + '.in'
output_file = sys.argv[1] + '.out'
def inputer(input_file):
output_list = []
with open (input_file) as fin:
finx = fin.read().split('\n')
biglist = [line.strip().split(' ') for line in finx]
biglist = biglist[1:-1]
return biglist
biglist = inputer(input_file)
return_list = []
for element in biglist:
test_string = element[0]
test_num = int(element[1])
return_list.append(solver(test_string, test_num))
def outputer(output_file, return_list):
with open (output_file, 'w') as out:
x = 1
for element in return_list:
if element == 'IMPOSSIBLE':
out.write('Case #%d: %s \n' %(x, element))
else:
out.write('Case #%d: %d \n' %(x, element))
x += 1
outputer(output_file, return_list)
| [
"[email protected]"
] | |
4175299a25cd5fffb0727a205562d11d2d0d6c10 | 624456f907a9cb5f8dbc8577a7400994272bc8c4 | /reports/dashboard.py | 2354b8a60ea2e3d45b3ecff0506f988ea4eae46f | [
"MIT"
] | permissive | YangZhang-GitHub/myems-api | 27a70fb76cfd3004cd84de0e94be26317211b5f7 | fc3f8087061310d6ae0aa19e1ef077a2fe2ccddf | refs/heads/master | 2023-03-11T04:38:58.177163 | 2021-01-21T02:43:46 | 2021-01-21T02:43:46 | 329,171,534 | 0 | 0 | MIT | 2021-01-19T07:31:28 | 2021-01-13T02:32:43 | null | UTF-8 | Python | false | false | 58,330 | py | import falcon
import simplejson as json
import mysql.connector
import config
from datetime import datetime, timedelta, timezone
from core import utilities
from decimal import Decimal
class Reporting:
@staticmethod
def __init__():
pass
@staticmethod
def on_options(req, resp):
resp.status = falcon.HTTP_200
####################################################################################################################
# PROCEDURES
# Step 1: valid parameters
# Step 2: query the space
# Step 3: query energy categories
# Step 4: query associated sensors
# Step 5: query associated points
# Step 6: query child spaces
# Step 7: query base period energy input
# Step 8: query base period energy cost
# Step 9: query reporting period energy input
# Step 10: query reporting period energy cost
# Step 11: query tariff data
# Step 12: query associated sensors and points data
# Step 13: query child spaces energy input
# Step 14: query child spaces energy cost
# Step 15: construct the report
####################################################################################################################
@staticmethod
def on_get(req, resp):
print(req.params)
user_uuid = req.params.get('useruuid')
period_type = req.params.get('periodtype')
base_start_datetime_local = req.params.get('baseperiodstartdatetime')
base_end_datetime_local = req.params.get('baseperiodenddatetime')
reporting_start_datetime_local = req.params.get('reportingperiodstartdatetime')
reporting_end_datetime_local = req.params.get('reportingperiodenddatetime')
################################################################################################################
# Step 1: valid parameters
################################################################################################################
if user_uuid is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_USER_UUID')
else:
user_uuid = str.strip(user_uuid)
if len(user_uuid) != 36:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_USER_UUID')
if period_type is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
else:
period_type = str.strip(period_type)
if period_type not in ['hourly', 'daily', 'monthly', 'yearly']:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_PERIOD_TYPE')
timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
if config.utc_offset[0] == '-':
timezone_offset = -timezone_offset
base_start_datetime_utc = None
if base_start_datetime_local is not None and len(str.strip(base_start_datetime_local)) > 0:
base_start_datetime_local = str.strip(base_start_datetime_local)
try:
base_start_datetime_utc = datetime.strptime(base_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_BEGINS_DATETIME")
base_end_datetime_utc = None
if base_end_datetime_local is not None and len(str.strip(base_end_datetime_local)) > 0:
base_end_datetime_local = str.strip(base_end_datetime_local)
try:
base_end_datetime_utc = datetime.strptime(base_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_BASE_PERIOD_ENDS_DATETIME")
if base_start_datetime_utc is not None and base_end_datetime_utc is not None and \
base_start_datetime_utc >= base_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_BASE_PERIOD_ENDS_DATETIME')
if reporting_start_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_BEGINS_DATETIME")
else:
reporting_start_datetime_local = str.strip(reporting_start_datetime_local)
try:
reporting_start_datetime_utc = datetime.strptime(reporting_start_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_BEGINS_DATETIME")
if reporting_end_datetime_local is None:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_ENDS_DATETIME")
else:
reporting_end_datetime_local = str.strip(reporting_end_datetime_local)
try:
reporting_end_datetime_utc = datetime.strptime(reporting_end_datetime_local,
'%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc) - \
timedelta(minutes=timezone_offset)
except ValueError:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description="API.INVALID_REPORTING_PERIOD_ENDS_DATETIME")
if reporting_start_datetime_utc >= reporting_end_datetime_utc:
raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
description='API.INVALID_REPORTING_PERIOD_ENDS_DATETIME')
################################################################################################################
# Step 2: query the space
################################################################################################################
cnx_user = mysql.connector.connect(**config.myems_user_db)
cursor_user = cnx_user.cursor()
cursor_user.execute(" SELECT id, is_admin, privilege_id "
" FROM tbl_users "
" WHERE uuid = %s ", (user_uuid,))
row_user = cursor_user.fetchone()
if row_user is None:
if cursor_user:
cursor_user.close()
if cnx_user:
cnx_user.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, 'API.NOT_FOUND', 'API.USER_NOT_FOUND')
user = {'id': row_user[0], 'is_admin': row_user[1], 'privilege_id': row_user[2]}
if user['is_admin']:
# todo: make sure the space id is always 1 for admin
space_id = 1
else:
cursor_user.execute(" SELECT data "
" FROM tbl_privileges "
" WHERE id = %s ", (user['privilege_id'],))
row_privilege = cursor_user.fetchone()
if row_privilege is None:
if cursor_user:
cursor_user.close()
if cnx_user:
cnx_user.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, 'API.NOT_FOUND', 'API.USER_PRIVILEGE_NOT_FOUND')
privilege_data = json.loads(row_privilege[0])
if 'spaces' not in privilege_data.keys() \
or privilege_data['spaces'] is None \
or len(privilege_data['spaces']) == 0:
if cursor_user:
cursor_user.close()
if cnx_user:
cnx_user.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, 'API.NOT_FOUND', 'API.USER_PRIVILEGE_NOT_FOUND')
# todo: how to deal with multiple spaces in privilege data
space_id = privilege_data['spaces'][0]
if cursor_user:
cursor_user.close()
if cnx_user:
cnx_user.disconnect()
cnx_system = mysql.connector.connect(**config.myems_system_db)
cursor_system = cnx_system.cursor()
cursor_system.execute(" SELECT id, name, area, cost_center_id "
" FROM tbl_spaces "
" WHERE id = %s ", (space_id,))
row_space = cursor_system.fetchone()
if row_space is None:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND', description='API.SPACE_NOT_FOUND')
space = dict()
space['id'] = row_space[0]
space['name'] = row_space[1]
space['area'] = row_space[2]
space['cost_center_id'] = row_space[3]
################################################################################################################
# Step 3: query energy categories
################################################################################################################
cnx_energy = mysql.connector.connect(**config.myems_energy_db)
cursor_energy = cnx_energy.cursor()
cnx_billing = mysql.connector.connect(**config.myems_billing_db)
cursor_billing = cnx_billing.cursor()
energy_category_set = set()
# query energy categories in base period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], base_start_datetime_utc, base_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
if rows_energy_categories is not None or len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query energy categories in reporting period
cursor_energy.execute(" SELECT DISTINCT(energy_category_id) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s ",
(space['id'], reporting_start_datetime_utc, reporting_end_datetime_utc))
rows_energy_categories = cursor_energy.fetchall()
if rows_energy_categories is not None or len(rows_energy_categories) > 0:
for row_energy_category in rows_energy_categories:
energy_category_set.add(row_energy_category[0])
# query all energy categories in base period and reporting period
cursor_system.execute(" SELECT id, name, unit_of_measure, kgce, kgco2e "
" FROM tbl_energy_categories "
" ORDER BY id ", )
rows_energy_categories = cursor_system.fetchall()
if rows_energy_categories is None or len(rows_energy_categories) == 0:
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cursor_billing:
cursor_billing.close()
if cnx_billing:
cnx_billing.disconnect()
raise falcon.HTTPError(falcon.HTTP_404,
title='API.NOT_FOUND',
description='API.ENERGY_CATEGORY_NOT_FOUND')
energy_category_dict = dict()
for row_energy_category in rows_energy_categories:
if row_energy_category[0] in energy_category_set:
energy_category_dict[row_energy_category[0]] = {"name": row_energy_category[1],
"unit_of_measure": row_energy_category[2],
"kgce": row_energy_category[3],
"kgco2e": row_energy_category[4]}
################################################################################################################
# Step 4: query associated sensors
################################################################################################################
point_list = list()
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_sensors se, tbl_spaces_sensors spse, "
" tbl_points po, tbl_sensors_points sepo "
" WHERE sp.id = %s AND sp.id = spse.space_id AND spse.sensor_id = se.id "
" AND se.id = sepo.sensor_id AND sepo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 5: query associated points
################################################################################################################
cursor_system.execute(" SELECT po.id, po.name, po.units, po.object_type "
" FROM tbl_spaces sp, tbl_spaces_points sppo, tbl_points po "
" WHERE sp.id = %s AND sp.id = sppo.space_id AND sppo.point_id = po.id "
" ORDER BY po.id ", (space['id'], ))
rows_points = cursor_system.fetchall()
if rows_points is not None and len(rows_points) > 0:
for row in rows_points:
point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})
################################################################################################################
# Step 6: query child spaces
################################################################################################################
child_space_list = list()
cursor_system.execute(" SELECT id, name "
" FROM tbl_spaces "
" WHERE parent_space_id = %s "
" ORDER BY id ", (space['id'], ))
rows_child_spaces = cursor_system.fetchall()
if rows_child_spaces is not None and len(rows_child_spaces) > 0:
for row in rows_child_spaces:
child_space_list.append({"id": row[0], "name": row[1]})
################################################################################################################
# Step 7: query base period energy input
################################################################################################################
base_input = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
kgce = energy_category_dict[energy_category_id]['kgce']
kgco2e = energy_category_dict[energy_category_id]['kgco2e']
base_input[energy_category_id] = dict()
base_input[energy_category_id]['timestamps'] = list()
base_input[energy_category_id]['values'] = list()
base_input[energy_category_id]['subtotal'] = Decimal(0.0)
base_input[energy_category_id]['subtotal_in_kgce'] = Decimal(0.0)
base_input[energy_category_id]['subtotal_in_kgco2e'] = Decimal(0.0)
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically = utilities.aggregate_hourly_data_by_period(rows_space_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
base_input[energy_category_id]['timestamps'].append(current_datetime)
base_input[energy_category_id]['values'].append(actual_value)
base_input[energy_category_id]['subtotal'] += actual_value
base_input[energy_category_id]['subtotal_in_kgce'] += actual_value * kgce
base_input[energy_category_id]['subtotal_in_kgco2e'] += actual_value * kgco2e
################################################################################################################
# Step 8: query base period energy cost
################################################################################################################
base_cost = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
base_cost[energy_category_id] = dict()
base_cost[energy_category_id]['timestamps'] = list()
base_cost[energy_category_id]['values'] = list()
base_cost[energy_category_id]['subtotal'] = Decimal(0.0)
cursor_billing.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
base_start_datetime_utc,
base_end_datetime_utc))
rows_space_hourly = cursor_billing.fetchall()
rows_space_periodically = utilities.aggregate_hourly_data_by_period(rows_space_hourly,
base_start_datetime_utc,
base_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
base_cost[energy_category_id]['timestamps'].append(current_datetime)
base_cost[energy_category_id]['values'].append(actual_value)
base_cost[energy_category_id]['subtotal'] += actual_value
################################################################################################################
# Step 9: query reporting period energy input
################################################################################################################
reporting_input = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
kgce = energy_category_dict[energy_category_id]['kgce']
kgco2e = energy_category_dict[energy_category_id]['kgco2e']
reporting_input[energy_category_id] = dict()
reporting_input[energy_category_id]['timestamps'] = list()
reporting_input[energy_category_id]['values'] = list()
reporting_input[energy_category_id]['subtotal'] = Decimal(0.0)
reporting_input[energy_category_id]['subtotal_in_kgce'] = Decimal(0.0)
reporting_input[energy_category_id]['subtotal_in_kgco2e'] = Decimal(0.0)
reporting_input[energy_category_id]['toppeak'] = Decimal(0.0)
reporting_input[energy_category_id]['onpeak'] = Decimal(0.0)
reporting_input[energy_category_id]['midpeak'] = Decimal(0.0)
reporting_input[energy_category_id]['offpeak'] = Decimal(0.0)
cursor_energy.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_space_hourly = cursor_energy.fetchall()
rows_space_periodically = utilities.aggregate_hourly_data_by_period(rows_space_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
reporting_input[energy_category_id]['timestamps'].append(current_datetime)
reporting_input[energy_category_id]['values'].append(actual_value)
reporting_input[energy_category_id]['subtotal'] += actual_value
reporting_input[energy_category_id]['subtotal_in_kgce'] += actual_value * kgce
reporting_input[energy_category_id]['subtotal_in_kgco2e'] += actual_value * kgco2e
energy_category_tariff_dict = utilities.get_energy_category_peak_types(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
for row in rows_space_hourly:
peak_type = energy_category_tariff_dict.get(row[0], None)
if peak_type == 'toppeak':
reporting_input[energy_category_id]['toppeak'] += row[1]
elif peak_type == 'onpeak':
reporting_input[energy_category_id]['onpeak'] += row[1]
elif peak_type == 'midpeak':
reporting_input[energy_category_id]['midpeak'] += row[1]
elif peak_type == 'offpeak':
reporting_input[energy_category_id]['offpeak'] += row[1]
################################################################################################################
# Step 10: query reporting period energy cost
################################################################################################################
reporting_cost = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
reporting_cost[energy_category_id] = dict()
reporting_cost[energy_category_id]['timestamps'] = list()
reporting_cost[energy_category_id]['values'] = list()
reporting_cost[energy_category_id]['subtotal'] = Decimal(0.0)
reporting_cost[energy_category_id]['toppeak'] = Decimal(0.0)
reporting_cost[energy_category_id]['onpeak'] = Decimal(0.0)
reporting_cost[energy_category_id]['midpeak'] = Decimal(0.0)
reporting_cost[energy_category_id]['offpeak'] = Decimal(0.0)
cursor_billing.execute(" SELECT start_datetime_utc, actual_value "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows_space_hourly = cursor_billing.fetchall()
rows_space_periodically = utilities.aggregate_hourly_data_by_period(rows_space_hourly,
reporting_start_datetime_utc,
reporting_end_datetime_utc,
period_type)
for row_space_periodically in rows_space_periodically:
current_datetime_local = row_space_periodically[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
if period_type == 'hourly':
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
elif period_type == 'daily':
current_datetime = current_datetime_local.strftime('%Y-%m-%d')
elif period_type == 'monthly':
current_datetime = current_datetime_local.strftime('%Y-%m')
elif period_type == 'yearly':
current_datetime = current_datetime_local.strftime('%Y')
actual_value = Decimal(0.0) if row_space_periodically[1] is None else row_space_periodically[1]
reporting_cost[energy_category_id]['timestamps'].append(current_datetime)
reporting_cost[energy_category_id]['values'].append(actual_value)
reporting_cost[energy_category_id]['subtotal'] += actual_value
energy_category_tariff_dict = utilities.get_energy_category_peak_types(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
for row in rows_space_hourly:
peak_type = energy_category_tariff_dict.get(row[0], None)
if peak_type == 'toppeak':
reporting_cost[energy_category_id]['toppeak'] += row[1]
elif peak_type == 'onpeak':
reporting_cost[energy_category_id]['onpeak'] += row[1]
elif peak_type == 'midpeak':
reporting_cost[energy_category_id]['midpeak'] += row[1]
elif peak_type == 'offpeak':
reporting_cost[energy_category_id]['offpeak'] += row[1]
################################################################################################################
# Step 11: query tariff data
################################################################################################################
parameters_data = dict()
parameters_data['names'] = list()
parameters_data['timestamps'] = list()
parameters_data['values'] = list()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
energy_category_tariff_dict = utilities.get_energy_category_tariffs(space['cost_center_id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc)
tariff_timestamp_list = list()
tariff_value_list = list()
for k, v in energy_category_tariff_dict.items():
# convert k from utc to local
k = k + timedelta(minutes=timezone_offset)
tariff_timestamp_list.append(k.isoformat()[0:19][0:19])
tariff_value_list.append(v)
parameters_data['names'].append('TARIFF-' + energy_category_dict[energy_category_id]['name'])
parameters_data['timestamps'].append(tariff_timestamp_list)
parameters_data['values'].append(tariff_value_list)
################################################################################################################
# Step 12: query associated sensors and points data
################################################################################################################
cnx_historical = mysql.connector.connect(**config.myems_historical_db)
cursor_historical = cnx_historical.cursor()
for point in point_list:
point_values = []
point_timestamps = []
if point['object_type'] == 'ANALOG_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_analog_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'ENERGY_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_energy_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s "
" ORDER BY utc_date_time ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
elif point['object_type'] == 'DIGITAL_VALUE':
query = (" SELECT utc_date_time, actual_value "
" FROM tbl_digital_value "
" WHERE point_id = %s "
" AND utc_date_time BETWEEN %s AND %s ")
cursor_historical.execute(query, (point['id'],
reporting_start_datetime_utc,
reporting_end_datetime_utc))
rows = cursor_historical.fetchall()
if rows is not None and len(rows) > 0:
for row in rows:
current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
timedelta(minutes=timezone_offset)
current_datetime = current_datetime_local.strftime('%Y-%m-%dT%H:%M:%S')
point_timestamps.append(current_datetime)
point_values.append(row[1])
parameters_data['names'].append(point['name'] + ' (' + point['units'] + ')')
parameters_data['timestamps'].append(point_timestamps)
parameters_data['values'].append(point_values)
################################################################################################################
# Step 13: query child spaces energy input
################################################################################################################
child_space_input = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
child_space_input[energy_category_id] = dict()
child_space_input[energy_category_id]['child_space_names'] = list()
child_space_input[energy_category_id]['subtotals'] = list()
child_space_input[energy_category_id]['subtotals_in_kgce'] = list()
child_space_input[energy_category_id]['subtotals_in_kgco2e'] = list()
kgce = energy_category_dict[energy_category_id]['kgce']
kgco2e = energy_category_dict[energy_category_id]['kgco2e']
for child_space in child_space_list:
child_space_input[energy_category_id]['child_space_names'].append(child_space['name'])
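                    # Sum this child space's hourly energy input over the reporting period; the subtotal is later converted with the category's kgce/kgco2e factors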
cursor_energy.execute(" SELECT SUM(actual_value) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(child_space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
row_subtotal = cursor_energy.fetchone()
subtotal = Decimal(0.0) if (row_subtotal is None or row_subtotal[0] is None) else row_subtotal[0]
child_space_input[energy_category_id]['subtotals'].append(subtotal)
child_space_input[energy_category_id]['subtotals_in_kgce'].append(subtotal * kgce)
child_space_input[energy_category_id]['subtotals_in_kgco2e'].append(subtotal * kgco2e)
################################################################################################################
# Step 14: query child spaces energy cost
################################################################################################################
child_space_cost = dict()
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
child_space_cost[energy_category_id] = dict()
child_space_cost[energy_category_id]['child_space_names'] = list()
child_space_cost[energy_category_id]['subtotals'] = list()
for child_space in child_space_list:
child_space_cost[energy_category_id]['child_space_names'].append(child_space['name'])
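                    # Sum this child space's hourly cost over the reporting period; missing data counts as zero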
cursor_billing.execute(" SELECT SUM(actual_value) "
" FROM tbl_space_input_category_hourly "
" WHERE space_id = %s "
" AND energy_category_id = %s "
" AND start_datetime_utc >= %s "
" AND start_datetime_utc < %s "
" ORDER BY start_datetime_utc ",
(child_space['id'],
energy_category_id,
reporting_start_datetime_utc,
reporting_end_datetime_utc))
row_subtotal = cursor_billing.fetchone()
subtotal = Decimal(0.0) if (row_subtotal is None or row_subtotal[0] is None) else row_subtotal[0]
child_space_cost[energy_category_id]['subtotals'].append(subtotal)
################################################################################################################
# Step 15: construct the report
################################################################################################################
if cursor_system:
cursor_system.close()
if cnx_system:
cnx_system.disconnect()
if cursor_energy:
cursor_energy.close()
if cnx_energy:
cnx_energy.disconnect()
if cursor_billing:
cursor_billing.close()
if cnx_billing:
cnx_billing.disconnect()
if cursor_historical:
cursor_historical.close()
if cnx_historical:
cnx_historical.disconnect()
result = dict()
result['space'] = dict()
result['space']['name'] = space['name']
result['space']['area'] = space['area']
result['base_period_input'] = dict()
result['base_period_input']['names'] = list()
result['base_period_input']['units'] = list()
result['base_period_input']['timestamps'] = list()
result['base_period_input']['values'] = list()
result['base_period_input']['subtotals'] = list()
result['base_period_input']['subtotals_in_kgce'] = list()
result['base_period_input']['subtotals_in_kgco2e'] = list()
result['base_period_input']['total_in_kgce'] = Decimal(0.0)
result['base_period_input']['total_in_kgco2e'] = Decimal(0.0)
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period_input']['names'].append(
energy_category_dict[energy_category_id]['name'])
result['base_period_input']['units'].append(
energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period_input']['timestamps'].append(
base_input[energy_category_id]['timestamps'])
result['base_period_input']['values'].append(
base_input[energy_category_id]['values'])
result['base_period_input']['subtotals'].append(
base_input[energy_category_id]['subtotal'])
result['base_period_input']['subtotals_in_kgce'].append(
base_input[energy_category_id]['subtotal_in_kgce'])
result['base_period_input']['subtotals_in_kgco2e'].append(
base_input[energy_category_id]['subtotal_in_kgco2e'])
result['base_period_input']['total_in_kgce'] += \
base_input[energy_category_id]['subtotal_in_kgce']
result['base_period_input']['total_in_kgco2e'] += \
base_input[energy_category_id]['subtotal_in_kgco2e']
result['base_period_cost'] = dict()
result['base_period_cost']['names'] = list()
result['base_period_cost']['units'] = list()
result['base_period_cost']['timestamps'] = list()
result['base_period_cost']['values'] = list()
result['base_period_cost']['subtotals'] = list()
result['base_period_cost']['total'] = Decimal(0.0)
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['base_period_cost']['names'].append(
energy_category_dict[energy_category_id]['name'])
result['base_period_cost']['units'].append(
energy_category_dict[energy_category_id]['unit_of_measure'])
result['base_period_cost']['timestamps'].append(
base_cost[energy_category_id]['timestamps'])
result['base_period_cost']['values'].append(
base_cost[energy_category_id]['values'])
result['base_period_cost']['subtotals'].append(
base_cost[energy_category_id]['subtotal'])
result['base_period_cost']['total'] += base_cost[energy_category_id]['subtotal']
result['reporting_period_input'] = dict()
result['reporting_period_input']['names'] = list()
result['reporting_period_input']['energy_category_ids'] = list()
result['reporting_period_input']['units'] = list()
result['reporting_period_input']['timestamps'] = list()
result['reporting_period_input']['values'] = list()
result['reporting_period_input']['subtotals'] = list()
result['reporting_period_input']['subtotals_in_kgce'] = list()
result['reporting_period_input']['subtotals_in_kgco2e'] = list()
result['reporting_period_input']['subtotals_per_unit_area'] = list()
result['reporting_period_input']['toppeaks'] = list()
result['reporting_period_input']['onpeaks'] = list()
result['reporting_period_input']['midpeaks'] = list()
result['reporting_period_input']['offpeaks'] = list()
result['reporting_period_input']['increment_rates'] = list()
result['reporting_period_input']['total_in_kgce'] = Decimal(0.0)
result['reporting_period_input']['total_in_kgco2e'] = Decimal(0.0)
result['reporting_period_input']['increment_rate_in_kgce'] = Decimal(0.0)
result['reporting_period_input']['increment_rate_in_kgco2e'] = Decimal(0.0)
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period_input']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period_input']['energy_category_ids'].append(energy_category_id)
result['reporting_period_input']['units'].append(
energy_category_dict[energy_category_id]['unit_of_measure'])
result['reporting_period_input']['timestamps'].append(
reporting_input[energy_category_id]['timestamps'])
result['reporting_period_input']['values'].append(
reporting_input[energy_category_id]['values'])
result['reporting_period_input']['subtotals'].append(
reporting_input[energy_category_id]['subtotal'])
result['reporting_period_input']['subtotals_in_kgce'].append(
reporting_input[energy_category_id]['subtotal_in_kgce'])
result['reporting_period_input']['subtotals_in_kgco2e'].append(
reporting_input[energy_category_id]['subtotal_in_kgco2e'])
result['reporting_period_input']['subtotals_per_unit_area'].append(
reporting_input[energy_category_id]['subtotal'] / space['area']
if space['area'] > 0.0 else None)
result['reporting_period_input']['toppeaks'].append(
reporting_input[energy_category_id]['toppeak'])
result['reporting_period_input']['onpeaks'].append(
reporting_input[energy_category_id]['onpeak'])
result['reporting_period_input']['midpeaks'].append(
reporting_input[energy_category_id]['midpeak'])
result['reporting_period_input']['offpeaks'].append(
reporting_input[energy_category_id]['offpeak'])
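                # Increment rate = (reporting subtotal - base subtotal) / base subtotal; None when the base subtotal is zero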
result['reporting_period_input']['increment_rates'].append(
(reporting_input[energy_category_id]['subtotal'] -
base_input[energy_category_id]['subtotal']) /
base_input[energy_category_id]['subtotal']
if base_input[energy_category_id]['subtotal'] > 0.0 else None)
result['reporting_period_input']['total_in_kgce'] += \
reporting_input[energy_category_id]['subtotal_in_kgce']
result['reporting_period_input']['total_in_kgco2e'] += \
reporting_input[energy_category_id]['subtotal_in_kgco2e']
        result['reporting_period_input']['total_in_kgco2e_per_unit_area'] = \
            result['reporting_period_input']['total_in_kgco2e'] / space['area'] if space['area'] > 0.0 else None
result['reporting_period_input']['increment_rate_in_kgce'] = \
(result['reporting_period_input']['total_in_kgce'] - result['base_period_input']['total_in_kgce']) / \
result['base_period_input']['total_in_kgce'] \
if result['base_period_input']['total_in_kgce'] > Decimal(0.0) else None
        result['reporting_period_input']['total_in_kgce_per_unit_area'] = \
            result['reporting_period_input']['total_in_kgce'] / space['area'] if space['area'] > 0.0 else None
result['reporting_period_input']['increment_rate_in_kgco2e'] = \
(result['reporting_period_input']['total_in_kgco2e'] - result['base_period_input']['total_in_kgco2e']) / \
result['base_period_input']['total_in_kgco2e'] \
if result['base_period_input']['total_in_kgco2e'] > Decimal(0.0) else None
result['reporting_period_cost'] = dict()
result['reporting_period_cost']['names'] = list()
result['reporting_period_cost']['energy_category_ids'] = list()
result['reporting_period_cost']['units'] = list()
result['reporting_period_cost']['timestamps'] = list()
result['reporting_period_cost']['values'] = list()
result['reporting_period_cost']['subtotals'] = list()
result['reporting_period_cost']['subtotals_per_unit_area'] = list()
result['reporting_period_cost']['toppeaks'] = list()
result['reporting_period_cost']['onpeaks'] = list()
result['reporting_period_cost']['midpeaks'] = list()
result['reporting_period_cost']['offpeaks'] = list()
result['reporting_period_cost']['increment_rates'] = list()
result['reporting_period_cost']['total'] = Decimal(0.0)
result['reporting_period_cost']['total_per_unit_area'] = Decimal(0.0)
result['reporting_period_cost']['total_increment_rate'] = Decimal(0.0)
result['reporting_period_cost']['total_unit'] = config.currency_unit
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['reporting_period_cost']['names'].append(energy_category_dict[energy_category_id]['name'])
result['reporting_period_cost']['energy_category_ids'].append(energy_category_id)
result['reporting_period_cost']['units'].append(config.currency_unit)
result['reporting_period_cost']['timestamps'].append(
reporting_cost[energy_category_id]['timestamps'])
result['reporting_period_cost']['values'].append(
reporting_cost[energy_category_id]['values'])
result['reporting_period_cost']['subtotals'].append(
reporting_cost[energy_category_id]['subtotal'])
result['reporting_period_cost']['subtotals_per_unit_area'].append(
reporting_cost[energy_category_id]['subtotal'] / space['area']
if space['area'] > 0.0 else None)
result['reporting_period_cost']['toppeaks'].append(
reporting_cost[energy_category_id]['toppeak'])
result['reporting_period_cost']['onpeaks'].append(
reporting_cost[energy_category_id]['onpeak'])
result['reporting_period_cost']['midpeaks'].append(
reporting_cost[energy_category_id]['midpeak'])
result['reporting_period_cost']['offpeaks'].append(
reporting_cost[energy_category_id]['offpeak'])
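                # Cost increment rate relative to the base period subtotal; None when the base subtotal is zero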
result['reporting_period_cost']['increment_rates'].append(
(reporting_cost[energy_category_id]['subtotal'] -
base_cost[energy_category_id]['subtotal']) /
base_cost[energy_category_id]['subtotal']
if base_cost[energy_category_id]['subtotal'] > 0.0 else None)
result['reporting_period_cost']['total'] += reporting_cost[energy_category_id]['subtotal']
result['reporting_period_cost']['total_per_unit_area'] = \
result['reporting_period_cost']['total'] / space['area'] if space['area'] > 0.0 else None
        result['reporting_period_cost']['total_increment_rate'] = \
            (result['reporting_period_cost']['total'] - result['base_period_cost']['total']) / \
            result['base_period_cost']['total'] \
            if result['base_period_cost']['total'] > Decimal(0.0) else None
result['parameters'] = {
"names": parameters_data['names'],
"timestamps": parameters_data['timestamps'],
"values": parameters_data['values']
}
result['child_space_input'] = dict()
result['child_space_input']['energy_category_names'] = list() # 1D array [energy category]
result['child_space_input']['units'] = list() # 1D array [energy category]
result['child_space_input']['child_space_names_array'] = list() # 2D array [energy category][child space]
result['child_space_input']['subtotals_array'] = list() # 2D array [energy category][child space]
result['child_space_input']['subtotals_in_kgce_array'] = list() # 2D array [energy category][child space]
result['child_space_input']['subtotals_in_kgco2e_array'] = list() # 2D array [energy category][child space]
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['child_space_input']['energy_category_names'].append(
energy_category_dict[energy_category_id]['name'])
result['child_space_input']['units'].append(
energy_category_dict[energy_category_id]['unit_of_measure'])
result['child_space_input']['child_space_names_array'].append(
child_space_input[energy_category_id]['child_space_names'])
result['child_space_input']['subtotals_array'].append(
child_space_input[energy_category_id]['subtotals'])
result['child_space_input']['subtotals_in_kgce_array'].append(
child_space_input[energy_category_id]['subtotals_in_kgce'])
result['child_space_input']['subtotals_in_kgco2e_array'].append(
child_space_input[energy_category_id]['subtotals_in_kgco2e'])
result['child_space_cost'] = dict()
result['child_space_cost']['energy_category_names'] = list() # 1D array [energy category]
result['child_space_cost']['units'] = list() # 1D array [energy category]
result['child_space_cost']['child_space_names_array'] = list() # 2D array [energy category][child space]
result['child_space_cost']['subtotals_array'] = list() # 2D array [energy category][child space]
if energy_category_set is not None and len(energy_category_set) > 0:
for energy_category_id in energy_category_set:
result['child_space_cost']['energy_category_names'].append(
energy_category_dict[energy_category_id]['name'])
result['child_space_cost']['units'].append(config.currency_unit)
result['child_space_cost']['child_space_names_array'].append(
child_space_cost[energy_category_id]['child_space_names'])
result['child_space_cost']['subtotals_array'].append(
child_space_cost[energy_category_id]['subtotals'])
resp.body = json.dumps(result)
| [
"[email protected]"
] | |
bbb9ab49c765200446265a8b66843165bf662912 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/c8697a4864f95672c58598eff207548b0bcc63e5-<__init__>-bug.py | f1f08670f92375243709b5a78775622ba949683e | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | def __init__(self, argument_spec):
self.spec = argument_spec
self.module = None
self.init_module()
self.interface = self.module.params['interface']
self.mode = self.module.params['mode']
self.state = self.module.params['state']
self.access_vlan = self.module.params['access_vlan']
self.native_vlan = self.module.params['native_vlan']
self.trunk_vlans = self.module.params['trunk_vlans']
self.host = self.module.params['host']
self.username = self.module.params['username']
self.port = self.module.params['port']
self.changed = False
self.updates_cmd = list()
self.results = dict()
self.proposed = dict()
self.existing = dict()
self.end_state = dict()
self.intf_info = dict()
self.intf_type = None | [
"[email protected]"
] | |
fa8f1549524f0eccd088d762cb8dca59fd07e621 | 3e99db7d6c910bf9776561802fe60256e74a375b | /virtual/bin/pip3.6 | 0f29a919e0407b2f0d9ee5eff4eaeacae0191e8f | [
"MIT"
] | permissive | margaret254/Awwards | 979f7a196b1f52ab0d9c9d6145df1f70703f497b | 9af3de00c4200b86c92b16b141a4642fe8f02c87 | refs/heads/master | 2022-05-17T03:51:46.146152 | 2020-01-17T12:49:14 | 2020-01-17T12:49:14 | 233,032,878 | 0 | 1 | null | 2022-04-22T22:59:00 | 2020-01-10T11:26:53 | Python | UTF-8 | Python | false | false | 250 | 6 | #!/home/moringa/Documents/django/Awwards/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | |
5f24fc903847f0ff1332cc359731d9f2f9efbdcc | fdf1e1f4efc51bc024311d44a2fa4524f9b88bce | /girleffect/countries/migrations/0051_p1_changes.py | 2cce8514dd59a78260aa4e6259c718870d2d8fee | [] | no_license | girleffect/girleffect.org | 8327ffd6bbd1103033c92fbd4cbe5461aa1c7f03 | 55731b1c024f207211a161fd6d3ca796beea7a61 | refs/heads/master | 2023-04-07T21:40:43.910892 | 2022-06-14T11:50:21 | 2022-06-14T11:50:21 | 112,452,828 | 1 | 2 | null | 2023-04-01T12:05:55 | 2017-11-29T09:13:18 | Python | UTF-8 | Python | false | false | 16,084 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-18 12:13
from __future__ import unicode_literals
from django.db import migrations, models
import girleffect.utils.models
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
import wagtail.wagtailsnippets.blocks
class Migration(migrations.Migration):
dependencies = [
('countries', '0050_auto_20180105_1522'),
]
operations = [
migrations.AlterField(
model_name='countrypage',
name='body',
field=wagtail.wagtailcore.fields.StreamField((('heading', wagtail.wagtailcore.blocks.CharBlock(classname='full title')), ('body_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('large_text', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], label='Large Text', required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('extendable_body', wagtail.wagtailcore.blocks.StructBlock((('body_upper', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], label='Body Text')), ('extend_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the extend button', max_length=255, required=False)), ('collapse_button_text', wagtail.wagtailcore.blocks.CharBlock(help_text='Customise text for the collapse button', max_length=255, required=False)), ('body_lower', wagtail.wagtailcore.blocks.RichTextBlock(features=['h2', 'h3', 'h4', 'bold', 'italic', 'link', 'ol', 'ul', 'hr'], help_text='This body field is invisible until the user clicks the expand button', label='Extended body text')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('body_heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('image', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('caption', wagtail.wagtailcore.blocks.CharBlock(required=False))))), ('quote', wagtail.wagtailcore.blocks.StructBlock((('quotes', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=True)), ('citation', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('link_block', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, 
required=False))), required=False)), ('drop_shadow_options', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))))), ('quote_mark_hex', wagtail.wagtailcore.blocks.CharBlock(label='Quote Mark Hex Code', max_length=7, required=False)))), icon='openquote', template='blocks/quote_block.html')), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('video', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(max_length=30, required=False)), ('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=255, required=False)), ('youtube_embed', wagtail.wagtailembeds.blocks.EmbedBlock(help_text="Your YouTube URL goes here. Only YouTube video URLs will be accepted. The custom 'play' button will be created for valid YouTube URLs.", label='YouTube Video URL')), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Girl Effect YouTube Video')), ('slider', wagtail.wagtailcore.blocks.StructBlock((('slider_delay', wagtail.wagtailcore.blocks.IntegerBlock(help_text='Enter the milliseconds of the delay between each slide', required=False)), ('slider_items', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('overview_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item overview title', max_length=255, required=False)), ('overview_title_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('overview_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item overview text', required=False)), ('overview_text_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('textbox_title', 
wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item textbox title', max_length=255, required=False)), ('textbox_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item textbox text', required=False)), ('textbox_link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False))))))))), ('carousel_block', wagtail.wagtailcore.blocks.StreamBlock((('carousel_item', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('overview_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item overview title', max_length=255, required=False)), ('overview_title_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('overview_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item overview text', required=False)), ('overview_text_shadow', wagtail.wagtailcore.blocks.StructBlock((('drop_shadow_is_on', wagtail.wagtailcore.blocks.BooleanBlock(help_text='Show or hide drop shadow', label='Drop Shadow Toggle', required=False)), ('text_hex', wagtail.wagtailcore.blocks.CharBlock(label='Text Hex Code', max_length=7, required=False))), required=False)), ('textbox_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Slider item textbox title', max_length=255, required=False)), ('textbox_text', wagtail.wagtailcore.blocks.TextBlock(help_text='Slider item textbox text', required=False)), ('textbox_link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('slide_title', wagtail.wagtailcore.blocks.CharBlock(help_text='Title to appear at bottom of carousel, for example "Youth Brands"', max_length=255, required=False)), ('slide_logo', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('slide_title_hex', wagtail.wagtailcore.blocks.CharBlock(help_text='Add valid hex for slide title and chevron colours.', max_length=7, required=False))))),), label='Carousel', max_num=3, min_num=2)), ('media_text_overlay', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(help_text='Appears above the module.', label='Title Text', max_length=255, required=False)), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('logo', wagtail.wagtailimages.blocks.ImageChooserBlock(label='Title Logo', required=False)), 
('text', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'ol', 'ul', 'link', 'document-link'], max_length=75, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Full Width Media with Text Overlay')), ('list_block', wagtail.wagtailcore.blocks.StructBlock((('list_block', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('description', wagtail.wagtailcore.blocks.RichTextBlock(features=['bold', 'italic', 'link', 'document-link'], icon='pilcrow', max_length=250, required=False)), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)))))), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))))), ('link_row', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.StructBlock((('external_link', wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False)))), icon='link', template='blocks/inline_link_block.html')), ('anchor', wagtail.wagtailcore.blocks.StructBlock((('anchor', wagtail.wagtailcore.blocks.CharBlock()),))), ('statistic', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=255, required=False)), ('statistics', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.Statistic))), ('link', wagtail.wagtailcore.blocks.StructBlock((('external_link', 
wagtail.wagtailcore.blocks.URLBlock(label='External Link', required=False)), ('internal_link', wagtail.wagtailcore.blocks.PageChooserBlock(label='Internal Link', required=False)), ('internal_link_anchor', wagtail.wagtailcore.blocks.CharBlock(label='Internal Link anchor', required=False)), ('document_link', wagtail.wagtaildocs.blocks.DocumentChooserBlock(label='Document Link', required=False)), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=255, required=False))), required=False)), ('customisation', wagtail.wagtailcore.blocks.StructBlock((('background_image', wagtail.wagtailimages.blocks.ImageChooserBlock(required=False)), ('background_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False)), ('heading_hex', wagtail.wagtailcore.blocks.CharBlock(max_length=7, required=False))), required=False))), label='Statistic Block')), ('call_to_action', wagtail.wagtailsnippets.blocks.SnippetChooserBlock(girleffect.utils.models.CallToActionSnippet, template='blocks/call_to_action.html')))),
),
migrations.AlterField(
model_name='countrypage',
name='hero_strapline',
field=models.CharField(blank=True, max_length=255),
),
]
| [
"[email protected]"
] | |
74d87371bae04314db7cc6408b89dcbd74ccc9ab | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-config/huaweicloudsdkconfig/v1/model/__init__.py | 6c754c93f34dbc7c3033769264f1ce7cc41e9af4 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 23,171 | py | # coding: utf-8
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkconfig.v1.model.account_aggregation_source import AccountAggregationSource
from huaweicloudsdkconfig.v1.model.aggregate_compliance_detail_request import AggregateComplianceDetailRequest
from huaweicloudsdkconfig.v1.model.aggregate_discovered_resource_counts_request import AggregateDiscoveredResourceCountsRequest
from huaweicloudsdkconfig.v1.model.aggregate_discovered_resources_request import AggregateDiscoveredResourcesRequest
from huaweicloudsdkconfig.v1.model.aggregate_policy_assignment_detail_request import AggregatePolicyAssignmentDetailRequest
from huaweicloudsdkconfig.v1.model.aggregate_policy_assignments import AggregatePolicyAssignments
from huaweicloudsdkconfig.v1.model.aggregate_policy_assignments_filters import AggregatePolicyAssignmentsFilters
from huaweicloudsdkconfig.v1.model.aggregate_policy_assignments_request import AggregatePolicyAssignmentsRequest
from huaweicloudsdkconfig.v1.model.aggregate_policy_compliance_summary_result import AggregatePolicyComplianceSummaryResult
from huaweicloudsdkconfig.v1.model.aggregate_policy_states_request import AggregatePolicyStatesRequest
from huaweicloudsdkconfig.v1.model.aggregate_resource_config_request import AggregateResourceConfigRequest
from huaweicloudsdkconfig.v1.model.aggregated_source_status import AggregatedSourceStatus
from huaweicloudsdkconfig.v1.model.aggregation_authorization_request import AggregationAuthorizationRequest
from huaweicloudsdkconfig.v1.model.aggregation_authorization_resp import AggregationAuthorizationResp
from huaweicloudsdkconfig.v1.model.channel_config_body import ChannelConfigBody
from huaweicloudsdkconfig.v1.model.collect_all_resources_summary_request import CollectAllResourcesSummaryRequest
from huaweicloudsdkconfig.v1.model.collect_all_resources_summary_response import CollectAllResourcesSummaryResponse
from huaweicloudsdkconfig.v1.model.collect_conformance_pack_compliance_summary_request import CollectConformancePackComplianceSummaryRequest
from huaweicloudsdkconfig.v1.model.collect_conformance_pack_compliance_summary_response import CollectConformancePackComplianceSummaryResponse
from huaweicloudsdkconfig.v1.model.compliance import Compliance
from huaweicloudsdkconfig.v1.model.configuration_aggregator_request import ConfigurationAggregatorRequest
from huaweicloudsdkconfig.v1.model.configuration_aggregator_resp import ConfigurationAggregatorResp
from huaweicloudsdkconfig.v1.model.conformance_pack import ConformancePack
from huaweicloudsdkconfig.v1.model.conformance_pack_compliance import ConformancePackCompliance
from huaweicloudsdkconfig.v1.model.conformance_pack_compliance_detail import ConformancePackComplianceDetail
from huaweicloudsdkconfig.v1.model.conformance_pack_compliance_summary import ConformancePackComplianceSummary
from huaweicloudsdkconfig.v1.model.conformance_pack_request_body import ConformancePackRequestBody
from huaweicloudsdkconfig.v1.model.conformance_pack_score import ConformancePackScore
from huaweicloudsdkconfig.v1.model.conformance_pack_template import ConformancePackTemplate
from huaweicloudsdkconfig.v1.model.count_all_resources_request import CountAllResourcesRequest
from huaweicloudsdkconfig.v1.model.count_all_resources_response import CountAllResourcesResponse
from huaweicloudsdkconfig.v1.model.create_aggregation_authorization_request import CreateAggregationAuthorizationRequest
from huaweicloudsdkconfig.v1.model.create_aggregation_authorization_response import CreateAggregationAuthorizationResponse
from huaweicloudsdkconfig.v1.model.create_configuration_aggregator_request import CreateConfigurationAggregatorRequest
from huaweicloudsdkconfig.v1.model.create_configuration_aggregator_response import CreateConfigurationAggregatorResponse
from huaweicloudsdkconfig.v1.model.create_conformance_pack_request import CreateConformancePackRequest
from huaweicloudsdkconfig.v1.model.create_conformance_pack_response import CreateConformancePackResponse
from huaweicloudsdkconfig.v1.model.create_organization_policy_assignment_request import CreateOrganizationPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.create_organization_policy_assignment_response import CreateOrganizationPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.create_policy_assignments_request import CreatePolicyAssignmentsRequest
from huaweicloudsdkconfig.v1.model.create_policy_assignments_response import CreatePolicyAssignmentsResponse
from huaweicloudsdkconfig.v1.model.create_stored_query_request import CreateStoredQueryRequest
from huaweicloudsdkconfig.v1.model.create_stored_query_response import CreateStoredQueryResponse
from huaweicloudsdkconfig.v1.model.create_tracker_config_request import CreateTrackerConfigRequest
from huaweicloudsdkconfig.v1.model.create_tracker_config_response import CreateTrackerConfigResponse
from huaweicloudsdkconfig.v1.model.custom_policy import CustomPolicy
from huaweicloudsdkconfig.v1.model.delete_aggregation_authorization_request import DeleteAggregationAuthorizationRequest
from huaweicloudsdkconfig.v1.model.delete_aggregation_authorization_response import DeleteAggregationAuthorizationResponse
from huaweicloudsdkconfig.v1.model.delete_configuration_aggregator_request import DeleteConfigurationAggregatorRequest
from huaweicloudsdkconfig.v1.model.delete_configuration_aggregator_response import DeleteConfigurationAggregatorResponse
from huaweicloudsdkconfig.v1.model.delete_conformance_pack_request import DeleteConformancePackRequest
from huaweicloudsdkconfig.v1.model.delete_conformance_pack_response import DeleteConformancePackResponse
from huaweicloudsdkconfig.v1.model.delete_organization_policy_assignment_request import DeleteOrganizationPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.delete_organization_policy_assignment_response import DeleteOrganizationPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.delete_pending_aggregation_request_request import DeletePendingAggregationRequestRequest
from huaweicloudsdkconfig.v1.model.delete_pending_aggregation_request_response import DeletePendingAggregationRequestResponse
from huaweicloudsdkconfig.v1.model.delete_policy_assignment_request import DeletePolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.delete_policy_assignment_response import DeletePolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.delete_stored_query_request import DeleteStoredQueryRequest
from huaweicloudsdkconfig.v1.model.delete_stored_query_response import DeleteStoredQueryResponse
from huaweicloudsdkconfig.v1.model.delete_tracker_config_request import DeleteTrackerConfigRequest
from huaweicloudsdkconfig.v1.model.delete_tracker_config_response import DeleteTrackerConfigResponse
from huaweicloudsdkconfig.v1.model.disable_policy_assignment_request import DisablePolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.disable_policy_assignment_response import DisablePolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.enable_policy_assignment_request import EnablePolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.enable_policy_assignment_response import EnablePolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.grouped_resource_count import GroupedResourceCount
from huaweicloudsdkconfig.v1.model.history_item import HistoryItem
from huaweicloudsdkconfig.v1.model.list_aggregate_compliance_by_policy_assignment_request import ListAggregateComplianceByPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.list_aggregate_compliance_by_policy_assignment_response import ListAggregateComplianceByPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.list_aggregate_discovered_resources_request import ListAggregateDiscoveredResourcesRequest
from huaweicloudsdkconfig.v1.model.list_aggregate_discovered_resources_response import ListAggregateDiscoveredResourcesResponse
from huaweicloudsdkconfig.v1.model.list_aggregation_authorizations_request import ListAggregationAuthorizationsRequest
from huaweicloudsdkconfig.v1.model.list_aggregation_authorizations_response import ListAggregationAuthorizationsResponse
from huaweicloudsdkconfig.v1.model.list_all_resources_request import ListAllResourcesRequest
from huaweicloudsdkconfig.v1.model.list_all_resources_response import ListAllResourcesResponse
from huaweicloudsdkconfig.v1.model.list_all_tags_request import ListAllTagsRequest
from huaweicloudsdkconfig.v1.model.list_all_tags_response import ListAllTagsResponse
from huaweicloudsdkconfig.v1.model.list_built_in_conformance_pack_templates_request import ListBuiltInConformancePackTemplatesRequest
from huaweicloudsdkconfig.v1.model.list_built_in_conformance_pack_templates_response import ListBuiltInConformancePackTemplatesResponse
from huaweicloudsdkconfig.v1.model.list_built_in_policy_definitions_request import ListBuiltInPolicyDefinitionsRequest
from huaweicloudsdkconfig.v1.model.list_built_in_policy_definitions_response import ListBuiltInPolicyDefinitionsResponse
from huaweicloudsdkconfig.v1.model.list_configuration_aggregators_request import ListConfigurationAggregatorsRequest
from huaweicloudsdkconfig.v1.model.list_configuration_aggregators_response import ListConfigurationAggregatorsResponse
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_by_pack_id_request import ListConformancePackComplianceByPackIdRequest
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_by_pack_id_response import ListConformancePackComplianceByPackIdResponse
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_details_by_pack_id_request import ListConformancePackComplianceDetailsByPackIdRequest
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_details_by_pack_id_response import ListConformancePackComplianceDetailsByPackIdResponse
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_scores_request import ListConformancePackComplianceScoresRequest
from huaweicloudsdkconfig.v1.model.list_conformance_pack_compliance_scores_response import ListConformancePackComplianceScoresResponse
from huaweicloudsdkconfig.v1.model.list_conformance_packs_request import ListConformancePacksRequest
from huaweicloudsdkconfig.v1.model.list_conformance_packs_response import ListConformancePacksResponse
from huaweicloudsdkconfig.v1.model.list_organization_policy_assignments_request import ListOrganizationPolicyAssignmentsRequest
from huaweicloudsdkconfig.v1.model.list_organization_policy_assignments_response import ListOrganizationPolicyAssignmentsResponse
from huaweicloudsdkconfig.v1.model.list_pending_aggregation_requests_request import ListPendingAggregationRequestsRequest
from huaweicloudsdkconfig.v1.model.list_pending_aggregation_requests_response import ListPendingAggregationRequestsResponse
from huaweicloudsdkconfig.v1.model.list_policy_assignments_request import ListPolicyAssignmentsRequest
from huaweicloudsdkconfig.v1.model.list_policy_assignments_response import ListPolicyAssignmentsResponse
from huaweicloudsdkconfig.v1.model.list_policy_states_by_assignment_id_request import ListPolicyStatesByAssignmentIdRequest
from huaweicloudsdkconfig.v1.model.list_policy_states_by_assignment_id_response import ListPolicyStatesByAssignmentIdResponse
from huaweicloudsdkconfig.v1.model.list_policy_states_by_domain_id_request import ListPolicyStatesByDomainIdRequest
from huaweicloudsdkconfig.v1.model.list_policy_states_by_domain_id_response import ListPolicyStatesByDomainIdResponse
from huaweicloudsdkconfig.v1.model.list_policy_states_by_resource_id_request import ListPolicyStatesByResourceIdRequest
from huaweicloudsdkconfig.v1.model.list_policy_states_by_resource_id_response import ListPolicyStatesByResourceIdResponse
from huaweicloudsdkconfig.v1.model.list_providers_request import ListProvidersRequest
from huaweicloudsdkconfig.v1.model.list_providers_response import ListProvidersResponse
from huaweicloudsdkconfig.v1.model.list_regions_request import ListRegionsRequest
from huaweicloudsdkconfig.v1.model.list_regions_response import ListRegionsResponse
from huaweicloudsdkconfig.v1.model.list_resources_request import ListResourcesRequest
from huaweicloudsdkconfig.v1.model.list_resources_response import ListResourcesResponse
from huaweicloudsdkconfig.v1.model.list_schemas_request import ListSchemasRequest
from huaweicloudsdkconfig.v1.model.list_schemas_response import ListSchemasResponse
from huaweicloudsdkconfig.v1.model.list_stored_queries_request import ListStoredQueriesRequest
from huaweicloudsdkconfig.v1.model.list_stored_queries_response import ListStoredQueriesResponse
from huaweicloudsdkconfig.v1.model.managed_policy_assignment_metadata import ManagedPolicyAssignmentMetadata
from huaweicloudsdkconfig.v1.model.organization_policy_assignment_detailed_status_response import OrganizationPolicyAssignmentDetailedStatusResponse
from huaweicloudsdkconfig.v1.model.organization_policy_assignment_request import OrganizationPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.organization_policy_assignment_response import OrganizationPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.organization_policy_assignment_status_response import OrganizationPolicyAssignmentStatusResponse
from huaweicloudsdkconfig.v1.model.page_info import PageInfo
from huaweicloudsdkconfig.v1.model.pending_aggregation_request import PendingAggregationRequest
from huaweicloudsdkconfig.v1.model.policy_assignment import PolicyAssignment
from huaweicloudsdkconfig.v1.model.policy_assignment_request_body import PolicyAssignmentRequestBody
from huaweicloudsdkconfig.v1.model.policy_compliance_summary_unit import PolicyComplianceSummaryUnit
from huaweicloudsdkconfig.v1.model.policy_definition import PolicyDefinition
from huaweicloudsdkconfig.v1.model.policy_definition_default_resource_types import PolicyDefinitionDefaultResourceTypes
from huaweicloudsdkconfig.v1.model.policy_filter_definition import PolicyFilterDefinition
from huaweicloudsdkconfig.v1.model.policy_parameter_definition import PolicyParameterDefinition
from huaweicloudsdkconfig.v1.model.policy_parameter_value import PolicyParameterValue
from huaweicloudsdkconfig.v1.model.policy_resource import PolicyResource
from huaweicloudsdkconfig.v1.model.policy_state import PolicyState
from huaweicloudsdkconfig.v1.model.policy_state_request_body import PolicyStateRequestBody
from huaweicloudsdkconfig.v1.model.query_info import QueryInfo
from huaweicloudsdkconfig.v1.model.query_run_request_body import QueryRunRequestBody
from huaweicloudsdkconfig.v1.model.region import Region
from huaweicloudsdkconfig.v1.model.resource_counts_filters import ResourceCountsFilters
from huaweicloudsdkconfig.v1.model.resource_entity import ResourceEntity
from huaweicloudsdkconfig.v1.model.resource_identifier import ResourceIdentifier
from huaweicloudsdkconfig.v1.model.resource_provider_response import ResourceProviderResponse
from huaweicloudsdkconfig.v1.model.resource_relation import ResourceRelation
from huaweicloudsdkconfig.v1.model.resource_schema_response import ResourceSchemaResponse
from huaweicloudsdkconfig.v1.model.resource_summary_response_item import ResourceSummaryResponseItem
from huaweicloudsdkconfig.v1.model.resource_summary_response_item_regions import ResourceSummaryResponseItemRegions
from huaweicloudsdkconfig.v1.model.resource_summary_response_item_types import ResourceSummaryResponseItemTypes
from huaweicloudsdkconfig.v1.model.resource_type_response import ResourceTypeResponse
from huaweicloudsdkconfig.v1.model.resources_filters import ResourcesFilters
from huaweicloudsdkconfig.v1.model.run_aggregate_resource_query_request import RunAggregateResourceQueryRequest
from huaweicloudsdkconfig.v1.model.run_aggregate_resource_query_response import RunAggregateResourceQueryResponse
from huaweicloudsdkconfig.v1.model.run_evaluation_by_policy_assignment_id_request import RunEvaluationByPolicyAssignmentIdRequest
from huaweicloudsdkconfig.v1.model.run_evaluation_by_policy_assignment_id_response import RunEvaluationByPolicyAssignmentIdResponse
from huaweicloudsdkconfig.v1.model.run_query_request import RunQueryRequest
from huaweicloudsdkconfig.v1.model.run_query_response import RunQueryResponse
from huaweicloudsdkconfig.v1.model.selector_config_body import SelectorConfigBody
from huaweicloudsdkconfig.v1.model.show_aggregate_compliance_details_by_policy_assignment_request import ShowAggregateComplianceDetailsByPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.show_aggregate_compliance_details_by_policy_assignment_response import ShowAggregateComplianceDetailsByPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.show_aggregate_discovered_resource_counts_request import ShowAggregateDiscoveredResourceCountsRequest
from huaweicloudsdkconfig.v1.model.show_aggregate_discovered_resource_counts_response import ShowAggregateDiscoveredResourceCountsResponse
from huaweicloudsdkconfig.v1.model.show_aggregate_policy_assignment_detail_request import ShowAggregatePolicyAssignmentDetailRequest
from huaweicloudsdkconfig.v1.model.show_aggregate_policy_assignment_detail_response import ShowAggregatePolicyAssignmentDetailResponse
from huaweicloudsdkconfig.v1.model.show_aggregate_policy_state_compliance_summary_request import ShowAggregatePolicyStateComplianceSummaryRequest
from huaweicloudsdkconfig.v1.model.show_aggregate_policy_state_compliance_summary_response import ShowAggregatePolicyStateComplianceSummaryResponse
from huaweicloudsdkconfig.v1.model.show_aggregate_resource_config_request import ShowAggregateResourceConfigRequest
from huaweicloudsdkconfig.v1.model.show_aggregate_resource_config_response import ShowAggregateResourceConfigResponse
from huaweicloudsdkconfig.v1.model.show_built_in_conformance_pack_template_request import ShowBuiltInConformancePackTemplateRequest
from huaweicloudsdkconfig.v1.model.show_built_in_conformance_pack_template_response import ShowBuiltInConformancePackTemplateResponse
from huaweicloudsdkconfig.v1.model.show_built_in_policy_definition_request import ShowBuiltInPolicyDefinitionRequest
from huaweicloudsdkconfig.v1.model.show_built_in_policy_definition_response import ShowBuiltInPolicyDefinitionResponse
from huaweicloudsdkconfig.v1.model.show_configuration_aggregator_request import ShowConfigurationAggregatorRequest
from huaweicloudsdkconfig.v1.model.show_configuration_aggregator_response import ShowConfigurationAggregatorResponse
from huaweicloudsdkconfig.v1.model.show_configuration_aggregator_sources_status_request import ShowConfigurationAggregatorSourcesStatusRequest
from huaweicloudsdkconfig.v1.model.show_configuration_aggregator_sources_status_response import ShowConfigurationAggregatorSourcesStatusResponse
from huaweicloudsdkconfig.v1.model.show_conformance_pack_request import ShowConformancePackRequest
from huaweicloudsdkconfig.v1.model.show_conformance_pack_response import ShowConformancePackResponse
from huaweicloudsdkconfig.v1.model.show_evaluation_state_by_assignment_id_request import ShowEvaluationStateByAssignmentIdRequest
from huaweicloudsdkconfig.v1.model.show_evaluation_state_by_assignment_id_response import ShowEvaluationStateByAssignmentIdResponse
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_detailed_status_request import ShowOrganizationPolicyAssignmentDetailedStatusRequest
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_detailed_status_response import ShowOrganizationPolicyAssignmentDetailedStatusResponse
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_request import ShowOrganizationPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_response import ShowOrganizationPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_statuses_request import ShowOrganizationPolicyAssignmentStatusesRequest
from huaweicloudsdkconfig.v1.model.show_organization_policy_assignment_statuses_response import ShowOrganizationPolicyAssignmentStatusesResponse
from huaweicloudsdkconfig.v1.model.show_policy_assignment_request import ShowPolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.show_policy_assignment_response import ShowPolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.show_resource_by_id_request import ShowResourceByIdRequest
from huaweicloudsdkconfig.v1.model.show_resource_by_id_response import ShowResourceByIdResponse
from huaweicloudsdkconfig.v1.model.show_resource_detail_request import ShowResourceDetailRequest
from huaweicloudsdkconfig.v1.model.show_resource_detail_response import ShowResourceDetailResponse
from huaweicloudsdkconfig.v1.model.show_resource_history_request import ShowResourceHistoryRequest
from huaweicloudsdkconfig.v1.model.show_resource_history_response import ShowResourceHistoryResponse
from huaweicloudsdkconfig.v1.model.show_resource_relations_detail_request import ShowResourceRelationsDetailRequest
from huaweicloudsdkconfig.v1.model.show_resource_relations_detail_response import ShowResourceRelationsDetailResponse
from huaweicloudsdkconfig.v1.model.show_resource_relations_request import ShowResourceRelationsRequest
from huaweicloudsdkconfig.v1.model.show_resource_relations_response import ShowResourceRelationsResponse
from huaweicloudsdkconfig.v1.model.show_stored_query_request import ShowStoredQueryRequest
from huaweicloudsdkconfig.v1.model.show_stored_query_response import ShowStoredQueryResponse
from huaweicloudsdkconfig.v1.model.show_tracker_config_request import ShowTrackerConfigRequest
from huaweicloudsdkconfig.v1.model.show_tracker_config_response import ShowTrackerConfigResponse
from huaweicloudsdkconfig.v1.model.stored_query import StoredQuery
from huaweicloudsdkconfig.v1.model.stored_query_request_body import StoredQueryRequestBody
from huaweicloudsdkconfig.v1.model.tag_detail import TagDetail
from huaweicloudsdkconfig.v1.model.template_parameter_definition import TemplateParameterDefinition
from huaweicloudsdkconfig.v1.model.tracker_config_body import TrackerConfigBody
from huaweicloudsdkconfig.v1.model.tracker_obs_channel_config_body import TrackerOBSChannelConfigBody
from huaweicloudsdkconfig.v1.model.tracker_smn_channel_config_body import TrackerSMNChannelConfigBody
from huaweicloudsdkconfig.v1.model.update_configuration_aggregator_request import UpdateConfigurationAggregatorRequest
from huaweicloudsdkconfig.v1.model.update_configuration_aggregator_response import UpdateConfigurationAggregatorResponse
from huaweicloudsdkconfig.v1.model.update_policy_assignment_request import UpdatePolicyAssignmentRequest
from huaweicloudsdkconfig.v1.model.update_policy_assignment_response import UpdatePolicyAssignmentResponse
from huaweicloudsdkconfig.v1.model.update_policy_state_request import UpdatePolicyStateRequest
from huaweicloudsdkconfig.v1.model.update_policy_state_response import UpdatePolicyStateResponse
from huaweicloudsdkconfig.v1.model.update_stored_query_request import UpdateStoredQueryRequest
from huaweicloudsdkconfig.v1.model.update_stored_query_response import UpdateStoredQueryResponse
from huaweicloudsdkconfig.v1.model.vars_structure import VarsStructure
| [
"[email protected]"
] | |
3eae3203390b548511b0f67aaa046b3793f0e674 | caace044baf7a6f2b0bda65ae361eed06bddfc3c | /dailyQuestion/2020/2020-05/05-30/python/solution_mono_stack.py | c291e66a6dccdffb17a62db905e6b0ac18ddf515 | [
"Apache-2.0"
] | permissive | russellgao/algorithm | fd6126e89c40d7d351c53bbd5fde690c9be899ef | ad5e724d20a8492b8eba03fc0f24e4ff5964b3ea | refs/heads/master | 2023-03-28T03:00:02.370660 | 2021-03-28T10:56:38 | 2021-03-28T10:56:38 | 259,038,372 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 808 | py | # 单调栈
def largestRectangleArea(heights: [int]) -> int:
n = len(heights)
left, right = [0] * n, [0] * n
mono_stack = []
for i in range(n):
while mono_stack and heights[mono_stack[-1]] >= heights[i]:
mono_stack.pop()
left[i] = mono_stack[-1] if mono_stack else -1
mono_stack.append(i)
mono_stack = []
for i in range(n - 1, -1, -1):
while mono_stack and heights[mono_stack[-1]] >= heights[i]:
mono_stack.pop()
right[i] = mono_stack[-1] if mono_stack else n
mono_stack.append(i)
result = max((right[i] - left[i] - 1) * heights[i] for i in range(n)) if n > 0 else 0
return result
if __name__ == "__main__":
heights = [2, 1, 5, 6, 2, 3]
result = largestRectangleArea(heights)
print(result) | [
"[email protected]"
] | |
d65b720c7fe482af48ba1cb8dacd00f00571ed21 | dfffc423a768475e680f954e3eea6c944df965d5 | /main.py | 72aba8f9dea35dccd8ba3daf532a91ab0b87e329 | [] | no_license | ssfdust/wechatheadless | 13948ea86cfd7cb71d6cdea74fc4931b4d111843 | 544a8a74c5adcabb35b0828b52de49df43f222e1 | refs/heads/master | 2023-05-10T05:29:29.290177 | 2021-06-01T02:57:34 | 2021-06-01T02:57:34 | 372,156,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,436 | py | from selenium import webdriver
from pathlib import Path
from selenium.webdriver.firefox.options import Options
from time import sleep
from selenium.common.exceptions import NoSuchElementException
import os
url_prefix = os.environ.get("INJECTOR_URL_PREFIX", "https://127.0.0.1")
injectjs = f"""
var script = document.createElement('script')
script.src = '{url_prefix}/injector.js'
document.getElementsByTagName('head')[0].appendChild(script)
"""
options = Options()
options.headless = True
# profile_path = Path(__file__).parent / "ffprofile"
geckodriver_path = str(Path(__file__).parent / "bin/geckodriver")
driver = webdriver.Firefox(options=options, executable_path=geckodriver_path)
driver.get("https://wx.qq.com")
sleep(8)
element = driver.find_element_by_xpath("/html/body/div[1]/div[2]/div[1]/img")
element.screenshot("./qrcode.png")
print("生成qrcode.png")
while True:
try:
driver.find_element_by_xpath("/html/body/div[1]/div/div[1]/div[1]/div[1]/img")
os.remove("./qrcode.png")
print("删除qrcode.png")
break
except NoSuchElementException:
print("not login")
sleep(2)
def load(webdriver):
webdriver.execute_script(injectjs)
sleep(2)
webdriver.execute_script("injector.run()")
def reload_(webdriver):
webdriver.refresh()
sleep(6)
load(webdriver)
load(driver)
while True:
sleep(7200)
print("刷新页面")
reload_(driver)
| [
"[email protected]"
] | |
1c81599ad7026475eacd7d54f0fd2ea3bc926b78 | 6d8a42478faa8eedb7efcc60aeeeb3d34beb0fab | /lottery/models.py | adaf021fcbdd7f4206fbfd81fd8b4c07efa1f61a | [] | no_license | zjleifeng/lottery | 7509c45d39e71f3e47c4ffed18723335888a3c96 | bb6e7ba9acff0659a82d431bee17a2c6f7c65731 | refs/heads/master | 2020-04-11T11:00:44.328193 | 2018-07-05T15:47:14 | 2018-07-05T15:47:14 | 77,206,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,407 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2016/12/8 12:35
# @Author : eric
# @Site :
# @File : models.py
# @Software: PyCharm
from django.db import models
# Create your models here.
class Person(models.Model):
name=models.CharField(max_length=50,verbose_name=u'姓名')
tel=models.BigIntegerField(verbose_name=u'手机号码')
num=models.CharField(max_length=50,default=100,verbose_name=u'奖券号码')
isWin=models.IntegerField(default=0,verbose_name=u'是否中奖')
mWin=models.IntegerField(default=0)
cWin=models.IntegerField(default=1)
class Meta:
verbose_name_plural=u'抽奖人员信息'
def __unicode__(self):
return self.name
__str__=__unicode__
class Result(models.Model):
uid=models.IntegerField()
name=models.CharField(max_length=50,verbose_name=u'中奖人姓名')
tel=models.CharField(max_length=50,verbose_name=u'中奖人电话')
num=models.CharField(max_length=50,default=100,verbose_name=u'奖券号码')
createtime=models.DateTimeField(auto_now_add=True,verbose_name=u'中奖时间')
awardname=models.CharField(max_length=50,verbose_name=u'奖项名称')
isdel=models.IntegerField(default=0,verbose_name=u'是否被删除1是0否')
class Meta:
verbose_name_plural=u'中奖人员信息'
def __unicode__(self):
return self.name
__str__=__unicode__
| [
"[email protected]"
] | |
8b9d544b0d8141bf0019d1b34f8dda0b58913596 | 83686519904694228204a481aa949ded7b4f65f8 | /Python/python_stack/Django/JDsTruckTracker/apps/truck_tracker/urls.py | 3580bb4c4738300c450ae5a810b1e56a2aebd98a | [] | no_license | andres121985/DojoAssignments | 5621a9253ab5113a5038f62474582a59fec11a27 | 62f525e8574628c72e01df5725a2495bdeb5d3a9 | refs/heads/master | 2021-01-23T12:44:48.501795 | 2017-06-02T18:40:33 | 2017-06-02T18:40:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 521 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^trucks$', views.trucks),
url(r'^addtruck$', views.add_truck),
url(r'^add$', views.add),
url(r'^delete$', views.delete),
url(r'^logout$', views.logout),
url(r'^login$', views.login),
url(r'^register$', views.register),
url(r'^search$', views.search),
url(r'^category/(?P<id>\d+)$', views.category),
url(r'^category/(?P<id>\d+)/(?P<truck_id>\d+)$', views.specific_truck),
]
| [
"[email protected]"
] | |
bea3b00a62b7195cd1722b1ab7beea5807b6cd01 | ea94fc6a5c16f3266a7a48b005b3f10fef92c845 | /src/maths/nodes/ListNode.py | 90819c6904b829984f52466b69ed61fda4235ed3 | [
"MIT"
] | permissive | Grahack/Turing | ea340ee3f3db33a27bdf4a053f34f4c9bec525c3 | fad060121d45b91f82af8952dd9f64e5d7bd9ed2 | refs/heads/master | 2020-03-17T00:49:45.176435 | 2018-05-11T20:07:11 | 2018-05-11T20:07:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
import util.html
from util.math import proper_str
from .AstNode import *
class ListNode(AstNode):
"""Identifier node
value -- value (list of AstNode)"""
value = None
def __init__(self, value: List[AstNode]):
super().__init__(True)
self.value = value
def __str__(self):
return "[List %s]" % self.value
def __repr__(self):
return "ListNode(%r)" % self.value
def code(self, bb=False) -> str:
return (util.html.sanitize("[%s]") if bb else "[%s]") % proper_str([node.code(bb) for node in self.value])[1:-1]
def python(self) -> str:
return "list([%s])" % ", ".join(x.python() for x in self.value)
def children(self) -> List["AstNode"]:
return self.value
| [
"[email protected]"
] | |
2269e3f344eb0175a3534a568e2586a9132d6e67 | 2db5bf5832ddb99e93bb949ace1fad1fde847319 | /beginLearn/AndrewNg/exercise/c2/L_layer_network.py | 64efd67d5e58e163209f4e2f658bd28b8d10c20c | [] | no_license | RoderickAdriance/PythonDemo | 2d92b9aa66fcd77b6f797e865df77fbc8c2bcd14 | 98b124fecd3a972d7bc46661c6a7de8787b8e761 | refs/heads/master | 2020-04-06T17:36:46.000133 | 2018-11-15T07:07:03 | 2018-11-15T07:07:03 | 157,666,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from exercise.c6.dnn_app_utils_v2 import *
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
costs = []
parameters = initialize_parameters_deep(layers_dims)
for i in range(0, num_iterations):
        # AL: the final predictions; caches: the Z values and parameters (W, b) computed at each layer
        # AL looks like [0.5, 0.8, 0.3, ...]
        AL, caches = L_model_forward(X, parameters)
        # compute the cost
        cost = compute_cost(AL, Y)
        # backward propagation
        grads = L_model_backward(AL, Y, caches)
        # gradient descent update (update_parameters comes from the dnn_app_utils_v2 helpers imported above)
        parameters = update_parameters(parameters, grads, learning_rate)
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    return parameters
| [
"[email protected]"
] | |
fe8925b066e6f71c81f76db9c4cfec33be3c51fa | f22778fb83b4723dd3dbc26834a8475c4b8c442c | /nana_utils.py | d10f46774c1b1861c6af2babc1c15319ef167d3d | [] | no_license | innovationgarage/deprecated-PortPollution-AISreduction-dask | 070b3011b505de74b2126b83fa93a7bb0eace84c | d66cfa96529f54a6f12faa39560beab10d7ef5a8 | refs/heads/master | 2020-04-01T15:10:53.598911 | 2018-10-25T07:38:47 | 2018-10-25T07:38:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | import dask.distributed, distributed.client, dask.bag, daskutils.io.msgpack, daskutils.base, os.path, uuid, msgpack, daskutils.sort
#client = dask.distributed.Client('ymslanda.innovationgarage.tech:8786')
data = [uuid.uuid4().hex for a in range(0, 100000)]
s = daskutils.sort.MergeSort("/tmp/")
res = s.sort(dask.bag.from_sequence(data, npartitions=4))
res = res.compute()
assert len(res) == len(data)
assert res == sorted(res)
assert res == sorted(data)
| [
"[email protected]"
] | |
339a75ee64661331f4a5fd9250006afbdd1e8c4c | 1c390cd4fd3605046914767485b49a929198b470 | /leetcode/partition-equal-subset-sum.py | 6cceb12902e683d1ba9985b592a5f627bf4f9821 | [] | no_license | wwwwodddd/Zukunft | f87fe736b53506f69ab18db674311dd60de04a43 | 03ffffee9a76e99f6e00bba6dbae91abc6994a34 | refs/heads/master | 2023-01-24T06:14:35.691292 | 2023-01-21T15:42:32 | 2023-01-21T15:42:32 | 163,685,977 | 7 | 8 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | class Solution:
def canPartition(self, a: List[int]) -> bool:
s = sum(a)
if s % 2:
return False
        # f[j] == 1 iff some subset of the numbers seen so far sums to j (bitset-style 0/1 knapsack)
        f = [0 for i in range(s + 1)]
        f[0] = 1
        for i in a:
            # iterate j downwards so each number is used at most once
            for j in range(i, s + 1)[::-1]:
                f[j] |= f[j - i]
return f[s // 2] == 1 | [
"[email protected]"
] | |
1f76ebb4e0a1abe77d320a39f677e53be6742b30 | 5b4b1866571453f78db5b06a08ff0eda17b91b04 | /test/vanilla/Expected/AcceptanceTests/ModelFlattening/modelflattening/operations/__init__.py | 97acfd337b399a280c13c5ee50b4e2c3f81f01c1 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | koek67/autorest.azure-functions-python | ba345f1d194ca7431daab1210a0cd801d4946991 | b0896d8aec6b0fd6f0bcb12ea8e0489652dc2783 | refs/heads/main | 2022-12-20T13:27:56.405901 | 2020-09-30T08:23:11 | 2020-09-30T08:23:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._auto_rest_resource_flattening_test_service_operations import AutoRestResourceFlatteningTestServiceOperationsMixin
__all__ = [
'AutoRestResourceFlatteningTestServiceOperationsMixin',
]
| [
"[email protected]"
] | |
ef7a98380f2dc3cb9f4e993171957dd70c6761f7 | d41d18d3ea6edd2ec478b500386375a8693f1392 | /plotly/validators/scatter3d/error_x/_value.py | db1f336f929396b215dbe5a268a8760d26d12b9a | [
"MIT"
] | permissive | miladrux/plotly.py | 38921dd6618650d03be9891d6078e771ffccc99a | dbb79e43e2cc6c5762251537d24bad1dab930fff | refs/heads/master | 2020-03-27T01:46:57.497871 | 2018-08-20T22:37:38 | 2018-08-20T22:37:38 | 145,742,203 | 1 | 0 | MIT | 2018-08-22T17:37:07 | 2018-08-22T17:37:07 | null | UTF-8 | Python | false | false | 434 | py | import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name='value', parent_name='scatter3d.error_x', **kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='calc',
min=0,
role='info',
**kwargs
)
| [
"[email protected]"
] | |
e000f3f97c731836cb809f51e139ba47ba132579 | ddf002d1084d5c63842a6f42471f890a449966ee | /basics/Python/Recursion/recursion_factorial.py | 09684087ef55e781859db5daf3d5512e54a8269a | [] | no_license | RaghavJindal2000/Python | 0ab3f198cbc5559bdf46ac259c7136356f7f09aa | 8e5c646585cff28ba3ad9bd6c384bcb5537d671a | refs/heads/master | 2023-01-01T23:56:02.073029 | 2020-10-18T19:30:01 | 2020-10-18T19:30:01 | 263,262,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | def fact(n):
if(n<=0):
return 1
else:
return n*fact(n-1)
n=int(input("Enter the Number : "))
print("Factorial of ",n," is = ",fact(n))
input()
| [
"[email protected]"
] | |
f928f4e34606749623dbabda74342d7c8d15021e | 56fd2d92b8327cfb7d8f95b89c52e1700343b726 | /examples/notebooks/rebalance_etfs/strategy.py | d5636616438b0a536a14bc605986acb7f768e20f | [
"MIT"
] | permissive | stjordanis/Odin | fecb640ccf4f2e6eb139389d25cbe37da334cdb6 | e2e9d638c68947d24f1260d35a3527dd84c2523f | refs/heads/master | 2020-04-15T09:13:17.850126 | 2017-02-09T00:25:55 | 2017-02-09T00:25:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | import pandas as pd
from odin.strategy import AbstractStrategy
from odin.strategy.templates import BuyAndHoldStrategy
from odin.utilities.mixins.strategy_mixins import (
LongStrategyMixin,
TotalSellProportionMixin,
AlwaysBuyIndicatorMixin,
NeverSellIndicatorMixin,
DefaultPriorityMixin,
DefaultFeaturesMixin,
)
class BuyAndHoldSpyderStrategy(BuyAndHoldStrategy):
def buy_indicator(self, feats):
return feats.name in ("SPY", )
class RebalanceETFStrategy(
LongStrategyMixin,
TotalSellProportionMixin,
AlwaysBuyIndicatorMixin,
NeverSellIndicatorMixin,
DefaultPriorityMixin,
DefaultFeaturesMixin,
):
def compute_buy_proportion(self, feats):
"""Implementation of abstract base class method."""
if feats.name == "SPY":
return 0.6
elif feats.name == "AGG":
return 0.4
def exit_indicator(self, feats):
"""Implementation of abstract base class method."""
symbol = feats.name
pos = self.portfolio.portfolio_handler.filled_positions[symbol]
date = self.portfolio.data_handler.current_date
return pos.compute_holding_period(date).days > 63
| [
"[email protected]"
] | |
8ad8780e0d55bc33de20a87a14f2635eba86d737 | dfcd2ab21b4b87b15a908cae58650eb8f4e34539 | /accounts/views.py | 542b21c365142be1d526953ce0c17e0b83451680 | [] | no_license | janak29292/esnayproductions | 29a73eb064165e80a4729cf4b26095cb6dfb09f1 | 762dfa02630668ed97f86760c3b20824cf89d6b2 | refs/heads/master | 2023-02-27T23:10:44.697353 | 2021-02-06T09:44:04 | 2021-02-06T09:44:04 | 325,578,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,932 | py | from django.http import Http404
from django.shortcuts import render
# Create your views here.
from accounts.models import Team, Portfolio, PortfolioCategory, Blog
def index(request, *args, **kwargs):
team_players = Team.objects.all()
portfolios = Portfolio.objects.filter(active=True)[:11]
categories = PortfolioCategory.objects.all()
blogs = Blog.objects.all().order_by('-id')[:10]
context = {
"team": team_players,
"portfolios": portfolios,
"categories": categories,
"blogs": blogs
}
return render(request, 'index.html', context=context)
def about(request, *args, **kwargs):
return render(request, 'about.html')
def team(request, *args, **kwargs):
team_players = Team.objects.all()
context = {"team": team_players}
return render(request, 'team.html', context=context)
def portfolio(request, *args, **kwargs):
portfolios = Portfolio.objects.filter(active=True)[:50]
categories = PortfolioCategory.objects.all()
context = {
"portfolios": portfolios,
"categories": categories
}
return render(request, 'portfolio-four-columns.html', context=context)
def portfolio_detail(request, *args, **kwargs):
pk = kwargs.get('pk')
try:
instance = Portfolio.objects.get(id=pk)
except Portfolio.DoesNotExist:
raise Http404
context = {
"portfolio": instance
}
return render(request, 'portfolio-single-item.html', context=context)
def blog(request, *args, **kwargs):
blogs = Blog.objects.all().order_by('-id')
context = {
"blogs": blogs
}
return render(request, 'blog-fullwidth.html', context=context)
def blog_detail(request, *args, **kwargs):
return render(request, 'blog-single-post.html')
def contact(request, *args, **kwargs):
return render(request, 'contact.html')
| [
"[email protected]"
] | |
bef0ca1abde4a21dfb30146a13f94cc01fc77e1e | 1c40a5b1e7ffb7ffed2cfe831c1686aa4af82284 | /omm/analysis/aes/aes_excel_bio.py | f4ccead5ecbb61b5ce170fb185f60b30a91a0d48 | [] | no_license | rhodges/oregon-marinemap | 3c4bb3c13f15ec5f2efd405b0006c8a85d3b42b0 | 4a3797f507a48fd158be5f751fa0ca8c24a47cb7 | refs/heads/master | 2016-09-05T17:49:44.702600 | 2011-11-04T15:56:18 | 2011-11-04T15:56:18 | 32,354,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import xlwt
from analysis.excel.utils import major_heading_style
from analysis.excel.biology import bio_terrestrial_headers, bio_terrestrial_data, bio_intertidal_headers, bio_intertidal_data, bio_subtidal_headers, bio_subtidal_data
def populate_bio_sheet(ws, context):
bio_header(ws, context)
bio_terrestrial_headers(ws)
bio_terrestrial_data(ws, context)
bio_intertidal_headers(ws)
bio_intertidal_data(ws, context)
bio_subtidal_headers(ws, context)
bio_subtidal_data(ws, context)
def bio_header(ws, context):
ws.write(0, 0, "Energy Site Biology Report for %s" % context['aes'].name, major_heading_style)
| [
"[email protected]"
] | |
1001396875ee79948e767bf2febdfa60c88bc214 | f8b36ef6debb317e7213346b11df932eb3b6af35 | /winner's code/2nd_code.py | 10725b67993d69c10ebc395d4d75d088bba13d60 | [] | no_license | YoungriKIM/lotte | ec767eb5eaa8f13f38115f1c30d25ed5bac7ef2b | e3ceda9918210ee418269d1586ec200ce9d9ca33 | refs/heads/main | 2023-03-30T15:44:00.387934 | 2021-03-29T13:16:44 | 2021-03-29T13:16:44 | 348,300,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # 롯데정보통신 Vision AI 경진대회 - Public LB 2nd place Solution
# https://dev-hunmin.tistory.com/entry/%EB%A1%AF%EB%8D%B0%EC%A0%95%EB%B3%B4%ED%86%B5%EC%8B%A0-Vision-AI-%EA%B2%BD%EC%A7%84%EB%8C%80%ED%9A%8C-Public-LB-2nd-place-Solution
# GitHub
# https://github.com/hunmin-hub/LotteVisionAI | [
"[email protected]"
] | |
e5c47acc73444f8227a478fe41b8fe625e1f7218 | 8a03b8459902d1bf0806f8d3387fb962bb57cf58 | /Testscripts/click_on_crcreports.py | 79a662d8d429268a4b00cb60ea7b5510f66d6cd5 | [] | no_license | chetandg123/cQube | f95a0e86b1e98cb418de209ad26ae2ba463cfcbc | a862a1cdf46faaaff5cad49d78c4e5f0454a6407 | refs/heads/master | 2022-07-18T12:43:06.839896 | 2020-05-22T13:23:52 | 2020-05-22T13:23:52 | 258,089,042 | 0 | 0 | null | 2020-05-08T16:28:26 | 2020-04-23T03:55:52 | HTML | UTF-8 | Python | false | false | 1,033 | py | import time
import unittest
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.support.select import Select
# script to click on crc reports
from Data.Paramters import Data
class CRC(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(Data.Path)
self.driver.maximize_window()
self.driver.implicitly_wait(10)
self.driver.get(Data.URL)
self.driver.find_element_by_xpath(Data.email).send_keys(Data.username)
self.driver.find_element_by_xpath(Data.pwd).send_keys(Data.password)
self.driver.find_element_by_xpath(Data.loginbtn).click()
time.sleep(10)
def test_crcreports(self):
self.driver.find_element_by_xpath(Data.Dashboard).click()
time.sleep(5)
self.driver.find_element_by_xpath(Data.crc).click()
print(self.driver.title)
def tearDown(self):
time.sleep(5)
self.driver.close()
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
463fb4daf35270c14d78f343872184d855614ad0 | 6f562994c50361ceebe1e806653ff329ce2ea06c | /Code/mar_14/page2.py | e0e722b06740c8badb19c5b166e00c69b04025a5 | [] | no_license | ManishaHingne/PythonML | aaef74ca0cab84e7dca0e1e829768e737b4cc7af | 17623a0699d3f7719c73fbe8e9f0bef7b69e8aec | refs/heads/master | 2020-05-21T15:35:09.785203 | 2019-05-11T06:21:09 | 2019-05-11T06:21:09 | 186,094,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y = [20, 29, 21, 22, 21]
plt.plot(x, y)
plt.savefig('./images/chart1.png')
# plt.show()
| [
"[email protected]"
] | |
c5962280fb2ab8ba6cb8738531e6d1ec77fdffce | 06b2eed882d8d7034fb7c57b648d5aa37d7f617b | /pycharmproject/django-rest/auth_ob/ob1/ob1/wsgi.py | c71ef1ec2bd6bbd96df7e830582e77a0b3fb725d | [] | no_license | 1974410167/python_projects | 558e2e7a4ea66b083ebd6d2f808b725e1bd153d6 | 81a97cbf41de12bdc3dbc46edf2931075ac4f758 | refs/heads/main | 2023-06-22T09:41:22.038620 | 2023-06-09T15:09:44 | 2023-06-09T15:09:44 | 355,036,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for ob1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ob1.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
65848bc4fcbe36380c69d094d6413ad41f733a82 | c99c272181eb43df688cc6af10bfb17659014ab9 | /01_Python-Basics/Exercise2/venv/Scripts/easy_install-script.py | f0035179eada71584342a2c53231180b3c97981e | [] | no_license | LachezarKostov/SoftUni | ce89d11a4796c10c8975dc5c090edecac993cb03 | 47559e9f01f7aabd73d84aa175be37140e2d5621 | refs/heads/master | 2023-01-29T20:49:57.196136 | 2020-12-10T12:34:09 | 2020-12-10T12:34:09 | 283,491,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!C:\Users\dream\Desktop\Python\Exercise2\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
429e24ce3134c6a853d16b512de97706843ab40a | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/scrapy/utils/http.py | 6e20649f5181a54d6d1787f641e3ac42ad182e66 | [] | no_license | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 956 | py | """
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.http instead of this module
"""
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.decorators import deprecated
from w3lib.http import * # noqa: F401
warnings.warn("Module `scrapy.utils.http` is deprecated, "
"Please import from `w3lib.http` instead.",
ScrapyDeprecationWarning, stacklevel=2)
@deprecated
def decode_chunked_transfer(chunked_body):
"""Parsed body received with chunked transfer encoding, and return the
decoded body.
For more info see:
https://en.wikipedia.org/wiki/Chunked_transfer_encoding
"""
body, h, t = '', '', chunked_body
while t:
h, t = t.split('\r\n', 1)
if h == '0':
break
size = int(h, 16)
body += t[:size]
t = t[size + 2:]
return body
| [
"[email protected]"
] | |
d24bb4ef7f0abf71c58f7e83c21133ec8c481125 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_4/lxxjin012/ndom.py | 3ddda13fca45dc51450355becc52d34ba11c8df0 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | def ndom_to_decimal (a):
x=str(a)
if len(str(a))==1:
digit1=int(str(a)[0])*1
        number=digit1
elif len(str(a))==2:
digit2=int(str(a)[0])*6
digit3=int(str(a)[1])*1
number=digit2+digit3
elif len(str(a))==3:
digit4=int(str(a)[0])*36
digit5=int(str(a)[1])*6
digit6=int(str(a)[2])*1
number=digit4+digit5+digit6
return number
def decimal_to_ndom (a):
digit1=a//36
b=a-(digit1*36)
digit2=b//6
c=b-(digit2*6)
number=(digit1*100)+(digit2*10)+(c)
return number
def ndom_add (a, b):
add=ndom_to_decimal(a)+ndom_to_decimal(b)
ndom=decimal_to_ndom (add)
return ndom
def ndom_multiply(a,b):
multiply=(ndom_to_decimal(a))*(ndom_to_decimal(b))
ndom=decimal_to_ndom (multiply)
return ndom | [
"[email protected]"
] | |
1bbd9872d8797ac523e3882012ec1954661bbb15 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/synapse/v20210601preview/list_integration_runtime_auth_key.py | 6ba5abeaa2f156a92731038bf8807a01933558a9 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 3,096 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListIntegrationRuntimeAuthKeyResult',
'AwaitableListIntegrationRuntimeAuthKeyResult',
'list_integration_runtime_auth_key',
]
@pulumi.output_type
class ListIntegrationRuntimeAuthKeyResult:
"""
The integration runtime authentication keys.
"""
def __init__(__self__, auth_key1=None, auth_key2=None):
if auth_key1 and not isinstance(auth_key1, str):
raise TypeError("Expected argument 'auth_key1' to be a str")
pulumi.set(__self__, "auth_key1", auth_key1)
if auth_key2 and not isinstance(auth_key2, str):
raise TypeError("Expected argument 'auth_key2' to be a str")
pulumi.set(__self__, "auth_key2", auth_key2)
@property
@pulumi.getter(name="authKey1")
def auth_key1(self) -> Optional[str]:
"""
The primary integration runtime authentication key.
"""
return pulumi.get(self, "auth_key1")
@property
@pulumi.getter(name="authKey2")
def auth_key2(self) -> Optional[str]:
"""
The secondary integration runtime authentication key.
"""
return pulumi.get(self, "auth_key2")
class AwaitableListIntegrationRuntimeAuthKeyResult(ListIntegrationRuntimeAuthKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListIntegrationRuntimeAuthKeyResult(
auth_key1=self.auth_key1,
auth_key2=self.auth_key2)
def list_integration_runtime_auth_key(integration_runtime_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListIntegrationRuntimeAuthKeyResult:
"""
The integration runtime authentication keys.
:param str integration_runtime_name: Integration runtime name
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['integrationRuntimeName'] = integration_runtime_name
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:synapse/v20210601preview:listIntegrationRuntimeAuthKey', __args__, opts=opts, typ=ListIntegrationRuntimeAuthKeyResult).value
return AwaitableListIntegrationRuntimeAuthKeyResult(
auth_key1=__ret__.auth_key1,
auth_key2=__ret__.auth_key2)
| [
"[email protected]"
] | |
dea54118e83a7cbf3499e2efc569d2e35c8355b5 | 680bd46e8eae20e78a425f766432711a47235374 | /models/site_monitor_checkpoint.py | a48c4844c14f12ba8c06fcc5feafa3d14d250fd4 | [
"Apache-2.0"
] | permissive | ILMostro/lm-sdk-python | 9f45217d64c0fc49caf2f4b279a124c2efe3d24d | 40da5812ab4d50dd1c6c3c68f7ea13c4d8f4fb49 | refs/heads/master | 2022-02-01T16:51:12.810483 | 2019-07-16T17:54:11 | 2019-07-16T17:54:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,061 | py | # coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SiteMonitorCheckpoint(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'display_prio': 'int',
'geo_info': 'str',
'id': 'int',
'is_enabled_in_root': 'bool',
'name': 'str'
}
attribute_map = {
'description': 'description',
'display_prio': 'displayPrio',
'geo_info': 'geoInfo',
'id': 'id',
'is_enabled_in_root': 'isEnabledInRoot',
'name': 'name'
}
def __init__(self, description=None, display_prio=None, geo_info=None, id=None, is_enabled_in_root=None, name=None): # noqa: E501
"""SiteMonitorCheckpoint - a model defined in Swagger""" # noqa: E501
self._description = None
self._display_prio = None
self._geo_info = None
self._id = None
self._is_enabled_in_root = None
self._name = None
self.discriminator = None
self.description = description
if display_prio is not None:
self.display_prio = display_prio
if geo_info is not None:
self.geo_info = geo_info
if id is not None:
self.id = id
if is_enabled_in_root is not None:
self.is_enabled_in_root = is_enabled_in_root
if name is not None:
self.name = name
@property
def description(self):
"""Gets the description of this SiteMonitorCheckpoint. # noqa: E501
:return: The description of this SiteMonitorCheckpoint. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this SiteMonitorCheckpoint.
:param description: The description of this SiteMonitorCheckpoint. # noqa: E501
:type: str
"""
if description is None:
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def display_prio(self):
"""Gets the display_prio of this SiteMonitorCheckpoint. # noqa: E501
:return: The display_prio of this SiteMonitorCheckpoint. # noqa: E501
:rtype: int
"""
return self._display_prio
@display_prio.setter
def display_prio(self, display_prio):
"""Sets the display_prio of this SiteMonitorCheckpoint.
:param display_prio: The display_prio of this SiteMonitorCheckpoint. # noqa: E501
:type: int
"""
self._display_prio = display_prio
@property
def geo_info(self):
"""Gets the geo_info of this SiteMonitorCheckpoint. # noqa: E501
:return: The geo_info of this SiteMonitorCheckpoint. # noqa: E501
:rtype: str
"""
return self._geo_info
@geo_info.setter
def geo_info(self, geo_info):
"""Sets the geo_info of this SiteMonitorCheckpoint.
:param geo_info: The geo_info of this SiteMonitorCheckpoint. # noqa: E501
:type: str
"""
self._geo_info = geo_info
@property
def id(self):
"""Gets the id of this SiteMonitorCheckpoint. # noqa: E501
:return: The id of this SiteMonitorCheckpoint. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SiteMonitorCheckpoint.
:param id: The id of this SiteMonitorCheckpoint. # noqa: E501
:type: int
"""
self._id = id
@property
def is_enabled_in_root(self):
"""Gets the is_enabled_in_root of this SiteMonitorCheckpoint. # noqa: E501
:return: The is_enabled_in_root of this SiteMonitorCheckpoint. # noqa: E501
:rtype: bool
"""
return self._is_enabled_in_root
@is_enabled_in_root.setter
def is_enabled_in_root(self, is_enabled_in_root):
"""Sets the is_enabled_in_root of this SiteMonitorCheckpoint.
:param is_enabled_in_root: The is_enabled_in_root of this SiteMonitorCheckpoint. # noqa: E501
:type: bool
"""
self._is_enabled_in_root = is_enabled_in_root
@property
def name(self):
"""Gets the name of this SiteMonitorCheckpoint. # noqa: E501
:return: The name of this SiteMonitorCheckpoint. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SiteMonitorCheckpoint.
:param name: The name of this SiteMonitorCheckpoint. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SiteMonitorCheckpoint, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SiteMonitorCheckpoint):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
c751ad79e8fae529b101b6f67f960895117723a7 | 890c8b8e90e516a5a3880eca9b2d217662fe7d84 | /armulator/armv6/opcodes/arm_instruction_set/arm_data_processing_and_miscellaneous_instructions/arm_extra_load_store_instructions/ldrsb_literal_a1.py | 5b2ac9460c52668437168816a8f42ea9fd81d9d8 | [
"MIT"
] | permissive | doronz88/armulator | b864135996f876c7857b79a314d4aa06cc19c549 | 0294feac2785c8947e5943ac0c34f941ee4b5fff | refs/heads/master | 2022-11-05T08:14:42.405335 | 2020-06-18T23:53:17 | 2020-06-18T23:53:17 | 273,363,061 | 2 | 0 | null | 2020-06-18T23:51:03 | 2020-06-18T23:51:02 | null | UTF-8 | Python | false | false | 825 | py | from armulator.armv6.opcodes.abstract_opcodes.ldrsb_literal import LdrsbLiteral
from armulator.armv6.opcodes.opcode import Opcode
class LdrsbLiteralA1(LdrsbLiteral, Opcode):
def __init__(self, instruction, add, imm32, t):
Opcode.__init__(self, instruction)
LdrsbLiteral.__init__(self, add, imm32, t)
def is_pc_changing_opcode(self):
return False
@staticmethod
def from_bitarray(instr, processor):
w = instr[10]
p = instr[7]
imm4_l = instr[-4:]
imm4_h = instr[20:24]
rt = instr[16:20]
add = instr[8]
imm32 = "0b000000000000000000000000" + imm4_h + imm4_l
if p == w or rt.uint == 15:
print "unpredictable"
else:
return LdrsbLiteralA1(instr, **{"add": add, "imm32": imm32, "t": rt.uint})
| [
"[email protected]"
] | |
eab2c83f1f6b77cae668f340fa4e8d50c06049c8 | 263fb97b6ab614f1e25533ba30b37ef94c0a2e81 | /parser/team08/Tytus_SQLPARSER_G8/Instrucciones/FunctionBinaryString/GetByte.py | e8dcc14bd94144d729a4212ee31d0bef1bb7f35c | [
"MIT"
] | permissive | ElbaAlvarez/tytus | f77e57df2338270b331172e270e2e44a94b68d2e | 795e660f3005d4fac5cb68af92b178ba1318e96e | refs/heads/main | 2023-02-12T08:55:32.347236 | 2020-12-24T14:00:06 | 2020-12-24T14:00:06 | 320,481,467 | 0 | 0 | MIT | 2020-12-11T05:57:41 | 2020-12-11T05:57:41 | null | UTF-8 | Python | false | false | 459 | py | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class GetByte(Instruccion):
def __init__(self, valor, tipo, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
bytes(self.valor, 'utf-8')
return bytes(self.valor,'utf-8')
instruccion = GetByte("hola mundo",None, 1,2)
instruccion.ejecutar(None,None) | [
"[email protected]"
] | |
7cd9427d739426a4e43e28b4b98aceceb117c961 | 051c3ee44478265c4510530888335335ec9f7fdf | /ML_Applications/CNNs/Mutants/Mutants_generated_by_MutPy_(code)/mutants_resnet/mutants_resnet_model_VERML_basedOn_nas_fix4_deterministic/187.py | 3228e7750ebd955620d2f820d99d0a269bfc2ddf | [] | no_license | PinjiaHe/VerifyML | b581c016012c62d8439adfce0caef4f098b36d5e | 3bd7c49e45720c1cdfe0af4ac7dd35b201056e65 | refs/heads/master | 2020-03-25T19:40:39.996370 | 2018-01-30T08:58:58 | 2018-01-30T08:58:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,660 | py | """Contains definitions for the preactivation form of Residual Networks.
Residual networks (ResNets) were originally proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
The full preactivation 'v2' ResNet variant implemented in this module was
introduced by:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The key difference of the full preactivation 'v2' variant compared to the
'v1' variant in [1] is the use of batch normalization before every weight layer
rather than after.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_BATCH_NORM_DECAY = 0.997
_BATCH_NORM_EPSILON = 1e-05
_SEED = 7
tf.set_random_seed(_SEED)
def batch_norm_relu(inputs, is_training, data_format):
"""Performs a batch normalization followed by a ReLU."""
inputs = tf.layers.batch_normalization(inputs=
inputs, axis=1 if data_format == 'channels_first' else 3, momentum=
_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True, scale=True, training=
is_training, fused=True)
inputs = tf.nn.relu(inputs)
return inputs
def fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0], [
pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end], [
pad_beg, pad_end], [0, 0]])
return padded_inputs
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format):
"""Strided 2-D convolution with explicit padding."""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format)
return tf.layers.conv2d(inputs=
inputs, filters=filters, kernel_size=kernel_size, strides=strides, padding='SAME' if
strides == 1 else 'VALID', use_bias=False, kernel_initializer=
tf.variance_scaling_initializer(), data_format=
data_format)
def building_block(inputs, filters, is_training, projection_shortcut, strides, data_format):
"""Standard building block for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the convolutions.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=strides, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=1, data_format=
data_format)
return inputs + shortcut
def bottleneck_block(inputs, filters, is_training, projection_shortcut, strides, data_format):
"""Bottleneck block variant for residual networks with BN before convolutions.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first two convolutions. Note that the
third and final convolution will use 4 times as many filters.
is_training: A Boolean for whether the model is in training or inference
mode. Needed for batch normalization.
projection_shortcut: The function to use for projection shortcuts (typically
a 1x1 convolution when downsampling the input).
strides: The block's stride. If greater than 1, this block will ultimately
downsample the input.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block.
"""
shortcut = inputs
inputs = batch_norm_relu(inputs, is_training, data_format)
if projection_shortcut is not None:
shortcut = projection_shortcut(inputs)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=1, strides=1, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=filters, kernel_size=3, strides=strides, data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = conv2d_fixed_padding(inputs=
inputs, filters=4 * filters, kernel_size=1, strides=1, data_format=
data_format)
return inputs + shortcut
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name, data_format):
"""Creates one layer of blocks for the ResNet model.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
filters: The number of filters for the first convolution of the layer.
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
blocks: The number of blocks contained in the layer.
strides: The stride to use for the first convolution of the layer. If
greater than 1, this layer will ultimately downsample the input.
is_training: Either True or False, whether we are currently training the
model. Needed for batch norm.
name: A string name for the tensor output of the block layer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
The output tensor of the block layer.
"""
filters_out = 4 * filters if block_fn is bottleneck_block else filters
def projection_shortcut(inputs):
return conv2d_fixed_padding(inputs=
inputs, filters=filters_out, kernel_size=1, strides=strides, data_format=
data_format)
inputs = block_fn(inputs, filters, is_training, projection_shortcut, strides,
data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, is_training, None, 1, data_format)
return tf.identity(inputs, name)
def cifar10_resnet_v2_generator(resnet_size, num_classes, data_format=None):
"""Generator for CIFAR-10 ResNet v2 models.
Args:
resnet_size: A single integer for the size of the ResNet model.
num_classes: The number of possible classes for image classification.
data_format: The input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
Returns:
The model function that takes in `inputs` and `is_training` and
returns the output tensor of the ResNet model.
Raises:
ValueError: If `resnet_size` is invalid.
"""
if resnet_size % 6 != 2:
raise ValueError('resnet_size must be 6n + 2:', resnet_size)
num_blocks = resnet_size - 2 // 6
if data_format is None:
        data_format = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'
def model(inputs, is_training):
"""Constructs the ResNet model given the inputs."""
if data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(inputs=
inputs, filters=16, kernel_size=3, strides=1, data_format=
data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = block_layer(inputs=
inputs, filters=16, block_fn=building_block, blocks=num_blocks, strides=1, is_training=
is_training, name='block_layer1', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=32, block_fn=building_block, blocks=num_blocks, strides=2, is_training=
is_training, name='block_layer2', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=64, block_fn=building_block, blocks=num_blocks, strides=2, is_training=
is_training, name='block_layer3', data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.average_pooling2d(inputs=
inputs, pool_size=8, strides=1, padding='VALID', data_format=
data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [(-1), 64])
inputs = tf.layers.dense(inputs=inputs, units=num_classes)
inputs = tf.identity(inputs, 'final_dense')
return inputs
return model
def imagenet_resnet_v2_generator(block_fn, layers, num_classes, data_format=None):
"""Generator for ImageNet ResNet v2 models.
Args:
block_fn: The block to use within the model, either `building_block` or
`bottleneck_block`.
layers: A length-4 array denoting the number of blocks to include in each
layer. Each layer consists of blocks that take inputs of the same size.
num_classes: The number of possible classes for image classification.
data_format: The input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
Returns:
The model function that takes in `inputs` and `is_training` and
returns the output tensor of the ResNet model.
"""
if data_format is None:
        data_format = 'channels_first' if tf.test.is_built_with_cuda() else 'channels_last'
def model(inputs, is_training):
"""Constructs the ResNet model given the inputs."""
if data_format == 'channels_first':
inputs = tf.transpose(inputs, [0, 3, 1, 2])
inputs = conv2d_fixed_padding(inputs=
inputs, filters=64, kernel_size=7, strides=2, data_format=
data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = tf.layers.max_pooling2d(inputs=
inputs, pool_size=3, strides=2, padding='SAME', data_format=
data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_layer(inputs=
inputs, filters=64, block_fn=block_fn, blocks=layers[0], strides=1, is_training=
is_training, name='block_layer1', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=128, block_fn=block_fn, blocks=layers[1], strides=2, is_training=
is_training, name='block_layer2', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=256, block_fn=block_fn, blocks=layers[2], strides=2, is_training=
is_training, name='block_layer3', data_format=
data_format)
inputs = block_layer(inputs=
inputs, filters=512, block_fn=block_fn, blocks=layers[3], strides=2, is_training=
is_training, name='block_layer4', data_format=
data_format)
inputs = batch_norm_relu(inputs, is_training, data_format)
inputs = tf.layers.average_pooling2d(inputs=
inputs, pool_size=7, strides=1, padding='VALID', data_format=
data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(inputs, [(-1), 512 if
block_fn is building_block else 2048])
inputs = tf.layers.dense(inputs=inputs, units=num_classes)
inputs = tf.identity(inputs, 'final_dense')
return inputs
return model
def imagenet_resnet_v2(resnet_size, num_classes, data_format=None):
"""Returns the ResNet model for a given size and number of output classes."""
model_params = {18: {'block':
building_block, 'layers': [2, 2, 2, 2]}, 34: {'block':
building_block, 'layers': [3, 4, 6, 3]}, 50: {'':
bottleneck_block, 'layers': [3, 4, 6, 3]}, 101: {'block':
bottleneck_block, 'layers': [3, 4, 23, 3]}, 152: {'block':
bottleneck_block, 'layers': [3, 8, 36, 3]}, 200: {'block':
bottleneck_block, 'layers': [3, 24, 36, 3]}}
if resnet_size not in model_params:
raise ValueError('Not a valid resnet_size:', resnet_size)
params = model_params[resnet_size]
return imagenet_resnet_v2_generator(
params['block'], params['layers'], num_classes, data_format) | [
"[email protected]"
] | |
58b4e869b8fbf743ba16b0be22137d18b172cdf6 | 7d43ba52d958537905cfdde46cc194a97c45dc56 | /ProgrammingParadigms/OOP/SOLID/solid_workbook.py | 361c598c444742f3305a9a384d65f599a2f7e871 | [] | no_license | Koshmatova/workbook | 3e4d1f698a01f2be65c1abc83ee251ebc8a6bbcd | 902695e8e660689a1730c23790dbdc51737085c9 | refs/heads/master | 2023-05-01T02:30:46.868027 | 2021-05-10T03:49:08 | 2021-05-10T03:49:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,380 | py | СТРУКТУРНАЯ ПАРАДИГМА
Dijkstra -> ANY algorithm can be expressed through 3 ways of choosing commands:
linear execution
branching on a condition
executing a loop while a condition holds
#he recommended using only these
Niklaus Wirth -> Algorithms + Data Structures = Programs
even asm HAS the ability to store a subprogram in a variable

ENCAPSULATION
# protecting the invariant
# Any program entity with non-trivial state must be turned into a closed system that can only be moved from one correct state to another (so that it cannot be broken)
* everything related to an obj stays inside a single architectural boundary (packing data and behavior into a single component)
#everything related to a class is packed into one module
#clear boundaries between classes
* a clear separation of interface and implementation
* EVERY obj must have its own api - one such that there is no need to dig into the implementation or to use it in an inappropriate way
#ALL CLASSES have an interface
* data hiding helps with this
code cannot cross a boundary it does not know about, or reach data it has no access to
HIDING THE IMPLEMENTATION IN RELATIONAL DBs
#given: there EXISTS a db used by several programs whose implementations we cannot access
we create
a set of stored procedures, grouped into an Interface schema
for each program we create a separate user and allow access only to that schema
#now an entity with non-trivial behavior is closed off by an interface
ALTERNATIVES TO HIDING IN PYTHON
* _
* Document only the interface; EVERYTHING WITHOUT docs is implementation
* Separate the interface via code conventions
__all__
* Make the code conventions strict
# automated checks -> a violation counts as an error and breaks the build
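#es (sketch of the convention-based hiding described above; the names Account/_audit are invented for illustration):
__all__ = ["Account"]            # only Account is part of the documented interface

class Account:
    def __init__(self):
        self._balance = 0        # leading underscore -> implementation detail

    def deposit(self, amount):
        if amount <= 0:
            raise ValueError("amount must be positive")
        self._balance += amount

    def balance(self):
        return self._balance

def _audit(account):             # helper hidden both by "_" and by __all__
    return {"balance": account._balance}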
#the base class defines the fx that must be common to ALL derived ones
objects provide interfaces. if an object provides an interface -> the interface specifies the object's behavior.
classes implement interfaces. if a class implements an interface -> its instances provide that interface
Instances provide the interfaces their classes implement, & can directly provide additional interfaces not implemented in the class.
classes usually do not provide the interfaces they implement
#this can be generalized to factories - you can create a callable producing objs that provide interfaces ~ the factory implements the interfaces.
POLYMORPHISM
# Stroustrup -> one interface - many implementations
# the user of the interface will know nothing about the implementation
or whether it has changed
POLYMORPHISM BEYOND OOP
# Erlang CONTAINS the behaviour feature
# code is split into modules, a module name can be used as a variable ->
# calling an fx from a module:
foobar:function()
or
Module = foobar
Module:function()
# behaviour is needed to be sure that a module HAS certain fx
# in a module that uses other modules, behaviour_info states the requirements for the module-variables; in turn, via the behaviour declaration, modules commit to implementing that behaviour
#es:
# the gen_server module lets you create a server in a separate process that executes requests from other processes; gen_server CONTAINS ALL the logic of handling other processes' requests
# but the actual request handling is done by an implementation of the gen_server behaviour, and as long as other modules implement it correctly (even with empty stubs) - gen_server does not care how the requests are handled; moreover, the handling module can be swapped on the fly
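#es (sketch: a rough Python analogue of the behaviour idea using abc; GenServer/Handler/EchoHandler are invented names):
import abc

class Handler(abc.ABC):                  # the "behaviour": callbacks a handler must provide
    @abc.abstractmethod
    def handle_call(self, request): ...

class EchoHandler(Handler):              # one interchangeable implementation (could be a stub)
    def handle_call(self, request):
        return request

class GenServer:                         # generic request loop, written only against the behaviour
    def __init__(self, handler: Handler):
        self.handler = handler

    def call(self, request):
        return self.handler.handle_call(request)

# GenServer(EchoHandler()).call("ping") -> "ping"; the handler can be swapped without touching GenServer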
INHERITANCE
#lets you combine code reuse with polymorphism
SINGLE RESPONSIBILITY
# where and how the boundaries between classes should run (interface, implementation | [
"[email protected]"
] | |
ee0a8b4383627724adac95c5b9fe794045bbd923 | 2c9db62ddaffd77c097b3da4990021270912ea40 | /백준/1966.py | 757df2ca0f3b4c3cc1fd65161b5b06dc6f61a950 | [] | no_license | nahyun119/algorithm | 9ae120fbe047819a74e06fc6879f55405bc9ea71 | 40e291305a4108266073d489e712787df1dbae4b | refs/heads/master | 2023-08-27T23:18:44.133748 | 2021-10-03T11:32:12 | 2021-10-03T11:32:12 | 326,661,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import sys
import heapq  # heapq is a min heap by default; to build a max heap, negate the priorities
from collections import deque
input = sys.stdin.readline
result = []
def solve():
global result
n, m = map(int, input().split())
documents = list(map(int, input().split()))
q = deque()
answer = []
for i in range(n):
q.append((documents[i], i))
count = 1
while True:
max_value = max(q)
priority, index = q.popleft()
#print(max_value, priority, index)
if priority < max_value[0]:
q.append((priority, index))
else:
if index == m:
break
            count += 1  # count only the documents that are actually printed
result.append(count)
#print(count)
def main():
global result
T = int(input())
for _ in range(T):
solve()
for r in result:
print(r)
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
615be1b30f756944ef2e0597b47f152d45a6918d | 11ca0c393c854fa7212e783a34269f9dae84e8c7 | /Python/785. 判断二分图.py | 0ac82063c14ffcf55dc1f21b8d60715c5a8a6092 | [] | no_license | VictoriqueCQ/LeetCode | dc84d81163eed26fa9dbc2114bba0b5c2ea881f4 | a77b3ead157f97f5d9599badb4d4c5da69de44ba | refs/heads/master | 2021-06-05T06:40:24.659909 | 2021-03-31T08:31:51 | 2021-03-31T08:31:51 | 97,978,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | from typing import List
import collections
class Solution:
def isBipartite(self, graph: List[List[int]]) -> bool:
n = len(graph)
UNCOLORED, RED, GREEN = 0, 1, 2
color = [UNCOLORED] * n
for i in range(n):
if color[i] == UNCOLORED:
q = collections.deque([i])
color[i] = RED
while q:
node = q.popleft()
cNei = (GREEN if color[node] == RED else RED)
for neighbor in graph[node]:
if color[neighbor] == UNCOLORED:
q.append(neighbor)
color[neighbor] = cNei
elif color[neighbor] != cNei:
return False
return True
class Solution1:
def isBipartite(self, graph: List[List[int]]) -> bool:
# dfs time O(E+V), space O(V)
n = len(graph)
visited = [0] * n
stack = []
for i in range(n):
if visited[i] == 0:
stack.append(i)
visited[i] = 1
while stack:
cur = stack.pop()
for neighbor in graph[cur]:
if visited[neighbor] == 0:
stack.append(neighbor)
visited[neighbor] = -visited[cur]
else:
if visited[neighbor] != -visited[cur]:
return False
return True
s = Solution1()
print(s.isBipartite([[1,2,3],[0,2],[0,1,3],[0,2]])) | [
"1997Victorique0317"
] | 1997Victorique0317 |
b275f191e0be5fa4db2b3ccde251feb3d9006183 | 03e3138f99f275d15d41a5c5bfb212f85d64d02e | /source/res/scripts/client/gui/Scaleform/daapi/view/lobby/techtree/nodes.py | 714f0eb3b7e577318cfd0270afe2a825647bcf3e | [] | no_license | TrenSeP/WorldOfTanks-Decompiled | e428728e7901146d0b599d02c930d70532232a97 | 1faa748acec1b7e435b657fd054ecba23dd72778 | refs/heads/1.4.1 | 2020-04-27T08:07:49.813023 | 2019-03-05T17:37:06 | 2019-03-05T17:37:06 | 174,159,837 | 1 | 0 | null | 2019-03-06T14:33:33 | 2019-03-06T14:24:36 | Python | UTF-8 | Python | false | false | 8,811 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/techtree/nodes.py
from gui.Scaleform.daapi.view.lobby.techtree.settings import DEFAULT_UNLOCK_PROPS
from gui.shared.formatters import getItemUnlockPricesVO, getItemPricesVO, text_styles, getItemRentOrRestorePricesVO
from gui.shared.gui_items import GUI_ITEM_TYPE, GUI_ITEM_TYPE_NAMES
from gui.shared.money import MONEY_UNDEFINED
from helpers.time_utils import getCurrentTimestamp
from helpers import i18n, dependency
from skeletons.gui.server_events import IEventsCache
class BaseNode(object):
__slots__ = ('nodeName', 'nodeCD', 'nationID', 'itemTypeID', 'isFound', 'isAnnouncement', 'order')
def __init__(self, nodeName, nationID, itemTypeID, nodeCD, isFound=True, isAnnouncement=False, order=0):
super(BaseNode, self).__init__()
self.nodeName = nodeName
self.nationID = nationID
self.itemTypeID = itemTypeID
self.nodeCD = nodeCD
self.isFound = isFound
self.isAnnouncement = isAnnouncement
self.order = order
class ExposedNode(object):
__slots__ = ('__nodeCD', '__earnedXP', '__state', '__unlockProps', '__bpfProps', '__guiPrice', '__displayInfo')
def __init__(self, nodeCD, earnedXP, state, displayInfo, unlockProps=None, bpfProps=None, price=None):
super(ExposedNode, self).__init__()
self.__nodeCD = nodeCD
self.__earnedXP = earnedXP
self.__state = state
self.__displayInfo = displayInfo
self.__unlockProps = unlockProps or DEFAULT_UNLOCK_PROPS
self.__bpfProps = bpfProps
self.__guiPrice = price or MONEY_UNDEFINED
def clear(self):
self.__displayInfo = None
self.__unlockProps = DEFAULT_UNLOCK_PROPS
self.__bpfProps = None
self.__guiPrice = MONEY_UNDEFINED
return
def getNodeCD(self):
return self.__nodeCD
def getEarnedXP(self):
return self.__earnedXP
def getState(self):
return self.__state
def setState(self, state):
self.__state = state
def addStateFlag(self, flag):
self.__state |= flag
def getDisplayInfo(self):
return self.__displayInfo
def getUnlockTuple(self):
return self.__unlockProps.makeTuple()
def getUnlockProps(self):
return self.__unlockProps
def setUnlockProps(self, unlockProps):
self.__unlockProps = unlockProps
def getBpfProps(self):
return self.__bpfProps
def setBpfProps(self, bpfProps):
self.__bpfProps = bpfProps
def setGuiPrice(self, price):
self.__guiPrice = price
def getTags(self):
raise NotImplementedError
def getLevel(self):
raise NotImplementedError
def getTypeName(self):
raise NotImplementedError
def getShortUserName(self):
raise NotImplementedError
def getIcon(self):
raise NotImplementedError
def getSmallIcon(self):
raise NotImplementedError
def isVehicle(self):
raise NotImplementedError
def isRented(self):
raise NotImplementedError
def getItemPrices(self):
raise NotImplementedError
def getBuyPrices(self):
raise NotImplementedError
def getCompareData(self):
raise NotImplementedError
def getExtraInfo(self, rootItem):
raise NotImplementedError
def isActionPrice(self):
raise NotImplementedError
def getActionDiscount(self):
raise NotImplementedError
def getBlueprintLabel(self):
raise NotImplementedError
def getBlueprintProgress(self):
raise NotImplementedError
def getActionFinishTime(self):
raise NotImplementedError
def getRestoreFinishTime(self):
raise NotImplementedError
def getRentInfo(self):
raise NotImplementedError
class RealNode(ExposedNode):
__slots__ = ('__item',)
__eventsCache = dependency.descriptor(IEventsCache)
def __init__(self, nodeCD, item, earnedXP, state, displayInfo, unlockProps=None, bpfProps=None, price=None):
super(RealNode, self).__init__(nodeCD, earnedXP, state, displayInfo, unlockProps=unlockProps, bpfProps=bpfProps, price=price)
self.__item = item
def clear(self):
super(RealNode, self).clear()
self.__item = None
return
def getTags(self):
return self.__item.tags
def getLevel(self):
return self.__item.level
def getTypeName(self):
return self.__item.getGUIEmblemID()
def getShortUserName(self):
return self.__item.shortUserName
def getIcon(self):
return self.__item.icon
def getSmallIcon(self):
return self.__item.iconSmall
def isVehicle(self):
return self.__item.itemTypeID == GUI_ITEM_TYPE.VEHICLE
def isRented(self):
return self.__item.isRented
def getItemPrices(self):
item = self.__item
unlockProps = self.getUnlockProps()
if not item.isUnlocked and unlockProps is not None:
return getItemUnlockPricesVO(unlockProps)
else:
return getItemRentOrRestorePricesVO(item.restorePrice) if item.isRestoreAvailable() else getItemPricesVO(item.getBuyPrice())
def getBuyPrices(self):
return getItemPricesVO(self.__item.getBuyPrice())
def isActionPrice(self):
itemPrice = self.__item.buyPrices.itemPrice
return itemPrice.isActionPrice()
def getActionDiscount(self):
return self.__item.buyPrices.itemPrice.getActionPrc()
def getCompareData(self):
if self.__item is not None and self.__item.itemTypeID == GUI_ITEM_TYPE.VEHICLE:
from gui.Scaleform.daapi.view.lobby.vehicle_compare import formatters
return formatters.getTreeNodeCompareData(self.__item)
else:
return {}
def getExtraInfo(self, rootItem):
descriptor = rootItem.descriptor if rootItem else None
return self.__item.getExtraIconInfo(descriptor)
def getBlueprintLabel(self):
bpfProps = self.getBpfProps()
label = ''
if bpfProps is not None:
label = text_styles.counterLabelText(' '.join((str(bpfProps.filledCount), '/', str(bpfProps.totalCount))))
return label
def getBlueprintProgress(self):
bpfProps = self.getBpfProps()
progress = 0.0
if bpfProps is not None and bpfProps.totalCount != 0:
progress = float(bpfProps.filledCount) / bpfProps.totalCount
return progress
def getActionFinishTime(self):
actions = self.__eventsCache.getItemAction(self.__item)
actions = sorted(actions, key=lambda elem: elem[0])
if not actions:
return 0
bestAction = self.__eventsCache.getActions().get(actions[0][1], '')
return bestAction.getFinishTime() if bestAction else 0
def getRestoreFinishTime(self):
return self.__item.restoreInfo.getRestoreTimeLeft() + getCurrentTimestamp() if self.__item.isRestorePossible() and self.__item.hasLimitedRestore() else 0
def getRentInfo(self):
rentMoney = self.__item.minRentPrice
return (rentMoney, rentMoney.getCurrency()) if rentMoney else (0, None)
class AnnouncementNode(ExposedNode):
__slots__ = ('__announcementInfo',)
def __init__(self, nodeCD, info, state, displayInfo):
super(AnnouncementNode, self).__init__(nodeCD, 0, state, displayInfo, unlockProps=None, bpfProps=None, price=None)
self.__announcementInfo = info
return
def clear(self):
super(AnnouncementNode, self).clear()
self.__announcementInfo = None
return
def getTags(self):
return self.__announcementInfo.tags
def getLevel(self):
return self.__announcementInfo.level
def getTypeName(self):
return GUI_ITEM_TYPE_NAMES[GUI_ITEM_TYPE.VEHICLE]
def getShortUserName(self):
return i18n.makeString(self.__announcementInfo.userString)
def getIcon(self):
return self.__announcementInfo.icon
def getSmallIcon(self):
return self.__announcementInfo.icon
def isRented(self):
return False
def isVehicle(self):
return True
def getItemPrices(self):
return None
def getBuyPrices(self):
return None
def getCompareData(self):
return {}
def getExtraInfo(self, rootItem):
return None
def isActionPrice(self):
return False
def getActionDiscount(self):
pass
def getBlueprintLabel(self):
pass
def getBlueprintProgress(self):
pass
def getActionFinishTime(self):
pass
def getRestoreFinishTime(self):
pass
def getRentInfo(self):
return (0, None)
| [
"[email protected]"
] | |
54455505d3762eae077685337d9117b9749a5e0a | a7a115b000cd40be9378174777da4f1b56b99de0 | /web_crawl_book/demo4.py | 18d7d8d1d1b889b512a3291e57c3fc15f15cb7d1 | [] | no_license | fireinrain/python_spider | 316f7cc230989223e6177c5ba2443eba9b54a52a | 364273278efa6629ec7d79f86c2ce54555ff7691 | refs/heads/master | 2022-06-26T20:38:56.462771 | 2017-06-27T00:53:42 | 2017-06-27T00:53:42 | 60,531,584 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | #! /usr/bin/python3
# _encoding:utf-8_
# Written by liuzhaoyang
# wcontact:[email protected]
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
import random
import datetime
import lxml
# Collect every internal link found on the page
def get_inter_links(bsobj, include_url):
    inter_links = []
    # find all links that start with "/" or contain the site's own domain
    for link in bsobj.findAll("a", href=re.compile("^(/|.*" + include_url + ")")):
        if link.attrs['href'] is not None:
            inner_link = link.attrs['href']
            if inner_link not in inter_links:
                inter_links.append(inner_link)
    return inter_links
# Collect every external link found on the page
def get_external_links(bsobj,external_url):
external_links = []
for link in bsobj.findAll("a",href=re.compile("^(http|www)((?!"+external_url+").)*$")):
if link.attrs['href'] is not None:
inner_link = link.attrs['href']
if inner_link not in external_links:
external_links.append(inner_link)
return external_links
# Split an address into its domain and path parts
def split_address(address):
address_parts = address.replace("http://","").split("/")
return address_parts
# Pick a random external link reachable from the given page
def get_random_external_link(start_page):
html = urlopen(start_page)
bsobj = BeautifulSoup(html.read(),"lxml")
    external_links = get_external_links(bsobj, split_address(start_page)[0])
    if len(external_links) == 0:
        # no external link on this page: follow a random internal link and try again
        inter_links = get_inter_links(bsobj, split_address(start_page)[0])
        return get_random_external_link(random.choice(inter_links))
    else:
        return random.choice(external_links)
def follow_external_only(start_site):
    external_link = get_random_external_link(start_site)
    print("Random external link: " + external_link)
    follow_external_only(external_link)
if __name__ == "__main__":
# strs = "http://www.baidu.com/music"
# sss = split_address(strs)
# print(sss)
# get_random_external_link(strs)
follow_external_only("http://oreilly.com") | [
"[email protected]"
] | |
70a043697ede733abf2b38349e5054591c900233 | 17c280ade4159d4d8d5a48d16ba3989470eb3f46 | /16/data/ExoDiBosonResonances/EDBRTreeMaker/test/114.py | 4c26aa3c6bf43f3f5d6ceb82aa1b8b60e31ea6b2 | [] | no_license | chengchen1993/run2_ntuple | 798ff18489ff5185dadf3d1456a4462e1dbff429 | c16c2b203c05a3eb77c769f63a0bcdf8b583708d | refs/heads/master | 2021-06-25T18:27:08.534795 | 2021-03-15T06:08:01 | 2021-03-15T06:08:01 | 212,079,804 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 27,623 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process( "TEST" )
#process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True),allowUnscheduled=cms.untracked.bool(True))
#,
# SkipEvent = cms.untracked.vstring('ProductNotFound'))
filterMode = False # True
######## Sequence settings ##########
corrJetsOnTheFly = True
runOnMC = False
runOnSig = False
DOHLTFILTERS = True
#useJSON = not (runOnMC)
#JSONfile = 'Cert_246908-258750_13TeV_PromptReco_Collisions15_25ns_JSON.txt'
#****************************************************************************************************#
#process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_condDBv2_cff')
from Configuration.AlCa.GlobalTag import GlobalTag
if runOnMC:
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v8'#'MCRUN2_74_V9::All'
#process.GlobalTag.globaltag = '94X_mc2017_realistic_v14'#'MCRUN2_74_V9::All'
elif not(runOnMC):
process.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v7'
# https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookMiniAOD2015#ETmiss_filters
# For the RunIISummer15DR74 MC campaing, the process name in PAT.
# For Run2015B PromptReco Data, the process name is RECO.
# For Run2015B re-MiniAOD Data 17Jul2015, the process name is PAT.
hltFiltersProcessName = 'RECO'
if runOnMC:
hltFiltersProcessName = 'PAT' #'RECO'
#if DOHLTFILTERS and not(runOnMC):
process.load('CommonTools.RecoAlgos.HBHENoiseFilterResultProducer_cfi')
process.HBHENoiseFilterResultProducer.minZeros = cms.int32(99999)
process.HBHENoiseFilterResultProducer.IgnoreTS4TS5ifJetInLowBVRegion=cms.bool(False)
process.HBHENoiseFilterResultProducer.defaultDecision = cms.string("HBHENoiseFilterResultRun2Loose")
process.ApplyBaselineHBHENoiseFilter = cms.EDFilter('BooleanFlagFilter',
inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHENoiseFilterResult'),
reverseDecision = cms.bool(False)
)
process.ApplyBaselineHBHEIsoNoiseFilter = cms.EDFilter('BooleanFlagFilter',
inputLabel = cms.InputTag('HBHENoiseFilterResultProducer','HBHEIsoNoiseFilterResult'),
reverseDecision = cms.bool(False)
)
######### read JSON file for data ##########
'''if not(runOnMC) and useJSON:
import FWCore.PythonUtilities.LumiList as LumiList
import FWCore.ParameterSet.Types as CfgTypes
process.source.lumisToProcess = CfgTypes.untracked(CfgTypes.VLuminosityBlockRange())
myLumis = LumiList.LumiList(filename = JSONfile).getCMSSWString().split(',')
process.source.lumisToProcess.extend(myLumis)
'''
# ---------------------------------------------------------
# DeepAK8: set up TransientTrackBuilder
process.load('Configuration.StandardSequences.MagneticField_cff')
process.TransientTrackBuilderESProducer = cms.ESProducer("TransientTrackBuilderESProducer",
ComponentName=cms.string('TransientTrackBuilder')
)
# ---------------------------------------------------------
####### Redo Jet clustering sequence ##########
from RecoJets.Configuration.RecoPFJets_cff import ak4PFJetsCHS, ak8PFJetsCHS, ak8PFJetsCHSPruned, ak8PFJetsCHSSoftDrop, ak8PFJetsCHSPrunedMass, ak8PFJetsCHSSoftDropMass# , ak8PFJetsCSTrimmed, ak8PFJetsCSFiltered, ak8PFJetsCHSFilteredMass, ak8PFJetsCHSTrimmedMass
from CommonTools.PileupAlgos.Puppi_cff import puppi
process.puppi = puppi.clone()
process.puppi.useExistingWeights = True
process.puppi.candName = cms.InputTag('packedPFCandidates')
process.puppi.vertexName = cms.InputTag('offlineSlimmedPrimaryVertices')
process.ak8PFJetsCHS = ak8PFJetsCHS.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSPruned = ak8PFJetsCHSPruned.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSPrunedMass = ak8PFJetsCHSPrunedMass.clone()
process.ak8PFJetsCHSSoftDrop = ak8PFJetsCHSSoftDrop.clone( src = 'puppi', jetPtMin = 100.0 )
process.ak8PFJetsCHSSoftDropMass = ak8PFJetsCHSSoftDropMass.clone()
process.NjettinessAK8 = cms.EDProducer("NjettinessAdder",
src = cms.InputTag("ak8PFJetsCHS"),
Njets = cms.vuint32(1, 2, 3, 4),
# variables for measure definition :
measureDefinition = cms.uint32( 0 ), # CMS default is normalized measure
beta = cms.double(1.0), # CMS default is 1
R0 = cms.double( 0.8 ), # CMS default is jet cone size
Rcutoff = cms.double( 999.0), # not used by default
# variables for axes definition :
axesDefinition = cms.uint32( 6 ), # CMS default is 1-pass KT axes
nPass = cms.int32(0), # not used by default
akAxesR0 = cms.double(-999.0) # not used by default
)
process.substructureSequence = cms.Sequence()
process.substructureSequence+=process.puppi
process.substructureSequence+=process.ak8PFJetsCHS
process.substructureSequence+=process.NjettinessAK8
process.substructureSequence+=process.ak8PFJetsCHSPruned
process.substructureSequence+=process.ak8PFJetsCHSPrunedMass
process.substructureSequence+=process.ak8PFJetsCHSSoftDrop
process.substructureSequence+=process.ak8PFJetsCHSSoftDropMass
####### Redo pat jets sequence ##########
process.redoPatJets = cms.Sequence()
process.redoPrunedPatJets = cms.Sequence()
process.redoSoftDropPatJets = cms.Sequence()
from ExoDiBosonResonances.EDBRJets.redoPatJets_cff import patJetCorrFactorsAK8, patJetsAK8, selectedPatJetsAK8
# Redo pat jets from ak8PFJetsCHS
process.patJetCorrFactorsAK8 = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHS' )
process.patJetsAK8 = patJetsAK8.clone( jetSource = 'ak8PFJetsCHS' )
process.patJetsAK8.userData.userFloats.src = [ cms.InputTag("ak8PFJetsCHSPrunedMass"), cms.InputTag("ak8PFJetsCHSSoftDropMass"), cms.InputTag("NjettinessAK8:tau1"), cms.InputTag("NjettinessAK8:tau2"), cms.InputTag("NjettinessAK8:tau3"), cms.InputTag("NjettinessAK8:tau4")]
process.patJetsAK8.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8") )
process.selectedPatJetsAK8 = selectedPatJetsAK8.clone( cut = cms.string('pt > 100') )
process.redoPatJets+=process.patJetCorrFactorsAK8
process.redoPatJets+=process.patJetsAK8
process.redoPatJets+=process.selectedPatJetsAK8
# Redo pat jets ak8PFJetsCHSPruned
process.patJetCorrFactorsAK8Pruned = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHSPruned' )
process.patJetsAK8Pruned = patJetsAK8.clone( jetSource = 'ak8PFJetsCHSPruned' )
process.patJetsAK8Pruned.userData.userFloats.src = [ "" ]
#process.patJetsAK8Pruned.userData.userFloats =cms.PSet(src = cms.VInputTag(""))
process.patJetsAK8Pruned.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8Pruned") )
process.selectedPatJetsAK8Pruned = selectedPatJetsAK8.clone(cut = 'pt > 100', src = "patJetsAK8Pruned")
process.redoPrunedPatJets+=process.patJetCorrFactorsAK8Pruned
process.redoPrunedPatJets+=process.patJetsAK8Pruned
process.redoPrunedPatJets+=process.selectedPatJetsAK8Pruned
# Redo pat jets ak8PFJetsCHSSoftDrop
process.patJetCorrFactorsAK8Softdrop = patJetCorrFactorsAK8.clone( src = 'ak8PFJetsCHSSoftDrop' )
process.patJetsAK8Softdrop = patJetsAK8.clone( jetSource = 'ak8PFJetsCHSSoftDrop' )
process.patJetsAK8Softdrop.userData.userFloats.src = [ "" ]
#process.patJetsAK8Softdrop.userData.userFloats =cms.PSet(src = cms.VInputTag(""))
process.patJetsAK8Softdrop.jetCorrFactorsSource = cms.VInputTag( cms.InputTag("patJetCorrFactorsAK8Softdrop") )
process.selectedPatJetsAK8Softdrop = selectedPatJetsAK8.clone(cut = 'pt > 100', src = "patJetsAK8Softdrop")
from PhysicsTools.PatAlgos.tools.jetTools import addJetCollection
## PATify soft drop subjets
addJetCollection(
process,
labelName = 'AK8SoftDropSubjets',
jetSource = cms.InputTag('ak8PFJetsCHSSoftDrop','SubJets'),
algo = 'ak', # needed for subjet flavor clustering
rParam = 0.8, # needed for subjet flavor clustering
getJetMCFlavour = False,
pvSource = cms.InputTag( 'offlineSlimmedPrimaryVertices' ),
genJetCollection = cms.InputTag('slimmedGenJets'),
genParticles = cms.InputTag( 'prunedGenParticles' ),
btagDiscriminators = ['None'],
jetCorrections = ('AK4PFPuppi', ['L2Relative', 'L3Absolute'], 'None'),
# explicitJTA = True, # needed for subjet b tagging
# svClustering = True, # needed for subjet b tagging
# fatJets=cms.InputTag('ak8PFJetsCHS'), # needed for subjet flavor clustering
# groomedFatJets=cms.InputTag('ak8PFJetsCHSSoftDrop') # needed for subjet flavor clustering
)
#'''
#from RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi import *
# this loads all available b-taggers
#process.load("RecoBTag.Configuration.RecoBTag_cff")
#process.load("RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi")
#process.load("RecoBTag.DeepFlavour.deepFlavour_cff")
#'''
from RecoBTag.Configuration.RecoBTag_EventContent_cff import *
from RecoBTag.Configuration.RecoBTag_cff import *
from RecoBTag.DeepFlavour.DeepFlavourJetTagsProducer_cfi import deepFlavourJetTags
from RecoBTag.DeepFlavour.deepFlavour_cff import *
from PhysicsTools.PatAlgos.tools.jetTools import updateJetCollection
updateJetCollection(
process,
labelName = 'DeepFlavour',
jetSource = cms.InputTag('cleanPuppiAK4'),
pvSource = cms.InputTag('offlineSlimmedPrimaryVertices'),
svSource = cms.InputTag('slimmedSecondaryVertices'),
jetCorrections = ('AK4PFchs', cms.vstring(['L1FastJet', 'L2Relative', 'L3Absolute']), 'None'),
btagDiscriminators = ['deepFlavourJetTags:probb', 'deepFlavourJetTags:probbb','deepFlavourJetTags:probc','deepFlavourJetTags:probudsg','deepFlavourJetTags:probcc'],
postfix='NewDFTraining'
)
#process.selectedUpdatedPatJetsDeepFlavourNewDFTraining.userData.userFloats.src =[]
#'''
'''
process.patjets = cms.EDAnalyzer('EDBRTreeMaker',
PatJets = cms.InputTag("selectedUpdatedPatJets"),
PTMin = cms.double(-1),
BTag = cms.string("deepFlavourJetTags:probb"),
)
'''
process.selectedPatJetsAK8SoftDropPacked = cms.EDProducer("BoostedJetMerger",
jetSrc = cms.InputTag("selectedPatJetsAK8Softdrop"),
subjetSrc = cms.InputTag("selectedPatJetsAK8SoftDropSubjets")
)
process.redoSoftDropPatJets+=process.patJetCorrFactorsAK8Softdrop
process.redoSoftDropPatJets+=process.patJetsAK8Softdrop
process.redoSoftDropPatJets+=process.selectedPatJetsAK8Softdrop
option = 'RECO'
process.load("ExoDiBosonResonances.EDBRCommon.goodMuons_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodElectrons_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodJets_cff")
process.load("ExoDiBosonResonances.EDBRCommon.leptonicW_cff")
process.load("ExoDiBosonResonances.EDBRCommon.hadronicW_cff")
process.load("ExoDiBosonResonances.EDBRCommon.goodPuppi_cff")
if option == 'RECO':
process.goodMuons.src = "slimmedMuons"
process.goodElectrons.src = "slimmedElectrons"
process.goodJets.src = "slimmedJetsAK8"
# process.goodJets.src = "selectedPatJetsAK8"
process.Wtoenu.MET = "slimmedMETs"
process.Wtomunu.MET = "slimmedMETs"
process.goodPuppi.src = "selectedPatJetsAK8"
process.goodOfflinePrimaryVertex = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlineSlimmedPrimaryVertices"),
cut = cms.string("chi2!=0 && ndof >= 4.0 && abs(z) <= 24.0 && abs(position.Rho) <= 2.0"),
filter = cms.bool(True)
)
if option == 'RECO':
process.hadronicV.cut = ' '
if option == 'GEN':
process.hadronicV.cut = ' '
WBOSONCUT = "pt > 200.0"
process.leptonicVSelector = cms.EDFilter("CandViewSelector",
src = cms.InputTag("leptonicV"),
cut = cms.string( WBOSONCUT ),
filter = cms.bool(True)
)
process.leptonicVFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("leptonicV"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
process.hadronicVFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("hadronicV"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
process.graviton = cms.EDProducer("CandViewCombiner",
decay = cms.string("leptonicV hadronicV"),
checkCharge = cms.bool(False),
cut = cms.string("mass > 180"),
roles = cms.vstring('leptonicV', 'hadronicV'),
)
process.gravitonFilter = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("graviton"),
minNumber = cms.uint32(1),
filter = cms.bool(True)
)
from PhysicsTools.SelectorUtils.tools.vid_id_tools import *
switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD)
my_id_modules = ['RecoEgamma.ElectronIdentification.Identification.heepElectronID_HEEPV70_cff']
for idmod in my_id_modules:
setupAllVIDIdsInModule(process,idmod,setupVIDElectronSelection)
process.leptonSequence = cms.Sequence(process.muSequence +
process.egmGsfElectronIDSequence*process.eleSequence +
process.leptonicVSequence +
process.leptonicVSelector +
process.leptonicVFilter )
process.jetSequence = cms.Sequence(process.substructureSequence +
process.redoPatJets +
process.redoPrunedPatJets+
process.redoSoftDropPatJets+
process.fatJetsSequence +
process.fatPuppiSequence+
process.hadronicV +
process.hadronicVFilter)
process.gravitonSequence = cms.Sequence(process.graviton +
process.gravitonFilter)
if filterMode == False:
process.goodOfflinePrimaryVertex.filter = False
process.Wtomunu.cut = ''
process.Wtoenu.cut = ''
process.leptonicVSelector.filter = False
process.leptonicVSelector.cut = ''
process.hadronicV.cut = ''
process.graviton.cut = ''
process.leptonicVFilter.minNumber = 0
process.hadronicVFilter.minNumber = 0
process.gravitonFilter.minNumber = 0
process.load('RecoMET.METFilters.BadPFMuonFilter_cfi')
process.load("RecoMET.METFilters.BadChargedCandidateFilter_cfi")
process.BadPFMuonFilter.muons = cms.InputTag("slimmedMuons")
process.BadPFMuonFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.BadChargedCandidateFilter.muons = cms.InputTag("slimmedMuons")
process.BadChargedCandidateFilter.PFCandidates = cms.InputTag("packedPFCandidates")
process.metfilterSequence = cms.Sequence(process.BadPFMuonFilter+process.BadChargedCandidateFilter)
######### JEC ########
METS = "slimmedMETs"
jetsAK8 = "slimmedJetsAK8"
jetsAK8pruned = "slimmedJetsAK8"
jetsAK8softdrop = "slimmedJetsAK8"
jetsAK8puppi = "cleanPuppi"
if runOnMC:
jecLevelsAK8chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt'
]
jecLevelsAK8chsGroomed = [
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFchs.txt'
]
jecLevelsAK8puppi = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt'
]
jecLevelsAK8puppiGroomed = [
'Summer16_23Sep2016V3_MC_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK8PFPuppi.txt'
]
BjecLevelsAK4chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFPuppi.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFPuppi.txt'
]
jecLevelsAK4chs = [
'Summer16_23Sep2016V3_MC_L1FastJet_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L2Relative_AK4PFchs.txt',
'Summer16_23Sep2016V3_MC_L3Absolute_AK4PFchs.txt'
]
else:
jecLevelsAK8chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFchs.txt'
]
jecLevelsAK8chsGroomed = [
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFchs.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFchs.txt'
]
jecLevelsAK8puppi = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
jecLevelsAK8puppiGroomed = [
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
BjecLevelsAK4chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK8PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK8PFPuppi.txt'
]
jecLevelsAK4chs = [
'Summer16_23Sep2016BCDV4_DATA_L1FastJet_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2Relative_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L3Absolute_AK4PFPuppi.txt',
'Summer16_23Sep2016BCDV4_DATA_L2L3Residual_AK4PFPuppi.txt'
]
process.treeDumper = cms.EDAnalyzer("EDBRTreeMaker",
originalNEvents = cms.int32(1),
crossSectionPb = cms.double(1),
targetLumiInvPb = cms.double(1.0),
EDBRChannel = cms.string("VW_CHANNEL"),
lhe = cms.InputTag("externalLHEProducer"),
isGen = cms.bool(False),
isJEC = cms.bool(corrJetsOnTheFly),
RunOnMC = cms.bool(runOnMC),
RunOnSig = cms.bool(runOnSig),
generator = cms.InputTag("generator"),
genSrc = cms.InputTag("prunedGenParticles"),
pileup = cms.InputTag("slimmedAddPileupInfo"),
leptonicVSrc = cms.InputTag("leptonicV"),
gravitonSrc = cms.InputTag("graviton"),
looseMuonSrc = cms.InputTag("looseMuons"),
looseElectronSrc = cms.InputTag("looseElectrons"),
vetoMuonSrc = cms.InputTag("vetoMuons"),
vetoElectronSrc = cms.InputTag("vetoElectrons"),
goodMuSrc = cms.InputTag("goodMuons"),
MuSrc = cms.InputTag("slimmedMuons"),
EleSrc = cms.InputTag("slimmedElectrons"),
t1muSrc = cms.InputTag("slimmedMuons"),
metSrc = cms.InputTag("slimmedMETs"),
mets = cms.InputTag(METS),
#ak4jetsSrc = cms.InputTag("cleanAK4Jets"),
ak4jetsSrc = cms.InputTag("selectedUpdatedPatJetsDeepFlavourNewDFTraining"),
#ak4jetsSrc = cms.InputTag("slimmedJetPuppi"),
hadronicVSrc = cms.InputTag("hadronicV"),
hadronicVSrc_raw = cms.InputTag("slimmedJetsAK8"),
hadronicVSoftDropSrc = cms.InputTag("selectedPatJetsAK8SoftDropPacked"),
jets = cms.InputTag("slimmedJets"),
ak8JetSrc = cms.InputTag(jetsAK8),
fatjets = cms.InputTag(jetsAK8),
prunedjets = cms.InputTag(jetsAK8pruned),
softdropjets = cms.InputTag(jetsAK8softdrop),
puppijets = cms.InputTag(jetsAK8puppi),
jecAK8chsPayloadNames = cms.vstring( jecLevelsAK8chs ),
jecAK8chsPayloadNamesGroomed = cms.vstring( jecLevelsAK8chsGroomed ),
jecAK4chsPayloadNames = cms.vstring( jecLevelsAK4chs ),
BjecAK4chsPayloadNames = cms.vstring( BjecLevelsAK4chs ),
jecAK8puppiPayloadNames = cms.vstring( jecLevelsAK8puppi ),
jecAK8puppiPayloadNamesGroomed = cms.vstring( jecLevelsAK8puppiGroomed ),
jecpath = cms.string(''),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
electronIDs = cms.InputTag("heepElectronID-HEEPV50-CSA14-25ns"),
muons = cms.InputTag("slimmedMuons"),
vertices = cms.InputTag("offlineSlimmedPrimaryVertices"),
hltToken = cms.InputTag("TriggerResults","","HLT"),
muPaths1 = cms.vstring("HLT_PFHT650_WideJetMJJ900DEtaJJ1p5_v*"),
muPaths2 = cms.vstring("HLT_PFHT800_v*"),
muPaths3 = cms.vstring("HLT_PFHT900_v*"),
muPaths4 = cms.vstring("HLT_PFJet450_v*"),
muPaths5 = cms.vstring("HLT_PFJet500_v*"),
muPaths6 = cms.vstring("HLT_AK8PFJet450_v*"),
muPaths7 = cms.vstring("HLT_AK8PFJet500_v*"),
muPaths8 = cms.vstring("HLT_AK8PFJet360_TrimMass30_v*"),
muPaths9 = cms.vstring("HLT_AK8PFHT700_TrimR0p1PT0p03Mass50_v*"),
muPaths10 = cms.vstring("HLT_PFHT650_WideJetMJJ950DEtaJJ1p5_v*"),
el1 = cms.vstring("HLT_Ele45_WPLoose_Gsf_v*"),
el2 = cms.vstring("HLT_Ele115_CaloIdVT_GsfTrkIdT_v*"),#("HLT_Ele35_WPLoose_Gsf_v*"),
el3 = cms.vstring("HLT_Ele27_WPTight_Gsf_v*"),
mu1 = cms.vstring("HLT_Mu50_v*"), #B2G-15-005
mu2 = cms.vstring("HLT_TkMu50_v*"), #B2G-15-005
mu3 = cms.vstring("HLT_PFMETNoMu120_PFMHTNoMu120_IDTight_v*"),
mu4 = cms.vstring("HLT_PFMETNoMu110_PFMHTNoMu110_IDTight_v*"),
noiseFilter = cms.InputTag('TriggerResults','', hltFiltersProcessName),
noiseFilterSelection_HBHENoiseFilter = cms.string('Flag_HBHENoiseFilter'),
noiseFilterSelection_HBHENoiseIsoFilter = cms.string("Flag_HBHENoiseIsoFilter"),
noiseFilterSelection_GlobalTightHaloFilter = cms.string('Flag_globalTightHalo2016Filter'),
noiseFilterSelection_EcalDeadCellTriggerPrimitiveFilter = cms.string('Flag_EcalDeadCellTriggerPrimitiveFilter'),
noiseFilterSelection_goodVertices = cms.string('Flag_goodVertices'),
noiseFilterSelection_eeBadScFilter = cms.string('Flag_eeBadScFilter'),
noiseFilterSelection_badMuon = cms.InputTag('BadPFMuonFilter'),
noiseFilterSelection_badChargedHadron = cms.InputTag('BadChargedCandidateFilter'),
)
if option=='GEN':
process.treeDumper.metSrc = 'genMetTrue'
process.treeDumper.isGen = True
process.analysis = cms.Path(process.leptonSequence +
#process.substructureSequence+
#process.redoPatJets+
#process.redoPrunedPatJets+
#process.redoSoftDropPatJets+
process.HBHENoiseFilterResultProducer+
process.ApplyBaselineHBHENoiseFilter+
process.ApplyBaselineHBHEIsoNoiseFilter+
process.jetSequence +
process.metfilterSequence +
process.gravitonSequence +
process.treeDumper)
if option=='RECO':
process.analysis.replace(process.leptonSequence, process.goodOfflinePrimaryVertex + process.leptonSequence)
process.load("ExoDiBosonResonances.EDBRCommon.data.RSGravitonToWW_kMpl01_M_1000_Tune4C_13TeV_pythia8")
process.source.fileNames = [
"/store/data/Run2016B/JetHT/MINIAOD/23Sep2016-v1/90000/1069E38E-5982-E611-8CCB-008CFA110C74.root"
#"/store/data/Run2016B/JetHT/MINIAOD/23Sep2016-v1/90000/FE47EB9B-EB81-E611-B475-24BE05CEEB81.root"
#"/store/data/Run2016E/JetHT/MINIAOD/23Sep2016-v1/50000/483CEE4F-FB86-E611-94C8-0CC47A7C3572.root"
]
process.maxEvents.input = 2000
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 5000
process.MessageLogger.cerr.FwkReport.limit = 99999999
process.TFileService = cms.Service("TFileService",
fileName = cms.string("RStreeEDBR_pickup114.root")
)
| [
"[email protected]"
] | |
fb47ce858e2a7f7448b2da3fcc9ab0fc47525d9e | b72ef2946dafccebf621afbc904fdc55b4bfb5c0 | /mlens/parallel/learner.py | 00946d8f73095136450f79ddf4f7a84b57f70e8b | [
"MIT"
] | permissive | JoshuaC3/mlens | 6b4da909e2f60d7736c36eddd90a871f31e227e5 | f2ea2e8b403e3e46581de37cc94092529a4dc54d | refs/heads/master | 2021-08-06T20:04:32.146766 | 2017-11-06T23:08:24 | 2017-11-06T23:08:24 | 108,462,155 | 0 | 0 | null | 2017-10-26T20:28:33 | 2017-10-26T20:28:33 | null | UTF-8 | Python | false | false | 33,015 | py | """ML-Ensemble
:author: Sebastian Flennerhag
:copyright: 2017
:license: MIT
Computational graph nodes. Job generator classes spawning jobs and executing
estimation on cross-validation sub-graphs.
"""
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-arguments
# pylint: disable=too-many-instance-attributes
from __future__ import print_function, division
import warnings
from copy import deepcopy
from abc import ABCMeta, abstractmethod
from ._base_functions import (
slice_array, set_output_columns, assign_predictions, score_predictions,
replace, save, load, prune_files, check_params)
from .base import OutputMixin, ProbaMixin, IndexMixin, BaseEstimator
from ..metrics import Data
from ..utils import safe_print, print_time, format_name, assert_valid_pipeline
from ..utils.exceptions import (NotFittedError, FitFailedWarning,
ParallelProcessingError, NotInitializedError)
from ..externals.sklearn.base import clone
from ..externals.joblib.parallel import delayed
try:
from time import perf_counter as time
except ImportError:
from time import time
# Types of indexers that require fits only on subsets or only on the full data
ONLY_SUB = []
ONLY_ALL = ['fullindex', 'nonetype']
GLOBAL_LEARNER_NAMES = list()
GLOBAL_TRANSFORMER_NAMES = list()
###############################################################################
class IndexedEstimator(object):
"""Indexed Estimator
Lightweight wrapper around estimator dumps during fitting.
"""
__slots__ = [
'_estimator', 'name', 'index', 'in_index', 'out_index', 'data']
def __init__(self, estimator, name, index, in_index, out_index, data):
self._estimator = estimator
self.name = name
self.index = index
self.in_index = in_index
self.out_index = out_index
self.data = data
@property
def estimator(self):
"""Deep copy of estimator"""
return deepcopy(self._estimator)
@estimator.setter
def estimator(self, estimator):
self._estimator = estimator
def __getstate__(self):
"""Return pickable object"""
return (self._estimator, self.name, self.index, self.in_index,
self.out_index, self.data)
def __setstate__(self, state):
"""Load tuple into instance"""
(self._estimator, self.name, self.index, self.in_index,
self.out_index, self.data) = state
class SubLearner(object):
"""Estimation task
Wrapper around a sub_learner job.
"""
def __init__(self, job, parent, estimator, in_index, out_index,
in_array, targets, out_array, index):
self.job = job
self.estimator = estimator
self.in_index = in_index
self.out_index = out_index
self.in_array = in_array
self.targets = targets
self.out_array = out_array
self.score_ = None
self.index = tuple(index)
self.path = parent._path
self.attr = parent.attr
self.preprocess = parent.preprocess
self.scorer = parent.scorer
self.raise_on_exception = parent.raise_on_exception
self.verbose = parent.verbose
if not parent.__no_output__:
self.output_columns = parent.output_columns[index[0]]
self.score_ = None
self.fit_time_ = None
self.pred_time_ = None
self.name = parent.cache_name
self.name_index = '.'.join([self.name] + [str(i) for i in index])
if self.preprocess is not None:
self.preprocess_index = '.'.join(
[self.preprocess] + [str(i) for i in index])
else:
self.processing_index = ''
def __call__(self):
"""Launch job"""
return getattr(self, self.job)()
def fit(self, path=None):
"""Fit sub-learner"""
if not path:
path = self.path
t0 = time()
transformers = self._load_preprocess(path)
self._fit(transformers)
if self.out_array is not None:
self._predict(transformers, self.scorer is not None)
o = IndexedEstimator(estimator=self.estimator,
name=self.name_index,
index=self.index,
in_index=self.in_index,
out_index=self.out_index,
data=self.data)
save(path, self.name_index, o)
if self.verbose:
msg = "{:<30} {}".format(self.name_index, "done")
f = "stdout" if self.verbose < 10 - 3 else "stderr"
print_time(t0, msg, file=f)
def predict(self, path=None):
"""Predict with sublearner"""
if not path:
path = self.path
t0 = time()
transformers = self._load_preprocess(path)
self._predict(transformers, False)
if self.verbose:
msg = "{:<30} {}".format(self.name_index, "done")
f = "stdout" if self.verbose < 10 - 3 else "stderr"
print_time(t0, msg, file=f)
def transform(self, path=None):
"""Predict with sublearner"""
return self.predict(path)
def _fit(self, transformers):
"""Sub-routine to fit sub-learner"""
xtemp, ytemp = slice_array(self.in_array, self.targets, self.in_index)
# Transform input (triggers copying)
t0 = time()
if transformers:
xtemp, ytemp = transformers.transform(xtemp, ytemp)
# Fit estimator
self.estimator.fit(xtemp, ytemp)
self.fit_time_ = time() - t0
def _load_preprocess(self, path):
"""Load preprocessing pipeline"""
if self.preprocess is not None:
obj = load(path, self.preprocess_index, self.raise_on_exception)
return obj.estimator
return
def _predict(self, transformers, score_preds):
"""Sub-routine to with sublearner"""
n = self.in_array.shape[0]
# For training, use ytemp to score predictions
# During test time, ytemp is None
xtemp, ytemp = slice_array(self.in_array, self.targets, self.out_index)
t0 = time()
if transformers:
xtemp, ytemp = transformers.transform(xtemp, ytemp)
predictions = getattr(self.estimator, self.attr)(xtemp)
self.pred_time_ = time() - t0
# Assign predictions to matrix
assign_predictions(self.out_array, predictions,
self.out_index, self.output_columns, n)
# Score predictions if applicable
if score_preds:
self.score_ = score_predictions(
ytemp, predictions, self.scorer, self.name_index, self.name)
@property
def data(self):
"""fit data"""
out = {'score': self.score_,
'ft': self.fit_time_,
'pt': self.pred_time_}
return out
class SubTransformer(object):
"""Sub-routine for fitting a pipeline
"""
def __init__(self, job, parent, estimator, in_index, in_array,
targets, index, out_index=None, out_array=None):
self.job = job
self.estimator = estimator
self.in_index = in_index
self.out_index = out_index
self.in_array = in_array
self.out_array = out_array
self.targets = targets
self.index = index
self.transform_time_ = None
self.path = parent._path
self.verbose = parent.verbose
self.name = parent.cache_name
self.name_index = '.'.join(
[self.name] + [str(i) for i in index])
if not parent.__no_output__:
self.output_columns = parent.output_columns[index[0]]
def __call__(self):
"""Launch job"""
return getattr(self, self.job)()
def predict(self):
"""Dump transformers for prediction"""
self._transform()
def transform(self):
"""Dump transformers for prediction"""
self._transform()
def _transform(self):
"""Run a transformation"""
t0 = time()
n = self.in_array.shape[0]
xtemp, ytemp = slice_array(
self.in_array, self.targets, self.out_index)
xtemp, ytemp = self.estimator.transform(xtemp, ytemp)
assign_predictions(
self.out_array, xtemp, self.out_index, self.output_columns, n)
if self.verbose:
msg = "{:<30} {}".format(self.name_index, "done")
f = "stdout" if self.verbose < 10 - 3 else "stderr"
print_time(t0, msg, file=f)
def fit(self, path=None):
"""Fit transformers"""
path = path if path else self.path
t0 = time()
xtemp, ytemp = slice_array(
self.in_array, self.targets, self.in_index)
t0_f = time()
self.estimator.fit(xtemp, ytemp)
self.transform_time_ = time() - t0_f
if self.out_array is not None:
self._transform()
o = IndexedEstimator(estimator=self.estimator,
name=self.name_index,
index=self.index,
in_index=self.in_index,
out_index=self.out_index,
data=self.data)
save(path, self.name_index, o)
if self.verbose:
f = "stdout" if self.verbose < 10 else "stderr"
msg = "{:<30} {}".format(self.name_index, "done")
print_time(t0, msg, file=f)
@property
def data(self):
"""fit data"""
return {'ft': self.transform_time_}
class EvalSubLearner(SubLearner):
"""EvalSubLearner
sub-routine for cross-validated evaluation.
"""
def __init__(self, job, parent, estimator, in_index, out_index,
in_array, targets, index):
super(EvalSubLearner, self).__init__(
job=job, parent=parent, estimator=estimator,
in_index=in_index, out_index=out_index,
in_array=in_array, out_array=None,
targets=targets, index=index)
self.error_score = parent.error_score
self.train_score_ = None
self.test_score_ = None
self.train_pred_time_ = None
self.test_pred_time_ = None
def fit(self, path=None):
"""Evaluate sub-learner"""
path = path if path else self.path
if self.scorer is None:
raise ValueError("Cannot generate CV-scores without a scorer")
t0 = time()
transformers = self._load_preprocess(path)
self._fit(transformers)
self._predict(transformers)
o = IndexedEstimator(estimator=self.estimator,
name=self.name_index,
index=self.index,
in_index=self.in_index,
out_index=self.out_index,
data=self.data)
save(path, self.name_index, o)
if self.verbose:
f = "stdout" if self.verbose else "stderr"
msg = "{:<30} {}".format(self.name_index, "done")
print_time(t0, msg, file=f)
def _predict(self, transformers, score_preds=None):
"""Sub-routine to with sublearner"""
# Train set
self.train_score_, self.train_pred_time_ = self._score_preds(
transformers, self.in_index)
# Validation set
self.test_score_, self.test_pred_time_ = self._score_preds(
transformers, self.out_index)
def _score_preds(self, transformers, index):
# Train scores
xtemp, ytemp = slice_array(self.in_array, self.targets, index)
if transformers:
xtemp, ytemp = transformers.transform(xtemp, ytemp)
t0 = time()
if self.error_score is not None:
try:
scores = self.scorer(self.estimator, xtemp, ytemp)
except Exception as exc: # pylint: disable=broad-except
warnings.warn(
"Scoring failed. Setting error score %r."
"Details:\n%r" % (self.error_score, exc),
FitFailedWarning)
scores = self.error_score
else:
scores = self.scorer(self.estimator, xtemp, ytemp)
pred_time = time() - t0
return scores, pred_time
@property
def data(self):
"""Score data"""
out = {'test_score': self.test_score_,
'train_score': self.train_score_,
'fit_time': self.fit_time_,
'pred_time': self.train_pred_time_,
# 'test_pred_time': self.train_pred_time_,
}
return out
class Cache(object):
"""Cache wrapper for IndexedEstimator
"""
def __init__(self, obj, path, verbose):
self.obj = obj
self.path = path
self.name = obj.name
self.verbose = verbose
def __call__(self, path=None):
"""Cache estimator to path"""
path = path if path else self.path
save(path, self.name, self.obj)
if self.verbose:
msg = "{:<30} {}".format(self.name, "cached")
f = "stdout" if self.verbose < 10 - 3 else "stderr"
safe_print(msg, file=f)
###############################################################################
class BaseNode(OutputMixin, IndexMixin, BaseEstimator):
"""Base computational node inherited by job generators.
Common API for job generators. A class that inherits the base
need to set a ``__subtype__`` in the constructor. The sub-type should be
the class that runs estimations and must implement a ``__call__``,
``fit``, ``transform`` and ``predict`` method.
"""
__meta_class__ = ABCMeta
# Reset subtype class attribute in any class that inherits the base
__subtype__ = None
def __init__(self, name, estimator, indexer=None, verbose=False, **kwargs):
super(BaseNode, self).__init__(name, **kwargs)
# Variables
self._path = None
self._data_ = None
self._times_ = None
self._learner_ = None
self._sublearners_ = None
self.__collect__ = False
self._partitions = None
self.__only_all__ = None
self.__only_sub__ = None
# Parameters
self.indexer = indexer
if self.indexer:
self.set_indexer(self.indexer)
self.estimator = estimator
self.verbose = verbose
self.cache_name = None
self.output_columns = None
self.feature_span = None
self.__static__.extend(['estimator', 'name', 'indexer'])
def __iter__(self):
yield self
def __call__(self, args, arg_type='main', parallel=None):
"""Caller for producing jobs"""
job = args['job']
self._path = args['dir']
_threading = self.backend == 'threading'
if not self.__indexer__:
raise NotInitializedError(
"Instance has no indexer attached. Call set_indexer first.")
if job != 'fit' and not self.__fitted__:
raise NotFittedError(
"Instance not fitted with current params. Call 'fit' first.")
if job == 'fit':
if self.__fitted__ and args.pop('refit', False):
# Check refit
if self.__no_output__:
return
args['job'] = 'transform'
return self(args, arg_type, parallel)
# Record static params
self._store_static_params()
generator = getattr(self, 'gen_%s' % job)(**args[arg_type])
if not parallel:
return generator
parallel(delayed(subtask, not _threading)()
for subtask in generator)
if self.__collect__:
self.collect()
def _gen_pred(self, job, X, P, generator):
"""Generator for predicting with fitted learner
Parameters
----------
job: str
type of job
X : array-like of shape [n_samples, n_features]
input array
P : array-like of shape [n_samples, n_prediction_features]
output array to populate. Must be writeable.
generator : iterable
iterator of learners of sub-learners to predict with.
One of ``self.learner_`` and ``self.sublearners_``.
"""
for estimator in generator:
yield self.__subtype__(
job=job,
parent=self,
estimator=estimator.estimator,
in_index=estimator.in_index,
out_index=estimator.out_index,
in_array=X,
out_array=P,
index=estimator.index,
targets=None,
)
def gen_fit(self, X, y, P=None):
"""Routine for generating fit jobs conditional on refit
Parameters
----------
X: array-like of shape [n_samples, n_features]
input array
y: array-like of shape [n_samples,]
targets
P: array-like of shape [n_samples, n_prediction_features], optional
output array to populate. Must be writeable. Only pass if
predictions are desired.
"""
# We use a derived cache_name during estimation: if the name of the
# instance or the name of the preprocessing dependency changes, this
# allows us to pick up on that.
if hasattr(self, 'preprocess'):
self.cache_name = '%s.%s' % (
self.preprocess, self.name) if self.preprocess else self.name
else:
self.cache_name = self.name
if self.__subtype__ is None:
raise ParallelProcessingError(
"Class incorrectly constructed. Need to set class attribute "
"__subtype__")
self.__collect__ = True
# We use an index to keep track of partition and fold
# For single-partition estimations, index[0] is constant
i = 0
if not self.__only_sub__:
out = P if self.__only_all__ else None
for partition_index in self.indexer.partition():
yield self.__subtype__(
job='fit',
parent=self,
estimator=self.cloned_estimator,
in_index=partition_index,
out_index=None,
in_array=X,
targets=y,
out_array=out,
index=(i, 0),
)
i += 1
if not self.__only_all__:
# Fit sub-learners on cv folds
for i, (train_index, test_index) in enumerate(
self.indexer.generate()):
# Note that we bump index[1] by 1 to have index[1] start at 1
if self._partitions == 1:
index = (0, i + 1)
else:
splits = self.indexer.folds
index = (i // splits, i % splits + 1)
yield self.__subtype__(
job='fit',
parent=self,
estimator=self.cloned_estimator,
in_index=train_index,
out_index=test_index,
in_array=X,
targets=y,
out_array=P,
index=index,
)
def gen_transform(self, X, P=None):
"""Generate cross-validated predict jobs
Parameters
----------
X: array-like of shape [n_samples, n_features]
input array
y: array-like of shape [n_samples,]
targets
P: array-like of shape [n_samples, n_prediction_features], optional
output array to populate. Must be writeable. Only pass if
predictions are desired.
"""
return self._gen_pred('transform', X, P, self.sublearners)
def gen_predict(self, X, P=None):
"""Generate predicting jobs
Parameters
----------
X: array-like of shape [n_samples, n_features]
input array
y: array-like of shape [n_samples,]
targets
P: array-like of shape [n_samples, n_prediction_features], optional
output array to populate. Must be writeable. Only pass if
predictions are desired.
"""
return self._gen_pred('predict', X, P, self.learner)
def collect(self, path=None):
"""Load fitted estimator from cache
Parameters
----------
path: str, list, optional
path to cache.
"""
if not path:
path = self._path
if self.__collect__:
(learner_files,
learner_data,
sublearner_files,
sublearner_data) = self._collect(path)
self.clear()
self._learner_ = learner_files
self._sublearners_ = sublearner_files
self._data_ = sublearner_data
self._times_ = learner_data
# Collection complete, turn off
self.__collect__ = False
def clear(self):
"""Clear load"""
self._sublearners_ = None
self._learner_ = None
self._data_ = None
self._times_ = None
self._path = None
def set_indexer(self, indexer):
"""Set indexer and auxiliary attributes
Parameters
----------
indexer: obj
indexer to build instance with.
"""
self.indexer = indexer
self._partitions = indexer.partitions
self.__only_all__ = indexer.__class__.__name__.lower() in ONLY_ALL
self.__only_sub__ = indexer.__class__.__name__.lower() in ONLY_SUB
def _collect(self, path):
"""Collect files from cache"""
files = prune_files(path, self.cache_name)
learner_files = list()
learner_data = list()
sublearner_files = list()
sublearner_data = list()
while files:
f = files.pop(0)
if f in files:
raise ParallelProcessingError(
"Corrupt cache: duplicate cache entry found.\n%r" % f)
if f.index[1] == 0:
learner_files.append(f)
learner_data.append((f.name, f.data))
else:
sublearner_files.append(f)
sublearner_data.append((f.name, f.data))
if self.__only_sub__:
# Full learners are the same as the sub-learners
learner_files, learner_data = replace(sublearner_files)
if self.__only_all__:
# Sub learners are the same as the sub-learners
sublearner_files, sublearner_data = replace(learner_files)
return learner_files, learner_data, sublearner_files, sublearner_data
def _return_attr(self, attr):
if not self.__fitted__:
raise NotFittedError("Instance not fitted.")
return getattr(self, attr)
def set_output_columns(self, X=None, y=None, job=None, n_left_concats=0):
"""Set the output_columns attribute"""
# pylint: disable=unused-argument
multiplier = self._get_multiplier(X, y)
target = self._partitions * multiplier + n_left_concats
set_output_columns(
[self], self._partitions, multiplier, n_left_concats, target)
mi = n_left_concats
mx = max([i for i in self.output_columns.values()]) + multiplier
self.feature_span = (mi, mx)
@abstractmethod
def _get_multiplier(self, X, y):
"""Get the prediction multiplier given input (X, y)"""
return 1
@property
def __fitted__(self):
"""Fit status"""
if (not self._learner_ or not self._sublearners_ or
not self.indexer.__fitted__):
return False
# Check estimator param overlap
fitted = self._learner_ + self._sublearners_
fitted_params = fitted[0].estimator.get_params(deep=True)
model_estimator_params = self.estimator.get_params(deep=True)
if not check_params(fitted_params, model_estimator_params):
self.clear() # Release obsolete estimators
return False
# Check that hyper-params hasn't changed
if not self._check_static_params():
return False
return True
@property
def cloned_estimator(self):
"""Copy of estimator"""
return clone(self.estimator)
@property
def learner(self):
"""Generator for learner fitted on full data"""
# pylint: disable=not-an-iterable
out = self._return_attr('_learner_')
for estimator in out:
yield deepcopy(estimator)
@property
def sublearners(self):
"""Generator for learner fitted on folds"""
# pylint: disable=not-an-iterable
out = self._return_attr('_sublearners_')
for estimator in out:
yield deepcopy(estimator)
@property
def raw_data(self):
"""List of data collected from each sub-learner during fitting."""
return self._return_attr('_data_')
@property
def data(self):
"""Dictionary with aggregated data from fitting sub-learners."""
out = self._return_attr('_data_')
return Data(out)
@property
def times(self):
"""Fit and predict times for the final learners"""
out = self._return_attr('_times_')
return Data(out)
class Learner(ProbaMixin, BaseNode):
"""Learner
Wrapper for base learners.
Parameters
__________
estimator : obj
estimator to construct learner from
preprocess : str, obj
preprocess transformer. Pass either the string
cache reference or the transformer instance. If the latter,
the :attr:`preprocess` will refer to the transformer name.
name : str
name of learner. If ``preprocess`` is not ``None``,
the name will be prepended to ``preprocess__name``.
attr : str (default='predict')
predict attribute, typically one of 'predict' and 'predict_proba'
scorer : func
function to use for scoring predictions during cross-validated
fitting.
output_columns : dict, optional
mapping of prediction feature columns from learner to columns in
output array. Normally, this map is ``{0: x}``, but if the ``indexer``
creates partitions, each partition needs to be mapped:
``{0: x, 1: x + 1}``. Note that if ``output_columns`` are not given at
initialization, the ``set_output_columns`` method must be called before
running estimations.
verbose : bool, int (default = False)
whether to report completed fits.
**kwargs : bool (default=True)
Optional ParallelProcessing arguments. See :class:`BaseParallel`.
"""
__subtype__ = SubLearner
def __init__(self, estimator, indexer=None, name=None, preprocess=None,
attr=None, scorer=None, proba=False, **kwargs):
super(Learner, self).__init__(
name=format_name(name, 'learner', GLOBAL_LEARNER_NAMES),
estimator=estimator, indexer=indexer, **kwargs)
self._classes = None
self.proba = proba
self._scorer = scorer
self.preprocess = preprocess
self.n_pred = self._partitions
self.attr = attr if attr else self._predict_attr
# Protect preprocess against later changes
self.__static__.append('preprocess')
@property
def scorer(self):
"""Copy of scorer"""
return deepcopy(self._scorer)
@scorer.setter
def scorer(self, scorer):
"""Copy of scorer"""
self._scorer = scorer
class Transformer(BaseNode):
"""Preprocessing handler.
Wrapper for transformation pipeline.
Parameters
__________
indexer : obj, None
indexer to use for generating fits.
Set to ``None`` to fit only on all data.
estimator : obj
transformation pipeline to construct learner from
name : str
name of learner. If ``preprocess`` is not ``None``,
the name will be prepended to ``preprocess__name``.
output_columns : dict, optional
If transformer is to be used to output data, need to
set ``output_columns``. Normally, this map is
``{0: x}``, but if the ``indexer``
creates partitions, each partition needs to be mapped:
``{0: x, 1: x + 1}``.
verbose : bool, int (default = False)
whether to report completed fits.
raise_on_exception : bool (default=True)
whether to warn on non-fatal exceptions or raise an error.
"""
__subtype__ = SubTransformer
def __init__(self, estimator, indexer=None, name=None, **kwargs):
assert_valid_pipeline(estimator)
name = format_name(name, 'transformer', GLOBAL_TRANSFORMER_NAMES)
super(Transformer, self).__init__(
name=name, estimator=estimator, indexer=indexer, **kwargs)
self.__no_output__ = True
def _get_multiplier(self, X, y=None, alt=None):
"""Number of cols produced in prediction"""
return X.shape[1]
def _gen_pred(self, job, X, P, generator):
if self.__no_output__:
def gen():
for o in generator:
yield Cache(o, self._path, self.verbose)
return gen()
else:
return super(Transformer, self)._gen_pred(job, X, P, generator)
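# --- Illustrative usage sketch (added note; not part of the original source, names are placeholders). ---
# A Transformer wraps a preprocessing pipeline so it can be fitted per fold and cached; a Learner
# can then point at that cache through its ``preprocess`` argument (a string cache reference, per
# the Learner docstring). The pipeline object is assumed to be whatever ``assert_valid_pipeline``
# accepts.
#
#     pre = Transformer(estimator=preprocessing_pipeline, indexer=fold_indexer, name='sc')
#     reg = Learner(estimator=SomeRegressor(), indexer=fold_indexer,
#                   preprocess='sc', name='reg')
#
# During prediction the Transformer yields Cache objects (see ``_gen_pred`` above) instead of
# writing to the output array, since ``__no_output__`` is set to True in ``__init__``.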
class EvalTransformer(Transformer):
r"""Evaluator version of the Transformer.
Derived class from Transformer adapted to cross\-validated grid-search.
See :class:`Transformer` for more details.
"""
def __init__(self, estimator, indexer=None, name=None, **kwargs):
super(EvalTransformer, self).__init__(
estimator, indexer=indexer, name=name, **kwargs)
self.output_columns = {0: 0} # For compatibility with SubTransformer
self.__only_all__ = False
self.__only_sub__ = True
class EvalLearner(Learner):
"""EvalLearner
EvalLearner is a derived class from Learner used for cross-validated
scoring of an estimator.
Parameters
    ----------
estimator : obj
estimator to construct learner from
preprocess : str
        preprocess cache reference
indexer : obj, None
indexer to use for generating fits.
Set to ``None`` to fit only on all data.
name : str
name of learner. If ``preprocess`` is not ``None``,
the name will be prepended to ``preprocess__name``.
attr : str (default='predict')
predict attribute, typically one of 'predict' and 'predict_proba'
scorer : func
function to use for scoring predictions during cross-validated
fitting.
error_score : int, float, None (default = None)
score to set if cross-validation fails. Set to ``None`` to raise error.
verbose : bool, int (default = False)
whether to report completed fits.
raise_on_exception : bool (default=True)
whether to warn on non-fatal exceptions or raise an error.
"""
__subtype__ = EvalSubLearner
def __init__(self, estimator, preprocess, name, attr, scorer,
error_score=None, verbose=False, **kwargs):
super(EvalLearner, self).__init__(
estimator=estimator, preprocess=preprocess,
name=name, attr=attr, scorer=scorer, verbose=verbose, **kwargs)
self.__only_sub__ = True
self.__only_all__ = False
self.output_columns = {0: 0} # For compatibility with SubLearner
self.error_score = error_score
def gen_fit(self, X, y, P=None, refit=True):
"""Generator for fitting learner on given data"""
self.cache_name = '%s.%s' % (
self.preprocess, self.name) if self.preprocess else self.name
if not refit and self.__fitted__:
self.gen_transform(X, P)
# We use an index to keep track of partition and fold
# For single-partition estimations, index[0] is constant
if self.indexer is None:
raise ValueError("Cannot run cross-validation without an indexer")
self.__collect__ = True
for i, (train_index, test_index) in enumerate(
self.indexer.generate()):
# Note that we bump index[1] by 1 to have index[1] start at 1
if self._partitions == 1:
index = (0, i + 1)
else:
index = (0, i % self._partitions + 1)
yield EvalSubLearner(
job='fit',
parent=self,
estimator=self.cloned_estimator,
in_index=train_index,
out_index=test_index,
in_array=X,
targets=y,
index=index,
)
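# --- Illustrative note (added; not part of the original source, names are placeholders). ---
# ``gen_fit`` is a generator of cross-validation tasks: with a k-fold indexer and a single
# partition it yields k EvalSubLearner instances carrying index (0, 1) ... (0, k), each one
# fitting on ``in_index`` (the train fold) and scoring on ``out_index`` (the test fold).
# A hypothetical driver would simply exhaust the generator:
#
#     evl = EvalLearner(estimator=SomeRegressor(), preprocess=None, name='reg',
#                       attr='predict', scorer=some_scorer, indexer=fold_indexer)
#     for task in evl.gen_fit(X, y):
#         ...  # in this sketch the tasks are just iterated; mlens normally hands them to
#              # its ParallelProcessing machinery (see the **kwargs note in the docstrings)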
| [
"[email protected]"
] | |
7f7037fa21290dcb8418f1c9c2eeb83c713b4296 | 40d404e424489bb48c3673dee6664de64dbab39c | /myWebsite/.venv/lib/python3.8/site-packages/zope/annotation/attribute.py | 5c009ce2b71fbbf979a61e014fc06a8c534ed00c | [] | no_license | alyasamba/me | 07c9f5f27aa16f768e0432780ac8f6f5ab6afbd1 | 978053c867181bad8eb316a0920ba290a7b1ceae | refs/heads/main | 2023-01-28T09:57:46.616285 | 2020-12-02T02:31:25 | 2020-12-02T02:31:25 | 315,935,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,466 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Attribute Annotations implementation"""
import logging
try:
from collections.abc import MutableMapping as DictMixin
except ImportError:
# Python 2
from collections import MutableMapping as DictMixin
try:
from BTrees.OOBTree import OOBTree as _STORAGE
except ImportError: # pragma: no cover
logging.getLogger(__name__).warning(
'BTrees not available: falling back to dict for attribute storage')
_STORAGE = dict
from zope import component, interface
from zope.annotation import interfaces
_EMPTY_STORAGE = _STORAGE()
@interface.implementer(interfaces.IAnnotations)
@component.adapter(interfaces.IAttributeAnnotatable)
class AttributeAnnotations(DictMixin):
"""Store annotations on an object
Store annotations in the `__annotations__` attribute on a
`IAttributeAnnotatable` object.
"""
# Yes, there's a lot of repetition of the `getattr` call,
# but that turns out to be the most efficient for the ways
# instances are typically used without sacrificing any semantics.
# See https://github.com/zopefoundation/zope.annotation/issues/8
# for a discussion of alternatives (which included functools.partial,
# a closure, capturing the annotations in __init__, and versions
# with getattr and exceptions).
def __init__(self, obj, context=None):
self.obj = obj
@property
def __parent__(self):
return self.obj
def __bool__(self):
return bool(getattr(self.obj, '__annotations__', 0))
__nonzero__ = __bool__
def get(self, key, default=None):
"""See zope.annotation.interfaces.IAnnotations"""
annotations = getattr(self.obj, '__annotations__', _EMPTY_STORAGE)
return annotations.get(key, default)
def __getitem__(self, key):
annotations = getattr(self.obj, '__annotations__', _EMPTY_STORAGE)
return annotations[key]
def keys(self):
annotations = getattr(self.obj, '__annotations__', _EMPTY_STORAGE)
return annotations.keys()
def __iter__(self):
annotations = getattr(self.obj, '__annotations__', _EMPTY_STORAGE)
return iter(annotations)
def __len__(self):
annotations = getattr(self.obj, '__annotations__', _EMPTY_STORAGE)
return len(annotations)
def __setitem__(self, key, value):
"""See zope.annotation.interfaces.IAnnotations"""
try:
annotations = self.obj.__annotations__
except AttributeError:
annotations = self.obj.__annotations__ = _STORAGE()
annotations[key] = value
def __delitem__(self, key):
"""See zope.app.interfaces.annotation.IAnnotations"""
try:
annotation = self.obj.__annotations__
except AttributeError:
raise KeyError(key)
del annotation[key]
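# --- Illustrative usage sketch (added; ``Note`` below is a hypothetical class, not part of this module). ---
# Any object whose class declares ``IAttributeAnnotatable`` can be wrapped by this adapter,
# which lazily creates and persists the mapping on the object's ``__annotations__`` attribute:
#
#     @interface.implementer(interfaces.IAttributeAnnotatable)
#     class Note(object):
#         pass
#
#     note = Note()
#     ann = AttributeAnnotations(note)
#     ann['myapp.counter'] = 1                       # creates note.__annotations__ on first write
#     assert note.__annotations__['myapp.counter'] == 1
#     del ann['myapp.counter']                       # removes the key again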
| [
"[email protected]"
] | |
98a489879ec8c353e34f3c02e3732f79b32f4943 | 12a012ace19a14fc2c4ce2daec7a9df94cd5d925 | /[1차] 비밀지도.py | 00fedf556e4d6eaf4e149b313258d139fa9b2ee1 | [] | no_license | newfull5/Programmers | a0a25fd72c0a8a7932122cb72e65b28ecd29ff71 | b880a8043427f6aa7dc72caa3e46b1f6584a8962 | refs/heads/master | 2022-12-28T13:46:52.215347 | 2022-12-12T13:50:53 | 2022-12-12T13:50:53 | 211,209,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,852 | py | '''
def solution(n, arr1, arr2):
answer1 = []
answer2 = []
answer = []
for arr in arr1:
if len(bin(arr)[2:]) != n:
answer1.append((n - len(bin(arr)[2:]))*'0' + bin(arr)[2:])
else:
answer1.append(bin(arr)[2:])
for arr in arr2:
if len(bin(arr)[2:]) != n:
answer2.append((n - len(bin(arr)[2:]))*'0' + bin(arr)[2:])
else:
answer2.append(bin(arr)[2:])
for i in range(0, n):
temp = ''
for j in range(0,n):
if answer1[i][j] == '1' or answer2[i][j] == '1':
temp += '#'
else:
temp += ' '
answer.append(temp)
return answer
'''
"""
# 2020.02.26
# Nothing has changed at all from my solution 20 days ago. Is this a problem with only one possible solution? Or have I simply failed to grow?
def solution(n, arr1, arr2):
ar1 = []
ar2 = []
answer = []
for num in arr1:
if len(bin(num)[2:]) != n:
ar1.append('0'*(n - len(bin(num)[2:])) + bin(num)[2:])
else:
ar1.append(bin(num)[2:])
for num in arr2:
if len(bin(num)[2:]) != n:
ar2.append('0'*(n - len(bin(num)[2:])) + bin(num)[2:])
else:
ar2.append(bin(num)[2:])
for i in range(0, n):
string = ''
for j in range(0, n):
if ar1[i][j] == '1' or ar2[i][j] == '1':
string += '#'
else:
string += ' '
answer.append(string)
return answer
"""
#2022.11.12
def _geunsub(string, n):
string = string[2:]
string = '00000' + string
string = string[-n:]
return string.replace('1', '#').replace('0', ' ')
def solution(n, arr1, arr2):
return [_geunsub(bin(a | b), n) for a,b, in zip(arr1, arr2)]
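# Worked example (added for clarity): with n = 5,
#   arr1 = [9, 20, 28, 18, 11]  ->  01001, 10100, 11100, 10010, 01011
#   arr2 = [30,  1, 21, 17, 28]  ->  11110, 00001, 10101, 10001, 11100
# the row-wise OR is 11111, 10101, 11101, 10011, 11111, so
#   solution(5, [9, 20, 28, 18, 11], [30, 1, 21, 17, 28])
#   == ['#####', '# # #', '### #', '#  ##', '#####']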
| [
"[email protected]"
] | |
445f3536583b311826de4e39ab680a8d10b37ae2 | 800b5166148d4e3cd03825d7d20e2900fbc6c789 | /report_form/migrations/0040_poorpeopledataform_offpoor_year.py | a92d794dfc6987387dfa5eac23d67a0145623cc5 | [] | no_license | JiSuPiaoYi/dawufupin | 4ffc979a93502eb576776673c98aaeb16021827e | 57756a501436fabe9b27ebca2e80e60932da30dc | refs/heads/master | 2020-04-07T11:37:35.728108 | 2018-11-20T09:09:50 | 2018-11-20T09:09:50 | 158,334,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2018-09-24 11:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('report_form', '0039_auto_20180924_0918'),
]
operations = [
migrations.AddField(
model_name='poorpeopledataform',
name='offpoor_year',
field=models.CharField(blank=True, db_column='offpoor_year', max_length=20),
),
]
| [
"[email protected]"
] | |
022f87a546a6119342777ea2100ad81a1be36b7d | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_6s/L4/step10_a.py | baa0b1da65dac66b042bc462388e0182438ca561 | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148,472 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                                    ### path of the currently executing step10_b.py
code_exe_path_element = code_exe_path.split("\\")                             ### split the path; later we need to find which layer kong_model sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                       ### find which layer kong_model2 sits at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])           ### locate the dir of kong_model2
import sys                                                                    ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer                ### the -1 in the middle converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the "step1x_" prefix; later I felt meaningful names need not be stripped, so it was changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the "mask_" prefix; mask_ was only added because a python module name cannot start with a digit; later the automatic ordering seemed acceptable, so it was changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir)    ### for example: template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
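### Worked example (added): for this file the part of the path below kong_model2 is
### "Exps_7_v3\doc3d\Ablation4_ch016_ep003_7_10\Gather2_W_fixGood_C_change\train\pyr_6s\L4\step10_a.py",
### so kong_to_py_layer = 8 (> 3) and template_dir becomes
### "Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_6s/L4",
### i.e. exp_dir mirrors the folder chain between kong_model2 and this script.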
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_6side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_6s.L4.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder one level "above" result_dir! Nesting exp_dir is fine too~
For example: exp_dir = "6_mask_unet/your_chosen_name", then every result_dir lives under:
    6_mask_unet/your_chosen_name/result_a
    6_mask_unet/your_chosen_name/result_b
    6_mask_unet/your_chosen_name/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the z, y, x order follows the correspondence in step07_b_0b_Multi_UNet
#############################################################
### Build an empty Exp_builder so that resul_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
###################
############# 1s1
######### 2s1
##### 3s1
### 4s1
ch032_1side_1__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
###################
############# 1s2
######### 2s1
##### 3s1
### 4s1
ch032_1side_2__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s1
##### 3s1
### 4s1
ch032_1side_2__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_2__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_2__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
###################
############# 1s3
######### 2s1
##### 3s1
### 4s1
ch032_1side_3__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s2
##### 3s1
### 4s1
ch032_1side_3__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_3__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_3__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s3
##### 3s1
### 4s1
ch032_1side_3__2side_3__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_3__2side_3__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_3__2side_3__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_3__2side_3__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_3__2side_3__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_3__2side_3__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
###################
############# 1s4
######### 2s1
##### 3s1
### 4s1
ch032_1side_4__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s2
##### 3s1
### 4s1
ch032_1side_4__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_4__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s3
##### 3s1
### 4s1
ch032_1side_4__2side_3__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_4__2side_3__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_3__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_4__2side_3__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_3__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_4__2side_3__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s4
##### 3s1
### 4s1
ch032_1side_4__2side_4__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_4__2side_4__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_4__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_4__2side_4__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_4__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_4__2side_4__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s4
### 4s1
ch032_1side_4__2side_4__3side_4_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_4__2side_4__3side_4_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_4__2side_4__3side_4_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s4
ch032_1side_4__2side_4__3side_4_4side_4_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s4__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s4__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s4__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_5s4_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4__5s4__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4_5s4_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
###################
############# 1s5
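# Every Exp_builder in the 1side_5 group below shares the same recipe: epochs=1,
# it_see_fq / it_save_fq = 900 * 5, it_down_step="half" with it_down_fq=900, in/gt ranges of
# Range(0, 1), and the same reloaded sub-models (I_to_Wx_Wy_Wz is always I_w_M_to_W_p20_3s_L5_Good).
# Only the pyramid side combination changes; it is encoded consistently in the variable name, the
# ch032_pyramid_*_and_1s6_2s6 model builder, the describe_end string, and the matching
# W_w_M_to_C_p20_pyr attribute passed to W_to_Cx_Cy.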
######### 2s1
##### 3s1
### 4s1
ch032_1side_5__2side_1__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s2
##### 3s1
### 4s1
ch032_1side_5__2side_2__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_5__2side_2__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_2__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s3
##### 3s1
### 4s1
ch032_1side_5__2side_3__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_5__2side_3__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_3__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_5__2side_3__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_3__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_3__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s4
##### 3s1
### 4s1
ch032_1side_5__2side_4__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_5__2side_4__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_4__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_5__2side_4__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_4__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_4__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s4
### 4s1
ch032_1side_5__2side_4__3side_4_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_4__3side_4_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_4__3side_4_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s4
ch032_1side_5__2side_4__3side_4_4side_4_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s4__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s4__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s4__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_5s4_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4__5s4__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4_5s4_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
######### 2s5
##### 3s1
### 4s1
ch032_1side_5__2side_5__3side_1_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s1__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_1_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s2
### 4s1
ch032_1side_5__2side_5__3side_2_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_5__3side_2_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s3
### 4s1
ch032_1side_5__2side_5__3side_3_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_5__3side_3_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_5__3side_3_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s4
### 4s1
ch032_1side_5__2side_5__3side_4_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_5__3side_4_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_5__3side_4_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s4
ch032_1side_5__2side_5__3side_4_4side_4_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s4__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s4__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s4__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_5s4_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4__5s4__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4_5s4_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
##### 3s5
### 4s1
ch032_1side_5__2side_5__3side_5_4side_1_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s1__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_1_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s2
ch032_1side_5__2side_5__3side_5_4side_2_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s2__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_2_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s2__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_2_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s2__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_2_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s3
ch032_1side_5__2side_5__3side_5_4side_3_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s4
ch032_1side_5__2side_5__3side_5_4side_4_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s4__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s4__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s4__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_5s4_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4__5s4__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4_5s4_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
### 4s5
ch032_1side_5__2side_5__3side_5_4side_5_5s1_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s1_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s1__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s1_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s2_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s2_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s2__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s2_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s2_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s2_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s2__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s2_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s3_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s3__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s3_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s3__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s3_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s3__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s3_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s4__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s4__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s4__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s4_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s4__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s4_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5_6s1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s5__6s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5_6s2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s5__6s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5_6s3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s5__6s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5_6s4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s5__6s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_5s5_6s5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5__5s5__6s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5_5s5_6s5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
        ### Run this file directly (press F5, or `python step10_b1_exp_obj_load_and_train_and_test.py` with no extra argument)
        ### so that execution stays in this branch and never reaches the dispatch line below, which is meant for step10_b_subprocess.py.
ch032_1side_1__2side_1__3side_1_4side_1_5s1_6s1.build().run()
# print('no argument')
sys.exit()
    ### The line below is used by step10_b_subprocess.py; it is equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` from the command line.
eval(sys.argv[1])
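    # Added note (not in the original file): a minimal sketch of how a driver such as
    # step10_b_subprocess.py might launch one experiment per process. The exact call is an
    # assumption based on the comment above, not code taken from this repository:
    #
    #   import subprocess
    #   subprocess.run(["python", "step10_b1_exp_obj_load_and_train_and_test.py",
    #                   "ch032_1side_5__2side_5__3side_1_4side_1_5s1_6s1.build().run()"])
    #
    # Because the argument string is passed to eval(), only trusted, locally generated strings
    # should be forwarded here; looking the builder object up by name (for example with getattr()
    # on this module) would be a safer alternative to eval().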
| [
"[email protected]"
] | |
747fccee4d243a477338db4e3c8eb9374a0b38f5 | 53181572c4b22df4b569a9901bcd5347a3459499 | /tuit_200202/py200306/review_input.py | 15b13efd525e58aa765a21602912b88ef25afaaa | [] | no_license | edu-athensoft/ceit4101python_student | 80ef067b77421fce76d04f778d5c6de8b12f676c | 33cfa438c062d45e8d246b853e93d3c14b92ff2d | refs/heads/master | 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | """
input
"""
# input()
# input("Prompt:")
my_input = input("Prompt:")
print(type(my_input))  # input() always returns a str, so this prints <class 'str'>
# Convert the returned string explicitly when a numeric value is needed:
num1 = float(input("Enter a floating number:"))
num2 = int(input("Enter an integer:"))
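# Short follow-up example (added for illustration, not part of the original exercise):
# the converted values support arithmetic, while the raw input string does not.
print(num1 + num2)   # numeric sum of the two converted values
print(my_input * 2)  # string repetition, e.g. "hihi" for the input "hi"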
| [
"[email protected]"
] | |
5f9451699991e4faaa4b152a2074561f28165aa0 | 9dfb3372a1e4516d970a6e9d0a9fd8360580eae7 | /game/grafics/idea.py | c5d531d74ba20540c8a4411ceb550c78e140ef81 | [] | no_license | clambering-goat/cameron_pyton | d1cd0e7b04da14e7ba4f89dcb4d973f297a4626c | df0b0365b86e75cfcfc2c1fc21608f1536a3b79f | refs/heads/master | 2021-07-14T20:37:37.021401 | 2019-02-28T07:52:11 | 2019-02-28T07:52:11 | 137,251,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | import pygame
from math import sin, cos, radians, atan, degrees, atanh
pygame.init()
y_size, x_size = 500, 500
screen = pygame.display.set_mode((y_size, x_size))
done = False
point_to_point_at = pygame.mouse.get_pos()
def distamnce(x, y, x2, y2):
    # Euclidean distance between (x, y) and (x2, y2); the square root is ** 0.5, not * 0.5
    x_main = x - x2
    y_main = y - y2
    c = (x_main ** 2 + y_main ** 2) ** 0.5
    print(c)
    return c
distance_apart = 50
count = 0
distance = 20
pointion1 = 250, 250
pointion2 = pointion1[0] + (distance_apart * (3 ** 0.5)), pointion1[1] + (distance_apart / 2)
pointion3 = pointion1[0] + (distance_apart * (3 ** 0.5)), pointion1[1] - (distance_apart / 2)
orgin = pointion1[0], pointion1[1] + ((distance_apart * (3 ** 0.5)) / 2)
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
screen.fill((255, 255, 255))
#angle=angle+1
    point_to_point_at = pygame.mouse.get_pos()
    pygame.draw.line(screen, (0, 0, 255), orgin, (point_to_point_at[0], point_to_point_at[1]), 5)
pygame.display.flip()
pygame.time.wait(20)
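# Added sketch (not part of the original file): the unused math imports suggest the next step is
# to draw a fixed-length line of length `distance` pointing toward the mouse instead of drawing
# all the way to the cursor. One possible way, stated as an assumption about the intent:
#
#   dx = point_to_point_at[0] - orgin[0]
#   dy = point_to_point_at[1] - orgin[1]
#   length = (dx ** 2 + dy ** 2) ** 0.5 or 1
#   end = (orgin[0] + distance * dx / length, orgin[1] + distance * dy / length)
#   pygame.draw.line(screen, (0, 0, 255), orgin, end, 5)
#
# (The same direction could also be expressed with sin/cos of the angle obtained from atan.)
pygame.quit()  # release the display once the event loop exits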
| [
"[email protected]"
] | |
3d34fee17bcd028dc25f7a73358b7e5542e3f493 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/python/kernel_tests/unicode_decode_op_test.py | bd38dae393cea070fc65ca2cc23d1490037e8758 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 32,207 | py | # -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unicode_decode and unicode_decode_with_splits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.platform import test
def _nested_encode(x, encoding):
"""Encode each string in a nested list with `encoding`."""
if isinstance(x, list):
return [_nested_encode(v, encoding) for v in x]
else:
return x.encode(encoding)
def _nested_codepoints(x):
"""Replace each string in a nested list with a list of its codepoints."""
# Works for Python 2 and 3, and for both UCS2 and UCS4 builds
if isinstance(x, list):
return [_nested_codepoints(v) for v in x]
else:
b = list(x.encode("utf-32-be"))
if any(isinstance(c, str) for c in b):
b = [ord(c) for c in b]
return [(b0 << 24) + (b1 << 16) + (b2 << 8) + b3
for b0, b1, b2, b3 in zip(b[::4], b[1::4], b[2::4], b[3::4])]
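# Illustrative example (added for clarity): _nested_codepoints([u"ab", u"\U0001f60a"]) regroups
# each 4-byte UTF-32-BE unit into a single codepoint and returns [[97, 98], [128522]].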
def _nested_offsets(x, encoding):
"""Replace each string in a nested list with a list of start offsets."""
if isinstance(x, list):
return [_nested_offsets(v, encoding) for v in x]
else:
if not x:
return []
encoded_x = x.encode("utf-32-be")
encoded_chars = [encoded_x[i:i + 4] for i in range(0, len(encoded_x), 4)]
char_lens = [
len(c.decode("utf-32-be").encode(encoding)) for c in encoded_chars
]
return [0] + np.cumsum(char_lens).tolist()[:-1]
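# Illustrative example (added for clarity): _nested_offsets(u"h\xe9llo", "UTF-8") returns
# [0, 1, 3, 4, 5], because "\xe9" occupies two bytes once encoded as UTF-8.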
def _nested_splitchars(x, encoding):
"""Replace each string in a nested list with a list of char substrings."""
if isinstance(x, list):
return [_nested_splitchars(v, encoding) for v in x]
else:
b = x.encode("utf-32-be")
chars = zip(b[::4], b[1::4], b[2::4], b[3::4])
if str is bytes:
return [b"".join(c).decode("utf-32-be").encode(encoding) for c in chars]
else:
return [bytes(c).decode("utf-32-be").encode(encoding) for c in chars]
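# Illustrative example (added for clarity): _nested_splitchars(u"h\xe9", "UTF-8") returns
# [b"h", b"\xc3\xa9"], i.e. one encoded byte string per character of the input.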
def _make_sparse_tensor(indices, values, dense_shape, dtype=np.int32):
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64), np.array(values, dtype),
np.array(dense_shape, np.int64))
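# Illustrative example (added for clarity): _make_sparse_tensor([[0, 0], [1, 2]], [7, 8], [2, 3])
# builds a 2x3 SparseTensorValue holding 7 at position (0, 0) and 8 at position (1, 2).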
@test_util.run_all_in_graph_and_eager_modes
class UnicodeDecodeTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testScalarDecode(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars = ragged_string_ops.unicode_decode(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
def testScalarDecodeWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("utf-8"))
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
self.assertAllEqual(chars, [ord(c) for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorDecode(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars = ragged_string_ops.unicode_decode(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
def testVectorDecodeWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("utf-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_decode_with_offsets(text, "utf-8")
expected_chars = [[ord(c) for c in u"仅今年前"],
[ord(c) for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
self.assertAllEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": [[[u"😊"], [u"🤠🧐"]], [[u"🤓👻🤖"]]], "ragged_rank": 1},
{"texts": [[[u"😊"], [u"🤠🧐"]], [[u"🤓"], [u"👻"]]], "ragged_rank": 0},
{"texts": []}
]) # pyformat: disable
def testBasicDecode(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8")
expected = _nested_codepoints(texts)
self.assertAllEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicDecodeWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, "UTF-8")
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_decode(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_decode_with_offsets(
texts, "UTF-8")
self.assertAllEqual(
codepoints1, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(
codepoints2, [[71, 246, 246, 100, 110, 105, 103, 104, 116], [128522]])
self.assertAllEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[72, 101, 108, 108, 111, 119, 111, 114, 108, 100, 128077],
dense_shape=[4, 5])),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[72, 105, 116, 104, 101, 114, 101, 128522],
dense_shape=[2, 2, 5])),
dict(
texts=[],
expected=_make_sparse_tensor(np.zeros([0, 2], np.int64), [], [0, 0])),
])
def testDecodeWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_decode(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[72, 101, 108, 108, 111], [119, 111, 114, 108, 100],
[-1, -1, -1, -1, -1], [0x1F44D, -1, -1, -1, -1]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[72, 105, -1, -1, -1], [116, 104, 101, 114, 101]],
[[-1, -1, -1, -1, -1], [128522, -1, -1, -1, -1]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[72, 105, -1, -1, -1],
[116, 104, 101, 114, 101],
[-1, -1, -1, -1, -1]],
[[128522, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[
[[[128522, -1, -1], [129312, 129488, -1]],
[[-1, -1, -1], [-1, -1, -1]]],
[[[129299, 128123, 129302], [-1, -1, -1]],
[[-1, -1, -1], [-1, -1, -1]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testDecodeWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_decode(
input_tensor, "UTF-8").to_tensor(default_value=-1)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[61, 61, 65533, 61, 61],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_decode(**args)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
expected=[[0xFFFD],
[ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[ord('='), ord('='), 0xFFFD, ord('='), ord('=')],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\x00", b"hello", b"==\x01==", b"world"],
input_encoding="UTF-8",
replace_control_characters=True,
replacement_char=0,
expected=[[0], [ord('h'), ord('e'), ord('l'), ord('l'), ord('o')],
[0x3D, 0x3D, 0, 0x3D, 0x3D],
[ord('w'), ord('o'), ord('r'), ord('l'), ord('d')]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xD8\x01"],
input_encoding="UTF-8",
replacement_char=0x41,
expected=[[0x41, 1]],
expected_offsets=[[0, 1]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(result[0], expected)
self.assertAllEqual(result[1], expected_offsets)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithDifferentEncodings(self, encoding, texts):
expected = _nested_codepoints(texts)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode(input_tensor, encoding)
self.assertAllEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
("US-ASCII", [u"Hello", "world"]),
("ISO-8859-1", [u"ÀÈÓ", "AEO"]),
("SHIFT-JIS", [u"Hello", u"こんにちは"]),
)
def testDecodeWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_codepoints(texts)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_decode_with_offsets(
input_tensor, encoding)
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegex(exception, message):
self.evaluate(ragged_string_ops.unicode_decode(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegex(ValueError, message):
self.evaluate(ragged_string_ops.unicode_decode(s, input_encoding="UTF-8"))
@parameterized.parameters([
dict(
doc="Single string",
input=_nested_encode([u"仅今年前"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前"),
expected_row_splits=[0, 4],
expected_char_to_byte_starts=[0, 3, 6, 9]),
dict(
doc="Multiple strings",
input=_nested_encode([u"仅今年前", u"你好"], "utf-8"),
input_encoding="UTF-8",
expected_char_values=_nested_codepoints(u"仅今年前你好"),
expected_row_splits=[0, 4, 6],
expected_char_to_byte_starts=[0, 3, 6, 9, 0, 3]),
dict(
doc="errors=replace",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="replace",
expected_char_values=[0x3D, 0xFFFD, 0x3D],
expected_row_splits=[0, 3],
expected_char_to_byte_starts=[0, 1, 2]),
dict(
doc="errors=ignore",
input=b"=\xFE=",
input_encoding="UTF-8",
errors="ignore",
expected_char_values=[61, 61],
expected_row_splits=[0, 2],
expected_char_to_byte_starts=[0, 2]),
])
def testDecodeGenOp(self,
doc,
expected_row_splits=None,
expected_char_values=None,
expected_char_to_byte_starts=None,
**args):
"""Test for the c++ interface (gen_string_ops.unicode_decode)."""
result = gen_string_ops.unicode_decode_with_offsets(**args)
self.assertAllEqual(expected_row_splits, result.row_splits)
self.assertAllEqual(expected_char_values, result.char_values)
self.assertAllEqual(expected_char_to_byte_starts,
result.char_to_byte_starts)
@test_util.run_all_in_graph_and_eager_modes
class UnicodeSplitTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def testScalarSplit(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars = ragged_string_ops.unicode_split(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
def testScalarSplitWithOffset(self):
text = constant_op.constant(u"仅今年前".encode("UTF-8"))
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
self.assertAllEqual(chars, [c.encode("UTF-8") for c in u"仅今年前"])
self.assertAllEqual(starts, [0, 3, 6, 9])
def testVectorSplit(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars = ragged_string_ops.unicode_split(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
def testVectorSplitWithOffset(self):
text = constant_op.constant([u"仅今年前".encode("UTF-8"), b"hello"])
chars, starts = ragged_string_ops.unicode_split_with_offsets(text, "UTF-8")
expected_chars = [[c.encode("UTF-8") for c in u"仅今年前"],
[c.encode("UTF-8") for c in u"hello"]]
self.assertAllEqual(chars, expected_chars)
self.assertAllEqual(starts, [[0, 3, 6, 9], [0, 1, 2, 3, 4]])
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplit(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8")
expected = _nested_splitchars(texts, "UTF-8")
self.assertAllEqual(expected, result)
@parameterized.parameters([
{"texts": u"仅今年前"},
{"texts": [u"G\xf6\xf6dnight", u"\U0001f60a"]},
{"texts": ["Hello", "world", "", u"👍"]},
{"texts": [["Hi", "there"], ["", u"\U0001f60a"]], "ragged_rank": 0},
{"texts": [["Hi", "there", ""], [u"😊"]], "ragged_rank": 1},
{"texts": [[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]], "ragged_rank": 2},
{"texts": []}
]) # pyformat: disable
def testBasicSplitWithOffsets(self, texts, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split_with_offsets(input_tensor, "UTF-8")
expected_codepoints = _nested_splitchars(texts, "UTF-8")
expected_offsets = _nested_offsets(texts, "UTF-8")
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
def testDocstringExamples(self):
texts = [s.encode("utf8") for s in [u"G\xf6\xf6dnight", u"\U0001f60a"]]
codepoints1 = ragged_string_ops.unicode_split(texts, "UTF-8")
codepoints2, offsets = ragged_string_ops.unicode_split_with_offsets(
texts, "UTF-8")
self.assertAllEqual(
codepoints1,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertAllEqual(
codepoints2,
[[b"G", b"\xc3\xb6", b"\xc3\xb6", b"d", b"n", b"i", b"g", b"h", b"t"],
[b"\xf0\x9f\x98\x8a"]])
self.assertAllEqual(offsets, [[0, 1, 3, 5, 6, 7, 8, 9, 10], [0]])
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=_make_sparse_tensor(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1],
[1, 2], [1, 3], [1, 4], [3, 0]],
values=[b"H", b"e", b"l", b"l", b"o",
b"w", b"o", b"r", b"l", b"d", b"\xf0\x9f\x91\x8d"],
dense_shape=[4, 5],
dtype=bytes)),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=_make_sparse_tensor(
indices=[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[0, 1, 3], [0, 1, 4], [1, 1, 0]],
values=[b"H", b"i", b"t", b"h", b"e", b"r", b"e",
b"\xf0\x9f\x98\x8a"],
dense_shape=[2, 2, 5],
dtype=bytes)),
dict(
texts=[],
expected=_make_sparse_tensor(
np.zeros([0, 2], np.int64), [], [0, 0], dtype=bytes)),
]) # pyformat: disable
def testSplitWithSparseOutput(self, texts, expected):
input_tensor = np.array(_nested_encode(texts, "UTF-8"), dtype=bytes)
result = ragged_string_ops.unicode_split(input_tensor, "UTF-8").to_sparse()
self.assertIsInstance(result, sparse_tensor.SparseTensor)
self.assertAllEqual(expected.indices, result.indices)
self.assertAllEqual(expected.values, result.values)
self.assertAllEqual(expected.dense_shape, result.dense_shape)
@parameterized.parameters([
dict(
texts=["Hello", "world", "", u"👍"],
expected=[[b"H", b"e", b"l", b"l", b"o"],
[b"w", b"o", b"r", b"l", b"d"],
["", "", "", "", ""],
[b"\xf0\x9f\x91\x8d", "", "", "", ""]]),
dict(
texts=[["Hi", "there"], ["", u"\U0001f60a"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"]],
[["", "", "", "", ""],
[b"\xf0\x9f\x98\x8a", "", "", "", ""]]],
ragged_rank=0),
dict(
texts=[["Hi", "there", ""], [u"😊"]],
expected=[[[b"H", b"i", "", "", ""],
[b"t", b"h", b"e", b"r", b"e"],
["", "", "", "", ""]],
[[b"\xf0\x9f\x98\x8a", "", "", "", ""],
["", "", "", "", ""],
["", "", "", "", ""]]]),
dict(
texts=[[[u"😊", u"🤠🧐"], []], [[u"🤓👻🤖"]]],
expected=[[[[b"\xf0\x9f\x98\x8a", "", ""],
[b"\xf0\x9f\xa4\xa0", b"\xf0\x9f\xa7\x90", ""]],
[["", "", ""],
["", "", ""]]],
[[[b"\xf0\x9f\xa4\x93", b"\xf0\x9f\x91\xbb",
b"\xf0\x9f\xa4\x96"],
["", "", ""]],
[["", "", ""],
["", "", ""]]]]),
dict(texts=[], expected=np.zeros([0, 0], np.int64)),
]) # pyformat: disable
def testSplitWithPaddedOutput(self, texts, expected, ragged_rank=None):
input_tensor = ragged_factory_ops.constant_value(
_nested_encode(texts, "UTF-8"), ragged_rank=ragged_rank, dtype=bytes)
result = ragged_string_ops.unicode_split(
input_tensor, "UTF-8").to_tensor(default_value="")
self.assertAllEqual(np.array(expected, dtype=bytes), result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]]),
]) # pyformat: disable
def testErrorModes(self, expected=None, **args):
result = ragged_string_ops.unicode_split(**args)
self.assertAllEqual(expected, result)
@parameterized.parameters([
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
expected=[[b"\xef\xbf\xbd"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\xef\xbf\xbd", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="replace",
replacement_char=0,
expected=[[b"\x00"],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"\x00", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[0], [0, 1, 2, 3, 4],
[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]),
dict(
input=[b"\xFE", b"hello", b"==\xFF==", b"world"],
input_encoding="UTF-8",
errors="ignore",
expected=[[],
[b"h", b"e", b"l", b"l", b"o"],
[b"=", b"=", b"=", b"="],
[b"w", b"o", b"r", b"l", b"d"]],
expected_offsets=[[], [0, 1, 2, 3, 4],
[0, 1, 3, 4], [0, 1, 2, 3, 4]]),
]) # pyformat: disable
def testErrorModesWithOffsets(self,
expected=None,
expected_offsets=None,
**args):
result = ragged_string_ops.unicode_split_with_offsets(**args)
self.assertAllEqual(expected, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithDifferentEncodings(self, encoding, texts):
expected = _nested_splitchars(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split(input_tensor, encoding)
self.assertAllEqual(expected, result)
@parameterized.parameters(
("UTF-8", [u"こんにちは", u"你好", u"Hello"]),
("UTF-16-BE", [u"こんにちは", u"你好", u"Hello"]),
("UTF-32-BE", [u"こんにちは", u"你好", u"Hello"]),
)
def testSplitWithOffsetsWithDifferentEncodings(self, encoding, texts):
expected_codepoints = _nested_splitchars(texts, encoding)
expected_offsets = _nested_offsets(texts, encoding)
input_tensor = constant_op.constant(_nested_encode(texts, encoding))
result = ragged_string_ops.unicode_split_with_offsets(
input_tensor, encoding)
self.assertAllEqual(expected_codepoints, result[0])
self.assertAllEqual(expected_offsets, result[1])
@parameterized.parameters([
dict(input=[b"\xFEED"],
errors="strict",
input_encoding="UTF-8",
exception=errors.InvalidArgumentError,
message="Invalid formatting on input string"),
dict(input="x",
input_encoding="UTF-8",
replacement_char=11141111,
exception=errors.InvalidArgumentError,
message="replacement_char out of unicode codepoint range"),
dict(input="x",
input_encoding="UTF-8",
errors="oranguatan",
exception=(ValueError, errors.InvalidArgumentError)),
]) # pyformat: disable
def testExceptions(self, exception=None, message=None, **args):
with self.assertRaisesRegex(exception, message):
self.evaluate(ragged_string_ops.unicode_split(**args))
def testUnknownRankError(self):
if context.executing_eagerly():
return
s = array_ops.placeholder(dtypes.string)
message = "Rank of `input` must be statically known."
with self.assertRaisesRegex(ValueError, message):
      self.evaluate(ragged_string_ops.unicode_split(s, input_encoding="UTF-8"))
if __name__ == "__main__":
test.main()
| [
"[email protected]"
] | |
d0dfa176c55af006ba3041061a0a878a1418a113 | 3eb877ab6d9aba74c63acfc7d9dfe83fe77195ba | /google-cloud-sdk/lib/surface/compute/target_https_proxies/update.py | f648093f60593faf8dd500347de7363742e32be1 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Gilolume/HeuApplication | cd65267e6171277fc50f31a582b6ff6634758209 | e48c68ba9bc4f952b7bd5a0ba47f4c810ed56812 | refs/heads/master | 2022-11-25T06:18:47.892562 | 2017-11-24T09:21:16 | 2017-11-24T09:21:16 | 104,208,662 | 0 | 1 | null | 2020-07-25T12:32:09 | 2017-09-20T11:47:10 | Python | UTF-8 | Python | false | false | 7,762 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for updating target HTTPS proxies."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import target_proxies_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute.ssl_certificates import (
flags as ssl_certificates_flags)
from googlecloudsdk.command_lib.compute.target_https_proxies import flags
from googlecloudsdk.command_lib.compute.url_maps import flags as url_map_flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class UpdateGA(base.SilentCommand):
"""Update a target HTTPS proxy.
*{command}* is used to change the SSL certificate and/or URL map of
existing target HTTPS proxies. A target HTTPS proxy is referenced
by one or more forwarding rules which
define which packets the proxy is responsible for routing. The
target HTTPS proxy in turn points to a URL map that defines the rules
for routing the requests. The URL map's job is to map URLs to
backend services which handle the actual requests. The target
HTTPS proxy also points to at most 10 SSL certificates used for
server-side authentication.
"""
SSL_CERTIFICATE_ARG = None
SSL_CERTIFICATES_ARG = None
TARGET_HTTPS_PROXY_ARG = None
URL_MAP_ARG = None
@classmethod
def Args(cls, parser):
certs = parser.add_mutually_exclusive_group()
cls.SSL_CERTIFICATE_ARG = (
ssl_certificates_flags.SslCertificateArgumentForOtherResource(
'target HTTPS proxy', required=False))
cls.SSL_CERTIFICATE_ARG.AddArgument(parser, mutex_group=certs)
cls.SSL_CERTIFICATES_ARG = (
ssl_certificates_flags.SslCertificatesArgumentForOtherResource(
'target HTTPS proxy', required=False))
cls.SSL_CERTIFICATES_ARG.AddArgument(
parser, mutex_group=certs, cust_metavar='SSL_CERTIFICATE')
cls.TARGET_HTTPS_PROXY_ARG = flags.TargetHttpsProxyArgument()
cls.TARGET_HTTPS_PROXY_ARG.AddArgument(parser, operation_type='update')
cls.URL_MAP_ARG = url_map_flags.UrlMapArgumentForTargetProxy(
required=False, proxy_type='HTTPS')
cls.URL_MAP_ARG.AddArgument(parser)
@property
def service(self):
return self.compute.targetHttpsProxies
@property
def method(self):
pass
@property
def resource_type(self):
    return 'targetHttpsProxies'
def _CreateRequestsWithCertRefs(self, args, ssl_cert_refs,
quic_override=None):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
requests = []
target_https_proxy_ref = self.TARGET_HTTPS_PROXY_ARG.ResolveAsResource(
args, holder.resources)
if ssl_cert_refs:
requests.append(
(client.apitools_client.targetHttpsProxies, 'SetSslCertificates',
client.messages.ComputeTargetHttpsProxiesSetSslCertificatesRequest(
project=target_https_proxy_ref.project,
targetHttpsProxy=target_https_proxy_ref.Name(),
targetHttpsProxiesSetSslCertificatesRequest=(
client.messages.TargetHttpsProxiesSetSslCertificatesRequest(
sslCertificates=[
ref.SelfLink() for ref in ssl_cert_refs
])))))
if args.url_map:
url_map_ref = self.URL_MAP_ARG.ResolveAsResource(args, holder.resources)
requests.append(
(client.apitools_client.targetHttpsProxies, 'SetUrlMap',
client.messages.ComputeTargetHttpsProxiesSetUrlMapRequest(
project=target_https_proxy_ref.project,
targetHttpsProxy=target_https_proxy_ref.Name(),
urlMapReference=client.messages.UrlMapReference(
urlMap=url_map_ref.SelfLink()))))
if quic_override:
requests.append(
(client.apitools_client.targetHttpsProxies, 'SetQuicOverride',
client.messages.ComputeTargetHttpsProxiesSetQuicOverrideRequest(
project=target_https_proxy_ref.project,
targetHttpsProxy=target_https_proxy_ref.Name(),
targetHttpsProxiesSetQuicOverrideRequest=(
client.messages.TargetHttpsProxiesSetQuicOverrideRequest(
quicOverride=quic_override)))))
return client.MakeRequests(requests)
def _GetSslCertificatesList(self, args):
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
if args.ssl_certificate:
log.warn(
'The --ssl-certificate flag is deprecated and will be removed soon. '
'Use equivalent --ssl-certificates %s flag.', args.ssl_certificate)
return [
self.SSL_CERTIFICATE_ARG.ResolveAsResource(args, holder.resources)
]
if args.ssl_certificates:
return self.SSL_CERTIFICATES_ARG.ResolveAsResource(args, holder.resources)
return []
def _CheckMissingArgument(self, args):
if not (args.IsSpecified('ssl_certificates') or
args.IsSpecified('ssl_certificate') or args.IsSpecified('url_map')):
raise exceptions.ToolException(
'You must specify at least one of [--ssl-certificates] or '
'[--url-map].')
def Run(self, args):
self._CheckMissingArgument(args)
ssl_certificate_refs = self._GetSslCertificatesList(args)
return self._CreateRequestsWithCertRefs(args, ssl_certificate_refs)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateAlpha(UpdateGA):
"""Update a target HTTPS proxy.
*{command}* is used to change the SSL certificate and/or URL map of
existing target HTTPS proxies. A target HTTPS proxy is referenced
by one or more forwarding rules which
define which packets the proxy is responsible for routing. The
target HTTPS proxy in turn points to a URL map that defines the rules
for routing the requests. The URL map's job is to map URLs to
backend services which handle the actual requests. The target
HTTPS proxy also points to at most 10 SSL certificates used for
server-side authentication.
"""
@classmethod
def Args(cls, parser):
super(UpdateAlpha, cls).Args(parser)
target_proxies_utils.AddQuicOverrideUpdateArgs(parser)
def _CheckMissingArgument(self, args):
if not (args.IsSpecified('ssl_certificates') or
args.IsSpecified('ssl_certificate') or
args.IsSpecified('url_map') or args.IsSpecified('quic_override')):
raise exceptions.ToolException(
'You must specify at least one of [--ssl-certificates], '
'[--url-map] or [--quic-override].')
def Run(self, args):
self._CheckMissingArgument(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
messages = holder.client.messages
quic_override = (messages.TargetHttpsProxiesSetQuicOverrideRequest.
QuicOverrideValueValuesEnum(args.quic_override)
) if args.IsSpecified('quic_override') else None
ssl_certificate_refs = self._GetSslCertificatesList(args)
return self._CreateRequestsWithCertRefs(args, ssl_certificate_refs,
quic_override)
| [
"[email protected]"
] | |
805db6ad18e36c63ce129addd2b726f9f329426d | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/Node.js/juejin_1053.py | 331d195a313bfca90a59f4dac1a740d540c748d9 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,367 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6844903954002755592", "article_info": {"article_id": "6844903954002755592", "user_id": "1662117313518663", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844903954002755592", "cover_image": "", "is_gfw": 0, "title": "nodejs(十一)json-server使用教程", "brief_content": "一. json-server介绍 二. json-server的使用 三. 支持的方法 当你发送POST,PUT,PATCH 或者 DELETE请求时,变化会自动安全的保存到你的db.json文件中。 你的请求体body应该是封闭对象。比如{\"name\": \"Foobar\"} …", "is_english": 0, "is_original": 1, "user_index": 4.6433865942702, "original_type": 0, "original_author": "", "content": "", "ctime": "1569548571", "mtime": "1598525883", "rtime": "1569570254", "draft_id": "6845076477583573000", "view_count": 2697, "collect_count": 6, "digg_count": 4, "comment_count": 1, "hot_index": 139, "is_hot": 0, "rank_index": 0.00044594, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1662117313518663", "user_name": "mapengfei47", "company": "", "job_title": "前端开发工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/d34ba888c4673d2746a79fbb39a92fe1~300x300.image", "level": 2, "description": "戒骄戒躁,潜心修炼", "followee_count": 42, "follower_count": 174, "post_article_count": 36, "digg_article_count": 32, "got_digg_count": 286, "got_view_count": 50075, "post_shortmsg_count": 3, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 786, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844903954002755592, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903602054660103", "article_info": {"article_id": "6844903602054660103", "user_id": "4142615541853783", "category_id": "6809637767543259144", "tag_ids": 
[6809640361531539470, 6809640398105870343, 6809640528267706382, 6809640583162757128], "visible_level": 0, "link_url": "https://juejin.im/post/6844903602054660103", "cover_image": "", "is_gfw": 0, "title": " webpack系列之-原理篇", "brief_content": "本系列将会从原理、开发、优化、对比四个方面给大家介绍webpack的工作流程。【默认是以webpack v3为例子】 我们可以把webpack看做一个黑盒,只要会用就可以。先来体验一次很简单的webpack打包过程 npx 会自动查找当前依赖包中的可执行文件,如果找不到,就会去…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1525484438", "mtime": "1598453100", "rtime": "1525745949", "draft_id": "6845075413824176135", "view_count": 4504, "collect_count": 59, "digg_count": 62, "comment_count": 0, "hot_index": 287, "is_hot": 0, "rank_index": 0.00044526, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4142615541853783", "user_name": "行走的柯南", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/6da4690f8fbed0addd6cb437bda7427f~300x300.image", "level": 2, "description": "前端程序媛一枚", "followee_count": 24, "follower_count": 37, "post_article_count": 18, "digg_article_count": 14, "got_digg_count": 280, "got_view_count": 35436, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 634, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}, {"id": 2546653, "tag_id": "6809640583162757128", "tag_name": "编译器", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/4e6b1fae46570c191c18.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1444716669, "mtime": 1631666736, "id_type": 9, "tag_alias": "", 
"post_article_count": 1931, "concern_user_count": 21838}], "user_interact": {"id": 6844903602054660103, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844904190129471495", "article_info": {"article_id": "6844904190129471495", "user_id": "870468942044472", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844904190129471495", "cover_image": "", "is_gfw": 0, "title": "「预热桶」限流算法详解(附 Node.js 实现)", "brief_content": "在使用「限流器」的时候,我们需要给设置一个合适的 阈值 ,这个阈值通常是一个系统能够正常工作所能承受的最大 QPS 。当系统的请求量达到阈值时,不同的限流器主要有两种处理方式,一种是对超过阈值的请求直接返回失败,另一种是让超过阈值的请求排队等待。 要控制系统的 QPS 不超过阈…", "is_english": 0, "is_original": 1, "user_index": 2.9693622959161, "original_type": 0, "original_author": "", "content": "", "ctime": "1592014192", "mtime": "1599032358", "rtime": "1592050040", "draft_id": "6845076820614709261", "view_count": 1260, "collect_count": 13, "digg_count": 11, "comment_count": 3, "hot_index": 77, "is_hot": 0, "rank_index": 0.00044513, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "870468942044472", "user_name": "阿里巴巴业务中台前端", "company": "阿里巴巴", "job_title": "大前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/f69ab93c309ab1fa8959fd049d3268e7~300x300.image", "level": 2, "description": "本专栏由新零售技术事业群-业务平台-体验技术部创办", "followee_count": 17, "follower_count": 342, "post_article_count": 29, "digg_article_count": 3, "got_digg_count": 278, "got_view_count": 23943, "post_shortmsg_count": 27, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 521, "study_point": 290, "university": {"university_id": "6888594415809396750", "name": "浙江工贸职业技术学院", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 10, "annual_list_type": 1, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844904190129471495, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6945655623149158408", "online_version_id": 6944933297554210854, "latest_version_id": 6944933297554210854, "power": 517, "ctime": 1617161469, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6944933297554210854", "icon": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/64258f83c90b426e883b76107e506204~tplv-k3u1fbpfcp-watermark.image", "background": 
"https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/41d1c0cd091e42b1b52de07f7fff87e4~tplv-k3u1fbpfcp-zoom-1.image", "name": "阿里巴巴业务中台前端团队", "introduction": "阿里巴巴新零售业务中台大前端团队,致力于用技术创造新商业。\n距离电商业务最近的核心前端团队,同时团队有技术开源项目fusion、bizcharts、arms前端监控系统。\n\n", "weibo_link": "", "github_link": "", "homepage_link": "", "ctime": 1617162095, "mtime": 1617162095, "org_id": "6945655623149158408", "brief_introduction": "", "introduction_preview": "阿里巴巴新零售业务中台大前端团队,致力于用技术创造新商业。\n距离电商业务最近的核心前端团队,同时团队有技术开源项目fusion、bizcharts、arms前端监控系统。"}, "follower_count": 529, "article_view_count": 23942, "article_digg_count": 278}, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903619704143886", "article_info": {"article_id": "6844903619704143886", "user_id": "289926801002808", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844903619704143886", "cover_image": "", "is_gfw": 0, "title": "ES6与CommonJS中的模块处理", "brief_content": "ES6和CommonJS都有自己的一套处理模块化代码的措施,即JS文件之间的相互引用。 nodejs默认是不支持ES6的模块处理方案的。 但是在8.5.0之后,ES6代码的文件格式定为mjs后,可使用node --experimental-modules xxx.mjs运行。 …", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1528789364", "mtime": "1598456144", "rtime": "1528799145", "draft_id": "6845075537270931469", "view_count": 3606, "collect_count": 36, "digg_count": 91, "comment_count": 5, "hot_index": 276, "is_hot": 0, "rank_index": 0.00044474, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "289926801002808", "user_name": "zsfblank", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/mosaic-legacy/3795/3047680722~300x300.image", "level": 2, "description": "", "followee_count": 5, "follower_count": 4, "post_article_count": 1, "digg_article_count": 10, "got_digg_count": 91, "got_view_count": 3606, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 127, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 
1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844903619704143886, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6894658363251965965", "article_info": {"article_id": "6894658363251965965", "user_id": "782508010536478", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "webpack, babel, eslint,typescript,koa服务器应用简单入门配置", "brief_content": "至于其他配置,暂且就默认吧。现在,我们已经能用 tsc 了,算是搭建了一个基础环境。 这一步结束,咱们的项目已经能够跑起来了。 上面的 json 文件内容,主要就是添加了两个预设配置,和两个插件。 运行成功。每次需要编译代码的时候就运行一遍上面的 npx babel xx 就好…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1605298855", "mtime": "1605315272", "rtime": "1605315272", "draft_id": "6894667013914886151", "view_count": 755, "collect_count": 5, "digg_count": 8, "comment_count": 2, "hot_index": 47, "is_hot": 0, "rank_index": 0.00044435, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "782508010536478", "user_name": "拾清风", "company": "。。。", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/feff3e68cc85efe80d19e870dc1d037e~300x300.image", "level": 1, "description": "", "followee_count": 0, "follower_count": 1, "post_article_count": 5, "digg_article_count": 18, "got_digg_count": 14, "got_view_count": 2226, "post_shortmsg_count": 0, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 39, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6894658363251965965, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903664239411208", 
"article_info": {"article_id": "6844903664239411208", "user_id": "3702810891525805", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640402103042061, 6809640361531539470, 6809640366896054286], "visible_level": 0, "link_url": "https://chaoxuelei.github.io/2018/08/24/nodejs-express发送验证码完全实践,排坑之旅!/", "cover_image": "", "is_gfw": 0, "title": "NODEJS+EXPRESS发送验证码完全实践,排坑之旅!", "brief_content": "最近在深入学习nodejs+express+mysql的全栈开发,我知道现在mysql、express是老旧的,但是个人感觉还是挺成熟的,而且比较熟悉,先学学看,后面再看看kao、MongoDB,下面就来说说我用nodejs和腾讯云的短信sdk开发的验证码服务,大神嘴下留情啊~…", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1535077677", "mtime": "1599571560", "rtime": "1535102947", "draft_id": "0", "view_count": 3281, "collect_count": 30, "digg_count": 83, "comment_count": 7, "hot_index": 254, "is_hot": 0, "rank_index": 0.00044433, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3702810891525805", "user_name": "游刃有余", "company": "太无科技", "job_title": "UFO", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c3b59903c674d6a9b80213dc54ee127c~300x300.image", "level": 2, "description": "一句话说不清", "followee_count": 9, "follower_count": 2105, "post_article_count": 32, "digg_article_count": 34, "got_digg_count": 256, "got_view_count": 22890, "post_shortmsg_count": 6, "digg_shortmsg_count": 5, "isfollowed": false, "favorable_author": 0, "power": 175, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546522, "tag_id": "6809640402103042061", "tag_name": "前端框架", "color": "#F2AB5B", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f7a198f1e1aeb6d79878.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435964339, "mtime": 1631690383, "id_type": 9, "tag_alias": "", "post_article_count": 4037, "concern_user_count": 256973}, {"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546496, "tag_id": "6809640366896054286", "tag_name": "MySQL", "color": "#026088", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/b2fbb06dce13eeb3bb9b.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234510, "mtime": 1631692702, "id_type": 9, "tag_alias": "", "post_article_count": 10213, "concern_user_count": 189339}], "user_interact": {"id": 6844903664239411208, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6854573220033462285", "article_info": {"article_id": "6854573220033462285", "user_id": "1451011080474247", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "记一次简单的Node.js爬取某网站列表中a标签的属性", "brief_content": "需求:根据关键词搜索爬取某网站的列表跳转的链接及标题并导入excel感觉这次的思路并不是很正确是一种野路子个人理解正常的爬虫是爬取页面的内容而我这次是直接调用接口获取数据拿到数据后整理导入excel感", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1595816923", "mtime": "1598589774", "rtime": "1595821592", "draft_id": "6854812678104285198", "view_count": 1270, "collect_count": 4, "digg_count": 7, "comment_count": 0, "hot_index": 70, "is_hot": 0, "rank_index": 0.00044375, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1451011080474247", "user_name": "你亲爱的甜橙树", "company": "", "job_title": "WEB前端开发", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/a5bb21ff61506a253c6ac48eba2ec86c~300x300.image", "level": 1, "description": "", "followee_count": 5, "follower_count": 6, "post_article_count": 6, "digg_article_count": 7, "got_digg_count": 40, "got_view_count": 3371, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 73, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6854573220033462285, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844904021484896270", "article_info": {"article_id": "6844904021484896270", "user_id": "641770489918542", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844904021484896270", "cover_image": "", 
"is_gfw": 0, "title": "2020年Node.js将会有哪些新功能", "brief_content": "2019 年是 Node.js 诞生的第 10 个年头,npm 上可用的包数量超过了 100 万。 Node.js 本身的下载量也在持续增长,同比增长了 40%。另一个重要的里程碑是 最近 Node.js加入了 OpenJS 基金会,该基金会承诺改善项目的状况和可持续性,并改善…", "is_english": 0, "is_original": 1, "user_index": 9.3326553452616, "original_type": 0, "original_author": "", "content": "", "ctime": "1576468901", "mtime": "1598539193", "rtime": "1576468901", "draft_id": "6845076577483505678", "view_count": 1932, "collect_count": 5, "digg_count": 10, "comment_count": 7, "hot_index": 113, "is_hot": 0, "rank_index": 0.00044286, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "641770489918542", "user_name": "前端先锋", "company": "公众号:前端先锋", "job_title": "佛系前端码农,公众号:前端先锋", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c216b6d330e8833ba5491d606a8e5d3f~300x300.image", "level": 5, "description": "前端技术狂热(佛系)爱好者", "followee_count": 17, "follower_count": 8492, "post_article_count": 252, "digg_article_count": 29, "got_digg_count": 10086, "got_view_count": 831223, "post_shortmsg_count": 11, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 1, "power": 18646, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 15, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844904021484896270, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903859089866760", "article_info": {"article_id": "6844903859089866760", "user_id": "4107431172122503", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844903859089866760", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/6/4/16b2136e726186b3~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "超详细的node垃圾回收机制", "brief_content": "垃圾回收器是一把十足的双刃剑。其好处是可以大幅简化程序的内存管理代码,因为内存管理无需程序员来操作,由此也减少了(但没有根除)长时间运转的程序的内存泄漏。对于某些程序员来说,它甚至能够提升代码的性能。 另一方面,选择垃圾回收器也就意味着程序当中无法完全掌控内存,而这正是移动终端…", "is_english": 0, "is_original": 1, "user_index": 1.1523994497068, "original_type": 0, "original_author": "", "content": "", "ctime": "1559554533", "mtime": "1599925213", "rtime": "1559575911", "draft_id": "6845076328870313991", "view_count": 2971, "collect_count": 21, "digg_count": 24, "comment_count": 0, "hot_index": 172, "is_hot": 0, "rank_index": 0.00044274, "status": 2, 
"verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4107431172122503", "user_name": "神经叨", "company": "骑🐯 360", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/824938df7b971338b664c803f443c6bc~300x300.image", "level": 2, "description": "但愿长醉不复醒", "followee_count": 12, "follower_count": 6, "post_article_count": 7, "digg_article_count": 3, "got_digg_count": 54, "got_view_count": 8832, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 142, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844903859089866760, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903585960951815", "article_info": {"article_id": "6844903585960951815", "user_id": "2207475077227406", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640375880253447, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844903585960951815", "cover_image": "", "is_gfw": 0, "title": "ES6展开运算符(扩展运算符)", "brief_content": "在ES6中,我们有了一个新的运算符--展开运算符,它可以使我们的代码的数量有一定的减少,有时候甚至有很有用的作用,我们来举几个常用的例子,同时通过例子来了解展开运算符的使用。 总的来说,使多参数的函数调用有了新的方式, 而且避免的作用域的问题。 这里的对象是指类似{a:1}这样…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1522486643", "mtime": "1598450396", "rtime": "1522647179", "draft_id": "6845075403757846536", "view_count": 5086, "collect_count": 28, "digg_count": 40, "comment_count": 2, "hot_index": 296, "is_hot": 0, "rank_index": 0.00044219, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2207475077227406", "user_name": "跳跳的小记", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/bf345c18017a4c734cc540d254374143~300x300.image", "level": 2, "description": "", "followee_count": 8, "follower_count": 8, "post_article_count": 6, "digg_article_count": 43, "got_digg_count": 210, "got_view_count": 17052, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 380, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, 
"student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546502, "tag_id": "6809640375880253447", "tag_name": "GitHub", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/0d614af263aa63aa6a77.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234558, "mtime": 1631692670, "id_type": 9, "tag_alias": "", "post_article_count": 10402, "concern_user_count": 388765}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844903585960951815, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6901838829217382407", "article_info": {"article_id": "6901838829217382407", "user_id": "1380642337084974", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/de1e982d5b4f41e69659be1b4facf15d~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "Node系列之解决同源策略跨域问题的9种方案(最新最全)", "brief_content": "跨域是指一个域下的文档或脚本试图去请求另一个域下的资源,这里跨域是广义的。 其实我们通常所说的跨域是狭义的,是由浏览器同源策略限制的一类请求场景。 同源策略/SOP(Same origin policy)是一种约定,由Netscape公司1995年引入浏览器,它是浏览器最核心也…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1606959690", "mtime": "1606965274", "rtime": "1606965274", "draft_id": "6901832889286000648", "view_count": 683, "collect_count": 8, "digg_count": 6, "comment_count": 3, "hot_index": 43, "is_hot": 0, "rank_index": 0.00044216, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1380642337084974", "user_name": "前端开发小马哥", "company": "", "job_title": "全栈进修工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/703b76328f743a9782d81d2d5e95a27d~300x300.image", "level": 3, "description": "IOS、swift开发、Vue、React、express、koa、egg.js、webpack、云开发小程序", "followee_count": 36, "follower_count": 799, "post_article_count": 66, "digg_article_count": 111, "got_digg_count": 628, "got_view_count": 55275, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, 
"power": 1186, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6901838829217382407, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903842568470536", "article_info": {"article_id": "6844903842568470536", "user_id": "3825956194095597", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844903842568470536", "cover_image": "", "is_gfw": 0, "title": "用Puppeteer爬自己想偷的猫,推翻闰土统治掘金的时代!", "brief_content": "Puppeteer是 Google Chrome 团队官方的无界面(Headless)Chrome 工具,它是一个 Node 库,提供了一个高级的 API 来控制 DevTools协议上的无头版 Chrome 。也可以配置为使用完整(非无头)的 Chrome。Chrome 素来…", "is_english": 0, "is_original": 1, "user_index": 5.4865967676086, "original_type": 0, "original_author": "", "content": "", "ctime": "1557684235", "mtime": "1599900913", "rtime": "1557729958", "draft_id": "6845076306464358407", "view_count": 2404, "collect_count": 21, "digg_count": 33, "comment_count": 20, "hot_index": 173, "is_hot": 0, "rank_index": 0.00044188, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3825956194095597", "user_name": "Thoughtful valiant.", "company": "Thoughtworks", "job_title": "前端相声演员", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/15c9a69f7b047a4bd9d577b3e516ecfc~300x300.image", "level": 2, "description": "九层之台,起于累土。欢迎各位关注公众号:Refactor~", "followee_count": 42, "follower_count": 113, "post_article_count": 41, "digg_article_count": 74, "got_digg_count": 227, "got_view_count": 29096, "post_shortmsg_count": 10, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 514, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, 
"item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844903842568470536, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844904035997188103", "article_info": {"article_id": "6844904035997188103", "user_id": "2049145402829832", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844904035997188103", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/1/1/16f6010d9e437c2d~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "【重学Node.js 第5篇】部署项目到腾讯云服务器", "brief_content": "通过前面的讲解,可以完成一个基本的node api服务了,接下尝试把项目部署到刚买的腾讯云上。 购买腾讯云容器,购买后会把账号信息通过腾讯云站内信发给你,里面有公网ip和初始密码,后面ssh登陆会用到。 登陆云linux。本地Mac命令行输入:ssh [email protected]…", "is_english": 0, "is_original": 1, "user_index": 3.2044261223672, "original_type": 0, "original_author": "", "content": "", "ctime": "1576735894", "mtime": "1600348158", "rtime": "1576735932", "draft_id": "6845076581883314183", "view_count": 2100, "collect_count": 18, "digg_count": 9, "comment_count": 4, "hot_index": 118, "is_hot": 0, "rank_index": 0.00044155, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2049145402829832", "user_name": "张大然", "company": "美团", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/2307369d7ab08b126daebc77917ffc08~300x300.image", "level": 1, "description": "", "followee_count": 17, "follower_count": 19, "post_article_count": 6, "digg_article_count": 57, "got_digg_count": 36, "got_view_count": 6146, "post_shortmsg_count": 2, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 97, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844904035997188103, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, 
"org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903571197001742", "article_info": {"article_id": "6844903571197001742", "user_id": "3579665589774568", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343, 6809640361531539470, 6809640465407672333], "visible_level": 0, "link_url": "https://juejin.im/post/6844903571197001742", "cover_image": "", "is_gfw": 0, "title": "nuxtjs+express+vue2+vuex搭建的服务端渲染(SSR)个人网站项目", "brief_content": "nuxtjs+express+vue2.0+vuex搭建的服务端渲染个人网站项目. nodejs的出现,让前端能做的更多,让js不仅仅只是浏览器端的语言。 这里为什么要说这个呢,有些前端开发者会说node不是做后端的吗?我为什么要学nodejs呢?其实随着前端的发展,尤其是no…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1520486831", "mtime": "1599445114", "rtime": "1520487613", "draft_id": "6845075387882422286", "view_count": 3544, "collect_count": 52, "digg_count": 121, "comment_count": 5, "hot_index": 303, "is_hot": 0, "rank_index": 0.0004413, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3579665589774568", "user_name": "se7en-1992", "company": "龙湖", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/782159ece2c90a1d4ab8c670227ef45c~300x300.image", "level": 2, "description": "", "followee_count": 0, "follower_count": 9, "post_article_count": 1, "digg_article_count": 0, "got_digg_count": 121, "got_view_count": 3544, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 156, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546568, "tag_id": "6809640465407672333", "tag_name": 
"Express", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f932e447c5104bad088d.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1436556655, "mtime": 1631638567, "id_type": 9, "tag_alias": "", "post_article_count": 634, "concern_user_count": 26754}], "user_interact": {"id": 6844903571197001742, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6892302012294103053", "article_info": {"article_id": "6892302012294103053", "user_id": "360295544403838", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Node.js系列二 - Node基础知识", "brief_content": "一. Node执行代码 1.1. JavaScript文件执行 1.2. Node的REPL 二. Node输入输出 2.1. 给node程序传递参数 argv:argument vector的缩写,传入的具体参数。 vector翻译过来是矢量的意思,在程序中表示的是一种数据结…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1604739388", "mtime": "1604745101", "rtime": "1604744987", "draft_id": "6892301692025438222", "view_count": 819, "collect_count": 4, "digg_count": 7, "comment_count": 1, "hot_index": 48, "is_hot": 0, "rank_index": 0.00044129, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "360295544403838", "user_name": "coderwhy", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/7/31/164efaccfd55f53a~tplv-t2oaga2asx-image.image", "level": 3, "description": "", "followee_count": 11, "follower_count": 2453, "post_article_count": 58, "digg_article_count": 8, "got_digg_count": 976, "got_view_count": 130934, "post_shortmsg_count": 6, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 2365, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6892302012294103053, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6953507752027750431", "article_info": {"article_id": "6953507752027750431", "user_id": "870468937590366", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], 
"visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "tcp如何实现http协议", "brief_content": "目标: 了解 http 协议的内容 理解node中tcp的基本使用 用net实现一个http协议 启动一个node http服务器; tcp实现http/demo1 通过wireshark 过滤条件 ", "is_english": 0, "is_original": 1, "user_index": 3.969362295916118, "original_type": 0, "original_author": "", "content": "", "ctime": "1618989706", "mtime": "1619000446", "rtime": "1619000446", "draft_id": "6953507181849378823", "view_count": 206, "collect_count": 1, "digg_count": 3, "comment_count": 1, "hot_index": 14, "is_hot": 0, "rank_index": 0.00044095, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "870468937590366", "user_name": "hpstream_", "company": "", "job_title": "前端开发工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/97956f8c21ebe8b94c697ea86da6ecda~300x300.image", "level": 2, "description": "", "followee_count": 21, "follower_count": 24, "post_article_count": 29, "digg_article_count": 28, "got_digg_count": 88, "got_view_count": 11571, "post_shortmsg_count": 2, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 203, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6953507752027750431, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6978472077586071559", "article_info": {"article_id": "6978472077586071559", "user_id": "3720403078100071", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "child_process.exec接口引起的服务异常排查记录", "brief_content": "最近在用Beidou同构框架搭建一个SSR同构服务,本地开发时毫无问题,但部署到测试环境和线上环境后,服务会不定期进程会收到exit事件而异常退出。本文记录了排查的过程及一些收获,与大家共勉。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624802146", "mtime": "1624853721", "rtime": "1624851689", "draft_id": "6978466127013756942", "view_count": 154, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 8, "is_hot": 0, "rank_index": 0.00043952, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3720403078100071", "user_name": "山下有风", "company": "", "job_title": "", "avatar_large": 
"https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/9a742b0050909935474743a22bc10675~300x300.image", "level": 1, "description": "", "followee_count": 29, "follower_count": 1, "post_article_count": 3, "digg_article_count": 9, "got_digg_count": 7, "got_view_count": 2320, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6978472077586071559, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844904128951369742", "article_info": {"article_id": "6844904128951369742", "user_id": "3051900006317549", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844904128951369742", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/4/16/1718172ed5f37ea4~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "如何使用 yargs", "brief_content": "Yargs 是一个很好的命令行程序库,简单地说,它可以让创建一个在控制台中运行的应用程序的过程变得轻而易举。还有什么能让它变得更好呢?它是以海盗为主题的(它的名字叫 YARgs),让它正式成为有史以来最好的工具。 你可能知道其他的 CLI,比如 vue-cli,可以轻松设置一个…", "is_english": 0, "is_original": 1, "user_index": 0.07580413392071, "original_type": 0, "original_author": "", "content": "", "ctime": "1587014759", "mtime": "1606696235", "rtime": "1587014759", "draft_id": "6845076731351711751", "view_count": 1816, "collect_count": 8, "digg_count": 2, "comment_count": 0, "hot_index": 92, "is_hot": 0, "rank_index": 0.00043879, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3051900006317549", "user_name": "水墨寒", "company": "Compass", "job_title": "前端", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/1676cc6400cf1af25e2ed3a446a46ccc~300x300.image", "level": 3, "description": "前端🐶", "followee_count": 108, "follower_count": 33398, "post_article_count": 52, "digg_article_count": 130, "got_digg_count": 2640, 
"got_view_count": 101644, "post_shortmsg_count": 27, "digg_shortmsg_count": 60, "isfollowed": false, "favorable_author": 1, "power": 1335, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844904128951369742, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6950848116228620296", "article_info": {"article_id": "6950848116228620296", "user_id": "3368559359558574", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Node.js性能分析工具", "brief_content": "ab压测-c 并发数(同时有200个人,在请求我们的服务器)-n 总共执行1000次请求找到服务器瓶颈通过压测的qps可以大概判断性能瓶颈在不在网卡,如果不在网卡,就可能在cpu、内存、硬盘等等top", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1618370608", "mtime": "1618381461", "rtime": "1618381461", "draft_id": "6950835630230208548", "view_count": 345, "collect_count": 2, "digg_count": 1, "comment_count": 1, "hot_index": 19, "is_hot": 0, "rank_index": 0.0004382, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3368559359558574", "user_name": "路明非_", "company": "", "job_title": "自由开发者", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/1eb8b13f6b021b27200425e833f08109~300x300.image", "level": 1, "description": "something for nothing", "followee_count": 17, "follower_count": 2, "post_article_count": 5, "digg_article_count": 10, "got_digg_count": 15, "got_view_count": 1317, "post_shortmsg_count": 15, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 28, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, 
"promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6950848116228620296, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}, {"article_id": "6844903564729581575", "article_info": {"article_id": "6844903564729581575", "user_id": "2594503171254328", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470], "visible_level": 0, "link_url": "https://juejin.im/post/6844903564729581575", "cover_image": "", "is_gfw": 0, "title": "掌握Node.js中的Async和Await", "brief_content": "在本文中,你将学习如何使用Node.js中的async函数(async/await)来简化callback或Promise. 异步语言结构在其他语言中已经存在了,像c#的async/await、Kotlin的coroutines、go的goroutines,随着Node.js …", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1518364822", "mtime": "1598446712", "rtime": "1519266955", "draft_id": "6845075379573686280", "view_count": 5679, "collect_count": 13, "digg_count": 22, "comment_count": 0, "hot_index": 305, "is_hot": 0, "rank_index": 0.00043789, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2594503171254328", "user_name": "Monster000", "company": "CodeGree", "job_title": "打杂的呢", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/eefad554d5c733599b7864ff51c54647~300x300.image", "level": 2, "description": "主要工作是做前端,懂一点后端,偶尔也做做产品,目前主要在做https://codegree.net😼", "followee_count": 30, "follower_count": 509, "post_article_count": 20, "digg_article_count": 385, "got_digg_count": 463, "got_view_count": 43450, "post_shortmsg_count": 10, "digg_shortmsg_count": 46, "isfollowed": false, "favorable_author": 0, "power": 622, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}], "user_interact": {"id": 6844903564729581575, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516031901020405010224005086"}], "cursor": 
"eyJ2IjoiNzAwODAwNjMyNjg1NzEzODE4NCIsImkiOjE2MjB9", "count": 3231, "has_more": true} | [
"[email protected]"
] | |
df3a234e51d43397b86be585b6914f5ef896cd45 | 832852c679816673f708860929a36a20ca8d3e32 | /Configurations/ggH_SF/Full2016_nAODv4/maxDNN/comb_2j_dymvaOptim.py | ddfa9667d553dc87552a9171f6eafc15ee4d708f | [] | no_license | UniMiBAnalyses/PlotsConfigurations | c4ec7376e2757b838930dfb2615e1dc99a64e542 | 578fe518cfc608169d3418bcb63a8342d3a24390 | refs/heads/master | 2023-08-31T17:57:45.396325 | 2022-09-01T10:13:14 | 2022-09-01T10:13:14 | 172,092,793 | 0 | 13 | null | 2023-04-27T10:26:52 | 2019-02-22T15:52:44 | Python | UTF-8 | Python | false | false | 2,545 | py | # Combinations to use
#comb = {}
optim={}
##optim['dymva0p82'] = ' && dymva_dnn_2j > 0.82 '
##optim['dymva0p83'] = ' && dymva_dnn_2j > 0.83 '
##optim['dymva0p84'] = ' && dymva_dnn_2j > 0.84 '
#optim['dymva0p845'] = ' && dymva_dnn_2j > 0.845 '
#optim['dymva0p85'] = ' && dymva_dnn_2j > 0.85 '
optim['dymva0p855'] = ' && dymva_dnn_2j > 0.855 '
##optim['dymva0p86'] = ' && dymva_dnn_2j > 0.86 '
optim['dymva0p865'] = ' && dymva_dnn_2j > 0.865 '
##optim['dymva0p87'] = ' && dymva_dnn_2j > 0.87 '
optim['dymva0p875'] = ' && dymva_dnn_2j > 0.875 '
##optim['dymva0p88'] = ' && dymva_dnn_2j > 0.88 '
optim['dymva0p885'] = ' && dymva_dnn_2j > 0.885 '
##optim['dymva0p89'] = ' && dymva_dnn_2j > 0.89 '
##optim['dymva0p90'] = ' && dymva_dnn_2j > 0.90 '
##optim['dymva0p91'] = ' && dymva_dnn_2j > 0.91 '
#optim['dymva0p92'] = ' && dymva_dnn_2j > 0.92 '
##optim['dymva0p925'] = ' && dymva_dnn_2j > 0.925 '
#optim['dymva0p93'] = ' && dymva_dnn_2j > 0.93 '
#optim['dymva0p94'] = ' && dymva_dnn_2j > 0.94 '
#optim['dymva0p945'] = ' && dymva_dnn_2j > 0.945 '
##optim['dymva0p95'] = ' && dymva_dnn_2j > 0.95 '
#optim['dymva0p955'] = ' && dymva_dnn_2j > 0.955 '
#optim['dymva0p96'] = ' && dymva_dnn_2j > 0.96 '
#optim['dymva0p965'] = ' && dymva_dnn_2j > 0.965 '
#optim['dymva0p97'] = ' && dymva_dnn_2j > 0.97 '
##optim['dymva0p975'] = ' && dymva_dnn_2j > 0.975 '
optim['dymva0p98'] = ' && dymva_dnn_2j > 0.98 '
optim['dymva0p985'] = ' && dymva_dnn_2j > 0.985 '
optim['dymva0p99'] = ' && dymva_dnn_2j > 0.99 '
##optim['dymva0p995'] = ' && dymva_dnn_2j > 0.995 '
for iCut in optim:
combs['hww2l2v_13TeV_2jee_'+iCut] = {
'hww2l2v_13TeV_2jee_'+iCut : 'events' ,
'hww2l2v_13TeV_WW_2jee_'+iCut : 'events' ,
'hww2l2v_13TeV_top_2jee_'+iCut : 'events' ,
}
combs['hww2l2v_13TeV_2jmm_'+iCut] = {
'hww2l2v_13TeV_2jmm_'+iCut : 'events' ,
'hww2l2v_13TeV_WW_2jmm_'+iCut : 'events' ,
'hww2l2v_13TeV_top_2jmm_'+iCut : 'events' ,
}
combs['hww2l2v_13TeV_2jsf_'+iCut] = {
'hww2l2v_13TeV_2jee_'+iCut : 'events' ,
'hww2l2v_13TeV_WW_2jee_'+iCut : 'events' ,
'hww2l2v_13TeV_top_2jee_'+iCut : 'events' ,
'hww2l2v_13TeV_2jmm_'+iCut : 'events' ,
'hww2l2v_13TeV_WW_2jmm_'+iCut : 'events' ,
'hww2l2v_13TeV_top_2jmm_'+iCut : 'events' ,
}
| [
"[email protected]"
] | |
c0fb52baf7c201286ee69e150f2b660a27dd829d | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/CISCO-BITS-CLOCK-CAPABILITY.py | 8a27e62b2849984358771dd4ae5e8c6fbbd7289a | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 3,206 | py | #
# PySNMP MIB module CISCO-BITS-CLOCK-CAPABILITY (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-BITS-CLOCK-CAPABILITY
# Produced by pysmi-0.3.4 at Wed May 1 11:51:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint")
ciscoAgentCapability, = mibBuilder.importSymbols("CISCO-SMI", "ciscoAgentCapability")
ModuleCompliance, NotificationGroup, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "AgentCapabilities")
ObjectIdentity, MibIdentifier, IpAddress, Counter32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, Counter64, Integer32, NotificationType, Bits, iso, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "MibIdentifier", "IpAddress", "Counter32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "Counter64", "Integer32", "NotificationType", "Bits", "iso", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoBitsClockCapability = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 7, 433))
ciscoBitsClockCapability.setRevisions(('2005-03-08 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoBitsClockCapability.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoBitsClockCapability.setLastUpdated('200503080000Z')
if mibBuilder.loadTexts: ciscoBitsClockCapability.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoBitsClockCapability.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoBitsClockCapability.setDescription('Agent capabilities for the CISCO-BITS-CLOCK-MIB.')
ciscoBitsClockV12R025000SW1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 9, 7, 433, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoBitsClockV12R025000SW1 = ciscoBitsClockV12R025000SW1.setProductRelease('Cisco IOS 12.2(25)SW1')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoBitsClockV12R025000SW1 = ciscoBitsClockV12R025000SW1.setStatus('current')
if mibBuilder.loadTexts: ciscoBitsClockV12R025000SW1.setDescription('IOS 12.2(25)SW1 Cisco CISCO-BITS-CLOCK-MIB.my User Agent MIB capabilities.')
mibBuilder.exportSymbols("CISCO-BITS-CLOCK-CAPABILITY", PYSNMP_MODULE_ID=ciscoBitsClockCapability, ciscoBitsClockV12R025000SW1=ciscoBitsClockV12R025000SW1, ciscoBitsClockCapability=ciscoBitsClockCapability)
| [
"[email protected]"
] | |
060fce54f0cf1197aa859bd4e882a86f97d1a248 | 34599596e145555fde0d4264a1d222f951f49051 | /pcat2py/class/20fab7f2-5cc5-11e4-af55-00155d01fe08.py | 65e06e5eb7d8192e2b4c7cd3f1cdffd123911b50 | [
"MIT"
] | permissive | phnomcobra/PCAT2PY | dc2fcbee142ce442e53da08476bfe4e68619346d | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | refs/heads/master | 2021-01-11T02:23:30.669168 | 2018-02-13T17:04:03 | 2018-02-13T17:04:03 | 70,970,520 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | #!/usr/bin/python
################################################################################
# 20fab7f2-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# [email protected]
# [email protected]
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
def __init__(self):
self.output = []
self.is_compliant = False
self.uuid = "20fab7f2-5cc5-11e4-af55-00155d01fe08"
def check(self, cli):
# Initialize Compliance
self.is_compliant = False
# Get Registry DWORD
dword = cli.get_reg_dword(r'HKLM:\Software\Policies\Microsoft\Windows\GameUX', 'DownloadGameInfo')
# Output Lines
self.output = [r'HKLM:\Software\Policies\Microsoft\Windows\GameUX', ('DownloadGameInfo=' + str(dword))]
if dword == 0:
self.is_compliant = True
return self.is_compliant
def fix(self, cli):
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows'")
cli.powershell(r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Windows\GameUX'")
cli.powershell(r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Windows\GameUX' -name 'DownloadGameInfo' -value 0 -Type DWord")
| [
"[email protected]"
] | |
90780ca6d62b27da11a6b61560c01714020dd63d | d1ed040025811cce2533a7e55737eadc00bd15a9 | /shop/widgets.py | 84e21750c6450f92f338c5ecc50bc5985e6503d3 | [] | no_license | Code-Institute-Submissions/guillermo | 0dfa6aecab3e325c5bd30356557b710da6851deb | c6def675ac78e88fd20c86f59498d9251cf302a7 | refs/heads/master | 2023-01-15T11:38:35.306745 | 2020-11-10T21:09:41 | 2020-11-10T21:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | from django.forms.widgets import ClearableFileInput
from django.utils.translation import gettext_lazy as _
class CustomClearableFileInput(ClearableFileInput):
"""
Shows thumbnail of current image and checkbox to remove it.
"""
clear_checkbox_label = _("Remove")
initial_text = _("Current Image")
input_text = _("")
template_name = (
"shop/custom_widget_templates/custom_clearable_file_input.html"
)
| [
"[email protected]"
] | |
a99e6f4b0b71c257b8c163c1c1cf93f0172adcd4 | 07570ec33eb49effd9ed6af73214bac1b607038f | /client/swagger_client/models/domain_list.py | 4c04cf18e0a3344618f96b8efcd04ad9b132fceb | [
"MIT"
] | permissive | kakwa/certascale | d9998a66ba6a239ba5b5e537f12dabdd5876996c | 0df8da0f518506500117152fd0e28ee3286949af | refs/heads/master | 2020-03-29T09:24:32.794060 | 2019-01-30T14:06:10 | 2019-01-30T14:06:10 | 149,756,729 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | # coding: utf-8
"""
certascale API
Certascale API documentation # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.domain import Domain # noqa: F401,E501
class DomainList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'list': 'list[Domain]',
'next_id': 'int'
}
attribute_map = {
'list': 'list',
'next_id': 'next_id'
}
def __init__(self, list=None, next_id=None): # noqa: E501
"""DomainList - a model defined in Swagger""" # noqa: E501
self._list = None
self._next_id = None
self.discriminator = None
if list is not None:
self.list = list
if next_id is not None:
self.next_id = next_id
@property
def list(self):
"""Gets the list of this DomainList. # noqa: E501
:return: The list of this DomainList. # noqa: E501
:rtype: list[Domain]
"""
return self._list
@list.setter
def list(self, list):
"""Sets the list of this DomainList.
:param list: The list of this DomainList. # noqa: E501
:type: list[Domain]
"""
self._list = list
@property
def next_id(self):
"""Gets the next_id of this DomainList. # noqa: E501
:return: The next_id of this DomainList. # noqa: E501
:rtype: int
"""
return self._next_id
@next_id.setter
def next_id(self, next_id):
"""Sets the next_id of this DomainList.
:param next_id: The next_id of this DomainList. # noqa: E501
:type: int
"""
self._next_id = next_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DomainList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] |