Dataset schema (one row per source file):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1 |
| author_id | string | length 1–132 |

Each sample row below lists these fields in order, separated by `|`, with the full file content inline.
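These flattened rows are far easier to explore programmatically than by eye. A minimal sketch of filtering them, assuming the split has been exported to a local Parquet file (the file name here is hypothetical):

```python
import pandas as pd

# Hypothetical local export of this split; substitute the real file name.
df = pd.read_parquet("train-00000-of-00001.parquet")

# Keep permissively licensed Python files that are neither vendored nor generated.
mask = (
    (df["license_type"] == "permissive")
    & ~df["is_vendor"]
    & ~df["is_generated"]
)
print(df.loc[mask, ["repo_name", "path", "length_bytes"]].head())
```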
3dc9e30e5c222c2883a7eac6fdbd007fc805284a
|
a31de016611f3b4efc7a576e7113cad1a738419b
|
/9.8_exception_classes.py
|
f3fb70d78cdd6362c88d732831f0ffe476b30943
|
[] |
no_license
|
Ing-Josef-Klotzner/python
|
9d4044d632672fff966b28ab80e1ef77763c78f5
|
3913729d7d6e1b7ac72b46db7b06ca0c58c8a608
|
refs/heads/master
| 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 |
Python
|
UTF-8
|
Python
| false | false | 207 |
py
|
# exception classes
#import sys
class B:
pass
class C(B):
pass
class D(C):
pass
for c in [B, C, D]:
try:
raise c()
except D:
print "D"
except C:
print "C"
except B:
print "B"
|
[
"[email protected]"
] | |
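The file above is Python 2 (note the `print` statements). For reference, a hedged Python 3 equivalent; the one substantive change is that Python 3 only allows raising instances of `BaseException` subclasses, so the classes must inherit from `Exception`:

```python
class B(Exception):
    pass

class C(B):
    pass

class D(C):
    pass

for cls in (B, C, D):
    try:
        raise cls()
    except D:
        print("D")
    except C:
        print("C")
    except B:
        print("B")

# Prints B, C, D: each raised instance is caught by the first handler whose
# class is the instance's type or one of its base classes.
```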
0ad259d0f323921f6882bfce2be18fa92b27acdc
|
e845f7f61ff76b3c0b8f4d8fd98f6192e48d542a
|
/djangocg/utils/safestring.py
|
f2b147175bdb1cb129a4deff7e5ed7f49f002395
|
[
"BSD-3-Clause"
] |
permissive
|
timothyclemans/djangocg
|
fd150c028013cb5f53f5a3b4fdc960a07fdaaa78
|
52cf28e046523bceb5d436f8e6bf61e7d4ba6312
|
refs/heads/master
| 2021-01-18T13:20:13.636812 | 2012-08-31T23:38:14 | 2012-08-31T23:38:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,189 |
py
|
"""
Functions for working with "safe strings": strings that can be displayed safely
without further escaping in HTML. Marking something as a "safe string" means
that the producer of the string has already turned characters that should not
be interpreted by the HTML engine (e.g. '<') into the appropriate entities.
"""
from djangocg.utils.functional import curry, Promise
from djangocg.utils import six
class EscapeData(object):
pass
class EscapeBytes(bytes, EscapeData):
"""
A byte string that should be HTML-escaped when output.
"""
pass
class EscapeText(six.text_type, EscapeData):
"""
A unicode string object that should be HTML-escaped when output.
"""
pass
if six.PY3:
EscapeString = EscapeText
else:
EscapeString = EscapeBytes
# backwards compatibility for Python 2
EscapeUnicode = EscapeText
class SafeData(object):
pass
class SafeBytes(bytes, SafeData):
"""
A bytes subclass that has been specifically marked as "safe" (requires no
further escaping) for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
decode = curry(_proxy_method, method=bytes.decode)
class SafeText(six.text_type, SafeData):
"""
A unicode (Python 2) / str (Python 3) subclass that has been specifically
marked as "safe" for HTML output purposes.
"""
def __add__(self, rhs):
"""
Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.
"""
t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
def _proxy_method(self, *args, **kwargs):
"""
Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the 'method'
argument.
"""
method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
encode = curry(_proxy_method, method=six.text_type.encode)
if six.PY3:
SafeString = SafeText
else:
SafeString = SafeBytes
# backwards compatibility for Python 2
SafeUnicode = SafeText
def mark_safe(s):
"""
Explicitly mark a string as safe for (HTML) output purposes. The returned
object can be used everywhere a string or unicode object is appropriate.
Can be called multiple times on a single string.
"""
if isinstance(s, SafeData):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return SafeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return SafeText(s)
return SafeString(str(s))
def mark_for_escaping(s):
"""
Explicitly mark a string as requiring HTML escaping upon output. Has no
effect on SafeData subclasses.
Can be called multiple times on a single string (the resulting escaping is
only applied once).
"""
if isinstance(s, (SafeData, EscapeData)):
return s
if isinstance(s, bytes) or (isinstance(s, Promise) and s._delegate_bytes):
return EscapeBytes(s)
if isinstance(s, (six.text_type, Promise)):
return EscapeText(s)
return EscapeBytes(bytes(s))
|
[
"[email protected]"
] | |
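A short usage sketch of the module above, assuming `djangocg` is installed and mirrors the classic Django safestring behaviour it derives from:

```python
from djangocg.utils.safestring import SafeData, mark_safe, mark_for_escaping

s = mark_safe("<b>bold</b>")  # the producer vouches for this HTML
assert isinstance(s, SafeData)
assert mark_safe(s) is s      # idempotent: already-safe input is returned as-is

e = mark_for_escaping("<script>")  # will be HTML-escaped on output
assert not isinstance(e, SafeData)

# Concatenating safe text with unsafe text drops the "safe" mark (see SafeText.__add__).
assert not isinstance(s + "<i>plain</i>", SafeData)
```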
124ed1ef4d6daa12465b30bbddac78841214f9f8
|
6d116e51e92d199b65f60929ed51e3b8e2ffcba2
|
/dephell/commands/autocomplete.py
|
00fe5569a1e70210055c34b2de1fd44352a500b6
|
[
"MIT"
] |
permissive
|
avallbona/dephell
|
a2ce64466092360b82b98ae314f242626d54b099
|
b96b401ea6906b464f9ea87f7ec9f52f9ac6297f
|
refs/heads/master
| 2020-05-20T08:47:12.498537 | 2019-05-08T21:12:03 | 2019-05-09T05:56:28 | 185,482,156 | 0 | 0 | null | 2019-05-07T21:42:04 | 2019-05-07T21:42:03 | null |
UTF-8
|
Python
| false | false | 2,208 |
py
|
# built-in
from argparse import ArgumentParser
from pathlib import Path
# external
from appdirs import user_data_dir
from dephell_shells import Shells
# app
from ..actions import make_bash_autocomplete, make_zsh_autocomplete
from ..config import builders
from .base import BaseCommand
class AutocompleteCommand(BaseCommand):
"""Enable DepHell commands autocomplete for current shell.
https://dephell.readthedocs.io/en/latest/cmd-autocomplete.html
"""
@classmethod
def get_parser(cls) -> ArgumentParser:
parser = ArgumentParser(
prog='dephell autocomplete',
description=cls.__doc__,
)
builders.build_config(parser)
builders.build_output(parser)
return parser
def __call__(self):
shell = Shells(bin_path=None).shell_name
msg = 'Autocompletion installed. Please, reload your shell'
if shell == 'bash':
self._bash()
self.logger.info(msg)
return True
if shell == 'zsh':
self._zsh()
self.logger.info(msg)
return True
self.logger.error('unsupported shell', extra=dict(shell=shell))
return False
def _bash(self):
script = make_bash_autocomplete()
path = Path.home() / '.local' / 'etc' / 'bash_completion.d' / 'dephell.bash-completion'
path.write_text(script)
for rc_name in ('.bashrc', '.profile'):
rc_path = Path.home() / rc_name
if not rc_path.exists():
continue
if 'bash_completion.d' not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource {}\n'.format(str(path)))
break
def _zsh(self):
script = make_zsh_autocomplete()
path = Path(user_data_dir('dephell')) / '_dephell_zsh_autocomplete'
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(script)
path.chmod(0o777)
rc_path = Path.home() / '.zshrc'
if str(path) not in rc_path.read_text():
with rc_path.open('a') as stream:
stream.write('\n\nsource {}\n'.format(str(path)))
|
[
"[email protected]"
] | |
82684a66eda279814ea72973da8efc55538b1150
|
4074db4436d5fc5fa5395de072557def620f993e
|
/0x07-python-test_driven_development/5-text_indentation.py
|
8cc9fff7f5c7176d0a15c39387dbc13dcfc8d719
|
[] |
no_license
|
Hunt66/holbertonschool-higher_level_programming
|
49b4a93a8b565cdd588e26e6348bed5d3e9d6953
|
b26f42c1d41bb24842d77bf5cf86c441bd8fcf51
|
refs/heads/master
| 2020-03-28T11:11:52.204554 | 2019-03-25T19:45:28 | 2019-03-25T19:45:28 | 148,187,536 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 299 |
py
|
#!/usr/bin/python3
def text_indentation(text):
if not isinstance(text, str):
raise TypeError("text must be a string")
for i in range(0, len(text)):
if text[i] == '.' or text[i] == '?' or text[i] == ':':
text = text[:i + 1] + '\n\n' + text[i + 2:]
print(text)
|
[
"[email protected]"
] | |
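A quick illustration of the `text_indentation` function above; it assumes each '.', '?' or ':' is followed by a space, which the inserted blank line replaces:

```python
# Assumes text_indentation from the file above is in scope.
text_indentation("Hello. How are you? Fine: thanks.")
# Prints:
# Hello.
# (blank line)
# How are you?
# (blank line)
# Fine:
# (blank line)
# thanks.
```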
78216d61d2bb30903297162eb9e88377d48f746e
|
d64a5a065ec3d368ee508f197fc0e61fc7a6607e
|
/build/navigation/base_local_planner/catkin_generated/pkg.develspace.context.pc.py
|
7c5c6126d275af4617da46d917ce5215b4cfcc06
|
[] |
no_license
|
ProgettoSGN/charlie
|
925f0373b79135cab9d307ddd9a4fbac0dba69cf
|
b44c65cbb518fe7d3d7fa1b022ece92df3882595
|
refs/heads/master
| 2023-01-19T09:18:27.382628 | 2020-11-16T12:59:16 | 2020-11-16T12:59:16 | 312,522,792 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 819 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/robot/charlie_ws/devel/include;/home/robot/charlie_ws/src/navigation/base_local_planner/include".split(';') if "/home/robot/charlie_ws/devel/include;/home/robot/charlie_ws/src/navigation/base_local_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "angles;costmap_2d;dynamic_reconfigure;geometry_msgs;message_runtime;nav_core;nav_msgs;pluginlib;roscpp;sensor_msgs;std_msgs;tf2;tf2_ros;voxel_grid".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lbase_local_planner;-ltrajectory_planner_ros".split(';') if "-lbase_local_planner;-ltrajectory_planner_ros" != "" else []
PROJECT_NAME = "base_local_planner"
PROJECT_SPACE_DIR = "/home/robot/charlie_ws/devel"
PROJECT_VERSION = "1.16.4"
|
[
"[email protected]"
] | |
27888c9296c46a1804a96ccbc084f8cacc2d38e5
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/A/alice_corona/mtv_italy_top_20_scraper.py
|
54032d35f4c41ec3e149925dd92b9a1ec496cc2a
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,626 |
py
|
import scraperwiki
import requests
import lxml.html
html = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli-7-gennaio-2012').text
root = lxml.html.fromstring(html)
for item in root.cssselect("span.today") :
date = item.text_content()
date2 = date.replace(' ', '-')
html2 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2).text
print date2
#root2 = lxml.html.fromstring(html2)
# for item in root2.cssselect("a.cpChartEntryImage"):
# song = item.text_content()
# print song
#for item in root2.cssselect("span.pos"):
#position = item.text_content()
#for box in root2.cssselect("a"):
#print date, position
#html3 = requests.get('http://classifiche.mtv.it/classifica/hitlist-italia-classifica-singoli/hitlist-italia-singoli' + date2 + '/pagina-2').text
#root3 = lxml.html.fromstring(html3)#for item in root3.cssselect("span.pos"):
#position = item.text_content()
#print date, position
#for item in root3.cssselect("span.pos"):
#position2 = item.text_content()
#for name in root2.cssselect("a"):
# print date, name.text_content(),
# Blank Python
|
[
"[email protected]"
] | |
f8adb2e5d9355b185e9cfa1e7aa552bd39f443f7
|
245b92f4140f30e26313bfb3b2e47ed1871a5b83
|
/airflow/serialization/serializers/kubernetes.py
|
0ed9c96f7186007e794cfdde0cc62bc5d2e9cec3
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ephraimbuddy/airflow
|
238d6170a0e4f76456f00423124a260527960710
|
3193857376bc2c8cd2eb133017be1e8cbcaa8405
|
refs/heads/main
| 2023-05-29T05:37:44.992278 | 2023-05-13T19:49:43 | 2023-05-13T19:49:43 | 245,751,695 | 2 | 1 |
Apache-2.0
| 2021-05-20T08:10:14 | 2020-03-08T04:28:27 | null |
UTF-8
|
Python
| false | false | 2,226 |
py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from airflow.utils.module_loading import qualname
# lazy loading for performance reasons
serializers = [
"kubernetes.client.models.v1_resource_requirements.V1ResourceRequirements",
"kubernetes.client.models.v1_pod.V1Pod",
]
if TYPE_CHECKING:
from airflow.serialization.serde import U
__version__ = 1
deserializers: list[type[object]] = []
log = logging.getLogger(__name__)
def serialize(o: object) -> tuple[U, str, int, bool]:
from kubernetes.client import models as k8s
if not k8s:
return "", "", 0, False
if isinstance(o, (k8s.V1Pod, k8s.V1ResourceRequirements)):
from airflow.kubernetes.pod_generator import PodGenerator
def safe_get_name(pod):
"""
We're running this in an except block, so we don't want it to
fail under any circumstances, e.g. by accessing an attribute that isn't there
"""
try:
return pod.metadata.name
except Exception:
return None
try:
return PodGenerator.serialize_pod(o), qualname(o), __version__, True
except Exception:
log.warning("Serialization failed for pod %s", safe_get_name(o))
log.debug("traceback for serialization error", exc_info=True)
return "", "", 0, False
return "", "", 0, False
|
[
"[email protected]"
] | |
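A minimal sketch of the serializer above in use, assuming `apache-airflow` and the `kubernetes` client package are installed:

```python
from kubernetes.client import models as k8s

from airflow.serialization.serializers.kubernetes import serialize

pod = k8s.V1Pod(
    metadata=k8s.V1ObjectMeta(name="demo"),
    spec=k8s.V1PodSpec(containers=[k8s.V1Container(name="main", image="busybox")]),
)

# On success: data is the JSON-safe dict from PodGenerator.serialize_pod,
# classname is the qualified name of V1Pod, version is 1, ok is True.
data, classname, version, ok = serialize(pod)
print(classname, version, ok)
```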
574d55b848536bfcfa322627dbf0c6b104d9d507
|
c64bb34a3dde14d3c9bf813bde414a7b3f10611d
|
/ommat_addons/sprogroup_purchase_request/models/__init__.py
|
570e708fb07b60d951eacbd75670b5b49e901c7c
|
[] |
no_license
|
sm2x/my_work
|
ebf2e1abd06191ee59b0d82a23534274a81a3195
|
efc469aee4cd20b038d48d4c09f8257f3f04ba1c
|
refs/heads/master
| 2021-01-07T20:41:45.254025 | 2020-02-12T16:02:46 | 2020-02-12T16:02:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 220 |
py
|
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import sprogroup_purchase_request
from . import vendor_model
from . import products
from . import stock_castom
|
[
"[email protected]"
] | |
abfb82095f1a7ee3c1b729cc4f99af3aa9ed2b7c
|
b9d54c64d4a280703b459b346e42518896e20e0a
|
/lingvo/tools/compare_params.py
|
4515d2c948c01e7afb76a2e16c89d9df30b14990
|
[
"Apache-2.0"
] |
permissive
|
zh794390558/lingvo
|
55a27a4e241414389f0c7b40f381a672bb164372
|
ecdf678179018ca07f4f52d065b9bf3fe2dc7c5a
|
refs/heads/master
| 2020-09-26T18:32:31.631402 | 2019-12-06T04:01:22 | 2019-12-06T04:02:05 | 177,497,272 | 0 | 0 |
Apache-2.0
| 2019-03-25T02:05:42 | 2019-03-25T02:05:42 | null |
UTF-8
|
Python
| false | false | 3,983 |
py
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for comparing two models / hyperparams."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lingvo import compat as tf
from lingvo import model_registry
def _hyperparams_text_to_dict(cfg_text):
"""Converts hyperparams config text to a dictionary of key-value pairs."""
txt_list = cfg_text.split("\n")
pair_list = []
for v in txt_list:
if not v:
continue
vals = v.split(" : ")
if len(vals) != 2:
raise ValueError(v)
pair_list.append(vals)
return dict(pair_list)
def hyperparams_text_diff(cfg1_text, cfg2_text):
"""Computes the differences between two hyperparams.Params texts.
Args:
cfg1_text: A hyperparams.Params().ToText() of the first model config.
cfg2_text: A hyperparams.Params().ToText() of the second model config.
Returns:
A tuple of 3 elements:
- cfg1_not_cfg2: A list of keys in cfg1 but not cfg2.
- cfg2_not_cfg1: A list of keys in cfg2 but not cfg1.
- cfg1_and_cfg2_diff: A dict of common keys whose config values differ: each
value is a tuple of the config values from cfg1 and cfg2 respectively.
"""
cfg1_dict = _hyperparams_text_to_dict(cfg1_text)
cfg2_dict = _hyperparams_text_to_dict(cfg2_text)
cfg1_keys = set(cfg1_dict.keys())
cfg2_keys = set(cfg2_dict.keys())
cfg1_not_cfg2 = sorted(list(cfg1_keys - cfg2_keys))
cfg2_not_cfg1 = sorted(list(cfg2_keys - cfg1_keys))
def get_class_name(v):
try:
idx = v.rindex("/")
return v[idx + 1:]
except ValueError:
return v
cfg1_and_cfg2_diff = {}
for k_intersection in cfg1_keys & cfg2_keys:
c1v = cfg1_dict[k_intersection]
c2v = cfg2_dict[k_intersection]
if k_intersection.endswith(".cls"):
c1v = get_class_name(c1v)
c2v = get_class_name(c2v)
if c1v != c2v:
cfg1_and_cfg2_diff[k_intersection] = (c1v, c2v)
return cfg1_not_cfg2, cfg2_not_cfg1, cfg1_and_cfg2_diff
def print_hyperparams_text_diff(path1, path2, cfg1_not_cfg2, cfg2_not_cfg1,
cfg1_and_cfg2_diff):
"""Prints the differences of the output of hyperparams_text_diff.
Args:
path1: Name of registered model or path to model 1.
path2: Name of registered model or path to model 2.
cfg1_not_cfg2: A list of keys in cfg1 but not cfg2.
cfg2_not_cfg1: A list of keys in cfg2 but not cfg1.
cfg1_and_cfg2_diff: A dictionary of common keys whose config values differ;
each value is a tuple of the config values from cfg1 and cfg2
respectively.
"""
if cfg1_not_cfg2:
print("\n\nKeys in %s but not %s: \n%s\n\n" %
(path1, path2, "\n".join(cfg1_not_cfg2)))
if cfg2_not_cfg1:
print("\n\nKeys in %s but not %s: \n%s\n\n" %
(path2, path1, "\n".join(cfg2_not_cfg1)))
if cfg1_and_cfg2_diff:
print("\n\nKeys with differences and their values: \n\n")
for k, v in sorted(cfg1_and_cfg2_diff.items()):
v1, v2 = v
print("%s: [%s] vs. [%s]" % (k, v1, v2))
print("\n\n")
def get_model_params_as_text(model_path):
try:
cfg = model_registry.GetParams(model_path, "Train")
return cfg.ToText()
except LookupError:
# Try reading as file.
return tf.io.gfile.GFile(model_path).read()
|
[
"[email protected]"
] | |
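A small self-contained example of `hyperparams_text_diff` above, using inline config texts instead of the model registry; note that keys ending in `.cls` are compared by class name only:

```python
# Assumes hyperparams_text_diff from the module above is in scope.
cfg1 = "a : 1\nb : 2\np.cls : some/path/Foo"
cfg2 = "b : 3\nc : 4\np.cls : other/path/Foo"

only1, only2, diff = hyperparams_text_diff(cfg1, cfg2)
print(only1)  # ['a']
print(only2)  # ['c']
print(diff)   # {'b': ('2', '3')}; p.cls matches because both resolve to 'Foo'
```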
621c1ffa8b324f1b4fe00aa56300fb2c3f0c237c
|
ca034a0fe0ae4661ed952ea597e1ba97a1f48d7e
|
/_build.py
|
60c79d7104f07978bcdb6ca7f56f2e3ed2bf1f18
|
[] |
no_license
|
tokejepsen/docs
|
2bde19a64551b63c6017dec7103a17504f0d4019
|
631498f1552c3c5e45754e3f249aef0d254c9e15
|
refs/heads/master
| 2020-03-19T12:41:15.291282 | 2018-06-07T21:32:34 | 2018-06-07T21:32:34 | 136,533,338 | 0 | 0 | null | 2018-06-07T21:33:13 | 2018-06-07T21:33:13 | null |
UTF-8
|
Python
| false | false | 6,365 |
py
|
"""Generate markdown from template.
This module converts bespoke markdown into markdown compatible with
the bespoke mkdocs theme developed for Avalon.
"""
import os
import sys
import json
import time
import shutil
import contextlib
import subprocess
from tempfile import mkdtemp
@contextlib.contextmanager
def tempfile(name):
try:
tempdir = mkdtemp()
fname = os.path.join(tempdir, name)
yield fname
finally:
shutil.rmtree(tempdir)
def on_template(template):
definition = template.strip("{{").rstrip().rstrip("}}")
key, value = definition.split(":")
if key == "schema":
return on_schema(value)
if key == "api" and value == "members":
return on_api_members()
return template
def on_block(language, block):
if language == "python":
return on_python(block)
return ""
def on_page(page):
formatted_time = time.strftime("%b %d %Y %H:%M:%S GMT+0", time.gmtime())
return """\
<p>{time}</p>
<br>
{content}\
""".format(time=formatted_time)
def on_api_members():
from avalon import api
table = """\
| Member | Description
|:-------|:--------
"""
row = "| `{name}` | {description}\n"
for name in api.__all__:
member = getattr(api, name)
doc = member.__doc__
if doc is None:
raise SyntaxError("'%s' is missing a docstring." % name)
table += row.format(
name=name,
description=doc.splitlines()[0]
)
return table
def on_schema(name):
from avalon import schema
schema = schema._cache[name]
description = """\
```json
{dump}
```
""".format(dump=json.dumps({
key: value.get("description", "")
for key, value in schema["properties"].items()
}, indent=4, sort_keys=True))
example = """\
**Example**
```json
{dump}
```
""".format(dump=json.dumps({
key: value.get("example", "")
for key, value in schema["properties"].items()
}, indent=4, sort_keys=True))
definition = """\
**Definition**
| Key | Description
|:----|:------------
"""
row = "| `{key}` | {description}\n"
for key, data in schema["properties"].items():
if "requires" in schema and key not in schema["requires"]:
continue
if "description" not in data:
raise SyntaxError("'%s' of %s must have a "
"description" % (key, name))
data["key"] = key
try:
data["type"] = {
"string": "str",
"number": "int",
"array": "list",
"object": "dict"
}[data["type"]]
except KeyError:
data["type"] = "any"
data["required"] = str(key in schema.get("required", {}))
definition += row.format(**data)
root = "https://github.com/getavalon/core/tree/master/avalon/schema"
link = """\
<a href="{root}/{name}" title="{name}" class="md-source-file">
{name}
</a>
""".format(root=root, name=name)
return os.linesep.join([link, description, example, definition])
def on_python(block):
with tempfile("block.py") as fname:
with open(fname, "w") as f:
f.write(os.linesep.join(block))
try:
output = subprocess.check_output(
[sys.executable, fname],
stderr=subprocess.STDOUT,
universal_newlines=True
)
except subprocess.CalledProcessError as e:
output = e.output
output = "\n".join(
"<span class=\"p\">{line}</span>".format(line=line)
for line in output.splitlines()
)
source = """\
```python
{input}
```
""".format(input="".join(block))
output = """\
<table class="codehilitetable output">
<tbody>
<tr>
<td class="code">
<div class="codehilite" id="__code_1">
<pre>
{output}\
</pre>
</div>
</td>
</tr>
</tbody>
</table>
""".format(output=output) if output else ""
return "\n".join([source, output])
def parse(fname):
parsed = list()
blocks = list()
with open(fname) as f:
in_block = False
current_block = None
current_language = None
line_no = 0
for line in f:
line_no += 1
if line_no == 1 and line.startswith("build: false"):
print("Skipping '%s'.." % fname)
parsed = f.read()
break
if line.startswith("{{"):
line = on_template(line)
if in_block and line.startswith("```"):
print("Running Python..")
print("".join("\t%s" % line for line in current_block))
line = on_block(current_language, current_block)
in_block = False
current_language = None
parsed.append(line)
elif in_block:
current_block.append(line)
elif line.startswith("```python"):
in_block = True
current_language = "python"
current_block = list()
blocks.append(current_block)
else:
parsed.append(line)
return "".join(parsed)
if __name__ == '__main__':
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("path", nargs='?')
args = parser.parse_args()
cd = os.path.abspath(os.path.dirname(__file__))
os.chdir(cd)
if args.path and os.path.isfile(args.path):
files = [args.path]
else:
files = list()
path = args.path
for base, dirs, fnames in os.walk("pages"):
for fname in fnames:
name, ext = os.path.splitext(fname)
if ext != ".md":
continue
src = os.path.join(base, fname)
files.append(src)
results = list()
for src in files:
print("Building '%s'.." % src)
dst = src.replace("pages", "build")
parsed = parse(src)
results.append((dst, parsed))
# Parsing can take some time, so write
# files all in one batch when done
for dst, parsed in results:
try:
os.makedirs(os.path.dirname(dst))
except OSError:
pass
with open(dst, "w") as f:
f.write(parsed)
|
[
"[email protected]"
] | |
95dd605d13b812f1e44aa83d1847cdec84539d9d
|
50e089f906489b2586cc586712420fd085f1f637
|
/nested_functions.py
|
bc7e0eb17a8dd8bb22b42ee4d5c4d9a3db320695
|
[] |
no_license
|
AaronTho/Python_Notes
|
5ab629e3b3d49be5c68d2a285a79683dc604cd3e
|
4aa0e1fb4a35763458a1da467e1bb01e393bc972
|
refs/heads/main
| 2023-07-24T00:59:23.552952 | 2021-09-11T17:32:25 | 2021-09-11T17:32:25 | 375,399,260 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 148 |
py
|
def greeting(first, last):
def full_name():
return f'{first} {last}'
print(f'Hi {full_name()}!')
greeting('Kristine', 'Hudgens')
|
[
"[email protected]"
] | |
be5efb5f77e4571b19fcd2f4531c1a7779771783
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/单词缩写/527. 单词缩写.py
|
05bf160951915d6d65e4e60fcb9259136c8d17ac
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,753 |
py
|
from typing import List
# Abbreviation rules:
# 1. The initial abbreviation is the first letter + the count of omitted letters + the last letter.
# 2. If two words collide, use a longer prefix instead of just the first letter until every word maps to a unique abbreviation.
# 3. If the abbreviation is not shorter than the original word, keep the word unchanged.
# Greedy:
# !First give every word its shortest abbreviation; then, for all words whose abbreviations collide, lengthen those duplicates' prefixes.
class Solution:
def wordsAbbreviation(self, words: List[str]) -> List[str]:
def compress(word: str, start=0) -> str:
if len(word) - start <= 3:
return word
return word[: start + 1] + str(len(word) - start - 2) + word[-1]
n = len(words)
res = list(map(compress, words))
needStartFrom = [0] * n
for i in range(n):
while True:
dup = set()
for j in range(i + 1, n):
if res[i] == res[j]:
dup.add(j)
if not dup:
break
# For each word in the colliding group, advance start by 1 and re-compress
dup.add(i)
for dupeIndex in dup:
needStartFrom[dupeIndex] += 1
res[dupeIndex] = compress(words[dupeIndex], needStartFrom[dupeIndex])
return res
print(
Solution().wordsAbbreviation(
words=[
"like",
"god",
"internal",
"me",
"internet",
"interval",
"intension",
"face",
"intrusion",
]
)
)
# Output: ["l2e","god","internal","me","i6t","interval","inte4n","f2e","intr4n"]
|
[
"[email protected]"
] | |
d0decda0750a41a8cd360423f492d5acd9c51db4
|
7d7ef4997628d92c9f2ad119ba956c711cc7470e
|
/preprocess/set_informations.py
|
1a0daaba394c3457093e1ac50ab82ccd56be0582
|
[] |
no_license
|
jpra2/bifasico_v2
|
6a53031d2b4c37ee231770a61c09815146f897d8
|
061330d5cc1185946283379a2478732ae9bb1b3b
|
refs/heads/master
| 2020-06-01T14:39:30.547389 | 2019-06-26T14:42:51 | 2019-06-26T14:42:51 | 190,817,718 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,145 |
py
|
from pymoab import types, rng
def injector_producer_press(mb, gama_w, gama_o, gravity, all_nodes, volumes_d, tags):
press_tag = tags['P']
values = mb.tag_get_data(press_tag, volumes_d, flat=True)
wells_injector_tag = mb.tag_get_handle('WELLS_INJECTOR', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_producer_tag = mb.tag_get_handle('WELLS_PRODUCER', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
tags['WELLS_INJECTOR'] = wells_injector_tag
tags['WELLS_PRODUCER'] = wells_producer_tag
wells_injector_meshset = mb.create_meshset()
wells_producer_meshset = mb.create_meshset()
m = values.mean()
injectors = []
producers = []
for i, v in enumerate(values):
if v > m:
injectors.append(volumes_d[i])
else:
producers.append(volumes_d[i])
producers = rng.Range(producers)
injectors = rng.Range(injectors)
mb.add_entities(wells_producer_meshset, producers)
mb.add_entities(wells_injector_meshset, injectors)
mb.tag_set_data(wells_injector_tag, 0, wells_injector_meshset)
mb.tag_set_data(wells_producer_tag, 0, wells_producer_meshset)
if gravity:
set_p_with_gravity(mb, press_tag, all_nodes, injectors, producers, gama_w, gama_o, tags)
return injectors, producers
def set_p_with_gravity(mb, press_tag, all_nodes, injectors, producers, gama_w, gama_o, tags):
coords = mb.tag_get_data(tags['NODES'], all_nodes)
coords = coords.reshape([len(all_nodes), 3])
maxs = coords.max(axis=0)
Lz = maxs[2]
values = mb.tag_get_data(press_tag, injectors, flat=True)
z_elems = -1*mb.tag_get_data(tags['CENT'], injectors)[:,2]
delta_z = z_elems + Lz
pressao = gama_w*(delta_z) + values
mb.tag_set_data(press_tag, injectors, pressao)
values = mb.tag_get_data(press_tag, producers, flat=True)
z_elems = -1*mb.tag_get_data(tags['CENT'], producers)[:,2]
delta_z = z_elems + Lz
pressao = gama_o*(delta_z) + values
mb.tag_set_data(press_tag, producers, pressao)
def injector_producer(mb, gama_w, gama_o, gravity, all_nodes, volumes_d, volumes_n, tags):
neuman_tag = tags['Q']
press_tag = tags['P']
wells_injector_tag = mb.tag_get_handle('WELLS_INJECTOR', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_producer_tag = mb.tag_get_handle('WELLS_PRODUCER', 1, types.MB_TYPE_HANDLE, types.MB_TAG_SPARSE, True)
wells_injector_meshset = mb.create_meshset()
wells_producer_meshset = mb.create_meshset()
mb.add_entities(wells_producer_meshset, volumes_d)
mb.add_entities(wells_injector_meshset, volumes_n)
mb.tag_set_data(wells_injector_tag, 0, wells_injector_meshset)
mb.tag_set_data(wells_producer_tag, 0, wells_producer_meshset)
if gravity:
set_p_with_gravity(mb, tags['P'], all_nodes, volumes_n, volumes_d, gama_w, gama_o, tags)
return volumes_n, volumes_d
def convert_to_SI(mb, tags, all_volumes, all_faces, all_nodes, volumes_d, volumes_n):
from preprocess import conversao as conv
k_pe_to_m = 1.0
k_md_to_m2 = 1.0
k_psi_to_pa = 1.0
k_bbldia_to_m3seg = 1.0
k_pe_to_m = conv.pe_to_m(k_pe_to_m)
k_md_to_m2 = conv.milidarcy_to_m2(k_md_to_m2)
k_psi_to_pa = conv.psi_to_Pa(k_psi_to_pa)
k_bbldia_to_m3seg = conv.bbldia_to_m3seg(k_bbldia_to_m3seg)
p_tag = tags['P']
k_harm_tag = tags['KHARM']
cent_tag = tags['CENT']
press_values = mb.tag_get_data(tags['P'], volumes_d, flat=True)
press_values *= k_psi_to_pa
mb.tag_set_data(p_tag, volumes_d, press_values)
if len(volumes_n) > 0:
q_values = mb.tag_get_data(tags['Q'], volumes_n, flat=True)
q_values *= k_bbldia_to_m3seg
mb.tag_set_data(tags['Q'], volumes_n, q_values)
k_harms = mb.tag_get_data(tags['KHARM'], all_faces, flat=True)
k_harms *= k_md_to_m2*k_pe_to_m
mb.tag_set_data(k_harm_tag, all_faces, k_harms)
centroids = (k_pe_to_m)*mb.tag_get_data(cent_tag, all_volumes)
mb.tag_set_data(cent_tag, all_volumes, centroids)
coords = mb.tag_get_data(tags['NODES'], all_nodes)
mb.tag_set_data(tags['NODES'], all_nodes, coords*(k_pe_to_m))
|
[
"[email protected]"
] | |
77878cfdb6cf7b01007cf4810306020ad7afae92
|
e2dc5943070ddb3e6198711a9fe3c4dda4e8449a
|
/190227/최소배열/venv/Scripts/easy_install-script.py
|
338e6276c2540c67179947380316d9e7e30b6741
|
[] |
no_license
|
jiheelee/stack
|
eb7b34073180a9a46221b974585b4cd98cd42e59
|
b99dba43d1fb2bfae4b4643fda8a523ba18450e5
|
refs/heads/master
| 2020-04-25T15:25:53.306308 | 2019-02-27T08:53:28 | 2019-02-27T08:53:28 | 172,878,764 | 0 | 0 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 471 |
py
|
#!C:\Users\student\PycharmProjects\190227\최소배열\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
|
[
"[email protected]"
] | |
b4c9d08e5f326c5490092f715abc0fd8db325ea5
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-elb/huaweicloudsdkelb/v2/model/list_healthmonitors_request.py
|
55e42a77119b95340d34d87e638ffc28030ee5da
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 |
NOASSERTION
| 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null |
UTF-8
|
Python
| false | false | 15,706 |
py
|
# coding: utf-8
import pprint
import re
import six
class ListHealthmonitorsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'limit': 'int',
'marker': 'str',
'page_reverse': 'bool',
'id': 'str',
'name': 'str',
'delay': 'int',
'max_retries': 'int',
'admin_state_up': 'bool',
'timeout': 'int',
'type': 'str',
'monitor_port': 'int',
'expected_codes': 'str',
'domain_name': 'str',
'url_path': 'str',
'http_method': 'str'
}
attribute_map = {
'limit': 'limit',
'marker': 'marker',
'page_reverse': 'page_reverse',
'id': 'id',
'name': 'name',
'delay': 'delay',
'max_retries': 'max_retries',
'admin_state_up': 'admin_state_up',
'timeout': 'timeout',
'type': 'type',
'monitor_port': 'monitor_port',
'expected_codes': 'expected_codes',
'domain_name': 'domain_name',
'url_path': 'url_path',
'http_method': 'http_method'
}
def __init__(self, limit=None, marker=None, page_reverse=None, id=None, name=None, delay=None, max_retries=None, admin_state_up=None, timeout=None, type=None, monitor_port=None, expected_codes=None, domain_name=None, url_path=None, http_method=None):
"""ListHealthmonitorsRequest - a model defined in huaweicloud sdk"""
self._limit = None
self._marker = None
self._page_reverse = None
self._id = None
self._name = None
self._delay = None
self._max_retries = None
self._admin_state_up = None
self._timeout = None
self._type = None
self._monitor_port = None
self._expected_codes = None
self._domain_name = None
self._url_path = None
self._http_method = None
self.discriminator = None
if limit is not None:
self.limit = limit
if marker is not None:
self.marker = marker
if page_reverse is not None:
self.page_reverse = page_reverse
if id is not None:
self.id = id
if name is not None:
self.name = name
if delay is not None:
self.delay = delay
if max_retries is not None:
self.max_retries = max_retries
if admin_state_up is not None:
self.admin_state_up = admin_state_up
if timeout is not None:
self.timeout = timeout
if type is not None:
self.type = type
if monitor_port is not None:
self.monitor_port = monitor_port
if expected_codes is not None:
self.expected_codes = expected_codes
if domain_name is not None:
self.domain_name = domain_name
if url_path is not None:
self.url_path = url_path
if http_method is not None:
self.http_method = http_method
@property
def limit(self):
"""Gets the limit of this ListHealthmonitorsRequest.
Number of health checks on each page in a pagination query.
:return: The limit of this ListHealthmonitorsRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListHealthmonitorsRequest.
Number of health checks on each page in a pagination query.
:param limit: The limit of this ListHealthmonitorsRequest.
:type: int
"""
self._limit = limit
@property
def marker(self):
"""Gets the marker of this ListHealthmonitorsRequest.
ID of the resource from which pagination starts, i.e. the ID of the last health check on the previous page. If omitted, the first page is queried.
:return: The marker of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._marker
@marker.setter
def marker(self, marker):
"""Sets the marker of this ListHealthmonitorsRequest.
ID of the resource from which pagination starts, i.e. the ID of the last health check on the previous page. If omitted, the first page is queried.
:param marker: The marker of this ListHealthmonitorsRequest.
:type: str
"""
self._marker = marker
@property
def page_reverse(self):
"""Gets the page_reverse of this ListHealthmonitorsRequest.
Pagination order: true means paging backwards, false means paging forwards. Defaults to false.
:return: The page_reverse of this ListHealthmonitorsRequest.
:rtype: bool
"""
return self._page_reverse
@page_reverse.setter
def page_reverse(self, page_reverse):
"""Sets the page_reverse of this ListHealthmonitorsRequest.
Pagination order: true means paging backwards, false means paging forwards. Defaults to false.
:param page_reverse: The page_reverse of this ListHealthmonitorsRequest.
:type: bool
"""
self._page_reverse = page_reverse
@property
def id(self):
"""Gets the id of this ListHealthmonitorsRequest.
Health check ID.
:return: The id of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ListHealthmonitorsRequest.
Health check ID.
:param id: The id of this ListHealthmonitorsRequest.
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this ListHealthmonitorsRequest.
Health check name.
:return: The name of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ListHealthmonitorsRequest.
Health check name.
:param name: The name of this ListHealthmonitorsRequest.
:type: str
"""
self._name = name
@property
def delay(self):
"""Gets the delay of this ListHealthmonitorsRequest.
Health check interval, in seconds, in the range [1, 50].
:return: The delay of this ListHealthmonitorsRequest.
:rtype: int
"""
return self._delay
@delay.setter
def delay(self, delay):
"""Sets the delay of this ListHealthmonitorsRequest.
Health check interval, in seconds, in the range [1, 50].
:param delay: The delay of this ListHealthmonitorsRequest.
:type: int
"""
self._delay = delay
@property
def max_retries(self):
"""Gets the max_retries of this ListHealthmonitorsRequest.
Maximum number of health check retries, in the range [1, 10].
:return: The max_retries of this ListHealthmonitorsRequest.
:rtype: int
"""
return self._max_retries
@max_retries.setter
def max_retries(self, max_retries):
"""Sets the max_retries of this ListHealthmonitorsRequest.
Maximum number of health check retries, in the range [1, 10].
:param max_retries: The max_retries of this ListHealthmonitorsRequest.
:type: int
"""
self._max_retries = max_retries
@property
def admin_state_up(self):
"""Gets the admin_state_up of this ListHealthmonitorsRequest.
Administrative state of the health check: true (enabled, the default) or false (disabled).
:return: The admin_state_up of this ListHealthmonitorsRequest.
:rtype: bool
"""
return self._admin_state_up
@admin_state_up.setter
def admin_state_up(self, admin_state_up):
"""Sets the admin_state_up of this ListHealthmonitorsRequest.
Administrative state of the health check: true (enabled, the default) or false (disabled).
:param admin_state_up: The admin_state_up of this ListHealthmonitorsRequest.
:type: bool
"""
self._admin_state_up = admin_state_up
@property
def timeout(self):
"""Gets the timeout of this ListHealthmonitorsRequest.
Health check timeout, in seconds, in the range [1, 50]. A value smaller than delay is recommended.
:return: The timeout of this ListHealthmonitorsRequest.
:rtype: int
"""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Sets the timeout of this ListHealthmonitorsRequest.
Health check timeout, in seconds, in the range [1, 50]. A value smaller than delay is recommended.
:param timeout: The timeout of this ListHealthmonitorsRequest.
:type: int
"""
self._timeout = timeout
@property
def type(self):
"""Gets the type of this ListHealthmonitorsRequest.
Health check type: TCP, UDP_CONNECT, or HTTP.
:return: The type of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ListHealthmonitorsRequest.
Health check type: TCP, UDP_CONNECT, or HTTP.
:param type: The type of this ListHealthmonitorsRequest.
:type: str
"""
self._type = type
@property
def monitor_port(self):
"""Gets the monitor_port of this ListHealthmonitorsRequest.
Health check port. Empty by default, in which case the backend server's protocol_port is used as the health check port.
:return: The monitor_port of this ListHealthmonitorsRequest.
:rtype: int
"""
return self._monitor_port
@monitor_port.setter
def monitor_port(self, monitor_port):
"""Sets the monitor_port of this ListHealthmonitorsRequest.
Health check port. Empty by default, in which case the backend server's protocol_port is used as the health check port.
:param monitor_port: The monitor_port of this ListHealthmonitorsRequest.
:type: int
"""
self._monitor_port = monitor_port
@property
def expected_codes(self):
"""Gets the expected_codes of this ListHealthmonitorsRequest.
Expected HTTP response status codes. Default: "200". Accepts a single value (e.g. 200), a list (e.g. 200,202), or a range (e.g. 200-204). Effective only when type is HTTP. This field is reserved and not yet in use.
:return: The expected_codes of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._expected_codes
@expected_codes.setter
def expected_codes(self, expected_codes):
"""Sets the expected_codes of this ListHealthmonitorsRequest.
Expected HTTP response status codes. Default: "200". Accepts a single value (e.g. 200), a list (e.g. 200,202), or a range (e.g. 200-204). Effective only when type is HTTP. This field is reserved and not yet in use.
:param expected_codes: The expected_codes of this ListHealthmonitorsRequest.
:type: str
"""
self._expected_codes = expected_codes
@property
def domain_name(self):
"""Gets the domain_name of this ListHealthmonitorsRequest.
Domain name of the HTTP request sent during health checks. Effective only when type is HTTP. Empty by default, in which case the load balancer's vip_address is used as the request destination. Must start with a digit or letter and may contain only digits, letters, '-' and '.'. Example: www.huaweitest.com
:return: The domain_name of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._domain_name
@domain_name.setter
def domain_name(self, domain_name):
"""Sets the domain_name of this ListHealthmonitorsRequest.
Domain name of the HTTP request sent during health checks. Effective only when type is HTTP. Empty by default, in which case the load balancer's vip_address is used as the request destination. Must start with a digit or letter and may contain only digits, letters, '-' and '.'. Example: www.huaweitest.com
:param domain_name: The domain_name of this ListHealthmonitorsRequest.
:type: str
"""
self._domain_name = domain_name
@property
def url_path(self):
"""Gets the url_path of this ListHealthmonitorsRequest.
Path of the HTTP request sent during health checks. Defaults to "/" and must start with "/". Effective only when type is HTTP. Example: "/test"
:return: The url_path of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._url_path
@url_path.setter
def url_path(self, url_path):
"""Sets the url_path of this ListHealthmonitorsRequest.
Path of the HTTP request sent during health checks. Defaults to "/" and must start with "/". Effective only when type is HTTP. Example: "/test"
:param url_path: The url_path of this ListHealthmonitorsRequest.
:type: str
"""
self._url_path = url_path
@property
def http_method(self):
"""Gets the http_method of this ListHealthmonitorsRequest.
HTTP request method. Default: GET. Options: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH. Effective only when type is HTTP.
:return: The http_method of this ListHealthmonitorsRequest.
:rtype: str
"""
return self._http_method
@http_method.setter
def http_method(self, http_method):
"""Sets the http_method of this ListHealthmonitorsRequest.
HTTP request method. Default: GET. Options: GET, HEAD, POST, PUT, DELETE, TRACE, OPTIONS, CONNECT, PATCH. Effective only when type is HTTP.
:param http_method: The http_method of this ListHealthmonitorsRequest.
:type: str
"""
self._http_method = http_method
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListHealthmonitorsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
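A brief usage sketch of the request model above; every constructor argument is optional, and unset fields remain `None`:

```python
# Assumes ListHealthmonitorsRequest from the module above is in scope.
req = ListHealthmonitorsRequest(type="HTTP", delay=5, timeout=3, max_retries=3)

print(req.to_dict()["type"])  # HTTP
print(req)  # pretty-printed dict of every field, via __repr__/to_str
assert req == ListHealthmonitorsRequest(type="HTTP", delay=5, timeout=3, max_retries=3)
```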
155751b1f16e508681de993ca06665d2975eb2ce
|
9c5abcd43318ef7553be95a95a859a0f3e41a467
|
/examples/tutorials/11_load.py
|
84afd7397952523fee2c5088abc9332745ba37a5
|
[
"BSD-3-Clause"
] |
permissive
|
xzy103/PyEasyGraphics
|
d66da503f601868fe39d404a77b3b8d0a43a2e52
|
16bd6f21dd6f3d76edaab2b533500e9aa789c6b2
|
refs/heads/master
| 2020-05-19T07:31:22.005958 | 2019-02-25T09:54:41 | 2019-02-25T09:54:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 309 |
py
|
"""
Load and display an image
"""
if __name__ == "__main__":
from easygraphics import *
init_graph(800, 600)
img = load_image("test.png")
draw_image((get_width() - img.get_width()) // 2,
(get_height() - img.get_height()) // 2, img)
pause()
img.close()
close_graph()
|
[
"[email protected]"
] | |
fd27ad6a8290c32a967a034eca6ecec5d2b83195
|
02b650423c122fd4c2a200eb6a90c4530a75511d
|
/tests/unit/api/commands/test_Schedule_Commands.py
|
0ab731997fe190c8d7405e2d013704fed5854efd
|
[
"Apache-2.0"
] |
permissive
|
OpenSecuritySummit/OSS-Bot
|
67b5f83ca94c612fbbed7610b92e4174e1f512de
|
8341df2be0f12c59ca1b0e9c407b650ef2fa44f9
|
refs/heads/master
| 2021-08-10T15:28:22.046363 | 2020-05-28T22:56:41 | 2020-05-28T22:56:41 | 185,175,370 | 1 | 1 |
Apache-2.0
| 2019-06-03T09:49:15 | 2019-05-06T10:31:25 |
Python
|
UTF-8
|
Python
| false | false | 572 |
py
|
from pbx_gs_python_utils.utils.Dev import Dev
from oss_bot.Deploy import Deploy
from oss_bot.api.commands.Participant_Commands import Participant_Commands
from oss_bot.api.commands.Schedule_Commands import Schedule_Commands
from oss_bot.helpers.Test_Helper import Test_Helper
class test_Schedule_Commands(Test_Helper):
def setUp(self):
super().setUp()
self.result = None
def tearDown(self):
if self.result is not None:
Dev.pprint(self.result)
def test_today(self):
Schedule_Commands.today(None,'DJ8UA0RFT',[])
|
[
"[email protected]"
] | |
714febafa72df8362cb0c1a03d8eeec7bb22472c
|
005d644bb56f8c9e168834ae7b8aa6eb6ac121fd
|
/splash/ui_splash_screen.py
|
f04928ecdf3274c3ce622ab71292fb9879474210
|
[] |
no_license
|
TrendingTechnology/Notepy
|
950c797a111a57aedd521f558cdebe14e643e03d
|
029f0725a667d2085e20a9ad60e9032d4535830c
|
refs/heads/main
| 2023-06-20T21:02:48.004169 | 2021-07-17T05:47:56 | 2021-07-17T05:47:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,474 |
py
|
from PySide2.QtCore import (QCoreApplication, QMetaObject, QObject, QPoint,
QRect, QSize, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QLinearGradient, QPalette, QPainter, QPixmap,
QRadialGradient)
from PySide2.QtWidgets import *
class Ui_SplashScreen(object):
def setupUi(self, SplashScreen):
if SplashScreen.objectName():
SplashScreen.setObjectName(u"SplashScreen")
SplashScreen.resize(680, 400)
self.centralwidget = QWidget(SplashScreen)
self.centralwidget.setObjectName(u"centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(u"verticalLayout")
self.verticalLayout.setContentsMargins(10, 10, 10, 10)
self.dropShadowFrame = QFrame(self.centralwidget)
self.dropShadowFrame.setObjectName(u"dropShadowFrame")
self.dropShadowFrame.setStyleSheet(u"QFrame { \n"
" background-color: rgb(255, 255, 255); \n"
" color: rgb(220, 220, 220);\n"
" border-radius: 10px;\n"
"}")
self.dropShadowFrame.setFrameShape(QFrame.StyledPanel)
self.dropShadowFrame.setFrameShadow(QFrame.Raised)
self.label_title = QLabel(self.dropShadowFrame)
self.label_title.setObjectName(u"label_title")
self.label_title.setGeometry(QRect(0, 90, 661, 61))
font = QFont()
font.setFamily(u"Segoe UI")
font.setPointSize(40)
self.label_title.setFont(font)
self.label_title.setStyleSheet(u"color: rgb(254, 121, 199);")
self.label_title.setAlignment(Qt.AlignCenter)
self.label_description = QLabel(self.dropShadowFrame)
self.label_description.setObjectName(u"label_description")
self.label_description.setGeometry(QRect(0, 150, 661, 31))
font1 = QFont()
font1.setFamily(u"Segoe UI")
font1.setPointSize(14)
self.label_description.setFont(font1)
self.label_description.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_description.setAlignment(Qt.AlignCenter)
self.progressBar = QProgressBar(self.dropShadowFrame)
self.progressBar.setObjectName(u"progressBar")
self.progressBar.setGeometry(QRect(50, 280, 561, 23))
self.progressBar.setStyleSheet(u"QProgressBar {\n"
" \n"
" background-color: rgb(98, 114, 164);\n"
" color: rgb(200, 200, 200);\n"
" border-style: none;\n"
" border-radius: 10px;\n"
" text-align: center;\n"
"}\n"
"QProgressBar::chunk{\n"
" border-radius: 10px;\n"
" background-color: qlineargradient(spread:pad, x1:0, y1:0.511364, x2:1, y2:0.523, stop:0 rgba(254, 121, 199, 255), stop:1 rgba(170, 85, 255, 255));\n"
"}")
self.progressBar.setValue(24)
self.label_loading = QLabel(self.dropShadowFrame)
self.label_loading.setObjectName(u"label_loading")
self.label_loading.setGeometry(QRect(0, 320, 661, 21))
font2 = QFont()
font2.setFamily(u"Segoe UI")
font2.setPointSize(12)
self.label_loading.setFont(font2)
self.label_loading.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_loading.setAlignment(Qt.AlignCenter)
self.label_credits = QLabel(self.dropShadowFrame)
self.label_credits.setObjectName(u"label_credits")
self.label_credits.setGeometry(QRect(20, 350, 621, 21))
font3 = QFont()
font3.setFamily(u"Segoe UI")
font3.setPointSize(10)
self.label_credits.setFont(font3)
self.label_credits.setStyleSheet(u"color: rgb(98, 114, 164);")
self.label_credits.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
self.verticalLayout.addWidget(self.dropShadowFrame)
SplashScreen.setCentralWidget(self.centralwidget)
self.retranslateUi(SplashScreen)
QMetaObject.connectSlotsByName(SplashScreen)
# setupUi
def retranslateUi(self, SplashScreen):
SplashScreen.setWindowTitle(QCoreApplication.translate("SplashScreen", u"MainWindow", None))
self.label_title.setText(QCoreApplication.translate("SplashScreen", u"<strong>Notepy</strong>", None))
self.label_loading.setText(QCoreApplication.translate("SplashScreen", u"'Writing is the painting of the voice'" , None))
self.label_credits.setText(QCoreApplication.translate("SplashScreen", u"<strong>Created by</strong>: Mirko Rovere", None))
# retranslateUi
|
[
"[email protected]"
] | |
223fd5fbb8b7756b2d9e6bea44f9505273bd3aa9
|
b022d86a5beabbc830d3896ccfba4f219875cbca
|
/meshio/medit_io.py
|
87bcf8e31d2cc5a2c2217046f45872dff2bb8d40
|
[
"MIT"
] |
permissive
|
nicoguaro/meshio
|
4bb136417f0c93990b8ebbc3b9cd53d0b0b044c1
|
5b3a15e72f3a0c134d176b016d2c16e10e890f27
|
refs/heads/master
| 2021-01-22T04:24:37.177143 | 2017-05-18T17:45:50 | 2017-05-18T17:45:50 | 92,463,341 | 1 | 0 | null | 2017-05-26T02:24:27 | 2017-05-26T02:24:27 | null |
UTF-8
|
Python
| false | false | 3,757 |
py
|
# -*- coding: utf-8 -*-
#
'''
I/O for Medit's format, cf.
<https://people.sc.fsu.edu/~jburkardt/data/medit/medit.html>.
.. moduleauthor:: Nico Schlömer <[email protected]>
'''
from itertools import islice
import numpy
def read(filename):
with open(filename) as f:
points, cells = read_buffer(f)
return points, cells, {}, {}, {}
def read_buffer(f):
dim = 0
cells = {}
while True:
try:
line = next(islice(f, 1))
except StopIteration:
break
stripped = line.strip()
# skip comments and empty lines
if len(stripped) == 0 or stripped[0] == '#':
continue
assert stripped[0].isalpha()
keyword = stripped.split(' ')[0]
meshio_from_medit = {
'Edges': ('line', 2),
'Triangles': ('triangle', 3),
'Quadrilaterals': ('quad', 4),
'Tetrahedra': ('tetra', 4),
'Hexahedra': ('hexahedra', 8)
}
if keyword == 'MeshVersionFormatted':
assert stripped[-1] == '1'
elif keyword == 'Dimension':
dim = int(stripped[-1])
elif keyword == 'Vertices':
assert dim > 0
# The first line is the number of nodes
line = next(islice(f, 1))
num_verts = int(line)
points = numpy.empty((num_verts, dim), dtype=float)
for k, line in enumerate(islice(f, num_verts)):
# Throw away the label immediately
points[k] = numpy.array(line.split(), dtype=float)[:-1]
elif keyword in meshio_from_medit:
meshio_name, num = meshio_from_medit[keyword]
# The first line is the number of elements
line = next(islice(f, 1))
num_cells = int(line)
cell_data = numpy.empty((num_cells, num), dtype=int)
for k, line in enumerate(islice(f, num_cells)):
data = numpy.array(line.split(), dtype=int)
# Throw away the label
cell_data[k] = data[:-1]
# adapt 0-base
cells[meshio_name] = cell_data - 1
elif keyword == 'End':
pass
else:
raise RuntimeError('Unknown keyword \'%s\'.' % keyword)
return points, cells
def write(
filename,
points,
cells,
point_data=None,
cell_data=None,
field_data=None
):
with open(filename, 'wb') as fh:
fh.write(b'MeshVersionFormatted 1\n')
fh.write(b'# Created by meshio\n')
# Dimension info
d = '\nDimension %d\n' % points.shape[1]
fh.write(d.encode('utf-8'))
# vertices
fh.write(b'\nVertices\n')
fh.write(('%d\n' % len(points)).encode('utf-8'))
labels = numpy.ones(len(points), dtype=int)
data = numpy.c_[points, labels]
fmt = ' '.join(['%r'] * points.shape[1]) + ' %d'
numpy.savetxt(fh, data, fmt)
medit_from_meshio = {
'line': ('Edges', 2),
'triangle': ('Triangles', 3),
'quad': ('Quadrilaterals', 4),
'tetra': ('Tetrahedra', 4),
'hexahedra': ('Hexahedra', 8)
}
for key, data in cells.items():
medit_name, num = medit_from_meshio[key]
fh.write(b'\n')
fh.write(('%s\n' % medit_name).encode('utf-8'))
fh.write(('%d\n' % len(data)).encode('utf-8'))
labels = numpy.ones(len(data), dtype=int)
# adapt 1-base
data_with_label = numpy.c_[data + 1, labels]
fmt = ' '.join(['%d'] * (num + 1))
numpy.savetxt(fh, data_with_label, fmt)
fh.write(b'\nEnd\n')
return
|
[
"[email protected]"
] | |
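A round-trip sketch of the `read`/`write` pair above, assuming `numpy` is available and both functions are in scope:

```python
import numpy

# Two triangles covering the unit square.
points = numpy.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cells = {"triangle": numpy.array([[0, 1, 2], [0, 2, 3]])}

write("square.mesh", points, cells)
points2, cells2, _, _, _ = read("square.mesh")

assert numpy.allclose(points, points2)
assert numpy.array_equal(cells["triangle"], cells2["triangle"])
```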
da978648cea65bbf38b0bdafda58e143903f1c6e
|
5c36cf1f219f8a4f665f68346951d8f92b40eb28
|
/BUILD/doc/source/conf.py
|
b20254e88f9b52d9134c5fb4d06239d392f139ca
|
[
"MIT"
] |
permissive
|
c1rdan/pytan
|
33fbee6bf51a69479c0a8bca54b95f72ad69c2b1
|
5e537a6dcf4136e3b9c3905a39f073396e7f044f
|
refs/heads/master
| 2020-04-02T14:21:37.339108 | 2018-10-25T12:07:25 | 2018-10-25T12:07:25 | 154,521,218 | 1 | 0 |
MIT
| 2018-10-24T15:04:36 | 2018-10-24T15:04:35 | null |
UTF-8
|
Python
| false | false | 10,135 |
py
|
# -*- coding: utf-8 -*-
#
# PyTan documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 3 05:16:49 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import imp
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
my_file = os.path.abspath(__file__)
my_dir = os.path.dirname(my_file)
root_dir = os.path.join(my_dir, os.pardir, os.pardir, os.pardir)
root_dir = os.path.abspath(root_dir)
lib_dir = os.path.join(root_dir, 'lib')
test_dir = os.path.join(root_dir, 'test')
path_adds = [my_dir, lib_dir, test_dir]
for aa in path_adds:
if aa not in sys.path:
sys.path.append(aa)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'numpydoc',
]
# autodoc_default_flags = ['members', 'show-inheritance']
autosummary_generate = True
numpydoc_show_class_members = False
numpydoc_class_members_toctree = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = u'2015, Tanium Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
pyscript_path = os.path.join(lib_dir, 'pytan', '__init__.py')
a = imp.load_source('a', pyscript_path)
version = a.__version__
release = version
project = u'PyTan v{}'.format(version)
# The short X.Y version.
# version = '1.0.4'
# The full version, including alpha/beta/rc tags.
# release = '1.0.4'
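# A hedged alternative to the imp.load_source call above: parse
# __version__ out of the file without executing it (useful when the
# package pulls in heavy imports). Sketch only, not part of this config:
# import re
# with open(pyscript_path) as fh:
#     version = re.search(
#         r"__version__\s*=\s*['\"]([^'\"]+)['\"]", fh.read()).group(1)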
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'sphinxdoc'
html_theme = "sphinx_rtd_theme"
html_theme_path = ["_themes", ]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
html_domain_indices = ['py-modindex']
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTandoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
    'preamble': r'''\usepackage{enumitem}
\setlistdepth{99}''',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyTan.tex', u'PyTan Documentation',
u'Jim Olsen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pytan', u'PyTan Documentation',
[u'Jim Olsen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyTan', u'PyTan Documentation',
u'Jim Olsen', 'PyTan', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
|
[
"[email protected]"
] | |
22b155a60cc9f26f8fbcbfb19da779853da72f7b
|
fc7cad490cb774d769c1b463ac6d1d9a8ea97024
|
/pages/tests/test_views.py
|
1769e7849fc404679d64f326e2eea26408bcaedd
|
[] |
no_license
|
Aviemusca/curriculum-dev
|
c301915532353836cb085130fd12e2734da4b956
|
691a6536718ef496ac603b1c8daee7508b3e8ff2
|
refs/heads/master
| 2022-12-26T20:56:55.031344 | 2020-10-01T08:11:49 | 2020-10-01T08:11:49 | 297,643,769 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,158 |
py
|
from django.test import SimpleTestCase
from django.urls import reverse
class HomePageViewTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_home_view_url_by_name(self):
response = self.client.get(reverse('pages:home'))
self.assertEqual(response.status_code, 200)
def test_home_view_template(self):
response = self.client.get(reverse('pages:home'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
class AboutPageViewTests(SimpleTestCase):
def test_about_page_status_code(self):
response = self.client.get('/about/')
self.assertEqual(response.status_code, 200)
def test_about_view_url_by_name(self):
response = self.client.get(reverse('pages:about'))
self.assertEqual(response.status_code, 200)
def test_about_view_template(self):
response = self.client.get(reverse('pages:about'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'about.html')
|
[
"[email protected]"
] | |
b6a3f94427f43ae734fb20b7e2c1c4f6c926b604
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/1ed4e86933efe88eca97680f436c5548265533bfbc196a385877104ccb45103b/_cython_0_29_11.py
|
091c1badfe884dc8faa5756d595898a85fb668e4
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,290 |
py
|
# encoding: utf-8
# module _cython_0_29_11
# from C:\Users\Doly\Anaconda3\lib\site-packages\skimage\measure\_marching_cubes_lewiner_cy.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# classes
class cython_function_or_method(object):
def __call__(self, *args, **kwargs): # real signature unknown
""" Call self as a function. """
pass
def __get__(self, *args, **kwargs): # real signature unknown
""" Return an attribute of instance, which is of type owner. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
func_closure = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_code = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_defaults = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_dict = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_doc = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_globals = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
func_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__annotations__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__closure__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__code__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__defaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__globals__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__kwdefaults__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__self__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'cython_function_or_method' objects>, '__call__': <slot wrapper '__call__' of 'cython_function_or_method' objects>, '__get__': <slot wrapper '__get__' of 'cython_function_or_method' objects>, '__reduce__': <method '__reduce__' of 'cython_function_or_method' objects>, '__module__': <member '__module__' of 'cython_function_or_method' objects>, 'func_doc': <attribute 'func_doc' of 'cython_function_or_method' objects>, '__doc__': <attribute '__doc__' of 'cython_function_or_method' objects>, 'func_name': <attribute 'func_name' of 'cython_function_or_method' objects>, '__name__': <attribute '__name__' of 'cython_function_or_method' objects>, '__qualname__': <attribute '__qualname__' of 'cython_function_or_method' objects>, '__self__': <attribute '__self__' of 'cython_function_or_method' objects>, 'func_dict': <attribute 'func_dict' of 'cython_function_or_method' objects>, '__dict__': <attribute '__dict__' of 'cython_function_or_method' objects>, 'func_globals': <attribute 'func_globals' of 'cython_function_or_method' objects>, '__globals__': <attribute '__globals__' of 'cython_function_or_method' objects>, 'func_closure': <attribute 'func_closure' of 'cython_function_or_method' objects>, '__closure__': <attribute '__closure__' of 'cython_function_or_method' objects>, 'func_code': <attribute 'func_code' of 'cython_function_or_method' objects>, '__code__': <attribute '__code__' of 'cython_function_or_method' objects>, 'func_defaults': <attribute 'func_defaults' of 'cython_function_or_method' objects>, '__defaults__': <attribute '__defaults__' of 'cython_function_or_method' objects>, '__kwdefaults__': <attribute '__kwdefaults__' of 'cython_function_or_method' objects>, '__annotations__': <attribute '__annotations__' of 'cython_function_or_method' objects>})"
__name__ = 'cython_function_or_method'
__qualname__ = 'cython_function_or_method'
|
[
"[email protected]"
] | |
6751ec11b96160d04f89626642ceb462999b1053
|
ab621c65fc91f5194c4032d68e750efaa5f85682
|
/pabi_asset_management/wizard/account_asset_compute.py
|
d6cacc753c4c8d1e7b206c84a6b2f2cc5727aea3
|
[] |
no_license
|
pabi2/pb2_addons
|
a1ca010002849b125dd89bd3d60a54cd9b9cdeef
|
e8c21082c187f4639373b29a7a0905d069d770f2
|
refs/heads/master
| 2021-06-04T19:38:53.048882 | 2020-11-25T03:18:24 | 2020-11-25T03:18:24 | 95,765,121 | 6 | 15 | null | 2022-10-06T04:28:27 | 2017-06-29T10:08:49 |
Python
|
UTF-8
|
Python
| false | false | 1,524 |
py
|
# -*- coding: utf-8 -*-
import ast
from openerp import models, fields, api, _
class AccountAssetCompute(models.Model): # Change to a Model
_inherit = 'account.asset.compute'
_rec_name = 'id'
_order = 'id desc'
id = fields.Integer(
string='ID',
readonly=True,
)
period_id = fields.Many2one(
readonly=True,
states={'draft': [('readonly', False)]},
)
state = fields.Selection(
[('draft', 'Draft'),
('done', 'Done')],
string='Status',
readonly=True,
default='draft',
)
move_ids = fields.Many2many(
'account.move',
'asset_compute_account_move_rel',
'compute_id', 'move_id',
string='Journal Entries',
readonly=True,
)
@api.multi
def asset_compute(self):
res = super(AccountAssetCompute, self).asset_compute()
domain = ast.literal_eval(res['domain'])
move_ids = domain[0][2]
self.write({'move_ids': [(6, 0, move_ids)],
'state': 'done'})
return True
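    # Aside (illustrative, not project code): the (6, 0, move_ids) triplet
    # above is Odoo's many2many "replace all links" command. Other common
    # ORM link commands, for comparison:
    #     rec.write({'move_ids': [(4, move_id)]})   # link one move
    #     rec.write({'move_ids': [(3, move_id)]})   # unlink one move
    #     rec.write({'move_ids': [(5, 0, 0)]})      # unlink all moves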
@api.multi
def open_entries(self):
self.ensure_one()
return {
'name': _("Journal Entries"),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'account.move',
'view_id': False,
'type': 'ir.actions.act_window',
'context': self._context,
'nodestroy': True,
'domain': [('id', 'in', self.move_ids.ids)],
}
|
[
"[email protected]"
] | |
0b2dfc4a3aeb2cffa837508f7aeee51394e57bd1
|
2cfa0cd5e016d81ecdd3f643e95bd6382652f1ab
|
/toTheMoon/leetcode_139_WordBreak.py
|
64604808320a3a83f58121a9a3c08cfb7d26d7ca
|
[
"MIT"
] |
permissive
|
jercas/offer66-leetcode-newcode
|
b863871840875cc38e0310b1e20ccaa4040ea134
|
a2e5256f27dbfb23fc34119fc857cd9b00e28c03
|
refs/heads/master
| 2020-05-07T17:43:43.326326 | 2019-10-24T12:52:32 | 2019-10-24T12:52:32 | 180,738,543 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,094 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 20:32:31 2019
@author: jercas
"""
"""
leetcode-139: Word Break MEDIUM
'Dynamic programming'
Given a non-empty string s and a dictionary wordDict containing a list of
non-empty words, determine whether s can be segmented into a
space-separated sequence of one or more dictionary words.
Notes:
    Dictionary words may be reused in the segmentation.
    You may assume the dictionary contains no duplicate words.
Hint:
    (1) Let dp[i] denote whether the prefix s[0:i] can be segmented
        (True if it can, False otherwise).
    (2) To compute dp[i], check dp[i - k] together with whether the
        substring s[i - k: i] is in wordDict, where k is the length of
        some word in wordDict; iterating over all word lengths suffices.
    (3) Collect the word lengths in wordDict and sort them up front to
        simplify the later computation.
"""
class Solution(object):
def wordBreak1(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
if len(s) == 0 or not wordDict:
return False
max_stride = max([len(x) for x in wordDict])
res = [0] * (len(s) + 1)
res[0] = 1
for i in range(1, len(s) + 1):
            for j in range(max(0, i - max_stride), i):
if res[j] == 1 and s[j:i] in wordDict:
res[i] = 1
if res[-1] == 1:
return True
else:
return False
def wordBreak2(self, s, wordDict):
"""
:type s: str
:type wordDict: List[str]
:rtype: bool
"""
words = set(wordDict)
lengths = sorted({len(w) for w in words})
dp = [False] * (len(s) + 1)
dp[0] = True
for i in range(1, len(s) + 1):
for k in lengths:
if not dp[i] and i - k >= 0:
dp[i] = (dp[i - k] and s[i - k: i] in words)
#print(i, dp[i])
#print(dp)
return dp[-1]
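# A hedged top-down alternative (illustrative sketch, not part of the
# original solution): memoize which suffix starts are segmentable.
def word_break_memo(s, wordDict):
    from functools import lru_cache  # Python 3
    words = set(wordDict)
    @lru_cache(maxsize=None)
    def ok(i):
        # ok(i) is True when the suffix s[i:] can be segmented
        if i == len(s):
            return True
        return any(s[i:j] in words and ok(j)
                   for j in range(i + 1, len(s) + 1))
    return ok(0)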
if __name__ == "__main__":
s = ["leetcode", "applepenapple", "catsandog", "cars"]
wordDict = [["leet", "code"], ["apple", "pen"], ["cats", "dog", "sand", "and", "cat"], ["car", "ca", "rs"]]
A = [True, True, False, True]
solution = Solution()
for i in range(4):
if A[i] == solution.wordBreak2(s[i], wordDict[i]):
print(s[i],"+", wordDict[i], "-->", A[i])
print('AC')
|
[
"[email protected]"
] | |
4e2e3d32be1cba99eb676eb1b0b9eb38a7c57daf
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/atcoder/_codeforces/1359_d.py
|
e4583d55e18b587b4ff08fba68b0847da062511c
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,580 |
py
|
import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
from pprint import pprint
import sys
input = sys.stdin.readline
n = int(input())
dat = list(map(int, input().split()))
s = [0] * (n+1)
for i in range(n):
        s[i+1] = s[i] + dat[i]
resval = -999999999999999999999
res = 0
for i in range(n):
vmin = dat[i]
for j in range(i+1, n):
v = s[j+1] - s[i]
vmin = max(vmin, dat[j])
tmp = v - vmin
resval = max(resval, tmp)
#print(i, j, v, vmin, tmp)
if resval < 0:
print(0)
else:
print(resval)
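# A hedged faster sketch (not used by the tests below): Codeforces 1359D
# bounds |a_i| <= 30, so one can fix a cap m on the segment maximum, run
# Kadane's algorithm over elements <= m, and score each running sum as
# sum - m. Segments whose true maximum is smaller are covered when the
# loop reaches that smaller cap.
def resolve_fast(dat):
    best = 0
    for m in range(1, 31):  # candidate segment maximum
        cur = 0
        for v in dat:
            if v > m:
                cur = 0  # segment must not contain values above the cap
            else:
                cur = max(0, cur + v)
                best = max(best, cur - m)
    return best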
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """5
5 -2 10 -1 4"""
output = """6"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """8
5 2 5 3 -30 -30 6 9"""
output = """10"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """3
-10 6 -15"""
output = """0"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main()
|
[
"[email protected]"
] | |
369cc333ac5f443e085115379b09a143723492e7
|
936c893f29a7e3c99450b49512d5ad6dd7e63d12
|
/api/migrations/0006_codesubbietype.py
|
7a09f80fdcddf9e4a90dccbe52897390cab09456
|
[] |
no_license
|
pavithra994/cormack_dev
|
0d3f5f794173013f3219c786b83779df2f2ae344
|
78927eaeea27320908b117aa50380ddacd6e2662
|
refs/heads/master
| 2023-04-09T22:34:33.746134 | 2021-04-13T20:12:08 | 2021-04-13T20:12:08 | 357,628,147 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,504 |
py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Ocom Software- All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Ocom Software <[email protected]>, 2019
#
#
# Generated by Django 1.10.7 on 2017-12-12 04:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('api', '0005_hash'),
]
operations = [
migrations.CreateModel(
name='CodeSubbieType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modified_date', models.DateTimeField(auto_now=True, null=True)),
('created_date', models.DateTimeField(auto_now_add=True)),
('active_start_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='Active Start Date')),
('active_end_date', models.DateTimeField(blank=True, null=True, verbose_name='Active End Date')),
('description', models.TextField()),
('code', models.CharField(blank=True, max_length=255, null=True, unique=True)),
],
options={
'verbose_name_plural': 'Subbie Types',
'db_table': 'code_subbie_type',
'verbose_name': 'Subbie Type',
},
),
]
|
[
"[email protected]"
] | |
b19af4145b02f7ea5d7e947944ec9842a0a92632
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02886/s480047151.py
|
182b8aad58af1035a08c56cbe0d021cb65d131cb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 186 |
py
|
N=int(input())
List = list(map(int, input().split()))
wa = 0
for i in range(N):
for j in range(N):
        if j != i:
            wa += List[i] * List[j]
wa = wa // 2
print(wa)
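# An O(N) alternative (sketch): for integers,
#     sum_{i<j} a_i * a_j = ((sum a)**2 - sum(a*a)) // 2,
# since the squared total counts every ordered pair once plus the squares.
# The double loop above could therefore be replaced by:
#     print((sum(List) ** 2 - sum(a * a for a in List)) // 2)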
|
[
"[email protected]"
] | |
8d2091e6ed66f99a803f6b2852b1ad75f6b5c9b0
|
cca752de5ce7f2ce2a225a4d67fc05f917716572
|
/pyemma/coordinates/clustering/assign.py
|
62de35890757e6f5f8dd27392fac2db8b2345893
|
[
"BSD-2-Clause"
] |
permissive
|
kziolkowska/PyEMMA
|
aef5cf697c4d470e380d888e87d4ec81197f3651
|
358edf630f02a893795c41f57bb8ae2843510444
|
refs/heads/master
| 2021-01-16T22:46:03.817339 | 2015-02-23T07:48:46 | 2015-02-23T07:48:46 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,655 |
py
|
'''
Created on 18.02.2015
@author: marscher
'''
from pyemma.coordinates.clustering.interface import AbstractClustering
from pyemma.msm.io import read_matrix
import numpy as np
class AssignCenters(AbstractClustering):
"""Assigns given (precalculated) cluster centers. If you already have
cluster centers from somewhere, you use this class to assign your data to it.
Parameters
----------
clustercenters : path to file (csv) or ndarray
cluster centers to use in assignment of data
Examples
--------
Assuming you have stored your centers in a CSV file:
>>> from pyemma.coordinates.clustering import AssignCenters
>>> from pyemma.coordinates import discretizer
>>> reader = ...
>>> assign = AssignCenters('my_centers.dat')
>>> disc = discretizer(reader, cluster=assign)
>>> disc.run()
"""
def __init__(self, clustercenters):
super(AssignCenters, self).__init__()
        if isinstance(clustercenters, str):
            clustercenters = read_matrix(clustercenters)
        self.clustercenters = clustercenters
assert isinstance(self.clustercenters, np.ndarray)
def param_add_data(self, X, itraj, t, first_chunk, last_chunk_in_traj,
last_chunk, ipass, Y=None):
# discretize all
if t == 0:
n = self.data_producer.trajectory_length(itraj)
self.dtrajs.append(np.empty(n, dtype=int))
L = np.shape(X)[0]
# TODO: optimize: assign one chunk at once
for i in xrange(L):
self.dtrajs[itraj][i + t] = self.map(X[i])
if last_chunk:
return True
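# A hedged sketch for the "assign one chunk at once" TODO above, assuming
# self.map() performs nearest-center assignment under the Euclidean
# metric, which a vectorized argmin over pairwise distances reproduces:
#     dists = np.linalg.norm(
#         X[:, None, :] - self.clustercenters[None, :, :], axis=2)
#     self.dtrajs[itraj][t:t + L] = np.argmin(dists, axis=1)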
|
[
"[email protected]"
] | |
5fbe51365e416484320375db8432cd15c9a6a8f0
|
ea9b8accf7040cc7d463cb162c798e9675f16533
|
/edittask
|
3e3fb3f7085e5c25fa4576cfc0f4486dba8ed4d0
|
[] |
no_license
|
thomasballinger/ballingercalendar
|
d51619310cc9b39ff343547366caa764ee86404b
|
6ebaa8b97d6fb3a393ce1a7d988e22a1daaaec49
|
refs/heads/master
| 2021-01-10T21:04:52.684282 | 2011-05-27T21:06:44 | 2011-05-27T21:06:44 | 32,118,158 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,963 |
#!/usr/bin/python
# edittask
# being updated for use with spreadsheet-based tasks
# [email protected]
import time
import os, sys, optparse
import datetime, parse
import gssltask as task
import calendarhours as hours
origDir = os.getcwd()
scriptFile = os.path.abspath(__file__)
scriptDir = os.path.dirname(scriptFile)
import cmd
import pretty
from pprint import pprint
class EditTasksCLI(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = ':) '
self.selected_task = None
self.task_list = task.createTasks()
def do_updateLocalTasklist(self, arg):
self.task_list = task.createTasks()
def do_exit(self, arg):
sys.exit(0)
def do_quit(self, arg):
sys.exit(0)
def do_EOF(self, arg):
print('')
return True
def do_prettyHours(self, arg):
if not arg:
return False
splitargs = arg.split()
(ds1, ds2) = splitargs[:2]
if len(splitargs) == 3:
filename = splitargs[2]
return pretty.showWeekly(ds1, ds2, filename=filename)
return pretty.showWeekly(ds1, ds2)
def do_listtasks(self, arg):
print self.task_list
def do_task(self, arg):
if arg:
arg = arg.replace('_',' ')
l = [x for x in self.task_list if x.name == arg]
            if not l:
                l = [x for x in self.task_list
                     if x.name.replace('-', '_').replace("'", '') == arg]
            if not l:
                print('no such task')
                return
self.selected_task = l[0]
print(self.selected_task)
elif self.selected_task:
print(self.selected_task)
else:
print('select a task, or create a new one')
def complete_task(self, text, line, beginindex, endindex):
if not text:
a = [x.name.replace(' ','_').replace('-','_').replace("'",'') for x in self.task_list]
return a
else:
a = [x.name.replace(' ','_').replace('-','_').replace("'",'') for x in [t for t in self.task_list if text.lower() in t.name.replace(' ','_').replace("'",'').lower()]]
return a
def do_rename(self,arg):
if not self.selected_task: print('choose a task first'); return
self.selected_task.name = arg.replace('_',' ')
self.selected_task.put()
self.task_list = task.createTasks()
def do_newtask(self,arg):
if not arg:
arg = raw_input('task name:')
if not arg:
print('nm')
return False
t = task.newTask(arg.replace('_',' '))
self.selected_task = t
time.sleep(1)
def do_description(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print('description:'+self.selected_task.description)
return
self.selected_task.description = arg
self.selected_task.put()
self.task_list = task.createTasks()
def do_timeLeft(self, arg):
"""Displays time left before a task is due, """
if not self.selected_task: print('choose a task first'); return
self.selected_task.timespent = hours.get_hours_worked(self.selected_task.id)
overdue = self.selected_task.duedate - datetime.datetime.now()
left = self.selected_task.estimatedtime - self.selected_task.timespent
if self.selected_task.iscompleted:
print 'task completed.'
print 'estimated time for task: ', self.selected_task.estimatedtime
print 'time spent on task: ', self.selected_task.timespent
else:
if overdue < datetime.timedelta(0):
print 'task overdue by: ', abs(overdue)
else:
print 'time until task due: ', overdue
if left < datetime.timedelta(0):
print 'task is overbudget by: ', abs(left)
else:
print 'estimated time to complete', left
print 'estimated time for task: ', self.selected_task.estimatedtime
print 'time spent so far: ', self.selected_task.timespent
def do_due(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'due date:',self.selected_task.duedate
return
time = parse.parseDate(arg)
self.selected_task.duedate = time
print 'due date:',time
self.selected_task.put()
self.task_list = task.createTasks()
def do_assigner(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'assigner:',self.selected_task.assigner
return
self.selected_task.assigner = arg
self.selected_task.put()
self.task_list = task.createTasks()
def do_whose(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'whose:',self.selected_task.whose
return
self.selected_task.whose = arg
self.selected_task.put()
self.task_list = task.createTasks()
def do_priority(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'priority:',self.selected_task.priority
return
if not -1<int(arg)<10:
print('bad priority value')
return
self.selected_task.priority = int(arg)
self.selected_task.put()
self.task_list = task.createTasks()
    def do_estimatedTime(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'estimated time:',self.selected_task.estimatedtime
return
timedelta = parse.parseTimedelta(arg)
self.selected_task.estimatedtime = timedelta
self.selected_task.put()
self.task_list = task.createTasks()
print 'estimated time:',self.selected_task.estimatedtime
def do_timespent(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
            print 'time spent:', self.selected_task.timespent
            return
        interval = parse.parseTimeInterval(arg)
        self.selected_task.timeSpend.append(interval)
        self.selected_task.put()
        self.task_list = task.createTasks()
        print 'time spent:', self.selected_task.timespent
def do_wait(self, arg):
if not self.selected_task: print('choose a task first'); return
if not arg:
print 'waits:',self.selected_task.waits
return
self.selected_task.waits.append(task.Wait())
self.selected_task.waits[-1].whatFor = arg
        self.selected_task.put()
print 'waits:',self.selected_task.waits
def do_appointment(self, arg):
if not self.selected_task: print('choose a task first'); return
self.selected_task.isappointment = True
self.selected_task.put()
self.task_list = task.createTasks()
print 'task is now an appointment'
def do_notAppointment(self, arg):
if not self.selected_task: print('choose a task first'); return
self.selected_task.isappointment = False
self.selected_task.put()
self.task_list = task.createTasks()
print 'task is now not an appointment'
def do_notComplete(self, arg):
if not self.selected_task: print('choose a task first'); return
self.selected_task.iscompleted= False
self.selected_task.put()
self.task_list = task.createTasks()
def do_complete(self, arg):
if not self.selected_task: print('choose a task first'); return
self.selected_task.iscompleted= True
self.selected_task.put()
self.task_list = task.createTasks()
def do_showCurrentTask(self, arg):
if not self.selected_task: print 'select a task first'; return
t = self.selected_task
print(t.name)
for (label,prop) in zip(
['desc:','due:','assigned by:','priority:','time estimate:','time spent','start time','waits','is appointment:','is complete:'],
[t.description, t.duedate, t.assigner, t.priority, t.estimatedtime, t.timespent, t.starttime, t.waitids, t.isappointment, t.iscompleted]):
if prop:
print label,prop
def do_removeTask(self, arg):
if not self.selected_task: print 'select a task first'; return
check = parse.parseBoolean(raw_input('really delete task'+self.selected_task.__repr__()+'?\n'))
if check:
task.deleteTask(self.selected_task)
self.selected_task = None
def do_listChronologicallyByDueDate(self, arg):
pprint(task.getTheStack(self.task_list))
def do_listOverdue(self, arg):
pprint([t for t in self.task_list if t.duedate < datetime.datetime.now() and not t.iscompleted])
def do_listCompleted(self, arg):
pprint([t for t in self.task_list if t.iscompleted])
def do_listInProgress(self, arg):
task_list = [t for t in self.task_list if not t.iscompleted]
task_list.sort(key=lambda t: datetime.timedelta(t.priority*365*10) + (t.duedate - datetime.datetime.now()))
task_list.reverse()
maxTaskLength = max(len(t.name) for t in task_list)
print ('task name'+' '*maxTaskLength)[:maxTaskLength]+ ' p' + ' ' + 'time left' + ' ' + 'time till due'
for t in task_list:
timeToGo = self.timedeltaToHoursString(t.estimatedtime - t.timespent)
timeTillDue = self.timedeltaToDaysString(t.duedate - datetime.datetime.now())
print (t.name + ' '*maxTaskLength)[:maxTaskLength]+' '+str(t.priority)+' '+(timeToGo+' '*10)[:10]+timeTillDue
def do_listProjects(self, arg):
task_list = [t for t in self.task_list if not t.iscompleted]
#task_list.sort(key=lambda t: datetime.timedelta(t.priority*365*10) + (t.duedate - datetime.datetime.now()))
task_list.sort(key=lambda t: t.assigner)
maxTaskLength = max(len(t.name) for t in task_list)
for t in task_list:
timeToGo = self.timedeltaToHoursString(t.estimatedtime - t.timespent)
timeTillDue = self.timedeltaToDaysString(t.duedate - datetime.datetime.now())
print timeToGo+'\t'+str(t.id)+'\t'+str(t.priority)+'\t'+(t.name + ' '*maxTaskLength)[:maxTaskLength]+'\t'+t.assigner
print '\t'+t.description+'\n'
def timedeltaToDaysString(self, td):
if abs(td) < datetime.timedelta(1):
            output = str(abs(td).seconds / 3600)+':'+('00'+str(abs(td).seconds / 60 % 60))[-2:]
else:
output = str(abs(td).days)+' days'
# output = str(abs(td).days)+' days, '+('00'+str(abs(td).seconds / 3600))[-2:]+':'+('00'+str(abs(td).seconds / 60))[-2:]
if td < datetime.timedelta(0):
# overdue timedelta
return 'overdue by '+output
else:
return output
def timedeltaToHoursString(self, td):
s = td.seconds + 24 * 60 * 60 * td.days
h = s / 60 / 60
m = int(s / 60 % 60)
return str(h)+':'+('00'+str(m))[-2:]
def do_graphTasks(self, arg):
pass
def do_workedOn(self, arg):
"doesn't do anything yet"
return parse.parseTimeInterval(arg)
def do_debug(self, arg):
"enters debug mode"
import pudb; pudb.set_trace()
def do_updateTimeSpent(self, arg):
if not self.selected_task:
print 'select a task first'
return
self.selected_task.timespent = hours.get_hours_worked(self.selected_task.id)
print self.selected_task.timespent
self.selected_task.put()
self.task_list = task.createTasks()
def do_clockHours(self, arg):
if not self.selected_task:
print 'select a task first'
return
if arg and len(arg.split()) % 2 == 0:
hours.clock_time(
self.selected_task.id,
title=self.selected_task.name,
description=self.selected_task.description,
                start_datetime=parse.parseTimeInterval(' '.join(arg.split()[:len(arg.split()) // 2])),
                end_datetime=parse.parseTimeInterval(' '.join(arg.split()[len(arg.split()) // 2:]))
)
else:
hours.clock_time(
self.selected_task.id,
title=self.selected_task.name,
description=self.selected_task.description)
print 'hours clocked'
def do_clear(self, arg):
for i in range(100):
print ''
def do_hours(self, arg):
pprint([(t.name, t.timespent) for t in self.task_list])
if __name__ == '__main__':
cli = EditTasksCLI()
cli.cmdloop()
|
[
"[email protected]"
] | ||
0b045919b9d842b5ca1abb59d93d7ecbd92dd4d6
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_156/ch23_2020_03_09_19_36_00_577839.py
|
3639b7a9881fc419db9d75cc326fb12e7b140fb9
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 138 |
py
|
x = int(input('Car speed: ?'))
if x > 80:
    print("You were fined: {0}".format((x - 80) * 5))
else:
    print("You were not fined")
|
[
"[email protected]"
] | |
6b71d61f7c665ab19ef6b4a329625f9dd05d5188
|
b6c7f367306f8f3d9fad7551810c68b392a1b879
|
/omoide/tests/unit/infra/test_walking.py
|
9dd3fab1ae1c23cbaf0f50c2bc112f8e5e83b910
|
[
"MIT"
] |
permissive
|
TaXeH/Omoide
|
c96ef35b1394125fc98367e8a9ef1674718e7e9e
|
8ccc9d47e802433bb2de21ff930e6630658cd5e3
|
refs/heads/main
| 2023-07-18T12:00:15.469707 | 2021-08-28T11:37:23 | 2021-08-28T11:37:23 | 400,773,814 | 0 | 0 |
MIT
| 2021-08-28T11:17:55 | 2021-08-28T11:17:55 | null |
UTF-8
|
Python
| false | false | 1,110 |
py
|
"""Tests.
"""
import tempfile
from unittest import mock
import pytest
from omoide import infra
@pytest.fixture()
def filesystem():
return infra.Filesystem()
def test_walk(filesystem):
with tempfile.TemporaryDirectory() as tmp_dir:
fake_stdout = mock.Mock()
path_1 = filesystem.join(tmp_dir, 'source_1')
path_2 = filesystem.join(tmp_dir, 'source_1', 'migration_1')
path_3 = filesystem.join(tmp_dir, 'source_1', 'migration_2')
path_4 = filesystem.join(tmp_dir, 'source_2', 'migration_3')
path_5 = filesystem.join(tmp_dir, 'source_2', 'migration_4')
for path in (path_1, path_2, path_3, path_4, path_5):
filesystem.ensure_folder_exists(path, fake_stdout)
gen = infra.walk(tmp_dir, filesystem,
branch='source_2', leaf='migration_3')
assert list(gen) == [('source_2',
'migration_3',
filesystem.join(tmp_dir,
'source_2',
'migration_3'))]
|
[
"[email protected]"
] | |
e2453c1ab2c4cb5b531f46e966480c82014a35bf
|
450fc27c8c11eb3ffe7c764081c048d506a7fdea
|
/src/py_script.py
|
26f9d3ec788457a4b83465a273dc735636edc2c5
|
[] |
no_license
|
rduvalwa5/SysExamples
|
6228fd93d4cec66e189ff3561af5c4e062349ea5
|
e47e8843b10c06f4f02d94d89a3f75b133c1d617
|
refs/heads/master
| 2020-04-06T06:28:07.657630 | 2016-10-12T06:37:20 | 2016-10-12T06:37:20 | 70,551,976 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 624 |
py
|
#!/usr/local/bin/python
'''
Created on Feb 19, 2016
@author: rduvalwa2
To run a script, go to the directory where the script is and execute it.
Simple Python script.
'''
print("1) To run a script, go to the directory where the script is")
print("2) make sure the system marks it as executable")
print("3) at the command line, type the script name and hit return")
print("example > ./PythonScriptExample.py")
print("4) or type python <script name> and hit return")
print("example > python PythonScriptExample.py")
import math
# Use math.pow method.
a = math.pow(2, 3)
# Use operator.
b = 2 ** 3
# Print results.
print(a)
print(b)
#Output
#8.0
#8
|
[
"[email protected]"
] | |
c8377597fc5c29bc3e200dfff5a26e53cad854ca
|
d57b51ec207002e333b8655a8f5832ed143aa28c
|
/.history/1/PyGame/game_20200606103432.py
|
a377557bcf3c7a0484507e7545a9a8cfba1b1108
|
[] |
no_license
|
yevheniir/python_course_2020
|
b42766c4278a08b8b79fec77e036a1b987accf51
|
a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b
|
refs/heads/master
| 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,306 |
py
|
# 1 - Import library
import pygame
from pygame.locals import *
import math
import random
import os
import json
# 2 - Initialize the game
pygame.init()
width, height = 640, 480
screen=pygame.display.set_mode((width, height))
keys = [False, False, False, False]
playerpos=[100,100]
acc=[0,0]
arrows=[]
badtimer=100
badtimer1=0
badguys=[[640,100]]
healthvalue=194
pygame.mixer.init()
# 3 - Load image
player = pygame.image.load("resources/images/dude.png")
grass = pygame.image.load("resources/images/grass.png")
castle = pygame.image.load("resources/images/castle.png")
arrow = pygame.image.load("resources/images/bullet.png")
badguyimg1 = pygame.image.load("resources/images/badguy.png")
badguyimg=badguyimg1
healthbar = pygame.image.load("resources/images/healthbar.png")
health = pygame.image.load("resources/images/health.png")
gameover = pygame.image.load("resources/images/gameover.png")
youwin = pygame.image.load("resources/images/youwin.png")
# 3.1 - Load audio
hit = pygame.mixer.Sound("resources/audio/explode.wav")
enemy = pygame.mixer.Sound("resources/audio/enemy.wav")
shoot = pygame.mixer.Sound("resources/audio/shoot.wav")
hit.set_volume(0.05)
enemy.set_volume(0.05)
shoot.set_volume(0.05)
pygame.mixer.music.load('resources/audio/moonlight.wav')
pygame.mixer.music.play(-1, 0.0)
pygame.mixer.music.set_volume(0.25)
# 4 - keep looping through
running = 1
exitcode = 0
while running:
badtimer-=1
# 5 - clear the screen before drawing it again
screen.fill(0)
# 6 - draw the player on the screen at X:100, Y:100
for x in range(width//grass.get_width()+1):
for y in range(height//grass.get_height()+1):
screen.blit(grass,(x*100,y*100))
# initialize font; must be called after 'pygame.init()' to avoid 'Font not Initialized' error
myfont = pygame.font.SysFont("monospace", 15)
mpcs = []
dir_path = os.path.dirname(os.path.realpath(__file__)) + "/../save.json"
with open("") as json_file:
mpcs = json.load(json_file).map(lambda x: x.name)
step = height // len(mpcs)
for x in range(1, len(mpcs)):
        label = myfont.render(mpcs[x], 1, (255,255,0))
        screen.blit(castle,(0,x*step))
        # draw the rendered name beside its castle (assumed intent; the
        # original computed `label` but never blitted it)
        screen.blit(label,(castle.get_width()+5,x*step))
screen.blit(castle,(0,30))
screen.blit(castle,(0,135))
screen.blit(castle,(0,240))
screen.blit(castle,(0,345 ))
# 6.1 - Set player position and rotation
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32),position[0]-(playerpos[0]+26))
playerrot = pygame.transform.rotate(player, 360-angle*57.29)
playerpos1 = (playerpos[0]-playerrot.get_rect().width/2, playerpos[1]-playerrot.get_rect().height/2)
screen.blit(playerrot, playerpos1)
# 6.2 - Draw arrows
for bullet in arrows:
index=0
velx=math.cos(bullet[0])*10
vely=math.sin(bullet[0])*10
bullet[1]+=velx
bullet[2]+=vely
if bullet[1]<-64 or bullet[1]>640 or bullet[2]<-64 or bullet[2]>480:
arrows.pop(index)
index+=1
for projectile in arrows:
arrow1 = pygame.transform.rotate(arrow, 360-projectile[0]*57.29)
screen.blit(arrow1, (projectile[1], projectile[2]))
# 6.3 - Draw badgers
if badtimer==0:
badguys.append([640, random.randint(50,430)])
badtimer=100-(badtimer1*2)
if badtimer1>=35:
badtimer1=35
else:
badtimer1+=5
index=0
for badguy in badguys:
if badguy[0]<-64:
badguys.pop(index)
badguy[0]-=5
# 6.3.1 - Attack castle
badrect=pygame.Rect(badguyimg.get_rect())
badrect.top=badguy[1]
badrect.left=badguy[0]
if badrect.left<64:
hit.play()
healthvalue -= random.randint(5,20)
badguys.pop(index)
#6.3.2 - Check for collisions
index1=0
for bullet in arrows:
bullrect=pygame.Rect(arrow.get_rect())
bullrect.left=bullet[1]
bullrect.top=bullet[2]
if badrect.colliderect(bullrect):
enemy.play()
acc[0]+=1
badguys.pop(index)
arrows.pop(index1)
index1+=1
# 6.3.3 - Next bad guy
index+=1
for badguy in badguys:
screen.blit(badguyimg, badguy)
# 6.4 - Draw clock
font = pygame.font.Font(None, 24)
survivedtext = font.render(str((90000-pygame.time.get_ticks())/60000)+":"+str((90000-pygame.time.get_ticks())/1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[635,5]
screen.blit(survivedtext, textRect)
# 6.5 - Draw health bar
screen.blit(healthbar, (5,5))
for health1 in range(healthvalue):
screen.blit(health, (health1+8,8))
# 7 - update the screen
pygame.display.flip()
# 8 - loop through the events
for event in pygame.event.get():
# check if the event is the X button
if event.type==pygame.QUIT:
# if it is quit the game
pygame.quit()
exit(0)
if event.type == pygame.KEYDOWN:
if event.key==K_w:
keys[0]=True
elif event.key==K_a:
keys[1]=True
elif event.key==K_s:
keys[2]=True
elif event.key==K_d:
keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w:
keys[0]=False
elif event.key==pygame.K_a:
keys[1]=False
elif event.key==pygame.K_s:
keys[2]=False
elif event.key==pygame.K_d:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
shoot.play()
position=pygame.mouse.get_pos()
acc[1]+=1
arrows.append([math.atan2(position[1]-(playerpos1[1]+32),position[0]-(playerpos1[0]+26)),playerpos1[0]+32,playerpos1[1]+32])
# 9 - Move player
if keys[0]:
playerpos[1]-=5
elif keys[2]:
playerpos[1]+=5
if keys[1]:
playerpos[0]-=5
elif keys[3]:
playerpos[0]+=5
#10 - Win/Lose check
if pygame.time.get_ticks()>=90000:
running=0
exitcode=1
if healthvalue<=0:
running=0
exitcode=0
if acc[1]!=0:
accuracy=acc[0]*1.0/acc[1]*100
else:
accuracy=0
# 11 - Win/lose display
if exitcode==0:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (255,0,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(gameover, (0,0))
screen.blit(text, textRect)
else:
pygame.font.init()
font = pygame.font.Font(None, 24)
text = font.render("Accuracy: "+str(accuracy)+"%", True, (0,255,0))
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
screen.blit(youwin, (0,0))
screen.blit(text, textRect)
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
|
[
"[email protected]"
] | |
914e9bb4f96c7ca489cc4fbe7f9151479e95c700
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1705+398/sdB_PG_1705+398_lc.py
|
fb5a2c9a6010f68b8235076c5ed37319371ad64e
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 346 |
py
|
from gPhoton.gAperture import gAperture
def main():
	gAperture(band="NUV", skypos=[256.681792,39.732494], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_1705+398/sdB_PG_1705+398_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
f062e5a2033afeb2b59a95e2eeb47f1166817c97
|
56ade096db1fe376ee43d38c96b43651ee07f217
|
/023. Merge k Sorted Lists/Python/Solution.py
|
955cbe788c96145af7c9fd5e35bd21a77b6ede15
|
[] |
no_license
|
xiaole0310/leetcode
|
c08649c3f9a9b04579635ee7e768fe3378c04900
|
7a501cf84cfa46b677d9c9fced18deacb61de0e8
|
refs/heads/master
| 2020-03-17T05:46:41.102580 | 2018-04-20T13:05:32 | 2018-04-20T13:05:32 | 133,328,416 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,196 |
py
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
def partition(lists, start, end):
if start == end:
return lists[start]
if start < end:
mid = (start + end) // 2
list_1 = partition(lists, start, mid)
list_2 = partition(lists, mid + 1, end)
return merge(list_1, list_2)
return None
def merge(list_1, list_2):
fake_head = ListNode(0)
current = fake_head
while list_1 and list_2:
if list_1.val < list_2.val:
current.next = list_1
list_1 = list_1.next
else:
current.next = list_2
list_2 = list_2.next
current = current.next
current.next = list_1 if list_1 else list_2
return fake_head.next
return partition(lists, 0, len(lists) - 1)
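# A hedged alternative (sketch, assuming the same ListNode class as in the
# comment above): keep the current head of each list in a min-heap. The
# (val, index, node) tuples make ties on val compare the index, never the
# nodes themselves.
import heapq
def merge_k_lists_heap(lists):
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    fake_head = current = ListNode(0)
    while heap:
        val, i, node = heapq.heappop(heap)
        current.next = node
        current = node
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return fake_head.next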
|
[
"[email protected]"
] | |
7fa5192bb918b94bae014f603e2807d7b5949b69
|
43eb7f8581a8dbfa1298b4e6d84fc7b7a552e335
|
/python/kserve/kserve/models/v1beta1_xg_boost_spec.py
|
afd2ad2cb98960f28ef7270b898f7d8bcede6bd5
|
[
"Apache-2.0"
] |
permissive
|
Suresh-Nakkeran/kserve
|
c2d114f7258a70b4c8ddeb8ee8c584d4eee0f81b
|
d3910e0fc6af4bf73156a53bd912d6e4acc87533
|
refs/heads/master
| 2023-07-29T00:17:28.900100 | 2021-09-11T08:04:54 | 2021-09-11T08:04:54 | 406,243,335 | 0 | 0 |
Apache-2.0
| 2021-09-14T05:59:05 | 2021-09-14T05:59:04 | null |
UTF-8
|
Python
| false | false | 32,713 |
py
|
# Copyright 2020 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KServe
Python SDK for KServe # noqa: E501
The version of the OpenAPI document: v0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kserve.configuration import Configuration
class V1beta1XGBoostSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'args': 'list[str]',
'command': 'list[str]',
'env': 'list[V1EnvVar]',
'env_from': 'list[V1EnvFromSource]',
'image': 'str',
'image_pull_policy': 'str',
'lifecycle': 'V1Lifecycle',
'liveness_probe': 'V1Probe',
'name': 'str',
'ports': 'list[V1ContainerPort]',
'protocol_version': 'str',
'readiness_probe': 'V1Probe',
'resources': 'V1ResourceRequirements',
'runtime_version': 'str',
'security_context': 'V1SecurityContext',
'startup_probe': 'V1Probe',
'stdin': 'bool',
'stdin_once': 'bool',
'storage_uri': 'str',
'termination_message_path': 'str',
'termination_message_policy': 'str',
'tty': 'bool',
'volume_devices': 'list[V1VolumeDevice]',
'volume_mounts': 'list[V1VolumeMount]',
'working_dir': 'str'
}
attribute_map = {
'args': 'args',
'command': 'command',
'env': 'env',
'env_from': 'envFrom',
'image': 'image',
'image_pull_policy': 'imagePullPolicy',
'lifecycle': 'lifecycle',
'liveness_probe': 'livenessProbe',
'name': 'name',
'ports': 'ports',
'protocol_version': 'protocolVersion',
'readiness_probe': 'readinessProbe',
'resources': 'resources',
'runtime_version': 'runtimeVersion',
'security_context': 'securityContext',
'startup_probe': 'startupProbe',
'stdin': 'stdin',
'stdin_once': 'stdinOnce',
'storage_uri': 'storageUri',
'termination_message_path': 'terminationMessagePath',
'termination_message_policy': 'terminationMessagePolicy',
'tty': 'tty',
'volume_devices': 'volumeDevices',
'volume_mounts': 'volumeMounts',
'working_dir': 'workingDir'
}
def __init__(self, args=None, command=None, env=None, env_from=None, image=None, image_pull_policy=None, lifecycle=None, liveness_probe=None, name=None, ports=None, protocol_version=None, readiness_probe=None, resources=None, runtime_version=None, security_context=None, startup_probe=None, stdin=None, stdin_once=None, storage_uri=None, termination_message_path=None, termination_message_policy=None, tty=None, volume_devices=None, volume_mounts=None, working_dir=None, local_vars_configuration=None): # noqa: E501
"""V1beta1XGBoostSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._args = None
self._command = None
self._env = None
self._env_from = None
self._image = None
self._image_pull_policy = None
self._lifecycle = None
self._liveness_probe = None
self._name = None
self._ports = None
self._protocol_version = None
self._readiness_probe = None
self._resources = None
self._runtime_version = None
self._security_context = None
self._startup_probe = None
self._stdin = None
self._stdin_once = None
self._storage_uri = None
self._termination_message_path = None
self._termination_message_policy = None
self._tty = None
self._volume_devices = None
self._volume_mounts = None
self._working_dir = None
self.discriminator = None
if args is not None:
self.args = args
if command is not None:
self.command = command
if env is not None:
self.env = env
if env_from is not None:
self.env_from = env_from
if image is not None:
self.image = image
if image_pull_policy is not None:
self.image_pull_policy = image_pull_policy
if lifecycle is not None:
self.lifecycle = lifecycle
if liveness_probe is not None:
self.liveness_probe = liveness_probe
if name is not None:
self.name = name
if ports is not None:
self.ports = ports
if protocol_version is not None:
self.protocol_version = protocol_version
if readiness_probe is not None:
self.readiness_probe = readiness_probe
if resources is not None:
self.resources = resources
if runtime_version is not None:
self.runtime_version = runtime_version
if security_context is not None:
self.security_context = security_context
if startup_probe is not None:
self.startup_probe = startup_probe
if stdin is not None:
self.stdin = stdin
if stdin_once is not None:
self.stdin_once = stdin_once
if storage_uri is not None:
self.storage_uri = storage_uri
if termination_message_path is not None:
self.termination_message_path = termination_message_path
if termination_message_policy is not None:
self.termination_message_policy = termination_message_policy
if tty is not None:
self.tty = tty
if volume_devices is not None:
self.volume_devices = volume_devices
if volume_mounts is not None:
self.volume_mounts = volume_mounts
if working_dir is not None:
self.working_dir = working_dir
@property
def args(self):
"""Gets the args of this V1beta1XGBoostSpec. # noqa: E501
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The args of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[str]
"""
return self._args
@args.setter
def args(self, args):
"""Sets the args of this V1beta1XGBoostSpec.
Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param args: The args of this V1beta1XGBoostSpec. # noqa: E501
:type: list[str]
"""
self._args = args
@property
def command(self):
"""Gets the command of this V1beta1XGBoostSpec. # noqa: E501
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:return: The command of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[str]
"""
return self._command
@command.setter
def command(self, command):
"""Sets the command of this V1beta1XGBoostSpec.
Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell # noqa: E501
:param command: The command of this V1beta1XGBoostSpec. # noqa: E501
:type: list[str]
"""
self._command = command
@property
def env(self):
"""Gets the env of this V1beta1XGBoostSpec. # noqa: E501
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:return: The env of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[V1EnvVar]
"""
return self._env
@env.setter
def env(self, env):
"""Sets the env of this V1beta1XGBoostSpec.
List of environment variables to set in the container. Cannot be updated. # noqa: E501
:param env: The env of this V1beta1XGBoostSpec. # noqa: E501
:type: list[V1EnvVar]
"""
self._env = env
@property
def env_from(self):
"""Gets the env_from of this V1beta1XGBoostSpec. # noqa: E501
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:return: The env_from of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[V1EnvFromSource]
"""
return self._env_from
@env_from.setter
def env_from(self, env_from):
"""Sets the env_from of this V1beta1XGBoostSpec.
List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. # noqa: E501
:param env_from: The env_from of this V1beta1XGBoostSpec. # noqa: E501
:type: list[V1EnvFromSource]
"""
self._env_from = env_from
@property
def image(self):
"""Gets the image of this V1beta1XGBoostSpec. # noqa: E501
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:return: The image of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1beta1XGBoostSpec.
Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets. # noqa: E501
:param image: The image of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._image = image
@property
def image_pull_policy(self):
"""Gets the image_pull_policy of this V1beta1XGBoostSpec. # noqa: E501
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:return: The image_pull_policy of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._image_pull_policy
@image_pull_policy.setter
def image_pull_policy(self, image_pull_policy):
"""Sets the image_pull_policy of this V1beta1XGBoostSpec.
Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images # noqa: E501
:param image_pull_policy: The image_pull_policy of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._image_pull_policy = image_pull_policy
@property
def lifecycle(self):
"""Gets the lifecycle of this V1beta1XGBoostSpec. # noqa: E501
:return: The lifecycle of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1Lifecycle
"""
return self._lifecycle
@lifecycle.setter
def lifecycle(self, lifecycle):
"""Sets the lifecycle of this V1beta1XGBoostSpec.
:param lifecycle: The lifecycle of this V1beta1XGBoostSpec. # noqa: E501
:type: V1Lifecycle
"""
self._lifecycle = lifecycle
@property
def liveness_probe(self):
"""Gets the liveness_probe of this V1beta1XGBoostSpec. # noqa: E501
:return: The liveness_probe of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1Probe
"""
return self._liveness_probe
@liveness_probe.setter
def liveness_probe(self, liveness_probe):
"""Sets the liveness_probe of this V1beta1XGBoostSpec.
:param liveness_probe: The liveness_probe of this V1beta1XGBoostSpec. # noqa: E501
:type: V1Probe
"""
self._liveness_probe = liveness_probe
@property
def name(self):
"""Gets the name of this V1beta1XGBoostSpec. # noqa: E501
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:return: The name of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1beta1XGBoostSpec.
Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. # noqa: E501
:param name: The name of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._name = name
@property
def ports(self):
"""Gets the ports of this V1beta1XGBoostSpec. # noqa: E501
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:return: The ports of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[V1ContainerPort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this V1beta1XGBoostSpec.
List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated. # noqa: E501
:param ports: The ports of this V1beta1XGBoostSpec. # noqa: E501
:type: list[V1ContainerPort]
"""
self._ports = ports
@property
def protocol_version(self):
"""Gets the protocol_version of this V1beta1XGBoostSpec. # noqa: E501
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:return: The protocol_version of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._protocol_version
@protocol_version.setter
def protocol_version(self, protocol_version):
"""Sets the protocol_version of this V1beta1XGBoostSpec.
Protocol version to use by the predictor (i.e. v1 or v2) # noqa: E501
:param protocol_version: The protocol_version of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._protocol_version = protocol_version
@property
def readiness_probe(self):
"""Gets the readiness_probe of this V1beta1XGBoostSpec. # noqa: E501
:return: The readiness_probe of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1Probe
"""
return self._readiness_probe
@readiness_probe.setter
def readiness_probe(self, readiness_probe):
"""Sets the readiness_probe of this V1beta1XGBoostSpec.
:param readiness_probe: The readiness_probe of this V1beta1XGBoostSpec. # noqa: E501
:type: V1Probe
"""
self._readiness_probe = readiness_probe
@property
def resources(self):
"""Gets the resources of this V1beta1XGBoostSpec. # noqa: E501
:return: The resources of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1ResourceRequirements
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1beta1XGBoostSpec.
:param resources: The resources of this V1beta1XGBoostSpec. # noqa: E501
:type: V1ResourceRequirements
"""
self._resources = resources
@property
def runtime_version(self):
"""Gets the runtime_version of this V1beta1XGBoostSpec. # noqa: E501
Runtime version of the predictor docker image # noqa: E501
:return: The runtime_version of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._runtime_version
@runtime_version.setter
def runtime_version(self, runtime_version):
"""Sets the runtime_version of this V1beta1XGBoostSpec.
Runtime version of the predictor docker image # noqa: E501
:param runtime_version: The runtime_version of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._runtime_version = runtime_version
@property
def security_context(self):
"""Gets the security_context of this V1beta1XGBoostSpec. # noqa: E501
:return: The security_context of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1SecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this V1beta1XGBoostSpec.
:param security_context: The security_context of this V1beta1XGBoostSpec. # noqa: E501
:type: V1SecurityContext
"""
self._security_context = security_context
@property
def startup_probe(self):
"""Gets the startup_probe of this V1beta1XGBoostSpec. # noqa: E501
:return: The startup_probe of this V1beta1XGBoostSpec. # noqa: E501
:rtype: V1Probe
"""
return self._startup_probe
@startup_probe.setter
def startup_probe(self, startup_probe):
"""Sets the startup_probe of this V1beta1XGBoostSpec.
:param startup_probe: The startup_probe of this V1beta1XGBoostSpec. # noqa: E501
:type: V1Probe
"""
self._startup_probe = startup_probe
@property
def stdin(self):
"""Gets the stdin of this V1beta1XGBoostSpec. # noqa: E501
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:return: The stdin of this V1beta1XGBoostSpec. # noqa: E501
:rtype: bool
"""
return self._stdin
@stdin.setter
def stdin(self, stdin):
"""Sets the stdin of this V1beta1XGBoostSpec.
Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. # noqa: E501
:param stdin: The stdin of this V1beta1XGBoostSpec. # noqa: E501
:type: bool
"""
self._stdin = stdin
@property
def stdin_once(self):
"""Gets the stdin_once of this V1beta1XGBoostSpec. # noqa: E501
        Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. # noqa: E501
:return: The stdin_once of this V1beta1XGBoostSpec. # noqa: E501
:rtype: bool
"""
return self._stdin_once
@stdin_once.setter
def stdin_once(self, stdin_once):
"""Sets the stdin_once of this V1beta1XGBoostSpec.
        Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false. # noqa: E501
:param stdin_once: The stdin_once of this V1beta1XGBoostSpec. # noqa: E501
:type: bool
"""
self._stdin_once = stdin_once
@property
def storage_uri(self):
"""Gets the storage_uri of this V1beta1XGBoostSpec. # noqa: E501
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:return: The storage_uri of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._storage_uri
@storage_uri.setter
def storage_uri(self, storage_uri):
"""Sets the storage_uri of this V1beta1XGBoostSpec.
This field points to the location of the trained model which is mounted onto the pod. # noqa: E501
:param storage_uri: The storage_uri of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._storage_uri = storage_uri
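    # Illustrative example: typical values are object-store or PVC URIs such
    # as "gs://bucket/model", "s3://bucket/model" or "pvc://claim/path"; the
    # exact schemes supported depend on the KServe storage initializer.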
@property
def termination_message_path(self):
"""Gets the termination_message_path of this V1beta1XGBoostSpec. # noqa: E501
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:return: The termination_message_path of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_path
@termination_message_path.setter
def termination_message_path(self, termination_message_path):
"""Sets the termination_message_path of this V1beta1XGBoostSpec.
Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated. # noqa: E501
:param termination_message_path: The termination_message_path of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._termination_message_path = termination_message_path
@property
def termination_message_policy(self):
"""Gets the termination_message_policy of this V1beta1XGBoostSpec. # noqa: E501
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:return: The termination_message_policy of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._termination_message_policy
@termination_message_policy.setter
def termination_message_policy(self, termination_message_policy):
"""Sets the termination_message_policy of this V1beta1XGBoostSpec.
Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. # noqa: E501
:param termination_message_policy: The termination_message_policy of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._termination_message_policy = termination_message_policy
@property
def tty(self):
"""Gets the tty of this V1beta1XGBoostSpec. # noqa: E501
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:return: The tty of this V1beta1XGBoostSpec. # noqa: E501
:rtype: bool
"""
return self._tty
@tty.setter
def tty(self, tty):
"""Sets the tty of this V1beta1XGBoostSpec.
Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. # noqa: E501
:param tty: The tty of this V1beta1XGBoostSpec. # noqa: E501
:type: bool
"""
self._tty = tty
@property
def volume_devices(self):
"""Gets the volume_devices of this V1beta1XGBoostSpec. # noqa: E501
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:return: The volume_devices of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[V1VolumeDevice]
"""
return self._volume_devices
@volume_devices.setter
def volume_devices(self, volume_devices):
"""Sets the volume_devices of this V1beta1XGBoostSpec.
volumeDevices is the list of block devices to be used by the container. # noqa: E501
:param volume_devices: The volume_devices of this V1beta1XGBoostSpec. # noqa: E501
:type: list[V1VolumeDevice]
"""
self._volume_devices = volume_devices
@property
def volume_mounts(self):
"""Gets the volume_mounts of this V1beta1XGBoostSpec. # noqa: E501
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:return: The volume_mounts of this V1beta1XGBoostSpec. # noqa: E501
:rtype: list[V1VolumeMount]
"""
return self._volume_mounts
@volume_mounts.setter
def volume_mounts(self, volume_mounts):
"""Sets the volume_mounts of this V1beta1XGBoostSpec.
Pod volumes to mount into the container's filesystem. Cannot be updated. # noqa: E501
:param volume_mounts: The volume_mounts of this V1beta1XGBoostSpec. # noqa: E501
:type: list[V1VolumeMount]
"""
self._volume_mounts = volume_mounts
@property
def working_dir(self):
"""Gets the working_dir of this V1beta1XGBoostSpec. # noqa: E501
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:return: The working_dir of this V1beta1XGBoostSpec. # noqa: E501
:rtype: str
"""
return self._working_dir
@working_dir.setter
def working_dir(self, working_dir):
"""Sets the working_dir of this V1beta1XGBoostSpec.
Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. # noqa: E501
:param working_dir: The working_dir of this V1beta1XGBoostSpec. # noqa: E501
:type: str
"""
self._working_dir = working_dir
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
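        # Walk the declared openapi_types; nested generated models (including
        # those held inside lists and dicts) are serialized through their own
        # to_dict(), plain values are copied as-is.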
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1XGBoostSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1XGBoostSpec):
return True
return self.to_dict() != other.to_dict()
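
# Usage sketch (illustrative, not part of the generated module): build a spec
# for an XGBoost predictor and serialize it. The keyword names match the
# properties above; the image and storage values are made-up placeholders.
if __name__ == "__main__":
    example = V1beta1XGBoostSpec(
        name="kserve-container",
        image="kserve/xgbserver:latest",                # hypothetical image tag
        storage_uri="gs://example-bucket/xgb-model",    # hypothetical bucket
        protocol_version="v2",
    )
    print(example.to_dict())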
authors: ["[email protected]"]

blob_id: a91d226941fc0003ace12552f033eb694ed8630a
directory_id: 3c000380cbb7e8deb6abf9c6f3e29e8e89784830
path: /venv/Lib/site-packages/cobra/modelimpl/eqpt/egrtotal15min.py
content_id: 4b39e4bc1852a11791deb8bef7fb97bc03e4298d
detected_licenses: []
license_type: no_license
repo_name: bkhoward/aciDOM
snapshot_id: 91b0406f00da7aac413a81c8db2129b4bfc5497b
revision_id: f2674456ecb19cf7299ef0c5a0887560b8b315d0
branch_name: refs/heads/master
visit_date: 2023-03-27T23:37:02.836904
revision_date: 2021-03-26T22:07:54
committer_date: 2021-03-26T22:07:54
github_id: 351855399
star_events_count: 0
fork_events_count: 0
gha_license_id / gha_event_created_at / gha_created_at / gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 42132
extension: py
content:
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class EgrTotal15min(Mo):
"""
A class that represents the most current statistics for Egress in a 15 minute sampling interval. This class updates every 5 minutes.
"""
meta = StatsClassMeta("cobra.model.eqpt.EgrTotal15min", "Egress")
counter = CounterMeta("util", CounterCategory.GAUGE, "percentage", "Egress Link Utilization")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "utilLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "utilMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "utilMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "utilAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "utilSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "utilTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "utilThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "utilTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "utilTr"
meta._counters.append(counter)
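    # Illustrative note: each CounterMeta above binds one logical counter to
    # the implicit stat properties declared later in this class, e.g. the
    # "util" gauge's IMPLICIT_AVG reading surfaces as the "utilAvg" prop.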
counter = CounterMeta("pktsRate", CounterCategory.GAUGE, "packets-per-second", "Total Egress Packets rate")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "pktsRateLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsRateSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "pktsRateTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pktsRateTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsRateTr"
meta._counters.append(counter)
counter = CounterMeta("pkts", CounterCategory.COUNTER, "packets", "Total Egress Packets")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "pktsLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "pktsCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "pktsPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "pktsMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "pktsMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "pktsAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "pktsSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "pktsBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "pktsThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "pktsTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "pktsTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "pktsRate"
meta._counters.append(counter)
counter = CounterMeta("bytesRate", CounterCategory.GAUGE, "bytes-per-second", "Total Egress Bytes rate")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "bytesRateLast"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesRateMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesRateMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesRateAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesRateSpct"
counter._propRefs[PropCategory.IMPLICIT_TOTAL] = "bytesRateTtl"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesRateThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "bytesRateTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesRateTr"
meta._counters.append(counter)
counter = CounterMeta("bytes", CounterCategory.COUNTER, "bytes", "Total Egress Bytes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "bytesLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "bytesCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "bytesPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "bytesMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "bytesMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "bytesAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "bytesSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "bytesBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "bytesThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "bytesTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "bytesTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "bytesRate"
meta._counters.append(counter)
meta.moClassName = "eqptEgrTotal15min"
meta.rnFormat = "CDeqptEgrTotal15min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current Egress stats in 15 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.mgmt.MgmtIf")
meta.parentClasses.add("cobra.model.eqpt.CpuP")
meta.parentClasses.add("cobra.model.pc.AggrIf")
meta.parentClasses.add("cobra.model.l1.PhysIf")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.eqpt.EgrTotal")
meta.rnPrefixes = [
('CDeqptEgrTotal15min', False),
]
prop = PropMeta("str", "bytesAvg", "bytesAvg", 8129, PropCategory.IMPLICIT_AVG)
prop.label = "Total Egress Bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesAvg", prop)
prop = PropMeta("str", "bytesBase", "bytesBase", 8124, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total Egress Bytes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesBase", prop)
prop = PropMeta("str", "bytesCum", "bytesCum", 8125, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total Egress Bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesCum", prop)
prop = PropMeta("str", "bytesLast", "bytesLast", 8123, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total Egress Bytes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesLast", prop)
prop = PropMeta("str", "bytesMax", "bytesMax", 8128, PropCategory.IMPLICIT_MAX)
prop.label = "Total Egress Bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMax", prop)
prop = PropMeta("str", "bytesMin", "bytesMin", 8127, PropCategory.IMPLICIT_MIN)
prop.label = "Total Egress Bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesMin", prop)
prop = PropMeta("str", "bytesPer", "bytesPer", 8126, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total Egress Bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesPer", prop)
prop = PropMeta("str", "bytesRate", "bytesRate", 8134, PropCategory.IMPLICIT_RATE)
prop.label = "Total Egress Bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRate", prop)
prop = PropMeta("str", "bytesRateAvg", "bytesRateAvg", 8153, PropCategory.IMPLICIT_AVG)
prop.label = "Total Egress Bytes rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateAvg", prop)
prop = PropMeta("str", "bytesRateLast", "bytesRateLast", 8150, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total Egress Bytes rate current value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateLast", prop)
prop = PropMeta("str", "bytesRateMax", "bytesRateMax", 8152, PropCategory.IMPLICIT_MAX)
prop.label = "Total Egress Bytes rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMax", prop)
prop = PropMeta("str", "bytesRateMin", "bytesRateMin", 8151, PropCategory.IMPLICIT_MIN)
prop.label = "Total Egress Bytes rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateMin", prop)
prop = PropMeta("str", "bytesRateSpct", "bytesRateSpct", 8154, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total Egress Bytes rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateSpct", prop)
prop = PropMeta("str", "bytesRateThr", "bytesRateThr", 8156, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total Egress Bytes rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesRateThr", prop)
prop = PropMeta("str", "bytesRateTr", "bytesRateTr", 8158, PropCategory.IMPLICIT_TREND)
prop.label = "Total Egress Bytes rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTr", prop)
prop = PropMeta("str", "bytesRateTrBase", "bytesRateTrBase", 8157, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total Egress Bytes rate trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTrBase", prop)
prop = PropMeta("str", "bytesRateTtl", "bytesRateTtl", 8155, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total Egress Bytes rate total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesRateTtl", prop)
prop = PropMeta("str", "bytesSpct", "bytesSpct", 8130, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total Egress Bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesSpct", prop)
prop = PropMeta("str", "bytesThr", "bytesThr", 8131, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total Egress Bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("bytesThr", prop)
prop = PropMeta("str", "bytesTr", "bytesTr", 8133, PropCategory.IMPLICIT_TREND)
prop.label = "Total Egress Bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTr", prop)
prop = PropMeta("str", "bytesTrBase", "bytesTrBase", 8132, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total Egress Bytes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("bytesTrBase", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "pktsAvg", "pktsAvg", 8177, PropCategory.IMPLICIT_AVG)
prop.label = "Total Egress Packets average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsAvg", prop)
prop = PropMeta("str", "pktsBase", "pktsBase", 8172, PropCategory.IMPLICIT_BASELINE)
prop.label = "Total Egress Packets baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsBase", prop)
prop = PropMeta("str", "pktsCum", "pktsCum", 8173, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total Egress Packets cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsCum", prop)
prop = PropMeta("str", "pktsLast", "pktsLast", 8171, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total Egress Packets current value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsLast", prop)
prop = PropMeta("str", "pktsMax", "pktsMax", 8176, PropCategory.IMPLICIT_MAX)
prop.label = "Total Egress Packets maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMax", prop)
prop = PropMeta("str", "pktsMin", "pktsMin", 8175, PropCategory.IMPLICIT_MIN)
prop.label = "Total Egress Packets minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsMin", prop)
prop = PropMeta("str", "pktsPer", "pktsPer", 8174, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total Egress Packets periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsPer", prop)
prop = PropMeta("str", "pktsRate", "pktsRate", 8182, PropCategory.IMPLICIT_RATE)
prop.label = "Total Egress Packets rate"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRate", prop)
prop = PropMeta("str", "pktsRateAvg", "pktsRateAvg", 8201, PropCategory.IMPLICIT_AVG)
prop.label = "Total Egress Packets rate average value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateAvg", prop)
prop = PropMeta("str", "pktsRateLast", "pktsRateLast", 8198, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Total Egress Packets rate current value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateLast", prop)
prop = PropMeta("str", "pktsRateMax", "pktsRateMax", 8200, PropCategory.IMPLICIT_MAX)
prop.label = "Total Egress Packets rate maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMax", prop)
prop = PropMeta("str", "pktsRateMin", "pktsRateMin", 8199, PropCategory.IMPLICIT_MIN)
prop.label = "Total Egress Packets rate minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateMin", prop)
prop = PropMeta("str", "pktsRateSpct", "pktsRateSpct", 8202, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total Egress Packets rate suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateSpct", prop)
prop = PropMeta("str", "pktsRateThr", "pktsRateThr", 8204, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total Egress Packets rate thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsRateThr", prop)
prop = PropMeta("str", "pktsRateTr", "pktsRateTr", 8206, PropCategory.IMPLICIT_TREND)
prop.label = "Total Egress Packets rate trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTr", prop)
prop = PropMeta("str", "pktsRateTrBase", "pktsRateTrBase", 8205, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total Egress Packets rate trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTrBase", prop)
prop = PropMeta("str", "pktsRateTtl", "pktsRateTtl", 8203, PropCategory.IMPLICIT_TOTAL)
prop.label = "Total Egress Packets rate total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsRateTtl", prop)
prop = PropMeta("str", "pktsSpct", "pktsSpct", 8178, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total Egress Packets suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsSpct", prop)
prop = PropMeta("str", "pktsThr", "pktsThr", 8179, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total Egress Packets thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("pktsThr", prop)
prop = PropMeta("str", "pktsTr", "pktsTr", 8181, PropCategory.IMPLICIT_TREND)
prop.label = "Total Egress Packets trend"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTr", prop)
prop = PropMeta("str", "pktsTrBase", "pktsTrBase", 8180, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Total Egress Packets trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("pktsTrBase", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "utilAvg", "utilAvg", 8222, PropCategory.IMPLICIT_AVG)
prop.label = "Egress Link Utilization average value"
prop.isOper = True
prop.isStats = True
meta.props.add("utilAvg", prop)
prop = PropMeta("str", "utilLast", "utilLast", 8219, PropCategory.IMPLICIT_LASTREADING)
prop.label = "Egress Link Utilization current value"
prop.isOper = True
prop.isStats = True
meta.props.add("utilLast", prop)
prop = PropMeta("str", "utilMax", "utilMax", 8221, PropCategory.IMPLICIT_MAX)
prop.label = "Egress Link Utilization maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("utilMax", prop)
prop = PropMeta("str", "utilMin", "utilMin", 8220, PropCategory.IMPLICIT_MIN)
prop.label = "Egress Link Utilization minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("utilMin", prop)
prop = PropMeta("str", "utilSpct", "utilSpct", 8223, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Egress Link Utilization suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("utilSpct", prop)
prop = PropMeta("str", "utilThr", "utilThr", 8225, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Egress Link Utilization thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("utilThr", prop)
prop = PropMeta("str", "utilTr", "utilTr", 8227, PropCategory.IMPLICIT_TREND)
prop.label = "Egress Link Utilization trend"
prop.isOper = True
prop.isStats = True
meta.props.add("utilTr", prop)
prop = PropMeta("str", "utilTrBase", "utilTrBase", 8226, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "Egress Link Utilization trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("utilTrBase", prop)
prop = PropMeta("str", "utilTtl", "utilTtl", 8224, PropCategory.IMPLICIT_TOTAL)
prop.label = "Egress Link Utilization total sum"
prop.isOper = True
prop.isStats = True
meta.props.add("utilTtl", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("l1EthIfToEPg", "EPG", "cobra.model.fv.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("EqptPortToEPg", "EPG", "cobra.model.fv.EPg"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
575e4ca2deb65350aa1786280363eb93b2489ec8
|
9e98a7770465227e8e0e962c02850acc5c172e96
|
/backend/admin/secure.py
|
e8efebe1599d1525a767e1c7dad8388d4a903692
|
[
"MIT"
] |
permissive
|
pengjinfu/flask-bigger
|
281a43770958584c406accb34b2d13eebd4ba8cc
|
cc5ba476c20129a009ad8a8366daf4dc060bd4ac
|
refs/heads/master
| 2021-04-19T21:24:20.385510 | 2019-03-09T01:07:06 | 2019-03-09T01:07:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,490 |
py
|
# -*- coding: utf-8 -*-
from functools import wraps
from flask import (
g,
session,
request,
redirect,
url_for,
current_app,
abort
)
def login_required(func):
@wraps(func)
def decorated_function(*args, **kwargs):
not_in_g = not hasattr(g, 'login_user') or g.login_user is None
        not_in_s = 'login_user' not in session or session['login_user'] is None
if not_in_g and not_in_s:
_route = 'admin.login_view'
return redirect(url_for(_route, next=request.url))
if not_in_g:
g.login_user = session['login_user']
return func(*args, **kwargs)
return decorated_function
def admin_required(func):
@wraps(func)
def decorated_function(*args, **kwargs):
        in_g = hasattr(g, 'login_user') and getattr(g, 'login_user') is not None
        in_s = 'login_user' in session and session['login_user'] is not None
if in_g or in_s:
g_admin = in_g and getattr(g, 'login_user').is_admin
s_admin = in_s and 'is_admin' in session['login_user'] and bool(session['login_user']['is_admin'])
if g_admin or s_admin:
if not in_g:
g.login_user = session['login_user']
return func(*args, **kwargs)
else:
return abort(403)
else:
_route = 'admin.login_view'
return redirect(url_for(_route, next=request.url))
return decorated_function
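# Hedged usage sketch (not part of the original module; the blueprint and view
# names below are hypothetical):
#
#   @admin_bp.route('/dashboard')
#   @login_required
#   def dashboard_view():
#       ...
#
#   @admin_bp.route('/settings')
#   @admin_required
#   def settings_view():
#       ...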
|
[
"[email protected]"
] | |
c4e79832b0eae413614aef8f2f1b3143244b8230
|
0b14062e8db610817b7f0730bfb21bf3e93765b8
|
/component/intent/response.py
|
8539ee74f4e2f507f8567ee3b13ee8c97c86dc48
|
[
"MIT"
] |
permissive
|
bkosciow/tenchi
|
63fa827607b7b725ea61b73119193904bde25a6a
|
e53e59df34934e3e81da3e9321c1648a844aa23c
|
refs/heads/develop
| 2023-06-24T17:14:45.696811 | 2023-06-12T09:53:40 | 2023-06-12T09:53:40 | 175,896,569 | 0 | 0 |
MIT
| 2023-06-12T09:53:42 | 2019-03-15T21:59:49 |
Python
|
UTF-8
|
Python
| false | false | 682 |
py
|
class Response(object):
def __init__(self, request=None):
self.intent_name = request.intent_name if request else ''
self.lang = request.lang if request else ''
self.data = request.data if request else ''
self._speech = ''
self._text = ''
@property
def text(self):
if self._text == '':
return self._speech
return self._text
@text.setter
def text(self, value):
self._text = value
@property
def speech(self):
if self._speech == '':
return self._text
return self._speech
@speech.setter
def speech(self, value):
self._speech = value
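# Hedged usage sketch (not part of the original module): when only one of the
# two fields is set, the other property falls back to it.
#
#   r = Response()
#   r.speech = 'hello'
#   assert r.text == 'hello'    # _text is empty, so text mirrors speech
#   r.text = 'hi'
#   assert r.speech == 'hello'  # _speech is set, so speech keeps its own value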
|
[
"[email protected]"
] | |
4b3d81773808ab07ce6131fa88b8d2fc3dd8e8e0
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/bpmn/models/t_global_conversation.py
|
d14fa1bdd32f9e7ae6b1d934d12d5dd4c4b99b1d
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 |
Python
|
UTF-8
|
Python
| false | false | 250 |
py
|
from dataclasses import dataclass
from .t_collaboration import TCollaboration
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TGlobalConversation(TCollaboration):
class Meta:
name = "tGlobalConversation"
|
[
"[email protected]"
] | |
3cddfeb55bb003700c11e2ec31184cc755b38f94
|
3e4fd46157cd8bee454fe0bff227da6204ade9f4
|
/api/migrations/0020_cdekkey_updated_at.py
|
c6a46496d2f3621b432afd3eeca684ce5a14729b
|
[] |
no_license
|
skiboorg/docs_api
|
8e7017457cc111311d836f572597aeb3d6bed1c4
|
4bae50c8ea772439b93bf4e0fc95cb6395bb9cfb
|
refs/heads/master
| 2023-06-26T14:43:54.248638 | 2021-08-02T15:32:28 | 2021-08-02T15:32:28 | 336,874,780 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 378 |
py
|
# Generated by Django 3.1.5 on 2021-02-24 10:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0019_cdekkey'),
]
operations = [
migrations.AddField(
model_name='cdekkey',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
|
[
"[email protected]"
] | |
7e7edd6dae26504cd0c4210724cf8208bcd5fe68
|
5357b71f92e25f9fae36560daef33512c15cded6
|
/CommonTools/test/buildWZworkspace_f4_ATLASCMSforComb_signalShapeFix_less1sigmaInputForNegativeSystBkgDD_lnNall.py
|
a207b63daf03ed856867f0291e7c5caddee23903
|
[] |
no_license
|
senka/ATLASCMS_combination_2
|
b0740cb00479db9d5692b42d1e0bb9be3568c640
|
080a2abff576a3f07dd2e78e95955eb673f38cbf
|
refs/heads/master
| 2021-01-15T10:47:01.015627 | 2014-08-06T08:54:10 | 2014-08-06T08:54:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39,768 |
py
|
import pyroot_logon
import limits
import os
import sys
from array import *
from ROOT import *
from optparse import OptionParser
from ConfigParser import SafeConfigParser
l2nu2_bkgMC=[0,0,0,0,8.53, 8.38, 4.14]
l2nu2_bkgDD=[0.63,0.2,0.09,0.011,17.48, 7.58, 0.78]
l2nu2_signal=[27.93, 14.63, 9.28, 1.55, 13.55, 15.66, 10.14]
stat_signal_err=[0.24, 0.17, 0.14, 0.06, 0.22, 0.28, 0.14]
stat_bkgDD_err=[0.75, 0.24, 0.1, 0.01, 4.38, 2.69, 1.7]
syst_bkgDD_err=[0.48, 0.15, 0.07, 0.008, 1.12, 0.68, 0.19]
stat_bkgMC_err=[0., 0., 0., 0.0, 0.78, 0.74, 0.66]
syst_bkgMC_err=[0., 0., 0., 0.0, 0.19, 0.19, 0.09]
syst_signal_reco_err=[0.97, 0.55, 0.37, 0.07, 0.27, 0.48, 0.4]
syst_signal_th_err=[1.79, 1.04, 0.79, 0.25, 1.29, 1.64, 1.35]
stat_signal_err_rel_Up=[]
stat_bkgDD_err_rel_Up=[]
syst_bkgDD_err_rel_Up=[]
stat_bkgMC_err_rel_Up=[]
syst_bkgMC_err_rel_Up=[]
syst_signal_reco_err_rel_Up=[]
syst_signal_th_err_rel_Up=[]
stat_signal_err_rel_Down=[]
stat_bkgDD_err_rel_Down=[]
syst_bkgDD_err_rel_Down=[]
stat_bkgMC_err_rel_Down=[]
syst_bkgMC_err_rel_Down=[]
syst_signal_reco_err_rel_Down=[]
syst_signal_th_err_rel_Down=[]
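# The loop below converts the absolute yield uncertainties above into the
# multiplicative down/up factors (1 - rel, 1 + rel) that lnN nuisance
# parameters expect in the datacard; a factor of 1.0 means "no effect", and
# the down variation is floored at 0.001 when the uncertainty exceeds the yield.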
for i in range(0,7):
stat_signal_err_rel_Up.append([])
stat_bkgDD_err_rel_Up.append([])
syst_bkgDD_err_rel_Up.append([])
stat_bkgMC_err_rel_Up.append([])
syst_bkgMC_err_rel_Up.append([])
syst_signal_reco_err_rel_Up.append([])
syst_signal_th_err_rel_Up.append([])
stat_signal_err_rel_Down.append([])
stat_bkgDD_err_rel_Down.append([])
syst_bkgDD_err_rel_Down.append([])
stat_bkgMC_err_rel_Down.append([])
syst_bkgMC_err_rel_Down.append([])
syst_signal_reco_err_rel_Down.append([])
syst_signal_th_err_rel_Down.append([])
print 'i= ',i,'\t',stat_signal_err[i],'\t',l2nu2_signal[i]
if l2nu2_signal[i]>0:
if stat_signal_err[i]<l2nu2_signal[i]:
stat_signal_err_rel_Down[i]=1.-stat_signal_err[i]/l2nu2_signal[i]
else:
stat_signal_err_rel_Down[i]=0.001
stat_signal_err_rel_Up[i]=1.+stat_signal_err[i]/l2nu2_signal[i]
syst_signal_reco_err_rel_Up[i]=1.+syst_signal_reco_err[i]/l2nu2_signal[i]
syst_signal_th_err_rel_Up[i]=1.+syst_signal_th_err[i]/l2nu2_signal[i]
syst_signal_reco_err_rel_Down[i]=1.-syst_signal_reco_err[i]/l2nu2_signal[i]
syst_signal_th_err_rel_Down[i]=1.-syst_signal_th_err[i]/l2nu2_signal[i]
else:
stat_signal_err_rel_Up[i]=1.
syst_signal_reco_err_rel_Up[i]=1.
syst_signal_th_err_rel_Up[i]=1.
stat_signal_err_rel_Down[i]=1.
syst_signal_reco_err_rel_Down[i]=1.
syst_signal_th_err_rel_Down[i]=1.
if l2nu2_bkgDD[i]>0:
if stat_bkgDD_err[i]<l2nu2_bkgDD[i]:
stat_bkgDD_err_rel_Down[i]=1.-stat_bkgDD_err[i]/l2nu2_bkgDD[i]
else:
stat_bkgDD_err_rel_Down[i]=0.001
stat_bkgDD_err_rel_Up[i]=1.+stat_bkgDD_err[i]/l2nu2_bkgDD[i]
syst_bkgDD_err_rel_Up[i]=1.+syst_bkgDD_err[i]/l2nu2_bkgDD[i]
syst_bkgDD_err_rel_Down[i]=1.-syst_bkgDD_err[i]/l2nu2_bkgDD[i]
else:
stat_bkgDD_err_rel_Up[i]=1.
syst_bkgDD_err_rel_Up[i]=1.
stat_bkgDD_err_rel_Down[i]=1.
syst_bkgDD_err_rel_Down[i]=1.
if l2nu2_bkgMC[i]>0:
stat_bkgMC_err_rel_Up[i]=1.+stat_bkgMC_err[i]/l2nu2_bkgMC[i]
syst_bkgMC_err_rel_Up[i]=1.+syst_bkgMC_err[i]/l2nu2_bkgMC[i]
stat_bkgMC_err_rel_Down[i]=1.-stat_bkgMC_err[i]/l2nu2_bkgMC[i]
syst_bkgMC_err_rel_Down[i]=1.-syst_bkgMC_err[i]/l2nu2_bkgMC[i]
else:
stat_bkgMC_err_rel_Up[i]=1.
syst_bkgMC_err_rel_Up[i]=1.
stat_bkgMC_err_rel_Down[i]=1.
syst_bkgMC_err_rel_Down[i]=1.
syst_signal_sum_err = []
for i in range(0,7):
syst_signal_sum_err.append([])
print i
syst_signal_sum_err[i]=sqrt(syst_signal_reco_err[i]*syst_signal_reco_err[i]+syst_signal_th_err[i]*syst_signal_th_err[i])
def isItCorrelated(name):
print '\t ----> isItCorrelated: testing ',name
if ('_MC_syst' in name or '_les' in name or '_DD_syst' in name or '_recoth' in name or '_reco' in name or '_th' in name):
print '-> true'
return True
else:
print '-> false'
return False
doAllLnN=True
parser = OptionParser(description="%prog : A RooStats Implementation of Anomalous Triple Gauge Coupling Analysis.",
usage="buildWZworkspace --config=example_config.cfg")
cfgparse = SafeConfigParser()
parser.add_option("--config",dest="config",help="The name of the input configuration file.")
(options,args) = parser.parse_args()
miss_options = False
if options.config is None:
print 'Need to specify --config'
miss_options=True
if miss_options:
exit(1)
cfgparse.read(options.config)
options.config = cfgparse # put the parsed config file into our options
cfg = options.config
#lType = sys.argv[1]
#codename = ""
#planeID = sys.argv[2]
norm_sig_sm = -1
norm_sig_sm_up = -1
norm_sig_sm_down = -1
norm_bkg = -1
norm_obs = -1
fit_sections = cfg.sections()
fit_sections.remove('Global') #don't need to iterate over the global configuration
basepath = '%s/src/CombinedEWKAnalysis/CommonTools/data/WV_semileptonic'%os.environ['CMSSW_BASE']
for section in fit_sections:
codename = section
lType = codename
print '\n\tlType=',lType
f = TFile('%s/%s_boosted_withSignalSyst_adjustedUnc_f4.root'%(basepath,codename))
Nbkg = cfg.get(codename,'Nbkg')
print "Nbkg= ",Nbkg
Nbkg_int=int(Nbkg)
bkg_name = []
for i in range(1,Nbkg_int+1):
bkg_name.append(cfg.get(codename,'bkg%i_name'%i))
background = []
for i in range(0,Nbkg_int):
background.append(f.Get(bkg_name[i]))
print 'backgrounds= ',background
background_shapeSyst = []
for i in range(0,Nbkg_int):
background_shapeSyst.append([])
for name in cfg.get(codename,'bkg%i_shape_syst'%(i+1)).split(','):
background_shapeSyst[i].append(name)
background_backshapeUp = []
background_backshapeDown = []
for j in range(0,Nbkg_int):
background_backshapeUp.append([])
background_backshapeDown.append([])
for i in range(0,len(background_shapeSyst[j])):
print ' bkg shape syst: ',background_shapeSyst[j]
print ' getting bkgUp ','%sUp'%background_shapeSyst[j][i]
background_backshapeUp[j].append(f.Get('%sUp'%background_shapeSyst[j][i]))
background_backshapeDown[j].append(f.Get('%sDown'%background_shapeSyst[j][i]))
data_obs = f.Get('data_obs')
diboson = f.Get('diboson')
doSignalShape_unc=False
cfg_items=cfg.items(codename)
for cfg_item in cfg_items:
if 'signal_shape_syst' in cfg_item:
doSignalShape_unc = True
print 'doSignalShape_unc=',doSignalShape_unc
if (doSignalShape_unc):
diboson_up = {}
diboson_down = {}
norm_sig_sm_up = {}
norm_sig_sm_down = {}
signal_shapeSyst = [string(i) for i in cfg.get(codename,'signal_shape_syst').split(',')]
for i in range(0,len(signal_shapeSyst)):
print ' signal shape syst: ',signal_shapeSyst[i]
diboson_up[i] = f.Get('%sUp'%signal_shapeSyst[i])
diboson_down[i] = f.Get('%sDown'%signal_shapeSyst[i])
norm_sig_sm_up[i] = diboson_up[i].Integral()
norm_sig_sm_down[i] = diboson_down[i].Integral()
norm_sig_sm = diboson.Integral()
norm_bkg = []
for i in range(0,Nbkg_int):
norm_bkg.append(background[i].Integral())
norm_obs = data_obs.Integral()
print 'bkg integral: ',norm_bkg
if (doSignalShape_unc):
print 'signal shape unc: ',norm_sig_sm_down,' ',norm_sig_sm,' ',norm_sig_sm_up
theWS = RooWorkspace('WV_%sboosted'%codename, 'WV_%sboosted'%codename)
wpt = theWS.factory('W_pt_%s[%f,%f]' % (codename,data_obs.GetBinLowEdge(1),
data_obs.GetBinLowEdge(data_obs.GetNbinsX())+data_obs.GetBinWidth(data_obs.GetNbinsX())))
binning=array('d',[])
for i in range(1, data_obs.GetNbinsX()+1):
binning.append(data_obs.GetBinLowEdge(i))
binning.append(data_obs.GetBinLowEdge(data_obs.GetNbinsX()+1))
print "bining: "
for i in range(0, len(binning)):
print binning[i]
bins=RooBinning(len(binning)-1, binning)
wpt.setBinning(bins)
lz = theWS.factory('lZ[0., -0.025, 0.025]')
lz.setConstant(False)
dkg = theWS.factory('dkg[0.,-0.025, 0.025]')
dg1 = theWS.factory('dg1[0.,-0.025, 0.025]')
vars = RooArgList(wpt)
varSet = RooArgSet(wpt)
data = RooDataHist('data_obs', 'data_obs_WV_%s'%codename, vars, data_obs)
bkgHist = {}
for i in range(0,Nbkg_int):
bkgHist[i] = RooDataHist('WV_semileptonic_bkg%i_%s'%(i+1,codename),
'WV_semileptonic_bkg%i_%s'%(i+1,codename),
vars,
background[i])
bkgHist_systUp = []
bkgHist_systDown = []
for j in range(0,Nbkg_int):
bkgHist_systUp.append([])
bkgHist_systDown.append([])
for i in range(0,len(background_shapeSyst[j])):
if (isItCorrelated(background_shapeSyst[j][i])):
print ' \n\t\t ==================================> <=========================== '
name_forCorr=background_shapeSyst[j][i]
print ' name_forCorr= ',name_forCorr
if ('_DD_syst' in name_forCorr and ('ch1' in name_forCorr or 'ch3' in name_forCorr)):
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch1_','odd_')
name_forCorr=name_forCorr.replace('ch3_','odd_')
elif ('_DD_syst' in name_forCorr and ('ch2' in name_forCorr or 'ch4' in name_forCorr)):
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch2_','even_')
name_forCorr=name_forCorr.replace('ch4_','even_')
else:
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch1_','')
name_forCorr=name_forCorr.replace('ch2_','')
name_forCorr=name_forCorr.replace('ch3_','')
name_forCorr=name_forCorr.replace('ch4_','')
print ' -> name_forCorr= ',name_forCorr
bkgHist_systUp[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,name_forCorr),
'WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,name_forCorr),
vars,
background_backshapeUp[j][i]))
bkgHist_systDown[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,name_forCorr),
'WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,name_forCorr),
vars,
background_backshapeDown[j][i]))
else:
bkgHist_systUp[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
'WV_semileptonic_bkg%i_%s_%sUp'%(j+1,codename,background_shapeSyst[j][i]),
vars,
background_backshapeUp[j][i]))
bkgHist_systDown[j].append(RooDataHist('WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
'WV_semileptonic_bkg%i_%s_%sDown'%(j+1,codename,background_shapeSyst[j][i]),
vars,
background_backshapeDown[j][i]))
dibosonHist = RooDataHist('WV_semileptonic_SM_%s_rawshape'%codename,
'WV_semileptonic_SM_%s_rawshape'%codename,
vars,
diboson)
if (doSignalShape_unc):
dibosonHist_up = {}
dibosonHist_down = {}
for i in range(0,len(signal_shapeSyst)):
if (isItCorrelated(str(signal_shapeSyst[i]))):
print ' \n\t\t ==================================> <=========================== '
name_forCorr=str(signal_shapeSyst[i])
print ' name_forCorr= ',name_forCorr
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch1_','')
name_forCorr=name_forCorr.replace('ch2_','')
name_forCorr=name_forCorr.replace('ch3_','')
name_forCorr=name_forCorr.replace('ch4_','')
print ' -> name_forCorr= ',name_forCorr
dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,name_forCorr),
vars,
diboson_up[i])
dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,name_forCorr),
vars,
diboson_down[i])
else:
dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
vars,
diboson_up[i])
dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
vars,
diboson_down[i])
# dibosonHist_up[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_rawshape_%sUp'%(codename,signal_shapeSyst[i]),
# vars,
# diboson_up[i])
# dibosonHist_down[i] = RooDataHist('WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_rawshape_%sDown'%(codename,signal_shapeSyst[i]),
# vars,
# diboson_down[i])
dibosonPdf = RooHistFunc('WV_semileptonic_SM_%s_shape'%codename,
'WV_semileptonic_SM_%s_shape'%codename,
varSet,
dibosonHist)
if (doSignalShape_unc):
dibosonPdf_up = {}
dibosonPdf_down = {}
for i in range(0,len(signal_shapeSyst)):
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch1_','')
name_forCorr=name_forCorr.replace('ch2_','')
name_forCorr=name_forCorr.replace('ch3_','')
name_forCorr=name_forCorr.replace('ch4_','')
dibosonPdf_up[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sUp'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_shape_%sUp'%(codename,name_forCorr),
varSet,
dibosonHist_up[i])
dibosonPdf_down[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sDown'%(codename,name_forCorr),
'WV_semileptonic_SM_%s_shape_%sDown'%(codename,name_forCorr),
varSet,
dibosonHist_down[i])
else:
dibosonPdf_up[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
varSet,
dibosonHist_up[i])
dibosonPdf_down[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
'WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
varSet,
dibosonHist_down[i])
# dibosonPdf_up[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_shape_%sUp'%(codename,signal_shapeSyst[i]),
# varSet,
# dibosonHist_up[i])
# dibosonPdf_down[i] = RooHistFunc('WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
# 'WV_semileptonic_SM_%s_shape_%sDown'%(codename,signal_shapeSyst[i]),
# varSet,
# dibosonHist_down[i])
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ reading RooATGCFunction\n'
# aTGC = RooATGCFunction_wz('ATGC_shapescale_WWgammaZ_WV_atgc_semileptonic_%s'%codename,
# 'ATGC_shapescale_%s'%codename,
# wpt,
# lz,
# dkg,
# dg1,
# '%s/signal_%s_f4.root'%(basepath,codename))
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ read RooATGCFunction\n'
limtype = -1
planeID = 'dkglZ'
print 'setting up for %s plane!'%planeID
if ( planeID == 'dkglZ' ):
limtype = 0
elif ( planeID == 'dg1lZ' ):
limtype = 1
elif ( planeID == 'dkgdg1'):
limtype = 2
else:
raise RuntimeError('InvalidCouplingChoice',
'We can only use [dkg,lZ], [dg1,lZ], and [dkg,dg1]'\
' as POIs right now!')
print limtype
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ reading RooATGCSemi\n'
if (doSignalShape_unc):
kappaLow = {}
kappaHigh = {}
aTGCPdf_norm = {}
theta = {}
kappaLow_sum_d = 1.
kappaHigh_sum_d = 1.
for i in range(0,len(signal_shapeSyst)):
kappaLow[i] = RooRealVar("kappaL_%s_%s"%(i+1,codename),"kappaL_%s_%s"%(i+1,codename),norm_sig_sm_down[i]/norm_sig_sm)
kappaLow[i].setConstant(True)
kappaHigh[i] = RooRealVar("kappaH_%s_%s"%(i+1,codename),"kappaH_%s_%s"%(i+1,codename),norm_sig_sm_up[i]/norm_sig_sm)
kappaHigh[i].setConstant(True)
kappaLow_sum_d = kappaLow_sum_d*norm_sig_sm_down[i]/norm_sig_sm
kappaHigh_sum_d = kappaHigh_sum_d*norm_sig_sm_up[i]/norm_sig_sm
# theWS.factory("%s[-7,7]"%signal_shapeSyst[i])
# theta[i] = theWS.var("%s"%signal_shapeSyst[i])
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('ch1_','')
name_forCorr=name_forCorr.replace('ch2_','')
name_forCorr=name_forCorr.replace('ch3_','')
name_forCorr=name_forCorr.replace('ch4_','')
if not doAllLnN:
theWS.factory("%s[-7,7]"%name_forCorr)
theta[i] = theWS.var("%s"%name_forCorr)
else:
if not doAllLnN:
theWS.factory("%s[-7,7]"%signal_shapeSyst[i])
theta[i] = theWS.var("%s"%signal_shapeSyst[i])
if not doAllLnN:
aTGCPdf_norm[i] = AsymPow('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_integral%s'%(codename,i+1),
'ATGCPdf_WV_%s_integral%s'%(codename,i+1),
kappaLow[i],
kappaHigh[i],
theta[i])
if not doAllLnN:
if (len(signal_shapeSyst)==1):
aTGCPdf_norm_sum = aTGCPdf_norm[0]
else:
for i in range(0,len(signal_shapeSyst)):
if (i==0): prodset=RooArgList(aTGCPdf_norm[i])
else: prodset.add(RooArgList(aTGCPdf_norm[i]))
aTGCPdf_norm_sum = RooProduct("aTGCPdf_norm_sum","aTGCPdf_norm_sum",prodset)
kappaLow_sum = RooRealVar("kappaLow_sum","kappaLow_sum",kappaLow_sum_d)
kappaHigh_sum = RooRealVar("kappaHigh_sum","kappaHigh_sum",kappaHigh_sum_d)
aTGCPdf_norm_sum.SetNameTitle('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_norm'%codename,
'ATGCPdf_WV_%s_norm'%codename)
aTGCPdf = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s'%codename,
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf,
'%s/signal_%s_f4.root'%(basepath,codename),
limtype
)
if (doSignalShape_unc):
aTGCPdf_up = {}
aTGCPdf_down = {}
for i in range(0,len(signal_shapeSyst)):
if (isItCorrelated(str(signal_shapeSyst[i]))):
name_forCorr=str(signal_shapeSyst[i])
name_forCorr=name_forCorr.replace('l4_','')
name_forCorr=name_forCorr.replace('l2nu2_','')
name_forCorr=name_forCorr.replace('ch1_','')
name_forCorr=name_forCorr.replace('ch2_','')
name_forCorr=name_forCorr.replace('ch3_','')
name_forCorr=name_forCorr.replace('ch4_','')
aTGCPdf_up[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sUp'%(codename,name_forCorr),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_up[i],
'%s/signal_%s_f4.root'%(basepath,codename),
limtype
)
aTGCPdf_down[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sDown'%(codename,name_forCorr),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_down[i],
'%s/signal_%s_f4.root'%(basepath,codename),
limtype
)
else:
aTGCPdf_up[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sUp'%(codename,signal_shapeSyst[i]),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_up[i],
'%s/signal_%s_f4.root'%(basepath,codename),
limtype
)
aTGCPdf_down[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sDown'%(codename,signal_shapeSyst[i]),
'ATGCPdf_WV_%s'%codename,
wpt,
dkg,
lz,
dg1,
dibosonPdf_down[i],
'%s/signal_%s_f4.root'%(basepath,codename),
limtype
)
# aTGCPdf_up[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sUp'%(codename,signal_shapeSyst[i]),
# 'ATGCPdf_WV_%s'%codename,
# wpt,
# dkg,
# lz,
# dg1,
# dibosonPdf_up[i],
# '%s/signal_%s_f4.root'%(basepath,codename),
# limtype
# )
# aTGCPdf_down[i] = RooATGCSemiAnalyticPdf_wz('ATGCPdf_WWgammaZ_WV_atgc_semileptonic_%s_%sDown'%(codename,signal_shapeSyst[i]),
# 'ATGCPdf_WV_%s'%codename,
# wpt,
# dkg,
# lz,
# dg1,
# dibosonPdf_down[i],
# '%s/signal_%s_f4.root'%(basepath,codename),
# limtype
# )
print '\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ read RooATGCSemi\n'
getattr(theWS, 'import')(data)
for i in range(0,Nbkg_int):
getattr(theWS, 'import')(bkgHist[i])
getattr(theWS, 'import')(aTGCPdf)
if not doAllLnN:
for j in range(0,Nbkg_int):
for i in range(0,len(background_shapeSyst[j])):
getattr(theWS, 'import')(bkgHist_systUp[j][i])
getattr(theWS, 'import')(bkgHist_systDown[j][i])
if (doSignalShape_unc):
for i in range(0,len(signal_shapeSyst)):
getattr(theWS, 'import')(aTGCPdf_up[i])
getattr(theWS, 'import')(aTGCPdf_down[i])
# getattr(theWS, 'import')(aTGCPdf_norm[i])
getattr(theWS, 'import')(aTGCPdf_norm_sum)
theWS.Print()
fout = TFile('%s_boosted_ws.root'%(codename), 'recreate')
theWS.Write()
fout.Close()
leptons=""
ch=100
## calculate relative error:
if (codename=="ch1"):
ch=0
leptons="l4"
codename_forBKGDDsyst="odd"
if (codename=="ch2"):
ch=1
leptons="l4"
codename_forBKGDDsyst="even"
if (codename=="ch3"):
ch=2
leptons="l4"
codename_forBKGDDsyst="odd"
if (codename=="ch4"):
ch=3
leptons="l4"
codename_forBKGDDsyst="even"
if (codename=="ch5"):
ch=4
codename_forBKGDDsyst="odd"
leptons="l2nu2"
Nbkg_int=2
norm_bkg.append([])
norm_bkg[0]=l2nu2_bkgMC[4]
norm_bkg[1]=l2nu2_bkgDD[4]
if (codename=="ch6"):
ch=5
codename_forBKGDDsyst="even"
Nbkg_int=2
leptons="l2nu2"
norm_bkg.append([])
norm_bkg[0]=l2nu2_bkgMC[5]
norm_bkg[1]=l2nu2_bkgDD[5]
if (codename=="ch7"):
ch=6
leptons="l2nu2"
codename_forBKGDDsyst="odd"
Nbkg_int=2
norm_bkg.append([])
norm_bkg[0]=l2nu2_bkgMC[6]
norm_bkg[1]=l2nu2_bkgDD[6]
print "===================> channel: ",ch+1
### make the card for this channel and plane ID
card = """
# Simple counting experiment, with one signal and a few background processes
imax 1 number of channels
jmax {Nbkg_int} number of backgrounds
kmax * number of nuisance parameters (sources of systematical uncertainties)
------------""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,Nbkg_int=Nbkg_int)
for i in range(0,Nbkg_int):
card += """
shapes WV_semileptonic_bkg{Nbkg_int}_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:$PROCESS WV_{codename}boosted:$PROCESS""".format(Nbkg_int=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
shapes data_obs {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:$PROCESS """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs,Nbkg_int=Nbkg_int)
if (doSignalShape_unc):
card += """
shapes WWgammaZ_WV_atgc_semileptonic_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:ATGCPdf_$PROCESS WV_{codename}boosted:ATGCPdf_$PROCESS """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
else:
card += """
shapes WWgammaZ_WV_atgc_semileptonic_{codename} {codename}boosted ./{codename}_boosted_ws.root WV_{codename}boosted:ATGCPdf_$PROCESS
""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
card += """
------------
bin {codename}boosted
observation {norm_obs}
------------
bin {codename}boosted\t\t""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg,norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """\t\t\t{codename}boosted""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
process\t\t\t WWgammaZ_WV_atgc_semileptonic_{codename} """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """\tWV_semileptonic_bkg{Nbkg_int}_{codename}""".format(Nbkg_int=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
process 0 """.format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """ \t\t\t\t{i}""".format(i=i+1,codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
card += """
rate {norm_sig_sm}\t""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
for i in range(0,Nbkg_int):
card += """ \t\t\t{norm_bkg}""".format(codename=codename,norm_sig_sm=norm_sig_sm,norm_bkg=norm_bkg[i],norm_obs=norm_obs)
if (codename=="mu" or codename=="el" or codename=="elmu"):
card += """
------------
lumi_8TeV lnN 1.022 """
for i in range(0,Nbkg_int):
card += """\t\t\t\t-"""
card += """
sig_other lnN 1.1342 """
for i in range(0,Nbkg_int):
card += """\t\t\t\t-"""
card += """
background_{codename}boosted_backshape lnN - """.format(codename=codename)
for i in range(0,Nbkg_int):
card += """\t\t\t\t0.7/1.3"""
card += """
signal_th lnN 0.96/1.04 """.format(codename=codename)
for i in range(0,Nbkg_int):
card += """\t\t\t\t-"""
else:
if (leptons=="l2nu2"):
card += """
------------
lumi_8TeV lnN 1.039\t\t\t1.039\t\t\t-"""
else:
card += """
------------
lumi_8TeV lnN 1.039\t\t\t-"""
if (leptons=="l2nu2"):
card += """
{leptons}_{codename}_background_MC_stat lnN - {err_down}/{err_up} -""".format(leptons=leptons,codename=codename,err_down=stat_bkgMC_err_rel_Down[ch],err_up=stat_bkgMC_err_rel_Up[ch])
card += """
background_MC_syst lnN - {err_down}/{err_up} -""".format(leptons=leptons,codename=codename,err_down=syst_bkgMC_err_rel_Down[ch],err_up=syst_bkgMC_err_rel_Up[ch])
if (leptons=="l2nu2"):
card += """
{leptons}_{codename}_background_DD_factor2p5_stat lnN - - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=stat_bkgDD_err_rel_Down[ch],err_up=stat_bkgDD_err_rel_Up[ch])
if (codename_forBKGDDsyst=="odd"):
card += """
odd_background_DD_syst lnN - - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=syst_bkgDD_err_rel_Down[ch],err_up=syst_bkgDD_err_rel_Up[ch])
else:
card += """
even_background_DD_syst lnN - - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=syst_bkgDD_err_rel_Down[ch],err_up=syst_bkgDD_err_rel_Up[ch])
card += """
{leptons}_{codename}_signal_stat lnN {err_down}/{err_up} - -""".format(leptons=leptons,codename=codename,err_down=stat_signal_err_rel_Down[ch],err_up=stat_signal_err_rel_Up[ch])
card += """
signal_reco lnN {err_down}/{err_up} - -""".format(leptons=leptons,codename=codename,err_down=syst_signal_reco_err_rel_Down[ch],err_up=syst_signal_reco_err_rel_Up[ch])
card += """
signal_th lnN {err_down}/{err_up} - -""".format(leptons=leptons,codename=codename,err_down=syst_signal_th_err_rel_Down[ch],err_up=syst_signal_th_err_rel_Up[ch])
else:
card += """
{leptons}_{codename}_background_DD_factor1p5_stat lnN - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=stat_bkgDD_err_rel_Down[ch],err_up=stat_bkgDD_err_rel_Up[ch])
if (codename_forBKGDDsyst=="odd"):
card += """
odd_background_DD_syst lnN - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=syst_bkgDD_err_rel_Down[ch],err_up=syst_bkgDD_err_rel_Up[ch])
else:
card += """
even_background_DD_syst lnN - {err_down}/{err_up}""".format(leptons=leptons,codename=codename,err_down=syst_bkgDD_err_rel_Down[ch],err_up=syst_bkgDD_err_rel_Up[ch])
card += """
{leptons}_{codename}_signal_stat lnN {err_down}/{err_up} -""".format(leptons=leptons,codename=codename,err_down=stat_signal_err_rel_Down[ch],err_up=stat_signal_err_rel_Up[ch])
card += """
signal_reco lnN {err_down}/{err_up} -""".format(leptons=leptons,codename=codename,err_down=syst_signal_reco_err_rel_Down[ch],err_up=syst_signal_reco_err_rel_Up[ch])
card += """
signal_th lnN {err_down}/{err_up} -""".format(leptons=leptons,codename=codename,err_down=syst_signal_th_err_rel_Down[ch],err_up=syst_signal_th_err_rel_Up[ch])
#lumi_8TeV lnN {err_down}/{err_up}39 {err_down}/{err_up}39 -
#l2nu2_ch3_background_MC_stat shape - {err_down}/{err_up} .format(err_down=stat_bkgMC_err_rel_Down[ch],err_up=stat_bkgMC_err_rel_Up[ch]) -
#background_MC_syst shape - {err_down}/{err_up} -
#l2nu2_ch3_background_DD_factor2p5_stat shape - - 1.845
#odd_background_DD_syst shape - - {err_down}/{err_up}
#l2nu2_ch3_signal_stat shape1 {err_down}/{err_up} - -
#signal_reco shape1 {err_down}/{err_up} - -
#signal_th shape1 {err_down}/{err_up} - -
#lumi_8TeV lnN 1.039 -
#l4_ch1_background_DD_factor1p5_stat shape - 1.335
#odd_background_DD_syst shape - 1.0
#l4_ch1_signal_stat shape1 1.0 -
#signal_reco shape1 1.0 -
#signal_th shape1 1.0 -
print card
cardfile = open('wv_semil_%sboosted.txt'%(codename),'w')
cardfile.write(card)
    cardfile.close()
|
[
"[email protected]"
] | |
744c5eaa6cb3ca445d2636ef9869716a03f6577a
|
498e792e16ab1a74ac034c53177c4cccbeef2749
|
/classification/swin_transformer/utils.py
|
96ad54a4b596ffeb720d7ffd0dac0cc47157abe1
|
[] |
no_license
|
ydwisroad/imageprocessingpytorch
|
f97bec4469c087f6bbbca5d42da180c95be8b13f
|
bd8d1af228619c9c6c9c1a2b880422f7d5048dd5
|
refs/heads/master
| 2023-07-29T05:05:11.145832 | 2022-02-21T23:32:03 | 2022-02-21T23:32:03 | 284,976,501 | 7 | 3 | null | 2023-07-24T01:08:22 | 2020-08-04T12:43:24 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 6,729 |
py
|
import os
import sys
import json
import pickle
import random
import torch
from tqdm import tqdm
import matplotlib.pyplot as plt
def read_split_data(root: str, val_rate: float = 0.2):
    random.seed(0)  # make the random split reproducible
    assert os.path.exists(root), "dataset root: {} does not exist.".format(root)
    # traverse the root folder; each sub-folder corresponds to one class
    flower_class = [cla for cla in os.listdir(root) if os.path.isdir(os.path.join(root, cla))]
    # sort to guarantee a consistent class order
    flower_class.sort()
    # build the mapping from class name to numeric index
class_indices = dict((k, v) for v, k in enumerate(flower_class))
json_str = json.dumps(dict((val, key) for key, val in class_indices.items()), indent=4)
with open('class_indices.json', 'w') as json_file:
json_file.write(json_str)
    train_images_path = []  # paths of all training-set images
    train_images_label = []  # class indices of the training-set images
    val_images_path = []  # paths of all validation-set images
    val_images_label = []  # class indices of the validation-set images
    every_class_num = []  # total number of samples per class
    supported = [".jpg", ".JPG", ".png", ".PNG"]  # supported file extensions
    # iterate over the files inside each class folder
    for cla in flower_class:
        cla_path = os.path.join(root, cla)
        # collect the paths of all files with a supported extension
        images = [os.path.join(root, cla, i) for i in os.listdir(cla_path)
                  if os.path.splitext(i)[-1] in supported]
        # numeric index of this class
        image_class = class_indices[cla]
        # record the number of samples in this class
        every_class_num.append(len(images))
        # randomly sample validation images at the given rate
        val_path = random.sample(images, k=int(len(images) * val_rate))
        for img_path in images:
            if img_path in val_path:  # path was sampled for validation -> store in the validation set
                val_images_path.append(img_path)
                val_images_label.append(image_class)
            else:  # otherwise store in the training set
                train_images_path.append(img_path)
                train_images_label.append(image_class)
print("{} images were found in the dataset.".format(sum(every_class_num)))
print("{} images for training.".format(len(train_images_path)))
print("{} images for validation.".format(len(val_images_path)))
plot_image = False
if plot_image:
        # bar chart of the number of samples in each class
        plt.bar(range(len(flower_class)), every_class_num, align='center')
        # replace the x ticks 0,1,2,3,4 with the corresponding class names
        plt.xticks(range(len(flower_class)), flower_class)
        # add a value label above each bar
        for i, v in enumerate(every_class_num):
            plt.text(x=i, y=v + 5, s=str(v), ha='center')
        # x-axis label
        plt.xlabel('image class')
        # y-axis label
        plt.ylabel('number of images')
        # chart title
        plt.title('flower class distribution')
plt.show()
return train_images_path, train_images_label, val_images_path, val_images_label
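# Hedged usage sketch (hypothetical dataset directory with one sub-folder per class):
#   train_paths, train_labels, val_paths, val_labels = read_split_data("flower_photos", val_rate=0.2)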
def plot_data_loader_image(data_loader):
batch_size = data_loader.batch_size
plot_num = min(batch_size, 4)
json_path = './class_indices.json'
assert os.path.exists(json_path), json_path + " does not exist."
json_file = open(json_path, 'r')
class_indices = json.load(json_file)
for data in data_loader:
images, labels = data
for i in range(plot_num):
# [C, H, W] -> [H, W, C]
img = images[i].numpy().transpose(1, 2, 0)
            # undo the Normalize transform
            img = (img * [0.229, 0.224, 0.225] + [0.485, 0.456, 0.406]) * 255
            label = labels[i].item()
            plt.subplot(1, plot_num, i+1)
            plt.xlabel(class_indices[str(label)])
            plt.xticks([])  # remove the x-axis ticks
            plt.yticks([])  # remove the y-axis ticks
plt.imshow(img.astype('uint8'))
plt.show()
def write_pickle(list_info: list, file_name: str):
with open(file_name, 'wb') as f:
pickle.dump(list_info, f)
def read_pickle(file_name: str) -> list:
with open(file_name, 'rb') as f:
info_list = pickle.load(f)
return info_list
def train_one_epoch(model, optimizer, data_loader, device, epoch):
model.train()
loss_function = torch.nn.CrossEntropyLoss()
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
    accu_num = torch.zeros(1).to(device)  # accumulated number of correctly predicted samples
optimizer.zero_grad()
sample_num = 0
data_loader = tqdm(data_loader, file=sys.stdout)
for step, data in enumerate(data_loader):
images, labels = data
sample_num += images.shape[0]
pred = model(images.to(device))
pred_classes = torch.max(pred, dim=1)[1]
accu_num += torch.eq(pred_classes, labels.to(device)).sum()
loss = loss_function(pred, labels.to(device))
loss.backward()
accu_loss += loss.detach()
data_loader.desc = "[train epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch,
accu_loss.item() / (step + 1),
accu_num.item() / sample_num)
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss)
sys.exit(1)
optimizer.step()
optimizer.zero_grad()
return accu_loss.item() / (step + 1), accu_num.item() / sample_num
@torch.no_grad()
def evaluate(model, data_loader, device, epoch):
loss_function = torch.nn.CrossEntropyLoss()
model.eval()
    accu_num = torch.zeros(1).to(device)  # accumulated number of correctly predicted samples
    accu_loss = torch.zeros(1).to(device)  # accumulated loss
sample_num = 0
data_loader = tqdm(data_loader, file=sys.stdout)
for step, data in enumerate(data_loader):
images, labels = data
sample_num += images.shape[0]
pred = model(images.to(device))
pred_classes = torch.max(pred, dim=1)[1]
accu_num += torch.eq(pred_classes, labels.to(device)).sum()
loss = loss_function(pred, labels.to(device))
accu_loss += loss
data_loader.desc = "[valid epoch {}] loss: {:.3f}, acc: {:.3f}".format(epoch,
accu_loss.item() / (step + 1),
accu_num.item() / sample_num)
return accu_loss.item() / (step + 1), accu_num.item() / sample_num
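# Hedged usage sketch (model, loaders, optimizer and device are hypothetical):
#   for epoch in range(epochs):
#       train_loss, train_acc = train_one_epoch(model, optimizer, train_loader, device, epoch)
#       val_loss, val_acc = evaluate(model, val_loader, device, epoch)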
|
[
"[email protected]"
] | |
1acb498dd3826b34f113982a9323dd004030e092
|
5335098059388245cf65f0b0c791f1c3c910b74d
|
/mysite/settings.py
|
9dccea7de9f3ab318d24763e77c548922f870a70
|
[] |
no_license
|
Mariia1991/my-first-blog
|
c333cbcf3648b12e1018a23da3995a76eb8be612
|
91fc87e054ccf95f7da422ea1f6453c5d96fec19
|
refs/heads/master
| 2021-01-10T16:17:43.171755 | 2016-03-06T10:42:57 | 2016-03-06T10:42:57 | 53,248,539 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,706 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sv7qfi55@*-no03^gl0!nqb5p=62+r=i5+4ox5u2ad9%xwq6qh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"[email protected]"
] | |
18e9b2b74f11283b6321cc911e118ff92ad27bc1
|
0c6b4e9c5ecc5a7595717f9699953b227486ef3e
|
/tests/unit/modules/remote_management/oneview/test_oneview_network_set.py
|
925b136aea929159834fde6379a0dea5fd372036
|
[] |
no_license
|
ansible-collection-migration/ansible.misc
|
d9c92e8bb0c17b3e2a92976215f523c2afaa5a46
|
3c02be2a8c03b2e375a1e1f37b0c119145ea358c
|
refs/heads/master
| 2020-12-26T23:11:36.544511 | 2020-02-03T22:18:53 | 2020-02-03T22:18:53 | 237,681,535 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,364 |
py
|
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible_collections.ansible.misc.tests.unit.compat import unittest, mock
from .hpe_test_utils import OneViewBaseTestCase
from .oneview_module_loader import NetworkSetModule
FAKE_MSG_ERROR = 'Fake message error'
NETWORK_SET = dict(
name='OneViewSDK Test Network Set',
networkUris=['/rest/ethernet-networks/aaa-bbb-ccc']
)
NETWORK_SET_WITH_NEW_NAME = dict(name='OneViewSDK Test Network Set - Renamed')
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name=NETWORK_SET['name'],
networkUris=['/rest/ethernet-networks/aaa-bbb-ccc'])
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(name=NETWORK_SET['name'],
newName=NETWORK_SET['name'] + " - Renamed",
networkUris=['/rest/ethernet-networks/aaa-bbb-ccc', 'Name of a Network'])
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=NETWORK_SET['name'])
)
class NetworkSetModuleSpec(unittest.TestCase,
OneViewBaseTestCase):
"""
OneViewBaseTestCase has common tests for class constructor and main function,
also provides the mocks used in this test case.
"""
def setUp(self):
self.configure_mocks(self, NetworkSetModule)
self.resource = self.mock_ov_client.network_sets
self.ethernet_network_client = self.mock_ov_client.ethernet_networks
def test_should_create_new_network_set(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = NETWORK_SET
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=NetworkSetModule.MSG_CREATED,
ansible_facts=dict(network_set=NETWORK_SET)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by.return_value = [NETWORK_SET]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=NetworkSetModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(network_set=NETWORK_SET)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = dict(name=NETWORK_SET['name'] + " - Renamed",
networkUris=['/rest/ethernet-networks/aaa-bbb-ccc',
'/rest/ethernet-networks/ddd-eee-fff']
)
self.resource.get_by.side_effect = [NETWORK_SET], []
self.resource.update.return_value = data_merged
self.ethernet_network_client.get_by.return_value = [{'uri': '/rest/ethernet-networks/ddd-eee-fff'}]
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=NetworkSetModule.MSG_UPDATED,
ansible_facts=dict(network_set=data_merged)
)
def test_should_raise_exception_when_ethernet_network_not_found(self):
self.resource.get_by.side_effect = [NETWORK_SET], []
self.ethernet_network_client.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
NetworkSetModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(
exception=mock.ANY,
msg=NetworkSetModule.MSG_ETHERNET_NETWORK_NOT_FOUND + "Name of a Network"
)
def test_should_remove_network(self):
self.resource.get_by.return_value = [NETWORK_SET]
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=NetworkSetModule.MSG_DELETED
)
def test_should_do_nothing_when_network_set_not_exist(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
NetworkSetModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=NetworkSetModule.MSG_ALREADY_ABSENT
)
def test_update_scopes_when_different(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = NETWORK_SET.copy()
resource_data['scopeUris'] = ['fake']
resource_data['uri'] = 'rest/network-sets/fake'
self.resource.get_by.return_value = [resource_data]
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
self.resource.patch.return_value = patch_return
NetworkSetModule().run()
self.resource.patch.assert_called_once_with('rest/network-sets/fake',
operation='replace',
path='/scopeUris',
value=['test'])
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(network_set=patch_return),
msg=NetworkSetModule.MSG_UPDATED
)
def test_should_do_nothing_when_scopes_are_the_same(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = NETWORK_SET.copy()
resource_data['scopeUris'] = ['test']
self.resource.get_by.return_value = [resource_data]
NetworkSetModule().run()
        self.resource.patch.assert_not_called()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(network_set=resource_data),
msg=NetworkSetModule.MSG_ALREADY_PRESENT
)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
f07650432096a5e2ce129c31565a46449a0bc4ac
|
9fc6604ae98e1ae91c490e8201364fdee1b4222a
|
/eg_unifonic_sms/wizards/__init__.py
|
ac6faede52edcd98fc0b73903df17624720de3c3
|
[] |
no_license
|
nabiforks/baytonia
|
b65e6a7e1c7f52a7243e82f5fbcc62ae4cbe93c4
|
58cb304d105bb7332f0a6ab685015f070988ba56
|
refs/heads/main
| 2023-03-23T21:02:57.862331 | 2021-01-04T03:40:58 | 2021-01-04T03:40:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39 |
py
|
from . import unifonic_post_sms_wizard
|
[
"[email protected]"
] | |
864af22112c10a166d04ae1997c58bc756b43a51
|
f3f38a66daddccbb7fd0bfc1cacce40c7ad31010
|
/tippecanoe-downloads.py
|
0f09b1da5d9ca1e7df0f83a2ca8a0e864b40bc63
|
[] |
no_license
|
kimballjohnson/dotmaps
|
386b5b87ce757412eeb7712def8bb595cc59e98f
|
09c9a3ceb16ba7f350247eee9a3b65ddb53fe290
|
refs/heads/master
| 2021-09-12T09:28:11.772233 | 2017-03-01T19:44:56 | 2017-03-01T19:44:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,221 |
py
|
import sys, csv, zipfile, os, itertools, io, json, tempfile, subprocess
OA_PROPERTIES = 'HASH', 'NUMBER', 'STREET', 'UNIT', 'CITY', 'POSTCODE'
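# Group the downloaded OpenAddresses runs by set_id and stream each group into
# tippecanoe as line-delimited GeoJSON on stdin, writing one .mbtiles file per set.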
with open('downloaded/files.csv') as file:
run_rows = csv.DictReader(file)
set_key = lambda run_row: int(run_row['set_id'])
sorted_rows = sorted(run_rows, key=set_key, reverse=False)
grouped_rows = itertools.groupby(sorted_rows, set_key)
for (set_id, runs) in grouped_rows:
print('Starting set', set_id, '...', file=sys.stderr)
mbtiles_filename = 'set_{}.mbtiles'.format(set_id)
cmd = 'tippecanoe', '-l', 'dots', '-r', '3', \
'-n', 'OpenAddresses Dots, Set {}'.format(set_id), '-f', \
'-t', tempfile.gettempdir(), '-o', mbtiles_filename
print(' '.join(cmd), file=sys.stderr)
tippecanoe = subprocess.Popen(cmd, stdin=subprocess.PIPE, bufsize=1)
for run_row in runs:
data_path = os.path.join('downloaded', run_row['path'])
_, data_ext = os.path.splitext(data_path)
if data_ext == '.csv':
csv_buff = open(data_path)
elif data_ext == '.zip':
zip = zipfile.ZipFile(data_path)
(csv_name, ) = [name for name in zip.namelist()
if os.path.splitext(name)[1] == '.csv']
csv_buff = io.TextIOWrapper(zip.open(csv_name))
for csv_row in csv.DictReader(csv_buff):
try:
x, y = float(csv_row['LON']), float(csv_row['LAT'])
except ValueError:
continue
else:
geometry = dict(type='Point', coordinates=[x, y])
properties = {key.lower(): csv_row.get(key, '') for key in OA_PROPERTIES}
properties.update(source_path=run_row['source_path'])
feature = dict(type='Feature', geometry=geometry, properties=properties)
tippecanoe.stdin.write(json.dumps(feature).encode('utf8'))
tippecanoe.stdin.write(b'\n')
#break
tippecanoe.stdin.close()
tippecanoe.wait()
#break
|
[
"[email protected]"
] | |
01eb492e08352e8b7c503545b15924f421b2b23a
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/choroplethmapbox/_reversescale.py
|
50dd2164ab3093d034fb5a3d4f8f4c172e533564
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 |
MIT
| 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null |
UTF-8
|
Python
| false | false | 435 |
py
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="reversescale", parent_name="choroplethmapbox", **kwargs
):
super(ReversescaleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
|
[
"[email protected]"
] | |
ffa5db07a8c27e2c3241e1df2652096ea125e9a5
|
fc73e7249e227e5507976bd3825af037fbe6b46b
|
/legacy/geraldCode.save.py
|
739942b2397f49a83ea6644f3d89cbc5392f3f20
|
[
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
mussard/SecondQuantizationAlgebra
|
32d10d85abae82da343c9b41764802f3f541d551
|
ee32159e24d510654a6d38df391b544ec9ffeb4a
|
refs/heads/master
| 2020-03-17T21:46:28.875095 | 2019-07-10T17:31:26 | 2019-07-10T17:31:26 | 133,974,911 | 0 | 0 | null | 2018-05-18T15:50:13 | 2018-05-18T15:50:13 | null |
UTF-8
|
Python
| false | false | 29,699 |
py
|
import secondQuantizationAlgebra as sqa
def replaceindex(tensor, a, b) :
for i in range(len(tensor.indices)):
if (tensor.indices[i].name == a):
tensor.indices[i] = b
def replaceAllKdeltaWithDeltas(term, rdmDelta):
import string
l = list(string.ascii_lowercase) #list of all printables
usedIndices = []
for t in term.tensors:
for index in t.indices:
usedIndices.append(index.name)
unUsedList = sorted(list(set(l) - set(usedIndices)))
Deltas = []
import copy
tensorcopy = copy.deepcopy(term.tensors)
removeDelta = []
for t in tensorcopy:
if (t.name == "kdelta"):
if (t.indices[0].indType[0] == sqa.options.core_type):
Deltas.append(rdmDelta[0].copy())
elif (t.indices[0].indType[0] == sqa.options.active_type):
Deltas.append(rdmDelta[1].copy())
elif (t.indices[0].indType[0] == sqa.options.virtual_type):
Deltas.append(rdmDelta[2].copy())
Deltas[-1].indices[0].name = t.indices[0].name
Deltas[-1].indices[1].name = t.indices[1].name
removeDelta.append(t)
# term.tensors.remove(t)
# break
for t in removeDelta:
# print(t)
term.tensors.remove(t)
for d in Deltas:
# print(d)
term.tensors.append(d)
# exit(0)
return term
def replaceSingleKdeltaWithDeltas(term, rdmDelta):
import string
l = list(string.ascii_lowercase) #list of all printables
usedIndices = []
for t in term.tensors:
for index in t.indices:
usedIndices.append(index.name)
unUsedList = sorted(list(set(l) - set(usedIndices)))
Deltas = []
import copy
tensorcopy = copy.copy(term.tensors)
numNonDeltaTensors = 0
for t in tensorcopy:
if (t.name != "kdelta"):
numNonDeltaTensors += 1
if (numNonDeltaTensors>0):
return term
for t in tensorcopy:
if (t.name == "kdelta"):
if (t.indices[0].indType[0] == sqa.options.core_type):
Deltas.append(rdmDelta[0].copy())
elif (t.indices[0].indType[0] == sqa.options.active_type):
Deltas.append(rdmDelta[1].copy())
elif (t.indices[0].indType[0] == sqa.options.virtual_type):
Deltas.append(rdmDelta[2].copy())
Deltas[-1].indices[0].name = t.indices[0].name
Deltas[-1].indices[1].name = t.indices[1].name
term.tensors.remove(t)
break
for d in Deltas:
term.tensors.append(d)
return term
def replaceRepeatIndicesWithDeltas(term, rdmDelta):
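    # For every tensor (except kdelta), rename the second occurrence of any
    # index that appears twice to a fresh unused letter, and append the
    # matching core/active/virtual delta tensor tying the new index back to
    # the old one, so that no tensor carries a repeated index.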
import string
l = list(string.ascii_lowercase) #list of all printables
usedIndices = []
for t in term.tensors:
for index in t.indices:
usedIndices.append(index.name)
unUsedList = sorted(list(set(l) - set(usedIndices)))
Deltas = []
for t in term.tensors:
if (t.name == "kdelta"):
continue
uniques = list(set(t.indices))
numRepeats = len(uniques)*[0]
for index in t.indices:
numRepeats[ uniques.index(index) ] +=1
for i in range(len(numRepeats)):
if (numRepeats[i] > 2):
print("more than a double repeat in tensor ", t)
exit(0)
if (numRepeats[i] == 2):
repeatIndex = uniques[i]
repeatPositions = [j for j, x in enumerate(t.indices) if x == uniques[i]]
if ( len(repeatPositions) != 2):
print(uniques[i].name," should occur twice in ", t)
exit(0)
newName = unUsedList[0]
unUsedList.remove(newName)
t.indices[ repeatPositions[1] ].name = newName
if (t.indices[ repeatPositions[1] ].indType[0] == sqa.options.core_type):
Deltas.append(rdmDelta[0].copy())
elif (t.indices[ repeatPositions[1] ].indType[0] == sqa.options.active_type):
Deltas.append(rdmDelta[1].copy())
if (t.indices[ repeatPositions[1] ].indType[0] == sqa.options.virtual_type):
Deltas.append(rdmDelta[2].copy())
Deltas[-1].indices[0].name = newName
Deltas[-1].indices[1].name = uniques[i].name
for d in Deltas:
term.tensors.append( d)
return term
def printTensor(tensor, keymap):
string = tensor.name +"["
for i in range(len(tensor.indices)):
if (keymap.has_key(tensor.indices[i].name)):
string += keymap[tensor.indices[i].name]+","
else:
string += tensor.indices[i].name+","
string = string[:-1]+"]"
return string
def printIntTensor(tensor, activeInEinsum = False):
string = tensor.name +"["
for i in range(len(tensor.indices)):
if (tensor.indices[i].name[0]=="V"):
string += "nc:,"
elif (tensor.indices[i].name[0] == "A"):
if (not activeInEinsum):
string += tensor.indices[i].name+"+ncore,"
else:
string += "ncore:nc,"
elif (tensor.indices[i].name[0] == "C"):
string += tensor.indices[i].name+","
elif (len(tensor.indices[i].name[0]) == 1):
if (len(tensor.indices[i].indType) > 1):
print("Something wrong index is a composite of core/active/virtual")
exit(0)
elif (tensor.indices[i].indType[0] == sqa.options.core_type) :
string += ":ncore,"
elif (tensor.indices[i].indType[0] == sqa.options.active_type) :
string += "ncore:nc,"
elif (tensor.indices[i].indType[0] == sqa.options.virtual_type) :
string += "nc:,"
else :
print("index seems to be neither dummy nor defined")
exit(0)
string = string[:-1]+"]"
return string
def printETensor(tensor, activeInEinsum = False):
string = tensor.name +"["
for i in range(len(tensor.indices)):
if (tensor.indices[i].name[0]=="V"):
print("RDM cannot have virtual index")
exit(0)
elif (tensor.indices[i].name[0] == "A"):
if (not activeInEinsum):
string += tensor.indices[i].name+","
else:
string += ":,"
elif (tensor.indices[i].name[0] == "C"):
print("RDM cannot have core index")
exit(0)
        elif (len(tensor.indices[i].name) == 1):
            if (len(tensor.indices[i].indType) > 1):
                print("Something is wrong: the index is a composite of core/active/virtual")
exit(0)
elif (tensor.indices[i].indType[0] == sqa.options.core_type) :
print("RDM cannot have core index")
exit(0)
elif (tensor.indices[i].indType[0] == sqa.options.active_type) :
string += ":,"
elif (tensor.indices[i].indType[0] == sqa.options.virtual_type) :
print("RDM cannot have virtual index")
exit(0)
else :
print("index seems to be neither dummy nor defined")
exit(0)
string = string[:-1]+"]"
return string
def writeTensors(AllTensors, CommentKey, Domains, Usage,commentE3=False):
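    # emit the FTensorDecl table for all tensors, optionally commenting out E3
    # entries; returns the number of rows that were not commented out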
UsageKey = {"A":"USAGE_Amplitude",\
"R":"USAGE_Residual",\
"H":"USAGE_Hamiltonian",\
"D":"USAGE_Density",\
"I":"USAGE_Intermediate"}
i = 0
not_commented=0
outString=''
for tensor in AllTensors:
if (CommentKey[tensor]=="E3" and commentE3):
intro='// /*{:3}*/'.format(i)
else:
intro=' /*{:3}*/'.format(i)
not_commented+=1
outString += intro+'{{"{:8}, "{:10}, "", {:18}}},\n'\
.format(CommentKey[tensor]+'"',\
Domains[i]+'"',\
UsageKey[Usage[i]])
i += 1
outString = " FTensorDecl TensorDecls[%i] = {\n"%(not_commented)\
+outString[:-1]+"\n };\n"
print(outString)
return not_commented
def WriteCodeSimple(result, AllTensors, commentTensor, scale=1.0, commentE3=False):
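    # emit the FEqInfo equation table: one row per term holding the index
    # string, the scaled constant, the tensor count and the tensor numbers,
    # followed by an inline comment spelling out the contraction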
tensorIndexes=[]
middleLine =[]
tensorNumbers=[]
commentLine =[]
commented =[]
for t in result:
commented.append(False)
# tensorIndexes
tensorIndexStringList = []
for i in range(len(t.tensors)):
tensor = t.tensors[i]
tensorIndexString = ''
for index in range(len(tensor.indices)):
if ( len(tensor.indices[index].name) > 1):
tensorIndexString += tensor.indices[index].name[-1].capitalize()
else :
tensorIndexString += tensor.indices[index].name[0]
tensorIndexStringList.append(tensorIndexString) #tensor index string of output
indexes=''
for indexstring in tensorIndexStringList:
indexes += indexstring+','
tensorIndexes.append('{"'+indexes[:-1]+'",')
# middleLine
middleLine.append('{:6}, {:3}, {{'.format(t.numConstant*scale,len(tensorIndexStringList)))
# tensorNumbers and commentLine
index = 1
indexes=''
commentString = ' //{:6} '.format(t.numConstant*scale)
for i in range(len(t.tensors)):
tensor = t.tensors[i]
if ((tensor.name=="E3" and commentE3) or (tensor.name=="int2v")):
commented[-1]=True
indexes+= '{:2},'.format(AllTensors.index(tensor.name))
commentString += commentTensor[tensor.name]+'['+tensorIndexStringList[index-1]+'] '
index += 1
commentLine.append(commentString)
tensorNumbers.append(indexes[:-1]+'}},')
width1=len(max(tensorIndexes, key=len))
width2=len(max(tensorNumbers, key=len))
print("\tFEqInfo EqsRes[%i] = {" %(commented.count(False)))
for i in range(len(tensorIndexes)):
if commented[i]:
print('// {:{width1}}{:}{:{width2}}{:}'.format(tensorIndexes[i],middleLine[i],tensorNumbers[i],commentLine[i],width1=width1,width2=width2))
else:
print(' {:{width1}}{:}{:{width2}}{:}'.format(tensorIndexes[i],middleLine[i],tensorNumbers[i],commentLine[i],width1=width1,width2=width2))
print("\t};\n")
return commented.count(False)
def WriteCode(result, tensors):
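    # emit equation rows for the Va/Vb virtual block, folding kdelta factors
    # on Vc/Vd into index renamings instead of printing them as tensors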
outString = ""
for t in result:
tensorString = ""
ifstatement = "if"
tensorcopy = t.tensors
dontprint= []
indexKey = {'Va': 'Va', 'Vb' : 'Vb'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Vc" or tensor.indices[0].name == "Vd") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Vc" or tensor.indices[1].name == "Vd") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
                    ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and"
if (len(ifstatement) != 2) :
outString += ifstatement[:-3]+" : "
outString += '\t\t{"CDRS,'
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if ( len(tensor.indices[index].name) > 1):
outString += tensor.indices[index].name[-1].capitalize()
else :
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Va"][-1].capitalize()+indexKey["Vb"][-1].capitalize()+'PQ", '+str(t.numConstant)+", 4 , {1"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
outString+= " , "+ str(tensors[tensor.name])
outString += ", 0}},\n"
print(outString[:-1]+"\n\t};")
def WriteCode_ccaa(result, SupressActive, intmapkey, RDMmapkey, activeInEinsum = False):
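    # emit numpy.einsum code for the ccaa block; kdeltas over the input
    # indices are folded into index renamings and an if-guard, and with
    # SupressActive the contraction is written directly into Cout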
if (SupressActive):
for t in result:
tensorString = ""
ifstatement = "\tif "
tensorcopy = t.tensors
dontprint= []
indexKey = {'Ap': 'Ap', 'Aq' : 'Aq'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Ar" or tensor.indices[0].name == "As") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Ar" or tensor.indices[1].name == "As") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and "
            #start by printing the if statement
outString = ""
if (len(ifstatement) != 4) :
outString += ifstatement[:-4]+" : \n\t"
            #now print the einsum string
outString += "\tCout += ("+ str(t.numConstant)+ ") *numpy.einsum( '"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if (tensor.name[:3] == "int" and len(tensor.indices[index].name) > 1 and tensor.indices[index].name[0] == "A"):
outString += tensor.indices[index].name[-1].capitalize()
elif (tensor.name[:1] == "E" and len(tensor.indices[index].name) > 1 and tensor.indices[index].name[0] == "A"):
outString += tensor.indices[index].name[-1].capitalize()
elif len(tensor.indices[index].name) == 1:
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Ap"][-1].capitalize()+indexKey["Aq"][-1].capitalize()+" -> RS' "
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
if (tensor.name[:3] == "int") :
outString+= " , "+ printIntTensor(tensor, activeInEinsum)
elif (tensor.name[0] == "E"):
outString+= " , "+ printETensor(tensor, activeInEinsum)
else:
outString+= " , "+ printTensor(tensor, {})
outString += " , Cin)"
print(outString)
else :
for t in result:
tensorString = ""
ifstatement = "if"
tensorcopy = t.tensors
dontprint= []
indexKey = {'Va': 'Va', 'Vb' : 'Vb'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Vc" or tensor.indices[0].name == "Vd") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Vc" or tensor.indices[1].name == "Vd") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
                        ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and"
outString = ""
if (len(ifstatement) != 2) :
outString += ifstatement[:-3]+" : "
outString += "\t Cout[Vc,Vd,Ar,As] += ("+ str(t.numConstant)+ ") *numpy.einsum( '"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if ( len(tensor.indices[index].name) > 1):
outString += tensor.indices[index].name[-1].capitalize()
else :
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Va"][-1].capitalize()+indexKey["Vb"][-1].capitalize()+"PQ -> CDRS' "
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
outString+= " , "+ tensor.__str__()
outString += " , Cin[" + indexKey["Va"]+ ","+ indexKey["Vb"]+ ",Ap,Aq])"
print(outString)
def WriteCode_ccav(result, SupressActive, intmapkey, RDMmapkey, activeInEinsum = False):
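    # same strategy as WriteCode_ccaa, but for the ccav block with input
    # indices Ap/Va and kdeltas over Aq/Vb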
if (SupressActive):
for t in result:
tensorString = ""
ifstatement = "\tif "
tensorcopy = t.tensors
dontprint= []
indexKey = {'Ap': 'Ap', 'Va' : 'Va'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Aq" or tensor.indices[0].name == "Vb") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Aq" or tensor.indices[1].name == "Vb") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and "
            #start by printing the if statement
outString = ""
if (len(ifstatement) != 4) :
outString += ifstatement[:-4]+" : \n\t"
            #now print the einsum string
outString += "\tCout += ("+ str(t.numConstant)+ ") *numpy.einsum( '"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if (tensor.name[:3] == "int" and len(tensor.indices[index].name) > 1 and (tensor.indices[index].name[0] == "A" or tensor.indices[index].name[0] == "V")):
outString += tensor.indices[index].name[-1].capitalize()
elif (tensor.name[:1] == "E" and len(tensor.indices[index].name) > 1 and (tensor.indices[index].name[0] == "A" or tensor.indices[index].name[0] == "V")):
outString += tensor.indices[index].name[-1].capitalize()
elif len(tensor.indices[index].name) == 1:
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Ap"][-1].capitalize()+indexKey["Va"][-1].capitalize()+" -> QB' "
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
if (tensor.name[:3] == "int") :
outString+= " , "+ printIntTensor(tensor, True)
elif (tensor.name[0] == "E"):
outString+= " , "+ printETensor(tensor, True)
else:
outString+= " , "+ printTensor(tensor, {})
outString += " , Cin)"
print(outString)
else :
for t in result:
tensorString = ""
ifstatement = "if"
tensorcopy = t.tensors
dontprint= []
indexKey = {'Va': 'Va', 'Vb' : 'Vb'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Vc" or tensor.indices[0].name == "Vd") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Vc" or tensor.indices[1].name == "Vd") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
                        ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and"
outString = ""
if (len(ifstatement) != 2) :
outString += ifstatement[:-3]+" : "
outString += "\t Cout[Vc,Vd,Ar,As] += ("+ str(t.numConstant)+ ") *numpy.einsum( '"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if ( len(tensor.indices[index].name) > 1):
outString += tensor.indices[index].name[-1].capitalize()
else :
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Va"][-1].capitalize()+indexKey["Vb"][-1].capitalize()+"PQ -> CDRS' "
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
outString+= " , "+ tensor.__str__()
outString += " , Cin[" + indexKey["Va"]+ ","+ indexKey["Vb"]+ ",Ap,Aq])"
print(outString)
def WriteCode_caav(result, SupressActive, intmapkey, RDMmapkey, activeInEinsum = False):
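    # same strategy for the caav block: input indices Ap/Aq/Va and kdeltas
    # over Ar/As/Vb; only the SupressActive form is generated here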
if (SupressActive):
for t in result:
tensorString = ""
ifstatement = "\tif "
tensorcopy = t.tensors
dontprint= []
indexKey = {'Ap': 'Ap', 'Aq' : 'Aq', 'Va' : 'Va'}
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
dontprint.append(i)
if (tensor.indices[0].name == "Ar" or tensor.indices[0].name == "As" or tensor.indices[0].name == "Vb") :
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
elif (tensor.indices[1].name == "Ar" or tensor.indices[1].name == "As" or tensor.indices[1].name == "Vb") :
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
else :
ifstatement += tensor.indices[0].name +" == " +tensor.indices[1].name + " and "
            #start by printing the if statement
outString = ""
if (len(ifstatement) != 4) :
outString += ifstatement[:-4]+" : \n\t"
            #now print the einsum string
outString += "\tCout += ("+ str(t.numConstant)+ ") *numpy.einsum( '"
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
for index in range(len(tensor.indices)):
if (tensor.name[:3] == "int" and len(tensor.indices[index].name) > 1 and (tensor.indices[index].name[0] == "A" or tensor.indices[index].name[0] == "V")):
outString += tensor.indices[index].name[-1].capitalize()
elif (tensor.name[:1] == "E" and len(tensor.indices[index].name) > 1 and (tensor.indices[index].name[0] == "A" or tensor.indices[index].name[0] == "V")):
outString += tensor.indices[index].name[-1].capitalize()
elif len(tensor.indices[index].name) == 1:
outString += tensor.indices[index].name[0]
outString += " ,"
outString += indexKey["Ap"][-1].capitalize()+indexKey["Aq"][-1].capitalize()+indexKey["Va"][-1].capitalize()+" -> RSB' "
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
if (tensor.name[:3] == "int") :
outString+= " , "+ printIntTensor(tensor, True)
elif (tensor.name[0] == "E"):
outString+= " , "+ printETensor(tensor, True)
else:
outString+= " , "+ printTensor(tensor, {})
outString += " , Cin)"
print(outString)
def WriteCode_lcc(result, AllTensors, inputIndices, outIndicesString, commentTensor, inputtensorname="p", outputtensorname="Ap", EquationName="EqsRes", scale=1.0):
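    # general FEqInfo emitter: kdeltas touching an input index are folded into
    # indexKey renamings, and every surviving term becomes one row of the form
    # {"indices", constant, ntensors, {output, tensors..., input}} plus a comment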
outString = ""
for t in result:
tensorString = ""
indexKey = {}
for index in inputIndices:
indexKey[index] = index
tensorcopy = t.tensors
dontprint= []
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
#check the delta function
if (tensor.name == "kdelta"):
                #take each delta function: if one of its indices is an input index, replace it with the other index in all tensors
if (tensor.indices[0].name in indexKey):
dontprint.append(i)
indexKey[tensor.indices[0].name] = tensor.indices[1].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[1].name, tensor.indices[0].name)
elif (tensor.indices[1].name in indexKey):
dontprint.append(i)
indexKey[tensor.indices[1].name] = tensor.indices[0].name
for j in range(len(t.tensors)):
if (j not in dontprint):
replaceindex(t.tensors[j], tensor.indices[0].name, tensor.indices[1].name)
tensorIndexStringList = [outIndicesString] #output tensor indices
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
tensorIndexString = ""
if(i not in dontprint):
for index in range(len(tensor.indices)):
if ( len(tensor.indices[index].name) > 1):
tensorIndexString += tensor.indices[index].name[-1].capitalize()
else :
tensorIndexString += tensor.indices[index].name[0]
tensorIndexStringList.append(tensorIndexString) #tensor index string of output
inputIndicesString = ""
for key in inputIndices:
inputIndicesString += indexKey[key][-1].capitalize()
if (inputIndicesString != ""):
tensorIndexStringList.append(inputIndicesString)
#now make the string for the equation
commentString = "\t\t//"
outString += "\t\t{\""
for indexstring in tensorIndexStringList:
outString += indexstring+","
outString = outString[:-1] +"\","
outString +=" "+ str(t.numConstant*scale)+" , "+str(len(tensorIndexStringList))+", {"+str(AllTensors.index(outputtensorname))
commentString += outputtensorname+"["+tensorIndexStringList[0]+"] += "+str(t.numConstant*scale)+" "
index = 1
for i in range(len(tensorcopy)):
tensor = tensorcopy[i]
if(i not in dontprint):
outString+= ","+ str(AllTensors.index(tensor.name))
commentString += commentTensor[tensor.name]+"["+tensorIndexStringList[index]+"] "
index += 1
commentString += inputtensorname+"["+inputIndicesString+"]"
if (inputtensorname != "") :
outString += ","+str(AllTensors.index(inputtensorname)) +"}},"+commentString+ "\n"
else:
outString = outString[:-1]+"}},"+commentString+ "\n"
print(outString[:-1])
|
[
"[email protected]"
] | |
e748ebb1dc90bf83276047fd9bf39673e39504bb
|
7d1fd87e1aaf7e6b2ea72ab086a128d03ab059f1
|
/Python_Web_Udemy/Udemy_REST_APIs/4_FLASK_RESFUL_MYSQLITE/create_table.py
|
62f58b871ea5f67e59f094c3c47fae03970e0dbb
|
[] |
no_license
|
efren1990/codepy
|
05fb34fb608d9921cd5b1c257a9869f2d42eafae
|
1bd957e7a7285d459ba76e99c4bccb8dbabf8da4
|
refs/heads/main
| 2023-06-30T06:31:51.154519 | 2021-08-13T01:30:57 | 2021-08-13T01:30:57 | 395,486,511 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
"""
#APLICACION FLASK RESTFUL SQLITE3
----------------------------------------------------------------
Archivo para crear base de datos y tablas
"""
# Libreria Sqlite3 ------->
import sqlite3
# Conexion ------->
connection = sqlite3.connect('data.db')
# Cursor ------->
cursor = connection.cursor()
# Query table ------->
# INTEGER- ENTERO AUTOINCREMENTAL EN SQLITE3
create_table = "CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, username text, password text)"
# Ejecutar query table users------->
cursor.execute(create_table)
# Ejecutar query table items------->
create_table = "CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name text, price real)"
cursor.execute(create_table)
cursor.execute("INSERT INTO items VALUES(NULL, 'test', 9.99)")
# Commit ------->
connection.commit()
# Cierre ------->
connection.close()
|
[
"[email protected]"
] | |
f353297693f93cf1bb526a1b505a7bc7cceb929c
|
4a5f11b55e23999a82b62f5c72b44e9a36d24f63
|
/simplemooc/settings.py
|
16965576ab5e4b9506cda51fa320f5cf46a46247
|
[] |
no_license
|
diogo-alves/simplemooc
|
dca62bfcb2ea6357a551a5760778537f083b675c
|
cfec59f99888e4e23d41f020ff06bfdf39f70203
|
refs/heads/master
| 2022-05-10T10:32:18.686313 | 2019-06-04T19:30:43 | 2019-06-04T19:30:43 | 190,260,470 | 0 | 0 | null | 2022-04-22T21:34:44 | 2019-06-04T18:46:43 |
Python
|
UTF-8
|
Python
| false | false | 4,739 |
py
|
"""
Django settings for simplemooc project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import django_heroku
from decouple import config
from dj_database_url import parse as db_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ADMINS = [('Diogo Alves', '[email protected]')]
# Allow host headers for Heroku
ALLOWED_HOSTS = ['mymooc.herokuapp.com', '127.0.0.1', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'taggit',
'simplemooc.core',
'simplemooc.accounts',
'simplemooc.courses',
'simplemooc.forum',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
# Simplified static file serving.
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'simplemooc.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'simplemooc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': config(
'DATABASE_URL',
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3'),
cast=db_url
)
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MEDIA_ROOT = os.path.join(BASE_DIR, 'simplemooc', 'media')
MEDIA_URL = '/media/'
# E-mails
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_PORT = config('EMAIL_PORT', default=25, cast=int)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=False, cast=bool)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default='')
EMAIL_LINK_DOMAIN = config('EMAIL_LINK_DOMAIN', default='')
CONTACT_EMAIL = config('CONTACT_EMAIL', default='')
# Auth
LOGIN_URL = 'accounts:login'
LOGIN_REDIRECT_URL = 'core:home'
LOGOUT_URL = 'accounts:logout'
AUTH_USER_MODEL = 'accounts.User'
PASSWORD_RESET_TIMEOUT_DAYS = 2
# Activate Django-Heroku.
django_heroku.settings(locals())
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
[
"[email protected]"
] | |
82ae50b9ae51ccd60366426d9c0ace8f68d0e24f
|
b719159a9980ef876b6e67131322917796320eb6
|
/tests/test_package.py
|
d846944bfda91415c8afc57c8ce9f43cf63fa854
|
[
"BSD-3-Clause"
] |
permissive
|
rblack42/TikzBuilder
|
b4c369038c374fca32a0aa24c8366dbdd5d3bbdb
|
ec69517db422ebda947c6e236c0b9ea597c3a934
|
refs/heads/master
| 2020-03-10T15:18:33.651883 | 2018-04-19T01:24:15 | 2018-04-19T01:24:15 | 129,446,030 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 118 |
py
|
from TikzBuilder.Builder import Builder
def test_package():
tb = Builder()
assert "version" in tb.version()
|
[
"[email protected]"
] | |
cb7f7346fbcbae97170d156650e13081a6403c4e
|
c8c1c8b3327ae93ceaa671681d5f9eac0933e738
|
/mrgpylinux/femcalc/meshgrid/iomrg.py
|
f99b0df5e027c53c26122eaa74a6195950cb345e
|
[] |
no_license
|
gpspelle/acoustic-pollution
|
bbb2a6492b3d02d046cb533470affabcacb38409
|
ad80f1fd582f47ce679748bb6ac93ff3149fd445
|
refs/heads/master
| 2022-07-28T06:49:20.493083 | 2019-11-15T00:27:52 | 2019-11-15T00:27:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 45,951 |
py
|
__pyarmor__(__name__, __file__, b'...')  # pyarmor-obfuscated module body (~46 kB binary blob); original source not recoverable
\x8d\xf7\x26\xe0\xa2\xed\x2f\x3b\x7b\x0c\xd2\x6c\x7a\xa2\x8f\x38\xbf\x2c\xfb\xa9\x9c\x49\x00\x13\xf3\xe5\x75\x75\x74\x7c\xd4\x08\xd3\xa6\x24\x89\x57\x12\x5b\x8d\x21\xd3\x20\x54\xe2\x55\x6c\x63\x7c\xa2\xc1\xdc\xed\x88\x34\x43\x14\x9d\xb4\xeb\xff\xff\x2f\x1b\x20\x31\x77\x0c\x9c\xd6\x09\xdd\x8e\xc1\x44\xcf\xef\x89\x94\xcf\x03\x03\xe7\x0e\x7a\x9c\x10\x2a\x84\x6a\x6c\x26\x14\xe2\xa0\x8c\x95\x0c\xee\xea\x35\xc9\x53\xf4\xc9\x07\x55\xaa\xa6\x87\xa4\x7e\x42\xe2\xfd\xcc\xbd\x84\x74\xf3\x48\x92\xf0\x26\xd6\x01\x8b\xf2\x77\xaa\xd6\x36\xb2\xbb\x36\x43\xa3\xfa\xaf\x01\x09\xbc\xd6\x0e\x20\xb0\x51\xd2\x9d\x6f\x0a\x41\xfe\x36\xb8\x22\xcf\x29\x9b\x7d\xc6\xbf\xc8\x61\x46\xbd\xc9\x11\x8a\xe8\x73\xce\x4c\x9b\xd2\xc2\x4b\x5a\x24\x38\x32\x92\x1b\x04\x31\xa4\xa0\x6a\x24\xfa\x92\x12\x89\xca\x45\x79\x8f\x40\x18\x47\xe1\x93\x20\xa3\x4f\xa4\xed\xe7\x4e\x2b\xc9\xf2\x2a\xe7\x5e\xce\x1e\x17\x65\x9c\x6e\x09\x49\x1a\x30\xf3\x55\xf0\x73\xfc\xf7\x6b\x46\x5f\xbd\x67\x66\x80\x9d\xb1\x70\x0f\x6e\x64\x9b\xe3\x17\x86\x57\x12\xb5\xb4\x68\x90\xc2\xe9\x11\x33\xd6\x9f\xd6\x12\xa5\x42\xab\xfb\x33\xf7\x37\x16\x9f\xd8\x78\x24\x6a\x9e\x3a\xab\xf3\x92\xdb\x34\x3b\x48\x17\xa9\x9d\xcf\xda\x9d\x6c\xe4\xba\x2f\x23\xd0\x23\x2c\x1c\x1b\xde\x8c\x96\x27\x6f\x04\x9b\xa0\x4a\xb4\x08\x12\x29\x4e\xc9\x98\x1a\x68\x3c\xed\x5a\xb1\x87\x7f\x6f\x60\x73\x39\xaa\xb7\x5e\x35\x90\x24\xb5\x74\x8f\x03\xb4\xee\x87\xdf\xc7\x67\xc1\x71\xa1\x47\x25\x76\x09\x0e\x29\x1f\x04\x2b\xae\xa3\x02\x24\x79\x2f\x33\x50\x0d\xc5\xd1\x47\x33\x58\xf8\xc4\xd6\x81\x9b\x84\x3e\x91\xd9\xa3\x98\xf6\x88\x63\xb3\x96\xe8\x3d\xd4\xe1\x62\x7d\x1c\x3b\xce\xc4\x91\xaa\x95\x73\x9b\xfc\x74\x53\xea\x23\x84\xe0\x82\x62\xd3\xa8\x93\x09\xbe\x1c\xbd\xd5\xb1\xcc\xcb\xa7\x36\x3f\x46\xad\x7f\xc1\x08\xa0\x91\xbc\x32\x5c\xf5\xba\xe1\x3f\xde\x8a\xaa\x51\x67\x6a\x60\x13\x8b\x11\x82\xdd\x16\x0b\x8c\x0a\xc2\x45\xc3\x5a\x73\x3a\xff\x1b\xe4\xba\x50\x88\xce\xd0\xf7\x3b\x3d\x55\xc9\x84\x2c\x77\xcf\xd4\xa0\x98\x39\x1b\xcf\x83\xc8\xbb\x47\x0c\x74\x77\xec\xde\x40\xa8\xe0\x1d\xf0\xda\xaa\x00\x18\x3c\x36\xce\xb7\x10\x10\xdf\xce\x8f\xd8\x3a\x3c\x7e\x67\x81\x29\x12\xf2\x15\x44\x67\x07\xda\xff\xee\x44\xe0\x0b\xf8\x12\xe2\xbb\x15\xe2\x2f\xb8\xb7\x9f\x2e\xb5\xa6\xe5\x4f\x87\x9b\x78\x33\xe2\x89\x65\x9e\x07\x51\xf8\x7b\x5e\x7b\xd3\xd2\x79\xc1\x7b\x6f\x40\x49\x94\x6a\x7c\x23\x16\x63\x1d\x42\x80\xd4\x0d\xdf\x1d\x94\x4f\xbd\x35\xce\x7b\x16\xf0\x9b\x2e\xab\xc1\x70\xb9\xdf\xa4\xc7\xf4\xd5\xa3\xc2\xf7\xc5\xed\x64\xfb\xbb\xfd\xec\x37\x7c\x2a\x66\x04\xbd\x6c\xec\xab\x18\xdf\x36\x42\x4e\x1f\x76\xd0\xd6\x10\x1c\xd6\x2d\x75\x26\x10\x9e\x3f\xe0\xe4\x14\xd5\x62\xbf\x5b\x0a\xcc\x1b\xd0\x85\x7a\xfa\x65\xa9\x8e\x57\xc9\x5f\xd8\xd9\x6a\xfc\xfe\xe5\x28\x9f\x67\x38\x5e\x2c\xeb\x5e\x4b\x60\x17\x03\x3e\x60\x04\x4c\x54\xac\x3b\x7f\xed\xf2\x85\xd5\xac\x4e\x82\x86\xbe\xd1\xe3\x28\x56\xd7\x51\xe3\x5e\x0d\x89\x59\xc2\x69\x2f\xca\x23\xa7\x58\x56\xaf\x63\x54\x31\x36\x53\x33\x8a\xfa\xd7\xae\xad\x5a\x06\x16\xa9\x0c\xba\x6d\xc1\x77\x2c\xf1\x57\xff\xe4\xa9\xb5\xa2\x84\x96\x94\x27\x70\x82\xaa\x52\x2e\xb2\x36\xa5\x70\xd1\xc4\x4a\x5d\xd6\xa3\xc6\x72\x2e\x49\x0d\xb5\x8f\xce\xaf\x4e\x16\xa3\x0b\x7a\x70\xd0\x40\x04\xe8\xc7\x6e\xb4\x69\x24\x83\xea\xe9\x0d\xe9\x2f\x8d\x2a\x01\x93\x26\xf8\x19\x3a\x2b\x82\xa8\xdb\xa6\x70\x07\xf7\x9e\xbf\xe7\x1c\xaa\xa9\xb8\xbc\x61\x6e\x41\x3e\x29\x69\x10\xe8\x6f\x9b\xbb\x74\x6a\x1e\xd9\x22\xc0\xcb\xa3\x77\x09\xa0\x3d\xc5\xf9\xc9\xfa\x01\xa4\xc1\x26\x2a\x66\xe3\x27\x93\xa6\x8f\xb6\x32\x28\xd2\x8b\x63\xb0\x23\x4b\x4f\xdf\xa9\xdf\x92\x3b\xec\x8e\x93\x8f\x52\x4e\xf1\x4d\x5e\xa9\x21\xb3\x04\xa2\x04\xca\x22\x35\xf3\xc2\xab\xca\x86\xfc\x2
f\xa7\xfe\x9e\x81\x9e\x83\x61\xd4\xba\xae\xb6\x2e\x48\xca\x52\xf9\xd3\x80\xd7\x5a\x81\x5a\x60\xc0\xb1\xfa\x9d\xba\xfe\x14\x2f\x95\xf9\x3a\xa2\x86\xa0\x15\x66\x8c\x3b\xf4\xa1\x78\x5f\x8d\x70\xe9\x77\x25\x60\xcf\x9a\x82\xe3\x1b\x2e\xb4\xc2\xb8\xa8\x38\xf3\xe7\xbb\xe0\x65\x43\x98\xa4\xb3\x9a\x57\x88\x2e\x6f\x4e\x19\xa9\xd1\xa0\xd3\x19\x36\x8b\xf5\xbc\x88\x3d\xef\x44\xf0\x26\x44\x2c\xfd\x97\x82\x55\x48\x9e\x61\xf5\xea\xe8\x0d\x34\xc1\x5c\xa8\x8f\x24\x68\xa0\xd3\x4a\xc9\xe9\xf2\xf8\x68\x1c\x5a\x8f\x79\x4f\x73\x26\x2b\xb6\x5d\x5b\x8a\x31\x78\x67\x65\xe1\xb6\x07\x32\x84\x9e\xe1\xb9\x89\x66\xa2\x1d\x7a\xc7\xa1\xbe\xea\xf0\x02\x2d\x54\x38\x71\xa4\x77\x84\x28\x35\xfd\x30\x6f\x94\x45\x15\xff\x25\xe2\xa1\x0f\x6f\x60\x02\x09\x26\x1d\xce\x15\x64\xc5\x9e\x6c\xa1\x39\x45\xa2\xe5\xa8\x7e\x09\x2a\x76\x8c\xdc\xd6\x9e\x9a\xd4\xfa\xc2\xd8\x17\xa3\xf6\xb7\x2d\x8b\xb5\x1f\x81\x3f\x11\x4f\x39\x5f\xfb\x98\xf6\xc2\x0f\x43\x1d\x5e\xb4\x4c\xd6\x85\x00\xd4\x79\xde\xa9\xe5\x93\x1a\x84\x2f\x13\x9a\xde\xc7\xb7\x75\x99\xa5\xbb\xba\x7c\x3a\x9c\x8f\xa3\x41\x83\xc0\x1a\x15\xe7\xda\xc2\xd4\x52\x86\x68\x83\xcf\x39\x66\x00\x90\x46\x4b\x68\x11\xd0\xeb\x97\xe5\xff\xb8\x5d\x3a\x7a\x97\x43\x0d\xd1\x5b\x3a\x28\xab\x4d\xb4\x81\xc1\x41\x97\xc9\x43\xe1\xa0\x85\x71\x00\x4e\x3a\x09\xb6\x7f\x5e\x44\x09\xc0\x1d\x66\xae\xd2\x66\xe6\x13\x9d\x01\x6a\x6b\xf6\x09\x44\xd4\xec\xb2\xf1\x97\xb8\xf3\x93\x27\xf8\x21\x5a\x5a\x04\x34\x82\x6a\x7d\x45\x4e\x5a\xbe\x67\x04\x77\xc2\x77\x96\xdd\xec\x9a\x8c\x90\x13\x54\x82\x29\xe1\xbe\x7a\x4a\x6d\xf3\xa7\xa2\x96\xa5\x70\xef\x76\xdb\x47\x8f\x5c\xe6\x79\x0a\x83\xea\xce\xc6\x0a\xae\x46\x21\xb3\x93\xa8\xb5\xb9\xed\x39\xf3\x33\xb0\xee\xab\x28\x8d\x46\x5d\xca\x78\x2d\x9c\x0d\x70\x0e\xb3\x9c\xd2\x5e\xc2\x7e\x6a\x2e\x6f\x9b\xad\x36\x73\x63\x4d\x8b\xe9\x7a\x39\x67\xd2\x8c\x2a\x83\x9e\x23\x78\x8b\x80\x2d\x6f\x53\x8d\x9e\x18\xfa\xa8\xaa\x62\x34\xf9\x80\xeb\x32\xa4\x94\xc3\xf7\x33\x4e\x7d\x32\xbb\xfe\xc9\x31\xf0\xa0\x64\x83\xa0\xdd\xce\xa0\x6b\xc0\xf0\xe6\x90\xcb\x4d\xa5\x66\x76\x8b\xbf\x11\x26\x2c\x78\x60\x55\xba\xdd\xa3\xb4\xe6\xa1\x36\x9e\x29\x5b\xac\x9a\x25\x6f\xc0\x80\x96\xe0\x6d\x0e\x81\xec\x06\x69\xc8\x8a\x5d\x4e\x25\xbe\x9b\x82\xf0\x29\x27\x54\x86\x52\xa9\xd6\x11\xb9\xac\x03\xef\x98\x85\x4a\xe0\xa7\x8b\x06\x23\xbb\x6f\x4d\x25\xbf\x6a\xe9\x36\x59\xa3\x05\x73\x74\x7b\x0b\xe1\xbb\xec\x55\xc2\xfb\xc7\x76\x6a\x5c\x55\x4c\x56\xdc\x49\x71\xac\xc5\xca\x61\x1b\xfe\x87\x5d\xae\xb3\x8c\x1d\xd2\x4e\x87\xb8\xc6\x40\x2b\xf7\x0f\xec\xa9\x6b\x1b\x9d\xdb\x70\xfe\xf4\x24\x17\x85\xeb\xd2\x34\xa1\xca\xf7\xee\x80\x29\xe4\xf6\x29\xfe\x48\x99\x64\x66\x9c\xe2\x6f\x50\xe4\x75\x6b\xf6\xe3\x67\x15\x52\x69\x4d\xf7\xed\x26\x23\x5c\xca\x91\xc1\x1b\x2f\x94\x85\x3f\xa7\x0a\x8c\xd0\xc8\xda\x89\xeb\xef\xf2\x03\x9a\x5f\x8b\x58\x14\x12\x24\x6c\x3c\x5f\x42\xc1\x7b\xdb\x93\x6c\xdf\x68\xa1\x01\xf9\x52\xfe\x96\x12\xb0\x7d\x70\xb0\x0b\xa9\x35\x41\xce\x96\xbe\x9d\x1f\x50\x3b\xbb\x74\x02\x8b\x44\xd0\x79\x7c\x95\x32\x62\xdb\xf6\xe0\x7a\x8a\x23\xf2\x1b\x32\xe8\x9a\x74\xef\xc7\x43\x42\xe4\x2b\x76\xcf\x11\x80\xa0\xc2\x1a\xe8\x8b\x05\x50\x5d\x4e\x76\x67\x21\x4f\x33\x94\x22\x87\x02\xb7\xe1\x77\x8a\x68\xc3\xe0\x87\x65\xe1\x1c\x42\x6e\x23\x66\xfe\x9f\x0e\x82\x8e\x2e\x43\x21\x76\x07\xc3\xb5\x34\x27\x60\x78\x89\xac\x09\x13\x36\x87\x78\x2b\x1d\x38\xa0\xf2\xc5\x7c\x43\x48\x01\xa1\x23\xd5\x84\x38\x27\x0a\x26\x49\x9b\x85\x11\xc5\xfb\x0f\xa2\x82\x75\x1b\x64\x3e\x9f\x45\xa0\x64\x59\xa1\xa4\xff\x0f\xd8\x06\xdb\x27\x05\x82\x0d\x16\x97\xe7\x28\xb4\x47\x3e\x07\x8e\x38\xef\x55\x62\xcd\x24\x57\xd8\xf3\x24\xa5\xfe\x89\xd4\xa7\x80\xe4\xf1\x00\x8a\xfc\xc0\x42\xa7\x6d\x0d\x99\x35\x
55\xd9\x1d\xa6\x47\x49\x89\xea\x15\x61\x8a\x29\xa8\xad\x47\x9e\x86\xd0\xda\xfe\x65\x95\x36\x08\x0d\x57\x75\x9d\x3f\x28\x9d\x6d\x67\x50\x1a\x06\xd9\xfa\xc7\x7a\x5f\xfd\xd4\x18\x54\x11\xbf\xaf\x59\x69\x09\xe9\x12\x99\x5e\x92\x92\xe8\x9d\x4a\x10\xa8\xbf\x94\x43\xa3\x85\x30\x3b\xd0\x0d\x72\xe3\xd8\x4d\xb2\xf3\xdd\xf5\x75\xe1\xe5\xaa\x5f\xd5\x56\x34\x97\x9a\x1c\xd6\xaf\x61\x0b\xbd\x0d\x92\xef\x72\x3a\x65\xe4\xcc\xff\xde\x9b\xc7\x50\x5c\xa9\x9c\x9a\xd1\xe2\xf9\x05\xa5\x2a\x8b\xbf\xab\x93\x5a\x1e\xac\x96\x7f\xbd\xd1\x0f\x5e\xb8\xdf\x83\x2c\x9c\x4e\x71\xfc\xdd\xa8\x76\x3d\x7c\xb5\x3e\xe8\xf0\x78\x80\xd6\x21\x4d\x23\x28\x1f\x7c\xae\xd3\x7b\x2b\xb1\x75\xdd\xb4\x3b\xe1\xa6\xe4\x6b\xe7\x1b\x38\xff\x72\xb1\xe5\x96\x82\x19\xe4\xcd\xcd\xf1\x6b\x8f\x9b\xf0\x8c\xf3\xd0\x9a\x27\xf9\xda\xce\x9a\xd3\x6a\x3e\xf6\xac\x47\xc8\x44\xe9\x5f\x4d\x96\xad\xb1\x23\x0f\x6e\x41\x88\xba\x45\xf7\x11\x9d\x25\xfd\xe9\x25\x3d\x7d\x96\x0e\x1b\xda\xbe\x01\xe8\x9c\x0e\xdf\x82\x3b\xfd\xa5\x48\xf2\x17\xa4\x59\x94\x11\xfa\xa0\x2b\x55\xfb\x92\x2f\xee\x18\x81\x50\x91\x83\xc4\x68\x2f\x48\x23\xcd\xeb\x23\x2f\x7d\x67\x66\xf2\x66\xe6\xbe\x70\x29\xe2\x6b\x6e\x70\x50\x07\xdb\x57\x05\xbe\x82\xf5\xb1\x18\x60\xd9\x4d\xbf\xc4\x61\xbb\x2e\xf1\x2d\x6b\x60\x0a\xf3\xf6\x78\x8a\x04\x8e\xde\xb9\xfc\xe8\x7f\x39\x3d\x6d\xc2\x92\x46\x23\x34\xbc\xde\x60\xe9\x96\xfe\x96\xef\xaa\xe9\xf6\x52\xab\x05\x47\xfa\x60\xa8\xb2\xdc\x77\xf8\xec\xa6\x62\x95\xbb\xc8\xe1\xca\xf7\xc3\x6e\x2b\xd0\xe9\x48\x3f\x9d\xd3\x8c\x75\x42\x6e\x42\xa2\x6d\xc0\x56\x08\xb9\x6c\x82\x6a\x82\x41\x9c\xb8\x2d\x80\xe5\x8e\xfc\xbd\xa7\xde\x6a\xf1\xc3\x4a\x24\xc6\x44\xfa\x95\x7b\x9e\x73\xba\xde\xcd\x92\x91\x95\x51\x6a\x9f\xdf\x26\x4d\x23\x53\x0c\xc9\x97\x77\x99\x68\x24\x70\x69\x33\x25\xc5\xf6\x3b\x47\xce\xbf\xe4\x79\x7e\x85\xb1\xa4\xea\x0b\xf0\x96\x13\xfb\xd6\xf5\x68\xdc\x11\x27\x4c\x54\x54\x01\xfa\x12\x50\xf8\x03\xc2\xb7\x46\x7f\x77\x20\x3e\x5c\x06\xb4\x4a\x5a\x59\xbe\x47\xfe\x2e\x01\xb6\x1d\x47\x74\xe8\x55\xf1\x53\x05\xda\x1b\x38\x91\x9d\x4d\x59\x30\x8a\x2f\xc8\x31\xd6\xc8\xf5\x0f\x8b\x04\xeb\x4c\x58\x61\x69\x3c\x2c\x7f\xa3\xa2\xd6\xf8\xe8\x5f\x44\xb0\x91\xa1\xce\x1d\x17\x7a\x93\xef\x0d\x8f\x29\x88\x1f\xb7\xca\xbf\x84\xa1\xfc\x27\x7b\x74\x0f\x35\x70\x47\x73\x7f\x6d\x72\xe5\xdd\xf3\x79\x7e\x18\x4c\xfa\xcd\x27\xeb\x34\x80\xbe\xdb\x71\x94\xb7\x14\x80\x2a\x0f\xe1\xb0\x36\x87\xdb\x88\x54\x19\x20\x2e\x40\x15\x43\x9e\x64\x18\xaf\x64\xb1\x3a\xb6\xc8\x29\xbc\xb1\xb0\xf9\x37\x61\x74\xa0\xef\x67\xcf\xf8\x43\x88\xb7\x13\xef\x8c\xdd\xf5\x93\x5c\x25\x7a\x30\xe7\x58\x93\x6f\x01\x24\x3d\x87\x0b\xaa\x4f\x29\x96\xa7\xc3\x57\x66\xa4\x13\x76\x14\xff\x1c\x3f\x70\x3b\xfe\xe5\x65\x11\x06\xbd\x2b\x70\x83\xd2\xa7\xa1\x85\x16\x93\x85\xcb\xd1\xd8\xae\xbb\xf2\x48\x67\x2a\x2c\x2c\x7e\xf9\x50\x9c\x32\x85\xf2\x28\x6c\xd0\x51\x5f\xc1\x1f\x87\x73\xbf\xd7\xc6\x6f\xe7\x2f\xc6\x9a\x05\xe0\xc8\x69\x71\xcd\x8a\x01\x67\x86\x11\x08\x84\xff\x5d\x32\x54\x65\xac\x3a\xa0\x61\xee\xec\xb8\xb4\x5f\x44\xd1\x49\x7d\x74\x5c\x22\x19\x41\x05\xac\xb2\xb2\xfa\xe9\x3c\x88\x30\x19\x25\x47\xb7\xe4\x7e\xd3\x93\xaf\x66\x77\xfc\xc6\xc7\x9e\x0a\x8b\x49\x9e\x45\xed\xd0\xde\x45\xe2\x2f\x7b\xb9\xf7\xab\x3e\x6c\xe5\x4f\x1f\x3e\x6e\x09\x48\xf6\xfa\x21\x88\x31\x4c\x79\xaf\x85\xb6\x6f\xb4\x8b\x91\xda\x21\xb6\x66\x43\x67\x22\xdb\xc5\x30\xb3\x0b\xed\x9f\xf6\x65\xec\x78\x42\x21\xc1\x3c\x32\x33\xb2', 1)
|
[
"[email protected]"
] | |
7656c3adcf817be7059c97d960b189aefd1dfef1
|
db5b57a505d0ecdecf342d80b79a0e22e280c606
|
/bitbox/script.py
|
fcf5815868eb3669d4753c83febb5073ad144e43
|
[
"MIT"
] |
permissive
|
lightswarm124/bitbox-py
|
6620ead6fc4272585389f3004aeec05b2b6784a3
|
67ee0d216e2630fd44dba83b5233f33c315dd30b
|
refs/heads/master
| 2020-09-12T18:00:01.353437 | 2019-11-17T02:33:52 | 2019-11-17T02:33:52 | 222,001,618 | 0 | 0 |
MIT
| 2019-11-15T20:54:24 | 2019-11-15T20:54:23 | null |
UTF-8
|
Python
| false | false | 3,640 |
py
|
class Script:
    @staticmethod
    def opcodes():
        """Map Bitcoin Script opcode names to their numeric byte values."""
codes = {
"OP_FALSE": 0,
"OP_0": 0,
"OP_PUSHDATA1": 76,
"OP_PUSHDATA2": 77,
"OP_PUSHDATA4": 78,
"OP_1NEGATE": 79,
"OP_RESERVED": 80,
"OP_TRUE": 81,
"OP_1": 81,
"OP_2": 82,
"OP_3": 83,
"OP_4": 84,
"OP_5": 85,
"OP_6": 86,
"OP_7": 87,
"OP_8": 88,
"OP_9": 89,
"OP_10": 90,
"OP_11": 91,
"OP_12": 92,
"OP_13": 93,
"OP_14": 94,
"OP_15": 95,
"OP_16": 96,
"OP_NOP": 97,
"OP_VER": 98,
"OP_IF": 99,
"OP_NOTIF": 100,
"OP_VERIF": 101,
"OP_VERNOTIF": 102,
"OP_ELSE": 103,
"OP_ENDIF": 104,
"OP_VERIFY": 105,
"OP_RETURN": 106,
"OP_TOALTSTACK": 107,
"OP_FROMALTSTACK": 108,
"OP_2DROP": 109,
"OP_2DUP": 110,
"OP_3DUP": 111,
"OP_2OVER": 112,
"OP_2ROT": 113,
"OP_2SWAP": 114,
"OP_IFDUP": 115,
"OP_DEPTH": 116,
"OP_DROP": 117,
"OP_DUP": 118,
"OP_NIP": 119,
"OP_OVER": 120,
"OP_PICK": 121,
"OP_ROLL": 122,
"OP_ROT": 123,
"OP_SWAP": 124,
"OP_TUCK": 125,
"OP_CAT": 126,
"OP_SPLIT": 127,
"OP_NUM2BIN": 128,
"OP_BIN2NUM": 129,
"OP_SIZE": 130,
"OP_INVERT": 131,
"OP_AND": 132,
"OP_OR": 133,
"OP_XOR": 134,
"OP_EQUAL": 135,
"OP_EQUALVERIFY": 136,
"OP_RESERVED1": 137,
"OP_RESERVED2": 138,
"OP_1ADD": 139,
"OP_1SUB": 140,
"OP_2MUL": 141,
"OP_2DIV": 142,
"OP_NEGATE": 143,
"OP_ABS": 144,
"OP_NOT": 145,
"OP_0NOTEQUAL": 146,
"OP_ADD": 147,
"OP_SUB": 148,
"OP_MUL": 149,
"OP_DIV": 150,
"OP_MOD": 151,
"OP_LSHIFT": 152,
"OP_RSHIFT": 153,
"OP_BOOLAND": 154,
"OP_BOOLOR": 155,
"OP_NUMEQUAL": 156,
"OP_NUMEQUALVERIFY": 157,
"OP_NUMNOTEQUAL": 158,
"OP_LESSTHAN": 159,
"OP_GREATERTHAN": 160,
"OP_LESSTHANOREQUAL": 161,
"OP_GREATERTHANOREQUAL": 162,
"OP_MIN": 163,
"OP_MAX": 164,
"OP_WITHIN": 165,
"OP_RIPEMD160": 166,
"OP_SHA1": 167,
"OP_SHA256": 168,
"OP_HASH160": 169,
"OP_HASH256": 170,
"OP_CODESEPARATOR": 171,
"OP_CHECKSIG": 172,
"OP_CHECKSIGVERIFY": 173,
"OP_CHECKMULTISIG": 174,
"OP_CHECKMULTISIGVERIFY": 175,
"OP_NOP1": 176,
"OP_NOP2": 177,
"OP_CHECKLOCKTIMEVERIFY": 177,
"OP_NOP3": 178,
"OP_CHECKSEQUENCEVERIFY": 178,
"OP_NOP4": 179,
"OP_NOP5": 180,
"OP_NOP6": 181,
"OP_NOP7": 182,
"OP_NOP8": 183,
"OP_NOP9": 184,
"OP_NOP10": 185,
"OP_CHECKDATASIG": 186,
"OP_CHECKDATASIGVERIFY": 187,
"OP_PUBKEYHASH": 253,
"OP_PUBKEY": 254,
"OP_INVALIDOPCODE": 255
}
return codes
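
# Illustrative usage sketch (added during editing; not part of the original
# file). OP_DUP is 0x76 (118) and OP_CHECKSIG is 0xac (172) in Bitcoin Script.
if __name__ == "__main__":
    codes = Script.opcodes()
    print(codes["OP_DUP"])       # 118
    print(codes["OP_CHECKSIG"])  # 172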
|
[
"[email protected]"
] | |
f3340d0bd5c5e5f803e09c14c522220a33e21689
|
98c6ea9c884152e8340605a706efefbea6170be5
|
/examples/data/Assignment_8/fkzluv001/question1.py
|
ceffc8905a53ff115df948b1aef2e73ac10f9c3c
|
[] |
no_license
|
MrHamdulay/csc3-capstone
|
479d659e1dcd28040e83ebd9e3374d0ccc0c6817
|
6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
|
refs/heads/master
| 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 603 |
py
|
"""program with a recursive function to calculate whether or not a string is a palindrome
Luvo Fokazi
09 May 2014"""
# Unused helper (recursive): counts how many integers in [j, n] divide var.
def alt(n, var, j, count):
    if j > n:
        return count
    else:
        if var % j == 0:
            count += 1
        j += 1
        return alt(n, var, j, count)
def Palindrome(dString, n):
    # Compare character n from the front with character n from the back.
    d = (n+1)*-1
    if n+1 >= len(dString):
        # Reached the middle (or the string is empty): all pairs matched.
        return "Palindrome!"
    if dString[n] == dString[d]:
        return Palindrome(dString, n+1)
    else:
        return "Not a palindrome!"
if __name__ == "__main__":
x=input("Enter a string:\n")
print(Palindrome(x,0))
|
[
"[email protected]"
] | |
a3e6f64e66ad6b90edc86ac5a3a78782dd3ef12b
|
11c00c704a3d1171d1bf4474be7ff1779a1dcb69
|
/LSTM_Stateful/recurrent_v2.py
|
f97804311d3bca360167caab49fdee3d89de4684
|
[] |
no_license
|
mikechen66/LSTM-TF2
|
89bd67ca481ded7264191e240e45f792cc5546c3
|
741250374509c332d9f4f5ddebcb1a966e268df0
|
refs/heads/main
| 2023-04-08T15:59:46.999735 | 2021-04-20T01:53:34 | 2021-04-20T01:53:34 | 305,043,822 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 109,883 |
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Recurrent layers and their base classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.StackedRNNCells')
class StackedRNNCells(Layer):
"""Wrapper allowing a stack of RNN cells to behave as a single cell.
Used to implement efficient stacked RNNs.
Arguments:
cells: List of RNN cell instances.
Examples:
```python
cells = [
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
keras.layers.LSTMCell(output_dim),
]
inputs = keras.Input((timesteps, input_dim))
x = keras.layers.RNN(cells)(inputs)
```
"""
def __init__(self, cells, **kwargs):
for cell in cells:
if not hasattr(cell, 'call'):
raise ValueError('All cells must have a `call` method. '
'received cells:', cells)
if not hasattr(cell, 'state_size'):
raise ValueError('All cells must have a '
'`state_size` attribute. '
'received cells:', cells)
self.cells = cells
# reverse_state_order determines whether the state size will be in a reverse
# order of the cells' state. User might want to set this to True to keep the
    # existing behavior. This is only useful when using RNN(return_state=True),
    # since the states will be returned in the same order as state_size.
self.reverse_state_order = kwargs.pop('reverse_state_order', False)
if self.reverse_state_order:
logging.warning('reverse_state_order=True in StackedRNNCells will soon '
'be deprecated. Please update the code to work with the '
'natural order of states if you rely on the RNN states, '
'eg RNN(return_state=True).')
super(StackedRNNCells, self).__init__(**kwargs)
@property
def state_size(self):
return tuple(c.state_size for c in
(self.cells[::-1] if self.reverse_state_order else self.cells))
@property
def output_size(self):
if getattr(self.cells[-1], 'output_size', None) is not None:
return self.cells[-1].output_size
elif _is_multiple_state(self.cells[-1].state_size):
return self.cells[-1].state_size[0]
else:
return self.cells[-1].state_size
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
initial_states = []
for cell in self.cells[::-1] if self.reverse_state_order else self.cells:
get_initial_state_fn = getattr(cell, 'get_initial_state', None)
if get_initial_state_fn:
initial_states.append(get_initial_state_fn(
inputs=inputs, batch_size=batch_size, dtype=dtype))
else:
initial_states.append(_generate_zero_filled_state_for_cell(
cell, inputs, batch_size, dtype))
return tuple(initial_states)
def call(self, inputs, states, constants=None, **kwargs):
# Recover per-cell states.
state_size = (self.state_size[::-1]
if self.reverse_state_order else self.state_size)
nested_states = nest.pack_sequence_as(state_size, nest.flatten(states))
# Call the cells in order and store the returned states.
new_nested_states = []
for cell, states in zip(self.cells, nested_states):
states = states if nest.is_sequence(states) else [states]
# TF cell does not wrap the state into list when there is only one state.
is_tf_rnn_cell = getattr(cell, '_is_tf_rnn_cell', None) is not None
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
if generic_utils.has_arg(cell.call, 'constants'):
inputs, states = cell.call(inputs, states, constants=constants,
**kwargs)
else:
inputs, states = cell.call(inputs, states, **kwargs)
new_nested_states.append(states)
return inputs, nest.pack_sequence_as(state_size,
nest.flatten(new_nested_states))
@tf_utils.shape_type_conversion
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
for cell in self.cells:
if isinstance(cell, Layer):
if not cell.built:
cell.build(input_shape)
if getattr(cell, 'output_size', None) is not None:
output_dim = cell.output_size
elif _is_multiple_state(cell.state_size):
output_dim = cell.state_size[0]
else:
output_dim = cell.state_size
input_shape = tuple([input_shape[0]] +
tensor_shape.as_shape(output_dim).as_list())
self.built = True
def get_config(self):
cells = []
for cell in self.cells:
cells.append({
'class_name': cell.__class__.__name__,
'config': cell.get_config()
})
config = {'cells': cells}
base_config = super(StackedRNNCells, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cells = []
for cell_config in config.pop('cells'):
cells.append(
deserialize_layer(cell_config, custom_objects=custom_objects))
return cls(cells, **config)
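# Illustrative note (added during editing): `keras.layers.RNN` below wraps a
# plain list of cells in `StackedRNNCells` automatically (see `RNN.__init__`),
# so these two constructions are equivalent:
#
#   layer = RNN(StackedRNNCells([cell_a, cell_b]))
#   layer = RNN([cell_a, cell_b])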
@keras_export('keras.layers.RNN')
class RNN(Layer):
"""Base class for recurrent layers.
Arguments:
    cell: An RNN cell instance or a list of RNN cell instances.
      An RNN cell is a class that has:
- A `call(input_at_t, states_at_t)` method, returning
`(output_at_t, states_at_t_plus_1)`. The call method of the
cell can also take the optional argument `constants`, see
section "Note on passing external constants" below.
- A `state_size` attribute. This can be a single integer
(single state) in which case it is the size of the recurrent
state. This can also be a list/tuple of integers (one size per
state).
The `state_size` can also be TensorShape or tuple/list of
TensorShape, to represent high dimension state.
    - An `output_size` attribute. This can be a single integer or a
      TensorShape, which represents the shape of the output. For backwards
      compatibility, if this attribute is not available for the
      cell, the value will be inferred from the first element of the
      `state_size`.
- A `get_initial_state(inputs=None, batch_size=None, dtype=None)`
method that creates a tensor meant to be fed to `call()` as the
initial state, if the user didn't specify any initial state via other
means. The returned initial state should have a shape of
[batch_size, cell.state_size]. The cell might choose to create a
tensor full of zeros, or full of other values based on the cell's
implementation.
`inputs` is the input tensor to the RNN layer, which should
contain the batch size as its shape[0], and also dtype. Note that
        the shape[0] might be `None` during graph construction. Either
        the `inputs` or the pair of `batch_size` and `dtype` is provided.
`batch_size` is a scalar tensor that represents the batch size
of the inputs. `dtype` is `tf.DType` that represents the dtype of
the inputs.
      For backwards compatibility, if this method is not implemented
      by the cell, the RNN layer will create a zero-filled tensor with
      shape [batch_size, cell.state_size].
In the case that `cell` is a list of RNN cell instances, the cells
will be stacked on top of each other in the RNN, resulting in an
efficient stacked RNN.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled, else a symbolic loop will be used.
      Unrolling can speed up an RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: Input tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is for use with cells that use dropout.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
constants: List of constant tensors to be passed to the cell at each
timestep.
Input shape:
N-D tensor with shape `(batch_size, timesteps, ...)` or
`(timesteps, batch_size, ...)` when time_major is True.
Output shape:
- If `return_state`: a list of tensors. The first tensor is
the output. The remaining tensors are the last states,
each with shape `(batch_size, state_size)`, where `state_size` could
be a high dimension tensor shape.
- If `return_sequences`: N-D tensor with shape
`(batch_size, timesteps, output_size)`, where `output_size` could
be a high dimension tensor shape, or
`(timesteps, batch_size, output_size)` when `time_major` is True.
- Else, N-D tensor with shape `(batch_size, output_size)`, where
`output_size` could be a high dimension tensor shape.
Masking:
This layer supports masking for input data with a variable number
of timesteps. To introduce masks to your data,
use an [Embedding](embeddings.md) layer with the `mask_zero` parameter
set to `True`.
Note on using statefulness in RNNs:
You can set RNN layers to be 'stateful', which means that the states
computed for the samples in one batch will be reused as initial states
for the samples in the next batch. This assumes a one-to-one mapping
between samples in different successive batches.
To enable statefulness:
- Specify `stateful=True` in the layer constructor.
- Specify a fixed batch size for your model, by passing
If sequential model:
`batch_input_shape=(...)` to the first layer in your model.
Else for functional model with 1 or more Input layers:
`batch_shape=(...)` to all the first layers in your model.
This is the expected shape of your inputs
*including the batch size*.
It should be a tuple of integers, e.g. `(32, 10, 100)`.
- Specify `shuffle=False` when calling fit().
To reset the states of your model, call `.reset_states()` on either
a specific layer, or on your entire model.
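    A minimal sketch of the stateful pattern above (illustrative; `x` and `y`
    stand for user-provided data):
    ```python
    model = keras.Sequential()
    model.add(keras.layers.LSTM(32, stateful=True,
                                batch_input_shape=(32, 10, 16)))
    model.add(keras.layers.Dense(1))
    model.fit(x, y, batch_size=32, shuffle=False)
    model.reset_states()
    ```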
Note on specifying the initial state of RNNs:
You can specify the initial state of RNN layers symbolically by
calling them with the keyword argument `initial_state`. The value of
`initial_state` should be a tensor or list of tensors representing
the initial state of the RNN layer.
You can specify the initial state of RNN layers numerically by
calling `reset_states` with the keyword argument `states`. The value of
`states` should be a numpy array or list of numpy arrays representing
the initial state of the RNN layer.
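    For example, the symbolic form (illustrative; `h0`/`c0` are user-provided
    tensors of shape `(batch_size, units)`):
    ```python
    lstm = keras.layers.LSTM(units, return_state=True)
    outputs, h, c = lstm(inputs, initial_state=[h0, c0])
    ```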
Note on passing external constants to RNNs:
You can pass "external" constants to the cell using the `constants`
keyword argument of `RNN.__call__` (as well as `RNN.call`) method. This
requires that the `cell.call` method accepts the same keyword argument
`constants`. Such constants can be used to condition the cell
transformation on additional static inputs (not changing over time),
a.k.a. an attention mechanism.
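    For example (illustrative; `attention_context` is a user-provided static
    tensor):
    ```python
    outputs = RNN(cell)(inputs, constants=[attention_context])
    ```
    This works only if `cell.call` accepts a `constants` keyword argument.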
Examples:
```python
# First, let's define a RNN Cell, as a layer subclass.
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, [output]
# Let's use this cell in a RNN layer:
cell = MinimalRNNCell(32)
x = keras.Input((None, 5))
layer = RNN(cell)
y = layer(x)
# Here's how to use the cell to build a stacked RNN:
cells = [MinimalRNNCell(32), MinimalRNNCell(64)]
x = keras.Input((None, 5))
layer = RNN(cells)
y = layer(x)
```
"""
def __init__(self,
cell,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
time_major=False,
**kwargs):
if isinstance(cell, (list, tuple)):
cell = StackedRNNCells(cell)
if not hasattr(cell, 'call'):
raise ValueError('`cell` should have a `call` method. '
'The RNN was passed:', cell)
if not hasattr(cell, 'state_size'):
raise ValueError('The RNN cell should have '
'an attribute `state_size` '
'(tuple of integers, '
'one integer per RNN state).')
# If True, the output for masked timestep will be zeros, whereas in the
# False case, output from previous timestep is returned for masked timestep.
self.zero_output_for_mask = kwargs.pop('zero_output_for_mask', False)
if 'input_shape' not in kwargs and (
'input_dim' in kwargs or 'input_length' in kwargs):
input_shape = (kwargs.pop('input_length', None),
kwargs.pop('input_dim', None))
kwargs['input_shape'] = input_shape
super(RNN, self).__init__(**kwargs)
self.cell = cell
self.return_sequences = return_sequences
self.return_state = return_state
self.go_backwards = go_backwards
self.stateful = stateful
self.unroll = unroll
self.time_major = time_major
self.supports_masking = True
    # The input shape is unknown yet; it could have nested tensor inputs, in
    # which case the input spec will be a list of specs for the nested inputs,
    # with the same structure as the input.
self.input_spec = None
self.state_spec = None
self._states = None
self.constants_spec = None
self._num_constants = 0
@property
def states(self):
if self._states is None:
state = nest.map_structure(lambda _: None, self.cell.state_size)
return state if nest.is_sequence(self.cell.state_size) else [state]
return self._states
@states.setter
# Automatic tracking catches "self._states" which adds an extra weight and
# breaks HDF5 checkpoints.
@trackable.no_automatic_dependency_tracking
def states(self, states):
self._states = states
def compute_output_shape(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
input_shape = nest.flatten(input_shape)[0]
batch = input_shape[0]
time_step = input_shape[1]
if self.time_major:
batch, time_step = time_step, batch
if _is_multiple_state(self.cell.state_size):
state_size = self.cell.state_size
else:
state_size = [self.cell.state_size]
def _get_output_shape(flat_output_size):
output_dim = tensor_shape.as_shape(flat_output_size).as_list()
if self.return_sequences:
if self.time_major:
output_shape = tensor_shape.as_shape([time_step, batch] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch, time_step] + output_dim)
else:
output_shape = tensor_shape.as_shape([batch] + output_dim)
return output_shape
if getattr(self.cell, 'output_size', None) is not None:
# cell.output_size could be nested structure.
output_shape = nest.flatten(nest.map_structure(
_get_output_shape, self.cell.output_size))
output_shape = output_shape[0] if len(output_shape) == 1 else output_shape
else:
# Note that state_size[0] could be a tensor_shape or int.
output_shape = _get_output_shape(state_size[0])
if self.return_state:
def _get_state_shape(flat_state):
state_shape = [batch] + tensor_shape.as_shape(flat_state).as_list()
return tensor_shape.as_shape(state_shape)
state_shape = nest.map_structure(_get_state_shape, state_size)
return generic_utils.to_list(output_shape) + nest.flatten(state_shape)
else:
return output_shape
def compute_mask(self, inputs, mask):
# Time step masks must be the same for each input.
# This is because the mask for an RNN is of size [batch, time_steps, 1],
# and specifies which time steps should be skipped, and a time step
# must be skipped for all inputs.
# TODO(scottzhu): Should we accept multiple different masks?
mask = nest.flatten(mask)[0]
output_mask = mask if self.return_sequences else None
if self.return_state:
state_mask = [None for _ in self.states]
return [output_mask] + state_mask
else:
return output_mask
def build(self, input_shape):
if isinstance(input_shape, list):
input_shape = input_shape[0]
    # The input_shape here could be a nested structure.
    # Convert the TensorShape(s) to plain shapes here. The input could be a
    # single tensor, or a nested structure of tensors.
def get_input_spec(shape):
if isinstance(shape, tensor_shape.TensorShape):
input_spec_shape = shape.as_list()
else:
input_spec_shape = list(shape)
batch_index, time_step_index = (1, 0) if self.time_major else (0, 1)
if not self.stateful:
input_spec_shape[batch_index] = None
input_spec_shape[time_step_index] = None
return InputSpec(shape=tuple(input_spec_shape))
def get_step_input_shape(shape):
if isinstance(shape, tensor_shape.TensorShape):
shape = tuple(shape.as_list())
# remove the timestep from the input_shape
return shape[1:] if self.time_major else (shape[0],) + shape[2:]
# Check whether the input shape contains any nested shapes. It could be
# (tensor_shape(1, 2), tensor_shape(3, 4)) or (1, 2, 3) which is from numpy
# inputs.
try:
input_shape = tensor_shape.as_shape(input_shape)
except (ValueError, TypeError):
# A nested tensor input
pass
if not nest.is_sequence(input_shape):
      # This indicates that there is only one input.
if self.input_spec is not None:
self.input_spec[0] = get_input_spec(input_shape)
else:
self.input_spec = [get_input_spec(input_shape)]
step_input_shape = get_step_input_shape(input_shape)
else:
if self.input_spec is not None:
self.input_spec[0] = nest.map_structure(get_input_spec, input_shape)
else:
self.input_spec = generic_utils.to_list(
nest.map_structure(get_input_spec, input_shape))
step_input_shape = nest.map_structure(get_step_input_shape, input_shape)
# allow cell (if layer) to build before we set or validate state_spec
if isinstance(self.cell, Layer):
if not self.cell.built:
self.cell.build(step_input_shape)
# set or validate state_spec
if _is_multiple_state(self.cell.state_size):
state_size = list(self.cell.state_size)
else:
state_size = [self.cell.state_size]
if self.state_spec is not None:
# initial_state was passed in call, check compatibility
self._validate_state_spec(state_size, self.state_spec)
else:
self.state_spec = [
InputSpec(shape=[None] + tensor_shape.as_shape(dim).as_list())
for dim in state_size
]
if self.stateful:
self.reset_states()
self.built = True
@staticmethod
def _validate_state_spec(cell_state_sizes, init_state_specs):
"""Validate the state spec between the initial_state and the state_size.
Args:
cell_state_sizes: list, the `state_size` attribute from the cell.
init_state_specs: list, the `state_spec` from the initial_state that is
passed in `call()`.
Raises:
ValueError: When initial state spec is not compatible with the state size.
"""
validation_error = ValueError(
'An `initial_state` was passed that is not compatible with '
'`cell.state_size`. Received `state_spec`={}; '
'however `cell.state_size` is '
'{}'.format(init_state_specs, cell_state_sizes))
flat_cell_state_size = nest.flatten(cell_state_sizes)
flat_state_spec = nest.flatten(init_state_specs)
if len(flat_cell_state_size) != len(flat_state_spec):
raise validation_error
for i in range(len(flat_cell_state_size)):
if not tensor_shape.TensorShape(
# Ignore the first axis for init_state which is for batch
flat_state_spec[i].shape[1:]).is_compatible_with(
tensor_shape.TensorShape(flat_cell_state_size[i])):
raise validation_error
def get_initial_state(self, inputs):
get_initial_state_fn = getattr(self.cell, 'get_initial_state', None)
if nest.is_sequence(inputs):
      # The input is a nested sequence. Use the first element in the sequence
      # to get batch size and dtype.
inputs = nest.flatten(inputs)[0]
input_shape = array_ops.shape(inputs)
batch_size = input_shape[1] if self.time_major else input_shape[0]
dtype = inputs.dtype
if get_initial_state_fn:
init_state = get_initial_state_fn(
inputs=None, batch_size=batch_size, dtype=dtype)
else:
init_state = _generate_zero_filled_state(batch_size, self.cell.state_size,
dtype)
# Keras RNN expect the states in a list, even if it's a single state tensor.
if not nest.is_sequence(init_state):
init_state = [init_state]
    # Force the state to be a list in case it is a namedtuple, e.g. LSTMStateTuple.
return list(init_state)
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
inputs, initial_state, constants = _standardize_args(inputs,
initial_state,
constants,
self._num_constants)
if initial_state is None and constants is None:
return super(RNN, self).__call__(inputs, **kwargs)
# If any of `initial_state` or `constants` are specified and are Keras
# tensors, then add them to the inputs and temporarily modify the
# input_spec to include them.
additional_inputs = []
additional_specs = []
if initial_state is not None:
additional_inputs += initial_state
self.state_spec = nest.map_structure(
lambda s: InputSpec(shape=K.int_shape(s)), initial_state)
additional_specs += self.state_spec
if constants is not None:
additional_inputs += constants
self.constants_spec = [
InputSpec(shape=K.int_shape(constant)) for constant in constants
]
self._num_constants = len(constants)
additional_specs += self.constants_spec
# at this point additional_inputs cannot be empty
is_keras_tensor = K.is_keras_tensor(nest.flatten(additional_inputs)[0])
for tensor in nest.flatten(additional_inputs):
if K.is_keras_tensor(tensor) != is_keras_tensor:
raise ValueError('The initial state or constants of an RNN'
' layer cannot be specified with a mix of'
' Keras tensors and non-Keras tensors'
' (a "Keras tensor" is a tensor that was'
' returned by a Keras layer, or by `Input`)')
if is_keras_tensor:
# Compute the full input spec, including state and constants
full_input = [inputs] + additional_inputs
# The original input_spec is None since there could be a nested tensor
# input. Update the input_spec to match the inputs.
full_input_spec = generic_utils.to_list(
nest.map_structure(lambda _: None, inputs)) + additional_specs
# Perform the call with temporarily replaced input_spec
self.input_spec = full_input_spec
output = super(RNN, self).__call__(full_input, **kwargs)
# Remove the additional_specs from input spec and keep the rest. It is
# important to keep since the input spec was populated by build(), and
      # will be reused in the stateful=True case.
self.input_spec = self.input_spec[:-len(additional_specs)]
return output
else:
if initial_state is not None:
kwargs['initial_state'] = initial_state
if constants is not None:
kwargs['constants'] = constants
return super(RNN, self).__call__(inputs, **kwargs)
def call(self,
inputs,
mask=None,
training=None,
initial_state=None,
constants=None):
inputs, initial_state, constants = self._process_inputs(
inputs, initial_state, constants)
if mask is not None:
# Time step masks must be the same for each input.
# TODO(scottzhu): Should we accept multiple different masks?
mask = nest.flatten(mask)[0]
if nest.is_sequence(inputs):
# In the case of nested input, use the first element for shape check.
input_shape = K.int_shape(nest.flatten(inputs)[0])
else:
input_shape = K.int_shape(inputs)
timesteps = input_shape[0] if self.time_major else input_shape[1]
if self.unroll and timesteps is None:
raise ValueError('Cannot unroll a RNN if the '
'time dimension is undefined. \n'
'- If using a Sequential model, '
'specify the time dimension by passing '
'an `input_shape` or `batch_input_shape` '
'argument to your first layer. If your '
'first layer is an Embedding, you can '
'also use the `input_length` argument.\n'
'- If using the functional API, specify '
'the time dimension by passing a `shape` '
'or `batch_shape` argument to your Input layer.')
kwargs = {}
if generic_utils.has_arg(self.cell.call, 'training'):
kwargs['training'] = training
# TF RNN cells expect single tensor as state instead of list wrapped tensor.
is_tf_rnn_cell = getattr(self.cell, '_is_tf_rnn_cell', None) is not None
if constants:
if not generic_utils.has_arg(self.cell.call, 'constants'):
raise ValueError('RNN cell does not support constants')
def step(inputs, states):
constants = states[-self._num_constants:] # pylint: disable=invalid-unary-operand-type
states = states[:-self._num_constants] # pylint: disable=invalid-unary-operand-type
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(
inputs, states, constants=constants, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
else:
def step(inputs, states):
states = states[0] if len(states) == 1 and is_tf_rnn_cell else states
output, new_states = self.cell.call(inputs, states, **kwargs)
if not nest.is_sequence(new_states):
new_states = [new_states]
return output, new_states
# `input_length` is passed as the `maximum_iterations` arg to tf.while_loop.
# We only specify that when building for XLA since that causes slowdowns
# on GPU in TF.
if (not context.executing_eagerly() and
control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())):
input_length = timesteps
else:
input_length = None
last_output, outputs, states = K.rnn(
step,
inputs,
initial_state,
constants=constants,
go_backwards=self.go_backwards,
mask=mask,
unroll=self.unroll,
input_length=input_length,
time_major=self.time_major,
zero_output_for_mask=self.zero_output_for_mask)
if self.stateful:
updates = []
for state_, state in zip(nest.flatten(self.states), nest.flatten(states)):
updates.append(state_ops.assign(state_, state))
self.add_update(updates)
if self.return_sequences:
output = outputs
else:
output = last_output
if self.return_state:
if not isinstance(states, (list, tuple)):
states = [states]
else:
states = list(states)
return generic_utils.to_list(output) + states
else:
return output
def _process_inputs(self, inputs, initial_state, constants):
# input shape: `(samples, time (padded with zeros), input_dim)`
# note that the .build() method of subclasses MUST define
# self.input_spec and self.state_spec with complete input shapes.
    if (isinstance(inputs, collections.abc.Sequence)
        and not isinstance(inputs, tuple)):
      # get initial_state from the full input spec,
      # as they could be copied to multiple GPUs.
if not self._num_constants:
initial_state = inputs[1:]
else:
initial_state = inputs[1:-self._num_constants]
constants = inputs[-self._num_constants:]
if len(initial_state) == 0:
initial_state = None
inputs = inputs[0]
if initial_state is not None:
pass
elif self.stateful:
initial_state = self.states
else:
initial_state = self.get_initial_state(inputs)
if len(initial_state) != len(self.states):
raise ValueError('Layer has ' + str(len(self.states)) +
' states but was passed ' + str(len(initial_state)) +
' initial states.')
return inputs, initial_state, constants
def reset_states(self, states=None):
if not self.stateful:
raise AttributeError('Layer must be stateful.')
spec_shape = None if self.input_spec is None else self.input_spec[0].shape
if spec_shape is None:
      # It is possible for the spec shape to be None, e.g. when constructing an
      # RNN with a custom cell, or with standard RNN layers (LSTM/GRU) where we
      # only know the input is 3-dimensional, but not its full shape spec
      # before build().
batch_size = None
else:
batch_size = spec_shape[1] if self.time_major else spec_shape[0]
if not batch_size:
raise ValueError('If a RNN is stateful, it needs to know '
'its batch size. Specify the batch size '
'of your input tensors: \n'
'- If using a Sequential model, '
'specify the batch size by passing '
'a `batch_input_shape` '
'argument to your first layer.\n'
'- If using the functional API, specify '
'the batch size by passing a '
'`batch_shape` argument to your Input layer.')
# initialize state if None
if nest.flatten(self.states)[0] is None:
def create_state_variable(state):
return K.zeros([batch_size] + tensor_shape.as_shape(state).as_list())
self.states = nest.map_structure(
create_state_variable, self.cell.state_size)
if not nest.is_sequence(self.states):
self.states = [self.states]
elif states is None:
for state, size in zip(nest.flatten(self.states),
nest.flatten(self.cell.state_size)):
K.set_value(state, np.zeros([batch_size] +
tensor_shape.as_shape(size).as_list()))
else:
flat_states = nest.flatten(self.states)
flat_input_states = nest.flatten(states)
if len(flat_input_states) != len(flat_states):
raise ValueError('Layer ' + self.name + ' expects ' +
str(len(flat_states)) + ' states, '
'but it received ' + str(len(flat_input_states)) +
' state values. Input received: ' + str(states))
set_value_tuples = []
for i, (value, state) in enumerate(zip(flat_input_states,
flat_states)):
if value.shape != state.shape:
raise ValueError(
'State ' + str(i) + ' is incompatible with layer ' +
self.name + ': expected shape=' + str(
(batch_size, state)) + ', found shape=' + str(value.shape))
set_value_tuples.append((state, value))
K.batch_set_value(set_value_tuples)
def get_config(self):
config = {
'return_sequences': self.return_sequences,
'return_state': self.return_state,
'go_backwards': self.go_backwards,
'stateful': self.stateful,
'unroll': self.unroll,
'time_major': self.time_major
}
if self._num_constants:
config['num_constants'] = self._num_constants
if self.zero_output_for_mask:
config['zero_output_for_mask'] = self.zero_output_for_mask
cell_config = self.cell.get_config()
config['cell'] = {
'class_name': self.cell.__class__.__name__,
'config': cell_config
}
base_config = super(RNN, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
cell = deserialize_layer(config.pop('cell'), custom_objects=custom_objects)
num_constants = config.pop('num_constants', 0)
layer = cls(cell, **config)
layer._num_constants = num_constants
return layer
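# Illustrative round-trip (added comment): `get_config`/`from_config` let an
# RNN layer be serialized and rebuilt, e.g.:
#
#   config = layer.get_config()
#   clone = RNN.from_config(config)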
@keras_export('keras.layers.AbstractRNNCell')
class AbstractRNNCell(Layer):
"""Abstract object representing an RNN cell.
This is the base class for implementing RNN cells with custom behavior.
Every `RNNCell` must have the properties below and implement `call` with
the signature `(output, next_state) = call(input, state)`.
Examples:
```python
class MinimalRNNCell(AbstractRNNCell):
def __init__(self, units, **kwargs):
self.units = units
super(MinimalRNNCell, self).__init__(**kwargs)
@property
def state_size(self):
return self.units
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = K.dot(inputs, self.kernel)
output = h + K.dot(prev_output, self.recurrent_kernel)
return output, output
```
This definition of cell differs from the definition used in the literature.
In the literature, 'cell' refers to an object with a single scalar output.
This definition refers to a horizontal array of such units.
An RNN cell, in the most abstract setting, is anything that has
a state and performs some operation that takes a matrix of inputs.
This operation results in an output matrix with `self.output_size` columns.
If `self.state_size` is an integer, this operation also results in a new
state matrix with `self.state_size` columns. If `self.state_size` is a
(possibly nested tuple of) TensorShape object(s), then it should return a
matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.state_size`.
"""
def call(self, inputs, states):
"""The function that contains the logic for one RNN step calculation.
Args:
      inputs: the input tensor, which is a slice of the overall RNN input along
        the time dimension (usually the second dimension).
      states: the state tensor from the previous step, with shape
        `(batch, state_size)`. At timestep 0 it will be the
        initial state the user specified, or a zero-filled tensor otherwise.
Returns:
A tuple of two tensors:
1. output tensor for the current timestep, with size `output_size`.
2. state tensor for next step, which has the shape of `state_size`.
"""
raise NotImplementedError('Abstract method')
@property
def state_size(self):
"""size(s) of state(s) used by this cell.
It can be represented by an Integer, a TensorShape or a tuple of Integers
or TensorShapes.
"""
raise NotImplementedError('Abstract method')
@property
def output_size(self):
"""Integer or TensorShape: size of outputs produced by this cell."""
raise NotImplementedError('Abstract method')
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
class DropoutRNNCellMixin(object):
"""Object that hold dropout related fields for RNN Cell.
This class is not a standalone RNN cell. It suppose to be used with a RNN cell
by multiple inheritance. Any cell that mix with class should have following
fields:
dropout: a float number within range [0, 1). The ratio that the input
tensor need to dropout.
recurrent_dropout: a float number within range [0, 1). The ratio that the
recurrent state weights need to dropout.
This object will create and cache created dropout masks, and reuse them for
the incoming data, so that the same mask is used for every batch input.
"""
def __init__(self, *args, **kwargs):
# Note that the following two masks will be used in "graph function" mode,
# e.g. these masks are symbolic tensors. In eager mode, the `eager_*_mask`
# tensors will be generated differently than in the "graph function" case,
# and they will be cached.
# Also note that in graph mode, we still cache those masks only because the
# RNN could be created with `unroll=True`. In that case, the `cell.call()`
# function will be invoked multiple times, and we want to ensure same mask
# is used every time.
self._dropout_mask = None
self._recurrent_dropout_mask = None
self._eager_dropout_mask = None
self._eager_recurrent_dropout_mask = None
super(DropoutRNNCellMixin, self).__init__(*args, **kwargs)
def reset_dropout_mask(self):
"""Reset the cached dropout masks if any.
    It is important for the RNN layer to invoke this in its call() method so
    that the cached mask is cleared before calling the cell.call(). The mask
    should be cached across timesteps within the same batch, but shouldn't
    be cached between batches. Otherwise it will introduce unreasonable bias
    against certain indices of data within the batch.
"""
self._dropout_mask = None
self._eager_dropout_mask = None
def reset_recurrent_dropout_mask(self):
"""Reset the cached recurrent dropout masks if any.
    It is important for the RNN layer to invoke this in its call() method so
    that the cached mask is cleared before calling the cell.call(). The mask
    should be cached across timesteps within the same batch, but shouldn't
    be cached between batches. Otherwise it will introduce unreasonable bias
    against certain indices of data within the batch.
"""
self._recurrent_dropout_mask = None
self._eager_recurrent_dropout_mask = None
def get_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the dropout mask for RNN cell's input.
    It will create a mask based on context if there isn't an existing cached
    mask. If a new mask is generated, it will update the cache in the cell.
    Args:
      inputs: the input tensor whose shape will be used to generate the
        dropout mask.
      training: boolean tensor, whether it's in training mode; dropout will be
        ignored in non-training mode.
      count: int, how many dropout masks will be generated. Useful for cells
        that have internal weights fused together.
    Returns:
      List of mask tensors; generated or cached masks based on context.
"""
if self.dropout == 0:
return None
if (not context.executing_eagerly() and self._dropout_mask is None
or context.executing_eagerly() and self._eager_dropout_mask is None):
# Generate new mask and cache it based on context.
dp_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.dropout,
training=training,
count=count)
if context.executing_eagerly():
self._eager_dropout_mask = dp_mask
else:
self._dropout_mask = dp_mask
else:
# Reuse the existing mask.
dp_mask = (self._eager_dropout_mask
if context.executing_eagerly() else self._dropout_mask)
return dp_mask
def get_recurrent_dropout_mask_for_cell(self, inputs, training, count=1):
"""Get the recurrent dropout mask for RNN cell.
    It will create a mask based on context if there isn't an existing cached
    mask. If a new mask is generated, it will update the cache in the cell.
    Args:
      inputs: the input tensor whose shape will be used to generate the
        dropout mask.
      training: boolean tensor, whether it's in training mode; dropout will be
        ignored in non-training mode.
      count: int, how many dropout masks will be generated. Useful for cells
        that have internal weights fused together.
    Returns:
      List of mask tensors; generated or cached masks based on context.
"""
if self.recurrent_dropout == 0:
return None
if (not context.executing_eagerly() and self._recurrent_dropout_mask is None
or context.executing_eagerly()
and self._eager_recurrent_dropout_mask is None):
# Generate new mask and cache it based on context.
rec_dp_mask = _generate_dropout_mask(
array_ops.ones_like(inputs),
self.recurrent_dropout,
training=training,
count=count)
if context.executing_eagerly():
self._eager_recurrent_dropout_mask = rec_dp_mask
else:
self._recurrent_dropout_mask = rec_dp_mask
else:
# Reuse the existing mask.
rec_dp_mask = (self._eager_recurrent_dropout_mask
if context.executing_eagerly()
else self._recurrent_dropout_mask)
return rec_dp_mask
@keras_export('keras.layers.SimpleRNNCell')
class SimpleRNNCell(DropoutRNNCellMixin, Layer):
"""Cell class for SimpleRNN.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
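
  Example (a minimal usage sketch; `timesteps` and `input_dim` are
  illustrative placeholders):

  ```python
  cell = SimpleRNNCell(32)
  layer = RNN(cell)
  inputs = keras.Input((timesteps, input_dim))
  output = layer(inputs)  # output shape: (batch, 32)
  ```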
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
**kwargs):
super(SimpleRNNCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
self.kernel = self.add_weight(
shape=(input_shape[-1], self.units),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.units,),
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
prev_output = states[0]
dp_mask = self.get_dropout_mask_for_cell(inputs, training)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
prev_output, training)
if dp_mask is not None:
h = K.dot(inputs * dp_mask, self.kernel)
else:
h = K.dot(inputs, self.kernel)
if self.bias is not None:
h = K.bias_add(h, self.bias)
if rec_dp_mask is not None:
prev_output = prev_output * rec_dp_mask
output = h + K.dot(prev_output, self.recurrent_kernel)
if self.activation is not None:
output = self.activation(output)
return output, [output]
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.layers.SimpleRNN')
class SimpleRNN(RNN):
"""Fully-connected RNN where the output is to be fed back to input.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
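
  Example (a minimal sketch; `timesteps` and `input_dim` are illustrative
  placeholders):

  ```python
  inputs = keras.Input((timesteps, input_dim))
  # With both flags set, the layer returns the full sequence and the
  # final state: (batch, timesteps, 4) and (batch, 4) respectively.
  whole_sequence, final_state = SimpleRNN(
      4, return_sequences=True, return_state=True)(inputs)
  ```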
"""
def __init__(self,
units,
activation='tanh',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if 'implementation' in kwargs:
kwargs.pop('implementation')
logging.warning('The `implementation` argument '
'in `SimpleRNN` has been deprecated. '
'Please remove it from your layer call.')
cell = SimpleRNNCell(
units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
dtype=kwargs.get('dtype'))
super(SimpleRNN, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(SimpleRNN, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout
}
base_config = super(SimpleRNN, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config:
config.pop('implementation')
return cls(**config)
@keras_export(v1=['keras.layers.GRUCell'])
class GRUCell(DropoutRNNCellMixin, Layer):
"""Cell class for the GRU layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass None, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
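
  Example (a minimal usage sketch; `timesteps` and `input_dim` are
  illustrative placeholders):

  ```python
  cell = GRUCell(64)
  layer = RNN(cell)
  inputs = keras.Input((timesteps, input_dim))
  output = layer(inputs)  # output shape: (batch, 64)
  ```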
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
reset_after=False,
**kwargs):
super(GRUCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
self.reset_after = reset_after
self.state_size = self.units
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 3),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 3),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if not self.reset_after:
bias_shape = (3 * self.units,)
else:
# separate biases for input and recurrent kernels
# Note: the shape is intentionally different from CuDNNGRU biases
# `(2 * 3 * self.units,)`, so that we can distinguish the classes
# when loading and converting saved weights.
bias_shape = (2, 3 * self.units)
self.bias = self.add_weight(shape=bias_shape,
name='bias',
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=3)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=3)
if self.use_bias:
if not self.reset_after:
input_bias, recurrent_bias = self.bias, None
else:
input_bias, recurrent_bias = array_ops.unstack(self.bias)
if self.implementation == 1:
if 0. < self.dropout < 1.:
inputs_z = inputs * dp_mask[0]
inputs_r = inputs * dp_mask[1]
inputs_h = inputs * dp_mask[2]
else:
inputs_z = inputs
inputs_r = inputs
inputs_h = inputs
x_z = K.dot(inputs_z, self.kernel[:, :self.units])
x_r = K.dot(inputs_r, self.kernel[:, self.units:self.units * 2])
x_h = K.dot(inputs_h, self.kernel[:, self.units * 2:])
if self.use_bias:
x_z = K.bias_add(x_z, input_bias[:self.units])
x_r = K.bias_add(x_r, input_bias[self.units: self.units * 2])
x_h = K.bias_add(x_h, input_bias[self.units * 2:])
if 0. < self.recurrent_dropout < 1.:
h_tm1_z = h_tm1 * rec_dp_mask[0]
h_tm1_r = h_tm1 * rec_dp_mask[1]
h_tm1_h = h_tm1 * rec_dp_mask[2]
else:
h_tm1_z = h_tm1
h_tm1_r = h_tm1
h_tm1_h = h_tm1
recurrent_z = K.dot(h_tm1_z, self.recurrent_kernel[:, :self.units])
recurrent_r = K.dot(h_tm1_r,
self.recurrent_kernel[:, self.units:self.units * 2])
if self.reset_after and self.use_bias:
recurrent_z = K.bias_add(recurrent_z, recurrent_bias[:self.units])
recurrent_r = K.bias_add(recurrent_r,
recurrent_bias[self.units:self.units * 2])
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
# reset gate applied after/before matrix multiplication
if self.reset_after:
recurrent_h = K.dot(h_tm1_h, self.recurrent_kernel[:, self.units * 2:])
if self.use_bias:
recurrent_h = K.bias_add(recurrent_h, recurrent_bias[self.units * 2:])
recurrent_h = r * recurrent_h
else:
recurrent_h = K.dot(r * h_tm1_h,
self.recurrent_kernel[:, self.units * 2:])
hh = self.activation(x_h + recurrent_h)
else:
if 0. < self.dropout < 1.:
inputs = inputs * dp_mask[0]
# inputs projected by all gate matrices at once
matrix_x = K.dot(inputs, self.kernel)
if self.use_bias:
# biases: bias_z_i, bias_r_i, bias_h_i
matrix_x = K.bias_add(matrix_x, input_bias)
x_z = matrix_x[:, :self.units]
x_r = matrix_x[:, self.units: 2 * self.units]
x_h = matrix_x[:, 2 * self.units:]
if 0. < self.recurrent_dropout < 1.:
h_tm1 = h_tm1 * rec_dp_mask[0]
if self.reset_after:
# hidden state projected by all gate matrices at once
matrix_inner = K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
matrix_inner = K.bias_add(matrix_inner, recurrent_bias)
else:
# hidden state projected separately for update/reset and new
matrix_inner = K.dot(h_tm1, self.recurrent_kernel[:, :2 * self.units])
recurrent_z = matrix_inner[:, :self.units]
recurrent_r = matrix_inner[:, self.units:2 * self.units]
z = self.recurrent_activation(x_z + recurrent_z)
r = self.recurrent_activation(x_r + recurrent_r)
if self.reset_after:
recurrent_h = r * matrix_inner[:, 2 * self.units:]
else:
recurrent_h = K.dot(r * h_tm1,
self.recurrent_kernel[:, 2 * self.units:])
hh = self.activation(x_h + recurrent_h)
# previous and candidate state mixed by update gate
h = z * h_tm1 + (1 - z) * hh
return h, [h]
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint),
'dropout': self.dropout,
'recurrent_dropout': self.recurrent_dropout,
'implementation': self.implementation,
'reset_after': self.reset_after
}
base_config = super(GRUCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return _generate_zero_filled_state_for_cell(self, inputs, batch_size, dtype)
@keras_export(v1=['keras.layers.GRU'])
class GRU(RNN):
"""Gated Recurrent Unit - Cho et al. 2014.
There are two variants. The default one is based on 1406.1078v3 and
has reset gate applied to hidden state before matrix multiplication. The
other one is based on original 1406.1078v1 and has the order reversed.
The second variant is compatible with CuDNNGRU (GPU-only) and allows
inference on CPU. Thus it has separate biases for `kernel` and
  `recurrent_kernel`. Use `reset_after=True` and
`recurrent_activation='sigmoid'`.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix, used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
return_sequences: Boolean. Whether to return the last output
in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
reset_after: GRU convention (whether to apply reset gate after or
before matrix multiplication). False = "before" (default),
True = "after" (CuDNN compatible).
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
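
  Example (a minimal sketch; `timesteps` and `input_dim` are illustrative
  placeholders):

  ```python
  gru = GRU(4, return_sequences=True, return_state=True)
  inputs = keras.Input((timesteps, input_dim))
  # whole_sequence: (batch, timesteps, 4); final_state: (batch, 4)
  whole_sequence, final_state = gru(inputs)
  ```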
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
reset_after=False,
**kwargs):
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
'Please update your layer call.')
cell = GRUCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
reset_after=reset_after,
dtype=kwargs.get('dtype'))
super(GRU, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(GRU, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
@property
def reset_after(self):
return self.cell.reset_after
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation,
'reset_after':
self.reset_after
}
base_config = super(GRU, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
@keras_export(v1=['keras.layers.LSTMCell'])
class LSTMCell(DropoutRNNCellMixin, Layer):
"""Cell class for the LSTM layer.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
Call arguments:
inputs: A 2D tensor.
states: List of state tensors corresponding to the previous timestep.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. Only relevant when `dropout` or
`recurrent_dropout` is used.
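
  Example (a minimal usage sketch; `timesteps` and `input_dim` are
  illustrative placeholders):

  ```python
  cell = LSTMCell(64)
  layer = RNN(cell)
  inputs = keras.Input((timesteps, input_dim))
  output = layer(inputs)  # output shape: (batch, 64)
  ```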
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
**kwargs):
super(LSTMCell, self).__init__(**kwargs)
self.units = units
self.activation = activations.get(activation)
self.recurrent_activation = activations.get(recurrent_activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.recurrent_initializer = initializers.get(recurrent_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.unit_forget_bias = unit_forget_bias
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.recurrent_constraint = constraints.get(recurrent_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.dropout = min(1., max(0., dropout))
self.recurrent_dropout = min(1., max(0., recurrent_dropout))
self.implementation = implementation
# tuple(_ListWrapper) was silently dropping list content in at least 2.7.10,
    # and fixed after 2.7.16. The state_size is wrapped in NoDependency() so
    # that base_layer.__setattr__ will not convert it to ListWrapper.
    # Downstream, self.states will be a list, since it is generated from
    # nest.map_structure with list, and tuple(list) will work properly.
self.state_size = data_structures.NoDependency([self.units, self.units])
self.output_size = self.units
@tf_utils.shape_type_conversion
def build(self, input_shape):
input_dim = input_shape[-1]
self.kernel = self.add_weight(
shape=(input_dim, self.units * 4),
name='kernel',
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units * 4),
name='recurrent_kernel',
initializer=self.recurrent_initializer,
regularizer=self.recurrent_regularizer,
constraint=self.recurrent_constraint)
if self.use_bias:
if self.unit_forget_bias:
def bias_initializer(_, *args, **kwargs):
return K.concatenate([
self.bias_initializer((self.units,), *args, **kwargs),
initializers.Ones()((self.units,), *args, **kwargs),
self.bias_initializer((self.units * 2,), *args, **kwargs),
])
else:
bias_initializer = self.bias_initializer
self.bias = self.add_weight(
shape=(self.units * 4,),
name='bias',
initializer=bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint)
else:
self.bias = None
self.built = True
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
"""Computes carry and output using split kernels."""
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
"""Computes carry and output using fused kernels."""
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0)
f = self.recurrent_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3)
return c, o
def call(self, inputs, states, training=None):
h_tm1 = states[0] # previous memory state
c_tm1 = states[1] # previous carry state
dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
h_tm1, training, count=4)
if self.implementation == 1:
if 0 < self.dropout < 1.:
inputs_i = inputs * dp_mask[0]
inputs_f = inputs * dp_mask[1]
inputs_c = inputs * dp_mask[2]
inputs_o = inputs * dp_mask[3]
else:
inputs_i = inputs
inputs_f = inputs
inputs_c = inputs
inputs_o = inputs
k_i, k_f, k_c, k_o = array_ops.split(
self.kernel, num_or_size_splits=4, axis=1)
x_i = K.dot(inputs_i, k_i)
x_f = K.dot(inputs_f, k_f)
x_c = K.dot(inputs_c, k_c)
x_o = K.dot(inputs_o, k_o)
if self.use_bias:
b_i, b_f, b_c, b_o = array_ops.split(
self.bias, num_or_size_splits=4, axis=0)
x_i = K.bias_add(x_i, b_i)
x_f = K.bias_add(x_f, b_f)
x_c = K.bias_add(x_c, b_c)
x_o = K.bias_add(x_o, b_o)
if 0 < self.recurrent_dropout < 1.:
h_tm1_i = h_tm1 * rec_dp_mask[0]
h_tm1_f = h_tm1 * rec_dp_mask[1]
h_tm1_c = h_tm1 * rec_dp_mask[2]
h_tm1_o = h_tm1 * rec_dp_mask[3]
else:
h_tm1_i = h_tm1
h_tm1_f = h_tm1
h_tm1_c = h_tm1
h_tm1_o = h_tm1
x = (x_i, x_f, x_c, x_o)
h_tm1 = (h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o)
c, o = self._compute_carry_and_output(x, h_tm1, c_tm1)
else:
if 0. < self.dropout < 1.:
inputs = inputs * dp_mask[0]
z = K.dot(inputs, self.kernel)
if 0. < self.recurrent_dropout < 1.:
h_tm1 = h_tm1 * rec_dp_mask[0]
z += K.dot(h_tm1, self.recurrent_kernel)
if self.use_bias:
z = K.bias_add(z, self.bias)
z = array_ops.split(z, num_or_size_splits=4, axis=1)
c, o = self._compute_carry_and_output_fused(z, c_tm1)
h = o * self.activation(c)
return h, [h, c]
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
return list(_generate_zero_filled_state_for_cell(
self, inputs, batch_size, dtype))
@keras_export('keras.experimental.PeepholeLSTMCell')
class PeepholeLSTMCell(LSTMCell):
"""Equivalent to LSTMCell class but adds peephole connections.
Peephole connections allow the gates to utilize the previous internal state as
well as the previous hidden state (which is what LSTMCell is limited to).
This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.
From [Gers et al.](http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
"We find that LSTM augmented by 'peephole connections' from its internal
cells to its multiplicative gates can learn the fine distinction between
sequences of spikes spaced either 50 or 49 time steps apart without the help
of any short training exemplars."
The peephole implementation is based on:
[Long short-term memory recurrent neural network architectures for
large scale acoustic modeling.
](https://research.google.com/pubs/archive/43905.pdf)
Example:
```python
# Create 2 PeepholeLSTMCells
peephole_lstm_cells = [PeepholeLSTMCell(size) for size in [128, 256]]
# Create a layer composed sequentially of the peephole LSTM cells.
layer = RNN(peephole_lstm_cells)
input = keras.Input((timesteps, input_dim))
output = layer(input)
```
"""
def build(self, input_shape):
super(PeepholeLSTMCell, self).build(input_shape)
# The following are the weight matrices for the peephole connections. These
# are multiplied with the previous internal state during the computation of
# carry and output.
self.input_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='input_gate_peephole_weights',
initializer=self.kernel_initializer)
self.forget_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='forget_gate_peephole_weights',
initializer=self.kernel_initializer)
self.output_gate_peephole_weights = self.add_weight(
shape=(self.units,),
name='output_gate_peephole_weights',
initializer=self.kernel_initializer)
def _compute_carry_and_output(self, x, h_tm1, c_tm1):
x_i, x_f, x_c, x_o = x
h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1
i = self.recurrent_activation(
x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]) +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(x_f + K.dot(
h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]) +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(x_c + K.dot(
h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))
o = self.recurrent_activation(
x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]) +
self.output_gate_peephole_weights * c)
return c, o
def _compute_carry_and_output_fused(self, z, c_tm1):
z0, z1, z2, z3 = z
i = self.recurrent_activation(z0 +
self.input_gate_peephole_weights * c_tm1)
f = self.recurrent_activation(z1 +
self.forget_gate_peephole_weights * c_tm1)
c = f * c_tm1 + i * self.activation(z2)
o = self.recurrent_activation(z3 + self.output_gate_peephole_weights * c)
return c, o
@keras_export(v1=['keras.layers.LSTM'])
class LSTM(RNN):
"""Long Short-Term Memory layer - Hochreiter 1997.
Note that this cell is not optimized for performance on GPU. Please use
`tf.compat.v1.keras.layers.CuDNNLSTM` for better performance on GPU.
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
Default: hyperbolic tangent (`tanh`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
recurrent_activation: Activation function to use
for the recurrent step.
Default: hard sigmoid (`hard_sigmoid`).
If you pass `None`, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix,
      used for the linear transformation of the inputs.
recurrent_initializer: Initializer for the `recurrent_kernel`
weights matrix,
used for the linear transformation of the recurrent state.
bias_initializer: Initializer for the bias vector.
unit_forget_bias: Boolean.
If True, add 1 to the bias of the forget gate at initialization.
Setting it to true will also force `bias_initializer="zeros"`.
This is recommended in [Jozefowicz et
al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf).
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
recurrent_regularizer: Regularizer function applied to
the `recurrent_kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
      the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
recurrent_constraint: Constraint function applied to
the `recurrent_kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the inputs.
recurrent_dropout: Float between 0 and 1.
Fraction of the units to drop for
the linear transformation of the recurrent state.
implementation: Implementation mode, either 1 or 2.
Mode 1 will structure its operations as a larger number of
smaller dot products and additions, whereas mode 2 will
batch them into fewer, larger operations. These modes will
have different performance profiles on different hardware and
for different applications.
    return_sequences: Boolean. Whether to return the last output
      in the output sequence, or the full sequence.
return_state: Boolean. Whether to return the last state
in addition to the output.
go_backwards: Boolean (default False).
If True, process the input sequence backwards and return the
reversed sequence.
stateful: Boolean (default False). If True, the last state
for each sample at index i in a batch will be used as initial
state for the sample of index i in the following batch.
unroll: Boolean (default False).
If True, the network will be unrolled,
else a symbolic loop will be used.
Unrolling can speed-up a RNN,
although it tends to be more memory-intensive.
Unrolling is only suitable for short sequences.
time_major: The shape format of the `inputs` and `outputs` tensors.
If True, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
Call arguments:
inputs: A 3D tensor.
mask: Binary tensor of shape `(samples, timesteps)` indicating whether
a given timestep should be masked.
training: Python boolean indicating whether the layer should behave in
training mode or in inference mode. This argument is passed to the cell
when calling it. This is only relevant if `dropout` or
`recurrent_dropout` is used.
initial_state: List of initial state tensors to be passed to the first
call of the cell.
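
  Example (a minimal sketch; with `return_state=True` the layer returns the
  output followed by the hidden and carry states):

  ```python
  lstm = LSTM(4, return_sequences=True, return_state=True)
  inputs = keras.Input((timesteps, input_dim))
  # whole_sequence: (batch, timesteps, 4); state_h, state_c: (batch, 4)
  whole_sequence, state_h, state_c = lstm(inputs)
  ```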
"""
def __init__(self,
units,
activation='tanh',
recurrent_activation='hard_sigmoid',
use_bias=True,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
bias_initializer='zeros',
unit_forget_bias=True,
kernel_regularizer=None,
recurrent_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
recurrent_constraint=None,
bias_constraint=None,
dropout=0.,
recurrent_dropout=0.,
implementation=1,
return_sequences=False,
return_state=False,
go_backwards=False,
stateful=False,
unroll=False,
**kwargs):
if implementation == 0:
logging.warning('`implementation=0` has been deprecated, '
                      'and now defaults to `implementation=1`. '
'Please update your layer call.')
cell = LSTMCell(
units,
activation=activation,
recurrent_activation=recurrent_activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
recurrent_initializer=recurrent_initializer,
unit_forget_bias=unit_forget_bias,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
recurrent_regularizer=recurrent_regularizer,
bias_regularizer=bias_regularizer,
kernel_constraint=kernel_constraint,
recurrent_constraint=recurrent_constraint,
bias_constraint=bias_constraint,
dropout=dropout,
recurrent_dropout=recurrent_dropout,
implementation=implementation,
dtype=kwargs.get('dtype'))
super(LSTM, self).__init__(
cell,
return_sequences=return_sequences,
return_state=return_state,
go_backwards=go_backwards,
stateful=stateful,
unroll=unroll,
**kwargs)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.input_spec = [InputSpec(ndim=3)]
def call(self, inputs, mask=None, training=None, initial_state=None):
self.cell.reset_dropout_mask()
self.cell.reset_recurrent_dropout_mask()
return super(LSTM, self).call(
inputs, mask=mask, training=training, initial_state=initial_state)
@property
def units(self):
return self.cell.units
@property
def activation(self):
return self.cell.activation
@property
def recurrent_activation(self):
return self.cell.recurrent_activation
@property
def use_bias(self):
return self.cell.use_bias
@property
def kernel_initializer(self):
return self.cell.kernel_initializer
@property
def recurrent_initializer(self):
return self.cell.recurrent_initializer
@property
def bias_initializer(self):
return self.cell.bias_initializer
@property
def unit_forget_bias(self):
return self.cell.unit_forget_bias
@property
def kernel_regularizer(self):
return self.cell.kernel_regularizer
@property
def recurrent_regularizer(self):
return self.cell.recurrent_regularizer
@property
def bias_regularizer(self):
return self.cell.bias_regularizer
@property
def kernel_constraint(self):
return self.cell.kernel_constraint
@property
def recurrent_constraint(self):
return self.cell.recurrent_constraint
@property
def bias_constraint(self):
return self.cell.bias_constraint
@property
def dropout(self):
return self.cell.dropout
@property
def recurrent_dropout(self):
return self.cell.recurrent_dropout
@property
def implementation(self):
return self.cell.implementation
def get_config(self):
config = {
'units':
self.units,
'activation':
activations.serialize(self.activation),
'recurrent_activation':
activations.serialize(self.recurrent_activation),
'use_bias':
self.use_bias,
'kernel_initializer':
initializers.serialize(self.kernel_initializer),
'recurrent_initializer':
initializers.serialize(self.recurrent_initializer),
'bias_initializer':
initializers.serialize(self.bias_initializer),
'unit_forget_bias':
self.unit_forget_bias,
'kernel_regularizer':
regularizers.serialize(self.kernel_regularizer),
'recurrent_regularizer':
regularizers.serialize(self.recurrent_regularizer),
'bias_regularizer':
regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint':
constraints.serialize(self.kernel_constraint),
'recurrent_constraint':
constraints.serialize(self.recurrent_constraint),
'bias_constraint':
constraints.serialize(self.bias_constraint),
'dropout':
self.dropout,
'recurrent_dropout':
self.recurrent_dropout,
'implementation':
self.implementation
}
base_config = super(LSTM, self).get_config()
del base_config['cell']
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
if 'implementation' in config and config['implementation'] == 0:
config['implementation'] = 1
return cls(**config)
def _generate_dropout_mask(ones, rate, training=None, count=1):
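  """Create `count` dropout masks with the same shape as `ones`.

  `K.in_train_phase` ensures dropout is only applied in training mode; at
  inference the all-ones tensor is returned unchanged. Returns a list of
  masks when `count > 1`, otherwise a single mask tensor.
  """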
def dropped_inputs():
return K.dropout(ones, rate)
if count > 1:
return [
K.in_train_phase(dropped_inputs, ones, training=training)
for _ in range(count)
]
return K.in_train_phase(dropped_inputs, ones, training=training)
def _standardize_args(inputs, initial_state, constants, num_constants):
"""Standardizes `__call__` to a single list of tensor inputs.
When running a model loaded from a file, the input tensors
`initial_state` and `constants` can be passed to `RNN.__call__()` as part
of `inputs` instead of by the dedicated keyword arguments. This method
makes sure the arguments are separated and that `initial_state` and
`constants` are lists of tensors (or None).
Arguments:
    inputs: Tensor or list/tuple of tensors, which may include constants
      and initial states. In that case `num_constants` must be specified.
initial_state: Tensor or list of tensors or None, initial states.
constants: Tensor or list of tensors or None, constant tensors.
num_constants: Expected number of constants (if constants are passed as
      part of the `inputs` list).
Returns:
inputs: Single tensor or tuple of tensors.
initial_state: List of tensors or None.
constants: List of tensors or None.
"""
if isinstance(inputs, list):
# There are several situations here:
    # In graph mode, __call__ will only be called once. The initial_state
    # and constants could be in inputs (from file loading).
    # In eager mode, __call__ will be called twice: once during
    # rnn_layer(inputs=input_t, constants=c_t, ...), and a second time during
    # model.fit/train_on_batch/predict with real np data. In the second case,
    # the inputs will contain initial_state and constants as eager tensors.
#
# For either case, the real input is the first item in the list, which
    # could be a nested structure itself. It is followed by initial_states,
    # which could be a list of items (or a list of lists if the initial_state
    # is a complex structure), and finally by constants, which is a flat list.
assert initial_state is None and constants is None
if num_constants:
constants = inputs[-num_constants:]
inputs = inputs[:-num_constants]
if len(inputs) > 1:
initial_state = inputs[1:]
inputs = inputs[:1]
if len(inputs) > 1:
inputs = tuple(inputs)
else:
inputs = inputs[0]
def to_list_or_none(x):
if x is None or isinstance(x, list):
return x
if isinstance(x, tuple):
return list(x)
return [x]
initial_state = to_list_or_none(initial_state)
constants = to_list_or_none(constants)
return inputs, initial_state, constants
def _is_multiple_state(state_size):
"""Check whether the state_size contains multiple states."""
return (hasattr(state_size, '__len__') and
not isinstance(state_size, tensor_shape.TensorShape))
def _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype):
if inputs is not None:
batch_size = array_ops.shape(inputs)[0]
dtype = inputs.dtype
return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
def _generate_zero_filled_state(batch_size_tensor, state_size, dtype):
"""Generate a zero filled tensor with shape [batch_size, state_size]."""
if batch_size_tensor is None or dtype is None:
raise ValueError(
'batch_size and dtype cannot be None while constructing initial state: '
'batch_size={}, dtype={}'.format(batch_size_tensor, dtype))
def create_zeros(unnested_state_size):
flat_dims = tensor_shape.as_shape(unnested_state_size).as_list()
init_state_size = [batch_size_tensor] + flat_dims
return array_ops.zeros(init_state_size, dtype=dtype)
if nest.is_sequence(state_size):
return nest.map_structure(create_zeros, state_size)
else:
return create_zeros(state_size)
# --- vote/vote/urls.py (repo: aiegoo/django-web) ---
from django.urls import path
from . import views
app_name = 'vote'
urlpatterns = [
    # ex: /vote/
    path('', views.IndexView.as_view(), name='index'),
    # ex: /vote/5/
    path('<int:pk>/', views.DetailView.as_view(), name='detail'),
    # ex: /vote/5/results/
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    # ex: /vote/5/vote/
    path('<int:question_id>/vote/', views.vote, name='vote'),
]
# --- src/VAMPzero/Component/Fuel/Mass/mFuelMAX.py (repo: p-chambers/VAMPzero, Apache-2.0) ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Copyright: Deutsches Zentrum fuer Luft- und Raumfahrt e.V., 2015 (c)
Contact: [email protected] and [email protected]
'''
from math import sqrt  # thickness-to-chord ratios are non-negative, so a real-valued sqrt suffices
from VAMPzero.Handler.Parameter import parameter
class mFuelMAX(parameter):
'''
The maximum fuel mass that can be stored in the tanks
:Unit: [kg]
'''
def __init__(self, value=0., unit='kg', parent='', cpacsPath=''):
super(mFuelMAX, self).__init__(value=value, unit=unit, doc=self.__doc__, status='init', parent=parent,
cpacsPath=cpacsPath)
def calc(self):
'''
Sets the calc method to calcHeinze
'''
self.calc = self.calcHeinze
def calcHeinze(self):
'''
Calculates the maximum fuel mass that can be stored in the wing from the
geometrical definition of a single trapezoid
        The factor k from Heinze was chosen to be 0.32
:Source: Entwerfen von Verkehrsflugzeugen II, W. Heinze, TU Braunschweig, 2005, pp. 169
'''
taperRatio = self.parent.aircraft.wing.taperRatio.getValue()
span = self.parent.aircraft.wing.span.getValue()
cRoot = self.parent.aircraft.wing.cRoot.getValue()
tcRoot = self.parent.aircraft.wing.airfoilr.tc.getValue()
tcTip = self.parent.aircraft.wing.airfoilt.tc.getValue()
k = 0.32
density = 775 #[kg/m3]
        #Calculate the tank volume
if tcRoot != 0.:
brace1 = 1 + taperRatio ** 2 * tcTip / tcRoot + taperRatio * sqrt(tcTip / tcRoot)
else:
brace1 = 0.
Vtank = 2. / 3. * span / 2. * k * cRoot ** 2 * tcRoot * (brace1)
#Return result as Volume of the tank times the density
return self.setValueCalc(Vtank * density)
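    # A worked sketch of calcHeinze with hypothetical mid-size-airliner
    # values: span = 34 m, cRoot = 6 m, taperRatio = 0.2, tcRoot = 0.15,
    # tcTip = 0.11. Then brace1 = 1 + 0.2**2 * (0.11/0.15)
    # + 0.2 * sqrt(0.11/0.15) ~= 1.20, Vtank = 2/3 * 17 * 0.32 * 36
    # * 0.15 * 1.20 ~= 23.5 m3, and the returned mass is
    # ~= 23.5 * 775 ~= 18200 kg.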
def calcFLOPS(self):
'''
Calculation of the maximum Fuel Mass from the amount of fuel that can be stored in the wing
Calculation Method in Flops sets FWMX to 23 as default. This is altered to 23/2.2046 for SI Units
        :Source: Flight Optimization System (FLOPS) User's Guide, McCullers, L.A., NASA Langley, 2009, p.
'''
FWMX = 23 / 2.2046
refArea = self.parent.aircraft.wing.refArea.getValue()
taperRatio = self.parent.aircraft.wing.taperRatio.getValue()
span = self.parent.aircraft.wing.span.getValue()
tcAVG = self.parent.aircraft.wing.tcAVG.getValue()
#Span and Area must be converted into ft / ft**2 for correct results
term1 = tcAVG * (refArea / 0.092903 ) ** 2 / (span / 0.3048)
term2 = taperRatio / (1 + taperRatio) ** 2
FuelMass = FWMX * term1 * (1 - term2)
return self.setValueCalc(FuelMass)
###################################################################################################
#EOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFEOFE#
###################################################################################################
|
[
"[email protected]"
] | |
c352969fb7fafde0ee6e6ff1bb2743e14ce90e4c
|
7e729ea05a6a4e297bb832b77720a18cd0227805
|
/Projects/Online Workouts/w3resource/String/program-52.py
|
5515ab19e111fdd9fa6c453eb8b5e32cb3e6da1c
|
[
"MIT"
] |
permissive
|
ivenpoker/Python-Projects
|
943d127ae900df52b43aac07c395e9d717196115
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
refs/heads/master
| 2022-12-18T16:36:37.954835 | 2020-09-14T19:42:46 | 2020-09-14T19:43:09 | 180,323,469 | 1 | 0 |
MIT
| 2022-12-08T01:05:35 | 2019-04-09T08:42:40 |
Python
|
UTF-8
|
Python
| false | false | 1,891 |
py
|
#############################################################################################
# #
# Program purpose: Prints all permutation with given repetition number of characters #
# of a given string. #
# Program Author : Happi Yvan <[email protected]> #
# Creation Date : October 25, 2019 #
# #
#############################################################################################
from itertools import product
def obtain_user_data(mess: str):
is_valid = False
user_data = ''
while is_valid is False:
try:
user_data = input(mess)
if len(user_data) == 0:
raise ValueError('Please provide some data to work with')
is_valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
return user_data
def all_repeat(main_str: str, perm_num: int):
chars = list(main_str)
results = []
for c in product(chars, repeat=perm_num):
results.append(c)
return results
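# A minimal sketch of the expected output (hypothetical input): with
# main_str='ab' and perm_num=2, itertools.product yields the 2**2 = 4
# permutations with repetition:
#   all_repeat('ab', 2) -> [('a', 'a'), ('a', 'b'), ('b', 'a'), ('b', 'b')]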
if __name__ == "__main__":
main_data = obtain_user_data(mess='Enter some data: ')
num_perm, valid = 0, False
while not valid:
try:
num_perm = int(obtain_user_data(mess='Enter number of permutations: '))
if num_perm <= 0:
raise ValueError('Please, enter positive number')
valid = True
except ValueError as ve:
print(f'[ERROR]: {ve}')
# main test
print(f"Combinations with repeat #{num_perm}: {all_repeat(main_str=main_data, perm_num=num_perm)}")
|
[
"[email protected]"
] | |
4beb0e58abe9c75d8744830eb9004bed5a7b0751
|
433da00d8da1c28b528a34775db66a53cb505d82
|
/players/Different Experiment and Final Script /Ex7- Script58/Script53.py
|
1848a546e0ad6521427d135594d2fa0e7661a879
|
[] |
no_license
|
maithrreye/Cant-stop-game
|
9cf0e5ffe4862a6f5cd9aaafcb9e0e6359debfd9
|
225b638854643af3168fb75516a08167a431bb35
|
refs/heads/master
| 2022-11-05T21:07:32.627004 | 2020-03-04T09:03:59 | 2020-03-04T09:03:59 | 244,762,576 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,102 |
py
|
from players.player import Player
import random
from players.scripts.DSL import DSL
class Script53(Player):
def __init__(self):
self._counter_calls = []
for i in range(17):
self._counter_calls.append(0)
def get_counter_calls(self):
return self._counter_calls
def get_action(self, state):
actions = state.available_moves()
for a in actions:
if DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a):
self._counter_calls[0] += 1
return a
if DSL.isStopAction(a) and DSL.isStopAction(a):
self._counter_calls[1] += 1
return a
if DSL.numberPositionsConquered(state, 4 ) > 1 and DSL.containsNumber(a, 4 ):
self._counter_calls[2] += 1
return a
if DSL.containsNumber(a, 4 ) and DSL.actionWinsColumn(state,a):
self._counter_calls[3] += 1
return a
if DSL.isDoubles(a) and DSL.isDoubles(a):
self._counter_calls[4] += 1
return a
if DSL.isDoubles(a):
self._counter_calls[5] += 1
return a
if DSL.actionWinsColumn(state,a) and DSL.hasWonColumn(state,a):
self._counter_calls[6] += 1
return a
if DSL.containsNumber(a, 4 ):
self._counter_calls[7] += 1
return a
if DSL.actionWinsColumn(state,a):
self._counter_calls[8] += 1
return a
if DSL.isStopAction(a):
self._counter_calls[9] += 1
return a
if DSL.isDoubles(a) and DSL.containsNumber(a, 5 ):
self._counter_calls[10] += 1
return a
if DSL.containsNumber(a, 2 ):
self._counter_calls[11] += 1
return a
if DSL.hasWonColumn(state,a):
self._counter_calls[12] += 1
return a
if DSL.containsNumber(a, 3 ) and DSL.containsNumber(a, 3 ) and DSL.isDoubles(a):
self._counter_calls[13] += 1
return a
if DSL.numberPositionsConquered(state, 3 ) > 0 and DSL.containsNumber(a, 3 ):
self._counter_calls[14] += 1
return a
if DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a) and DSL.actionWinsColumn(state,a):
self._counter_calls[15] += 1
return a
if DSL.containsNumber(a, 3 ):
self._counter_calls[16] += 1
return a
return actions[0]
|
[
"[email protected]"
] | |
590ddb59f9578a5276547b3f499842f78bb87130
|
e7adbd8e9effb5c112e24adab012101fe07f2674
|
/tests/__init__.py
|
40e52aedb91313660e2c9dbfcc2ccd353891d38f
|
[
"MIT"
] |
permissive
|
markmo/naya
|
6e35312ea218d357b1dfccfba5390adea609ccc7
|
8bdf4221ee8c2bc0b80ad5bb05c2d18114edf9e5
|
refs/heads/master
| 2020-04-11T15:49:21.910830 | 2018-12-15T13:14:29 | 2018-12-15T13:14:29 | 161,904,256 | 0 | 0 |
MIT
| 2018-12-15T12:46:26 | 2018-12-15T12:46:26 | null |
UTF-8
|
Python
| false | false | 21 |
py
|
__author__ = 'dyule'
|
[
"[email protected]"
] | |
e0766521e175164493e412fdd41801bcf8248c51
|
65e73c6c4a9e66715be2cbdd93339ebcab93976e
|
/windmill/boletagem/forms.py
|
ed5142d00f80dfe6141151b93321bb3cbde5cdf4
|
[] |
no_license
|
AnimaTakeshi/windmill-django
|
3577f304d5e7f74750c7d95369e87d37209f1ac6
|
78bde49ace1ed215f6238fe94c142eac16e164dc
|
refs/heads/master
| 2022-12-13T11:13:21.859012 | 2019-02-07T20:50:01 | 2019-02-07T20:50:01 | 150,470,109 | 0 | 0 | null | 2022-12-08T01:29:36 | 2018-09-26T18:13:54 |
Python
|
UTF-8
|
Python
| false | false | 786 |
py
|
from django import forms
from . import models
class FormBoletaAcao(forms.ModelForm):
class Meta:
model = models.BoletaAcao
fields = "__all__"
def clean_quantidade(self):
data = self.cleaned_data['quantidade']
print(data)
if self.cleaned_data['operacao'] == 'C':
data = abs(data)
else:
data = -abs(data)
return data
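    # Sign convention sketch (assumed semantics, since the model is not shown
    # here): operacao 'C' presumably means compra/buy and stores quantidade
    # as +abs(value); any other code (e.g. a sell) is stored as -abs(value).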
class FormBoletaRendaFixaLocal(forms.ModelForm):
class Meta:
model = models.BoletaRendaFixaLocal
fields = "__all__"
def clean_quantidade(self):
data = self.cleaned_data['quantidade']
print(data)
if self.cleaned_data['operacao'] == 'C':
data = abs(data)
else:
data = -abs(data)
return data
|
[
"[email protected]"
] | |
afd1459f1116172ee4305c5657bb7774a8069b34
|
103c413086fbfadee1c52a7ea3125b9f20864f67
|
/setup.py
|
7c43572a3cb6819a6055ef6a5645b869c9dbb5c1
|
[
"MIT"
] |
permissive
|
tudstlennkozh/python-minifobf
|
2dff2cb407c665afc6cb23f795661092086b4ff7
|
1d4a59ede7298d46cde43a27f739991ad60c5171
|
refs/heads/master
| 2023-07-25T06:27:34.359672 | 2021-05-16T13:30:23 | 2021-07-22T16:41:21 | 398,327,879 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,933 |
py
|
import os.path
from setuptools import setup, find_packages
readme_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.md')
with open(readme_path) as f:
long_desc = f.read()
setup(
name='python_minifier',
    description='Transform Python source code into its most compact representation',
author='Daniel Flook',
author_email='[email protected]',
url='https://github.com/dflook/python-minifier',
license='MIT',
project_urls={
'Issues': 'https://github.com/dflook/python-minifier/issues',
'Documentation': 'https://dflook.github.io/python-minifier/',
},
keywords='minify minifier',
use_scm_version=True,
package_dir={'': 'src'},
packages=find_packages('src'),
long_description=long_desc,
long_description_content_type='text/markdown',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, <3.10',
setup_requires=['setuptools_scm'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Intended Audience :: Developers',
'Topic :: Software Development'
],
entry_points = {
'console_scripts': ['pyminify=python_minifier.__main__:main']
},
zip_safe=True
)
|
[
"[email protected]"
] | |
961058db0405e628c78d3744179be1a2aa4f188f
|
d507d0846902e0012a4b2a0aaaea1cbbdb21db46
|
/supervisely_lib/metric/classification_metrics.py
|
4392dea0801a666cd774015e3b43a9e1891fafec
|
[] |
no_license
|
wpilibsuite/supervisely
|
a569fdc0d5e5f2fb912f32beab8f3fedb277504e
|
19805ca9b2bd20e31d6d41a99dc37dc439bc257a
|
refs/heads/master
| 2022-09-09T02:32:54.883109 | 2020-06-01T20:55:49 | 2020-06-01T20:55:49 | 267,916,361 | 2 | 3 | null | 2020-06-03T13:59:56 | 2020-05-29T17:27:30 |
Python
|
UTF-8
|
Python
| false | false | 5,021 |
py
|
# coding: utf-8
from copy import deepcopy
from supervisely_lib.sly_logger import logger
from supervisely_lib.annotation.tag_meta import TagValueType
from supervisely_lib.metric.metric_base import MetricsBase
from supervisely_lib.metric.common import log_line, safe_ratio, sum_counters, TRUE_POSITIVE, TRUE_NEGATIVE, \
FALSE_POSITIVE, FALSE_NEGATIVE, ACCURACY, PRECISION, RECALL, F1_MEASURE
RAW_COUNTERS = [TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE]
class ClassificationMetrics(MetricsBase):
def __init__(self, tags_mapping, confidence_threshold=0):
if len(tags_mapping) < 1:
            raise RuntimeError('At least one tag pair should be defined!')
self._tags_mapping = tags_mapping.copy()
self._confidence_threshold = confidence_threshold
self._counters = {tag_name_gt: {counter: 0 for counter in RAW_COUNTERS} for tag_name_gt in
self._tags_mapping.keys()}
def _classification_metrics(self, ann_1, ann_2):
def is_passes_confidence_threshold(tag):
if tag.meta.value_type == TagValueType.NONE:
return True
elif tag.meta.value_type == TagValueType.ANY_NUMBER:
return tag.value >= self._confidence_threshold
elif tag.meta.value_type == TagValueType.ANY_STRING or tag.meta.value_type == TagValueType.ONEOF_STRING:
logger.warning("Classification tag '{}'".format(tag.name))
return True
current_metric_res = {}
for tag_name_gt, tag_name_pred in self._tags_mapping.items():
tag1 = ann_1.img_tags.get(tag_name_gt)
tag2 = ann_2.img_tags.get(tag_name_pred)
c1 = is_passes_confidence_threshold(tag1) if tag1 is not None else False
c2 = is_passes_confidence_threshold(tag2) if tag2 is not None else False
current_metric_res[tag_name_gt] = {
TRUE_POSITIVE: int(c1 and c2),
TRUE_NEGATIVE: int(not c1 and not c2),
FALSE_POSITIVE: int(not c1 and c2),
FALSE_NEGATIVE: int(c1 and not c2)
}
return current_metric_res
def add_pair(self, ann_gt, ann_pred):
res = self._classification_metrics(ann_gt, ann_pred)
for tag_name_gt, met_data in res.items():
for metric_name, metric_value in met_data.items():
self._counters[tag_name_gt][metric_name] += metric_value
@staticmethod
def _calculate_complex_metrics(values):
tp = values[TRUE_POSITIVE]
tn = values[TRUE_NEGATIVE]
fp = values[FALSE_POSITIVE]
fn = values[FALSE_NEGATIVE]
values[ACCURACY] = safe_ratio(tp + tn, tp + tn + fp + fn)
values[PRECISION] = safe_ratio(tp, tp + fp)
values[RECALL] = safe_ratio(tp, tp + fn)
values[F1_MEASURE] = safe_ratio(2.0 * tp, 2.0 * tp + fp + fn)
def get_metrics(self):
result = deepcopy(self._counters)
for pair_counters in result.values():
self._calculate_complex_metrics(pair_counters)
return result
def get_total_metrics(self):
result = sum_counters(self._counters.values(), (TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE, FALSE_NEGATIVE))
self._calculate_complex_metrics(result)
return result
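    # A worked sketch of the derived metrics (hypothetical counters):
    # with TP=8, TN=5, FP=2, FN=1 the formulas above give
    # accuracy = 13/16 = 0.8125, precision = 8/10 = 0.8,
    # recall = 8/9 ~= 0.889 and F1 = 16/19 ~= 0.842.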
def log_total_metrics(self):
common_info = """
P = condition positive (the number of real positive cases in the data)
N = condition negative (the number of real negative cases in the data)
TP = True Positive prediction
TN = True Negative prediction
FP = False Positive prediction (Type I error)
FN = False Negative prediction (Type II error)
Accuracy = (TP + TN)/(TP + TN + FP + FN) = TRUE/TOTAL
Precision = TP / (TP + FP)
Recall = TP / (TP + FN)
F1-Measure = (2 * TP) / (2 * TP + FP + FN)
"""
log_line()
log_line(c='*')
for line in common_info.split('\n'):
line = line.strip()
if len(line) > 0:
logger.info(line.ljust(80))
log_line(c='*')
log_line()
def print_evaluation_values(tag_pair_metrics):
labels = [ACCURACY, PRECISION, RECALL, F1_MEASURE, TRUE_POSITIVE, TRUE_NEGATIVE, FALSE_POSITIVE,
FALSE_NEGATIVE]
for label in labels:
logger.info(' {0}: {1:2.4f}'.format(label.ljust(16), tag_pair_metrics[label]))
for i, (tag_name_gt, tag_metrics) in enumerate(self.get_metrics().items(), start=1):
logger.info('{}) {} <--> {}:'.format(i, tag_name_gt, self._tags_mapping[tag_name_gt]))
print_evaluation_values(tag_metrics)
log_line()
logger.info('Total values:')
total_values = self.get_total_metrics()
print_evaluation_values(total_values)
log_line()
log_line(c='*')
|
[
"[email protected]"
] | |
b8d15a47a5af0f68dbc337f3085e8229d1001478
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5769900270288896_0/Python/gilesg/B.py
|
9b6e7164caea2e5f0ee7de5be27548db97ad2c10
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,116 |
py
|
def ints():
return map(int, raw_input().split())
INF = 10 ** 9
LIM = 16
num_cases, = ints()
def count(grid):
R = len(grid)
C = len(grid[0])
ret = 0
for i in range(R):
for j in range(C):
for d in ((0, 1), (1, 0)):
ii = i + d[0]
jj = j + d[1]
if ii < R and jj < C:
if grid[i][j] and grid[ii][jj]:
ret += 1
return ret
def construct(R, C, x):
ret = []
for i in range(R):
row = [0] * C
for j in range(C):
row[j] = x % 2
x /= 2
ret.append(row)
return ret
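# A small sketch of construct (hypothetical input): construct(2, 2, 6)
# decodes the bitmask 6 = 0b110 row by row (least significant bit first)
# into the grid [[0, 1], [1, 0]].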
def bf(R, C):
ret = [INF] * (R*C + 1)
for x in range(2 ** (R*C)):
grid = construct(R, C, x)
n = sum(sum(row) for row in grid)
cost = count(grid)
ret[n] = min(ret[n], cost)
return ret
d = {}
for R in range(1, LIM+1):
for C in range(1, LIM+1):
if R * C <= LIM:
d[(R, C)] = bf(R, C)
for case_num in xrange(1, num_cases + 1):
R, C, N = ints()
print "Case #%d: %s" % (case_num, d[(R, C)][N])
|
[
"[email protected]"
] | |
1fff13e4ffd8369a4724c0d5ac905d670593c68b
|
a8769709aeb7299fa3757f0e7bba5c617eb8cfe3
|
/lesson-3/k8s/lib/python2.7/site-packages/adal/self_signed_jwt.py
|
54c0fd9913797739a85a8e938eb40ccea83fac03
|
[
"Apache-2.0"
] |
permissive
|
simox-83/workshop-k8s
|
2ac5e8b282bb7c3337acc726a7d972717bf649cc
|
04cb18e8b5925a3cfd84ca316952a6cb64960b31
|
refs/heads/master
| 2020-03-31T20:52:21.421995 | 2018-10-11T14:43:08 | 2018-10-11T14:43:08 | 152,558,678 | 0 | 0 |
Apache-2.0
| 2018-10-11T08:37:20 | 2018-10-11T08:37:20 | null |
UTF-8
|
Python
| false | false | 4,883 |
py
|
#------------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation.
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#------------------------------------------------------------------------------
import time
import datetime
import uuid
import base64
import binascii
import re
import jwt
from .constants import Jwt
from .log import Logger
from .adal_error import AdalError
def _get_date_now():
return datetime.datetime.now()
def _get_new_jwt_id():
return str(uuid.uuid4())
def _create_x5t_value(thumbprint):
hex_val = binascii.a2b_hex(thumbprint)
return base64.urlsafe_b64encode(hex_val).decode()
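# A small sketch of the x5t encoding above (hypothetical, shortened
# thumbprint -- real ones are 40 hex chars): _create_x5t_value('deadbeef')
# converts the hex to the raw bytes b'\xde\xad\xbe\xef' and returns the
# URL-safe base64 string '3q2-7w=='.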
def _sign_jwt(header, payload, certificate):
try:
encoded_jwt = _encode_jwt(payload, certificate, header)
except Exception as exp:
raise AdalError("Error:Invalid Certificate: Expected Start of Certificate to be '-----BEGIN RSA PRIVATE KEY-----'", exp)
_raise_on_invalid_jwt_signature(encoded_jwt)
return encoded_jwt
def _encode_jwt(payload, certificate, header):
return jwt.encode(payload, certificate, algorithm='RS256', headers=header).decode()
def _raise_on_invalid_jwt_signature(encoded_jwt):
segments = encoded_jwt.split('.')
if len(segments) < 3 or not segments[2]:
raise AdalError('Failed to sign JWT. This is most likely due to an invalid certificate.')
class SelfSignedJwt(object):
NumCharIn128BitHexString = 128/8*2
numCharIn160BitHexString = 160/8*2
ThumbprintRegEx = r"^[a-f\d]*$"
def __init__(self, call_context, authority, client_id):
self._log = Logger('SelfSignedJwt', call_context['log_context'])
self._call_context = call_context
self._authortiy = authority
self._token_endpoint = authority.token_endpoint
self._client_id = client_id
def _create_header(self, thumbprint):
x5t = _create_x5t_value(thumbprint)
header = {'typ':'JWT', 'alg':'RS256', 'x5t':x5t}
self._log.debug("Creating self signed JWT header. x5t: %(x5t)s",
{"x5t": x5t})
return header
def _create_payload(self):
now = _get_date_now()
minutes = datetime.timedelta(0, 0, 0, 0, Jwt.SELF_SIGNED_JWT_LIFETIME)
expires = now + minutes
self._log.debug(
'Creating self signed JWT payload. Expires: %(expires)s NotBefore: %(nbf)s',
{"expires": expires, "nbf": now})
jwt_payload = {}
jwt_payload[Jwt.AUDIENCE] = self._token_endpoint
jwt_payload[Jwt.ISSUER] = self._client_id
jwt_payload[Jwt.SUBJECT] = self._client_id
jwt_payload[Jwt.NOT_BEFORE] = int(time.mktime(now.timetuple()))
jwt_payload[Jwt.EXPIRES_ON] = int(time.mktime(expires.timetuple()))
jwt_payload[Jwt.JWT_ID] = _get_new_jwt_id()
return jwt_payload
def _raise_on_invalid_thumbprint(self, thumbprint):
thumbprint_sizes = [self.NumCharIn128BitHexString, self.numCharIn160BitHexString]
size_ok = len(thumbprint) in thumbprint_sizes
if not size_ok or not re.search(self.ThumbprintRegEx, thumbprint):
raise AdalError("The thumbprint does not match a known format")
def _reduce_thumbprint(self, thumbprint):
canonical = thumbprint.lower().replace(' ', '').replace(':', '')
self._raise_on_invalid_thumbprint(canonical)
return canonical
def create(self, certificate, thumbprint):
thumbprint = self._reduce_thumbprint(thumbprint)
header = self._create_header(thumbprint)
payload = self._create_payload()
return _sign_jwt(header, payload, certificate)
|
[
"[email protected]"
] | |
b7f79c4d8d2f67f78e73f488547be2567370dd3a
|
998a978c0cefcb62d462c64ec88f61b8bdcbbbab
|
/braindecode/datautil/splitters.py
|
3cb3f7769369e41edc1817d0ee0b238ab085eb98
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
petsas/braindecode
|
cf16b52f0bb0897165e14110959e486d0e379d64
|
651ef3d7a467b22948802527018a7a8e686c567d
|
refs/heads/master
| 2020-03-31T00:31:35.215291 | 2018-09-19T10:33:20 | 2018-09-19T10:33:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,840 |
py
|
import numpy as np
from braindecode.datautil.iterators import get_balanced_batches
from braindecode.datautil.signal_target import apply_to_X_y, SignalAndTarget
def concatenate_sets(sets):
"""
Concatenate all sets together.
Parameters
----------
sets: list of :class:`.SignalAndTarget`
Returns
-------
concatenated_set: :class:`.SignalAndTarget`
"""
concatenated_set = sets[0]
for s in sets[1:]:
concatenated_set = concatenate_two_sets(concatenated_set, s)
return concatenated_set
def concatenate_two_sets(set_a, set_b):
"""
Concatenate two sets together.
Parameters
----------
set_a, set_b: :class:`.SignalAndTarget`
Returns
-------
concatenated_set: :class:`.SignalAndTarget`
"""
new_X = concatenate_np_array_or_add_lists(set_a.X, set_b.X)
new_y = concatenate_np_array_or_add_lists(set_a.y, set_b.y)
return SignalAndTarget(new_X, new_y)
def concatenate_np_array_or_add_lists(a, b):
if hasattr(a, 'ndim') and hasattr(b, 'ndim'):
new = np.concatenate((a, b), axis=0)
else:
if hasattr(a, 'ndim'):
a = a.tolist()
if hasattr(b, 'ndim'):
b = b.tolist()
new = a + b
return new
def split_into_two_sets(dataset, first_set_fraction=None, n_first_set=None):
"""
Split set into two sets either by fraction of first set or by number
of trials in first set.
Parameters
----------
dataset: :class:`.SignalAndTarget`
first_set_fraction: float, optional
Fraction of trials in first set.
n_first_set: int, optional
Number of trials in first set
Returns
-------
first_set, second_set: :class:`.SignalAndTarget`
The two splitted sets.
"""
assert (first_set_fraction is None) != (n_first_set is None), (
"Pass either first_set_fraction or n_first_set")
if n_first_set is None:
n_first_set = int(round(len(dataset.X) * first_set_fraction))
assert n_first_set < len(dataset.X)
first_set = apply_to_X_y(lambda a: a[:n_first_set], dataset)
second_set = apply_to_X_y(lambda a: a[n_first_set:], dataset)
return first_set, second_set
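# A minimal usage sketch (hypothetical data): for a dataset with 100 trials,
#   train_set, valid_set = split_into_two_sets(dataset, first_set_fraction=0.8)
# sets n_first_set = round(100 * 0.8) = 80 and returns an 80-trial first set
# and a 20-trial second set, split in the original trial order.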
def select_examples(dataset, indices):
"""
Select examples from dataset.
Parameters
----------
dataset: :class:`.SignalAndTarget`
indices: list of int, 1d-array of int
Indices to select
Returns
-------
reduced_set: :class:`.SignalAndTarget`
Dataset with only examples selected.
"""
# probably not necessary
indices = np.array(indices)
if hasattr(dataset.X, 'ndim'):
# numpy array
new_X = np.array(dataset.X)[indices]
else:
# list
new_X = [dataset.X[i] for i in indices]
new_y = np.asarray(dataset.y)[indices]
return SignalAndTarget(new_X, new_y)
def split_into_train_valid_test(dataset, n_folds, i_test_fold, rng=None):
"""
Split datasets into folds, select one valid fold, one test fold and merge rest as train fold.
Parameters
----------
dataset: :class:`.SignalAndTarget`
n_folds: int
Number of folds to split dataset into.
i_test_fold: int
        Index of the test fold (0-based). The validation fold is the fold immediately preceding the test fold.
rng: `numpy.random.RandomState`, optional
Random Generator for shuffling, None means no shuffling
Returns
-------
    train_set, valid_set, test_set: :class:`.SignalAndTarget`
        The train, validation and test folds.
"""
n_trials = len(dataset.X)
if n_trials < n_folds:
raise ValueError("Less Trials: {:d} than folds: {:d}".format(
n_trials, n_folds
))
shuffle = rng is not None
folds = get_balanced_batches(
n_trials, rng, shuffle, n_batches=n_folds)
test_inds = folds[i_test_fold]
valid_inds = folds[i_test_fold - 1]
all_inds = list(range(n_trials))
train_inds = np.setdiff1d(all_inds, np.union1d(test_inds, valid_inds))
assert np.intersect1d(train_inds, valid_inds).size == 0
assert np.intersect1d(train_inds, test_inds).size == 0
assert np.intersect1d(valid_inds, test_inds).size == 0
assert np.array_equal(np.sort(
np.union1d(train_inds, np.union1d(valid_inds, test_inds))),
all_inds)
train_set = select_examples(dataset, train_inds)
valid_set = select_examples(dataset, valid_inds)
test_set = select_examples(dataset, test_inds)
return train_set, valid_set, test_set
def split_into_train_test(dataset, n_folds, i_test_fold, rng=None):
"""
Split datasets into folds, select one test fold and merge rest as train fold.
Parameters
----------
dataset: :class:`.SignalAndTarget`
n_folds: int
Number of folds to split dataset into.
i_test_fold: int
Index of the test fold (0-based)
rng: `numpy.random.RandomState`, optional
Random Generator for shuffling, None means no shuffling
Returns
-------
    train_set, test_set: :class:`.SignalAndTarget`
        The train and test folds.
"""
n_trials = len(dataset.X)
if n_trials < n_folds:
raise ValueError("Less Trials: {:d} than folds: {:d}".format(
n_trials, n_folds
))
shuffle = rng is not None
folds = get_balanced_batches(n_trials, rng, shuffle,
n_batches=n_folds)
test_inds = folds[i_test_fold]
    all_inds = list(range(n_trials))  # n_trials is already an int; calling len() on it would raise a TypeError
train_inds = np.setdiff1d(all_inds, test_inds)
assert np.intersect1d(train_inds, test_inds).size == 0
assert np.array_equal(np.sort(np.union1d(train_inds, test_inds)),
all_inds)
train_set = select_examples(dataset, train_inds)
test_set = select_examples(dataset, test_inds)
return train_set, test_set
|
[
"[email protected]"
] | |
e7ad8276225b9d99786edd79c442cec80c355739
|
d687928d05e05fac765f3b936c1e0c0d1950c68a
|
/src/hg/hourglass_tiny.py
|
0df5b078f74d54702491d4a5a8d86f714e885111
|
[] |
no_license
|
PeterZs/PuzzleTunnelDiscovery
|
0b75dc3e62d0e8d18941e3c37646aec498b3a251
|
badd6debb5cf4882598038ac2cab454e014fc24b
|
refs/heads/master
| 2022-06-17T20:03:51.210708 | 2020-04-30T05:14:16 | 2020-04-30T05:14:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 41,233 |
py
|
# -*- coding: utf-8 -*-
"""
Deep Human Pose Estimation
Project by Walid Benbihi
MSc Individual Project
Imperial College
Created on Mon Jul 10 19:13:56 2017
@author: Walid Benbihi
@mail : w.benbihi(at)gmail.com
@github : https://github.com/wbenbihi/hourglasstensorlfow/
Abstract:
This python code creates a Stacked Hourglass Model
(Credits : A.Newell et al.)
(Paper : https://arxiv.org/abs/1603.06937)
Code translated from 'anewell' github
Torch7(LUA) --> TensorFlow(PYTHON)
(Code : https://github.com/anewell/pose-hg-train)
	Modifications are made and explained in the report
Goal : Achieve Real Time detection (Webcam)
----- Modifications made to obtain faster results (trade off speed/accuracy)
This work is free of use, please cite the author if you use it!
"""
import time
import tensorflow as tf
import numpy as np
import sys
import datetime
import os
from scipy.misc import imsave
import progressbar
class HourglassModel():
""" HourglassModel class: (to be renamed)
Generate TensorFlow model to train and predict Human Pose from images (soon videos)
Please check README.txt for further information on model management.
"""
def __init__(self, nFeat = 512, nStack = 4, nModules = 1, nLow = 4, outputDim = 16, batch_size = 16, drop_rate = 0.2, lear_rate = 2.5e-4, decay = 0.96, decay_step = 2000, dataset = None, dataset_name='', training = True, w_summary = True, logdir_train = None, logdir_test = None,tiny = True, attention = False,modif = True,w_loss = False, name = 'tiny_hourglass', joints = ['r_anckle', 'r_knee', 'r_hip', 'l_hip', 'l_knee', 'l_anckle', 'pelvis', 'thorax', 'neck', 'head', 'r_wrist', 'r_elbow', 'r_shoulder', 'l_shoulder', 'l_elbow', 'l_wrist']):
""" Initializer
Args:
nStack : number of stacks (stage/Hourglass modules)
nFeat : number of feature channels on conv layers
nLow : number of downsampling (pooling) per module
outputDim : number of output Dimension (16 for MPII)
batch_size : size of training/testing Batch
			drop_rate : fraction of neurons disabled by the Dropout layers
lear_rate : Learning Rate starting value
decay : Learning Rate Exponential Decay (decay in ]0,1], 1 for constant learning rate)
decay_step : Step to apply decay
dataset : Dataset (class DataGenerator)
dataset_name : Name of the Dataset
training : (bool) True for training / False for prediction
w_summary : (bool) True/False for summary of weight (to visualize in Tensorboard)
tiny : (bool) Activate Tiny Hourglass
attention : (bool) Activate Multi Context Attention Mechanism (MCAM)
modif : (bool) Boolean to test some network modification # DO NOT USE IT ! USED TO TEST THE NETWORK
name : name of the model
"""
self.nStack = nStack
self.nFeat = nFeat
self.nModules = nModules
self.outDim = outputDim
self.batchSize = batch_size
self.training = training
self.w_summary = w_summary
self.tiny = tiny
self.dropout_rate = drop_rate
self.learning_rate = lear_rate
self.decay = decay
self.name = name
self.attention = attention
self.decay_step = decay_step
self.nLow = nLow
self.modif = modif
self.dataset = dataset
self.dataset_name = dataset_name
self.cpu = '/cpu:0'
self.gpu = '/gpu:0'
self.logdir_train = logdir_train
self.logdir_test = logdir_test
self.joints = joints
self.njoints = len(self.joints)
self.w_loss = w_loss
self.c_dim = dataset.c_dim
assert self.njoints == dataset.d_dim, 'Number of joints ({}) does not match output dimensions ({})'.format(self.njoints, dataset.d_dim)
# ACCESSOR
def get_input(self):
""" Returns Input (Placeholder) Tensor
Image Input :
Shape: (None,256,256,c_dim)
Type : tf.float32
Warning:
Be sure to build the model first
"""
return self.img
def get_output(self):
""" Returns Output Tensor
Output Tensor :
Shape: (None, nbStacks, 64, 64, outputDim)
Type : tf.float32
Warning:
Be sure to build the model first
"""
return self.output
def get_label(self):
""" Returns Label (Placeholder) Tensor
Image Input :
Shape: (None, nbStacks, 64, 64, outputDim)
Type : tf.float32
Warning:
Be sure to build the model first
"""
return self.gtMaps
def get_loss(self):
""" Returns Loss Tensor
Image Input :
Shape: (1,)
Type : tf.float32
Warning:
Be sure to build the model first
"""
return self.loss
def get_saver(self):
""" Returns Saver
/!\ USE ONLY IF YOU KNOW WHAT YOU ARE DOING
Warning:
Be sure to build the model first
"""
return self.saver
def generate_model(self):
""" Create the complete graph
"""
startTime = time.time()
print('CREATE MODEL:')
with tf.device(self.gpu):
with tf.name_scope('inputs'):
# Shape Input Image - batchSize: None, height: 256, width: 256, channel: 3 (RGB)
self.img = tf.placeholder(dtype= tf.float32, shape= (None, 256, 256, self.c_dim), name = 'input_img')
if self.w_loss:
self.weights = tf.placeholder(dtype = tf.float32, shape = (None, self.outDim))
# Shape Ground Truth Map: batchSize x nStack x 64 x 64 x outDim
self.gtMaps = tf.placeholder(dtype = tf.float32, shape = (None, self.nStack, 64, 64, self.outDim))
# TODO : Implement weighted loss function
# NOT USABLE AT THE MOMENT
#weights = tf.placeholder(dtype = tf.float32, shape = (None, self.nStack, 1, 1, self.outDim))
inputTime = time.time()
print('---Inputs : Done (' + str(int(abs(inputTime-startTime))) + ' sec.)')
if self.attention:
self.output = self._graph_mcam(self.img)
else :
self.output = self._graph_hourglass(self.img)
graphTime = time.time()
print('---Graph : Done (' + str(int(abs(graphTime-inputTime))) + ' sec.)')
with tf.name_scope('loss'):
if self.w_loss:
self.loss = tf.reduce_mean(self.weighted_bce_loss(), name='reduced_loss')
else:
self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.output, labels= self.gtMaps), name= 'cross_entropy_loss')
lossTime = time.time()
print('---Loss : Done (' + str(int(abs(graphTime-lossTime))) + ' sec.)')
with tf.device(self.cpu):
with tf.name_scope('accuracy'):
self._accuracy_computation()
accurTime = time.time()
print('---Acc : Done (' + str(int(abs(accurTime-lossTime))) + ' sec.)')
with tf.name_scope('steps'):
self.train_step = tf.Variable(0, name = 'global_step', trainable= False)
with tf.name_scope('lr'):
self.lr = tf.train.exponential_decay(self.learning_rate, self.train_step, self.decay_step, self.decay, staircase= True, name= 'learning_rate')
lrTime = time.time()
print('---LR : Done (' + str(int(abs(accurTime-lrTime))) + ' sec.)')
with tf.device(self.gpu):
#with tf.name_scope('rmsprop'):
#self.optimizer = tf.train.RMSPropOptimizer(learning_rate= self.lr)
with tf.name_scope('adam'):
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
optimTime = time.time()
print('---Optim : Done (' + str(int(abs(optimTime-lrTime))) + ' sec.)')
with tf.name_scope('minimizer'):
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(self.update_ops):
self.train_optimize = self.optimizer.minimize(self.loss, self.train_step)
minimTime = time.time()
print('---Minimizer : Done (' + str(int(abs(optimTime-minimTime))) + ' sec.)')
self.init = tf.global_variables_initializer()
initTime = time.time()
print('---Init : Done (' + str(int(abs(initTime-minimTime))) + ' sec.)')
with tf.device(self.cpu):
with tf.name_scope('training'):
tf.summary.scalar('loss', self.loss, collections = ['train'])
tf.summary.scalar('learning_rate', self.lr, collections = ['train'])
tf.summary.image('gt_img', self.get_label()[:,0,:,:,:], collections = ['train'])
tf.summary.image('pred_img', self.output[:,0,:,:,:], collections = ['train'])
with tf.name_scope('summary'):
for i in range(len(self.joints)):
tf.summary.scalar(self.joints[i], self.joint_accur[i], collections = ['train', 'test'])
self.train_op = tf.summary.merge_all('train')
self.test_op = tf.summary.merge_all('test')
self.weight_op = tf.summary.merge_all('weight')
endTime = time.time()
print('Model created (' + str(int(abs(endTime-startTime))) + ' sec.)')
del endTime, startTime, initTime, optimTime, minimTime, lrTime, accurTime, lossTime, graphTime, inputTime
def restore(self, load = None):
""" Restore a pretrained model
Args:
load : Model to load (None if training from scratch) (see README for further information)
"""
with tf.name_scope('Session'):
with tf.device(self.gpu):
self._init_session()
self._define_saver_summary(summary = False)
if load is not None:
print('Loading Trained Model')
t = time.time()
self.saver.restore(self.Session, load)
print('Model Loaded (', time.time() - t,' sec.)')
else:
print('Please give a Model in args (see README for further information)')
def _train(self, nEpochs = 10, epochSize = 1000, saveStep = 500, validIter = 10):
"""
"""
with tf.name_scope('Train'):
self.generator = self.dataset._aux_generator(self.batchSize, self.nStack, normalize = True, sample_set = 'train')
self.valid_gen = self.dataset._aux_generator(self.batchSize, self.nStack, normalize = True, sample_set = 'valid')
startTime = time.time()
self.resume = {}
self.resume['accur'] = []
self.resume['loss'] = []
self.resume['err'] = []
for epoch in range(nEpochs):
epochstartTime = time.time()
avg_cost = 0.
cost = 0.
print('Epoch :' + str(epoch) + '/' + str(nEpochs) + '\n')
# Training Set
for i in range(epochSize):
# DISPLAY PROGRESS BAR
# TODO : Customize Progress Bar
percent = (float(i+1)/float(epochSize)) * 100
num = np.int(20*percent/100)
tToEpoch = int((time.time() - epochstartTime) * (100 - percent)/(percent))
sys.stdout.write('\r Train: {0}>'.format("="*num) + "{0}>".format(" "*(20-num)) + '||' + str(percent)[:4] + '%' + ' -cost: ' + str(cost)[:6] + ' -avg_loss: ' + str(avg_cost)[:5] + ' -timeToEnd: ' + str(tToEpoch) + ' sec.')
sys.stdout.flush()
img_train, gt_train, weight_train = next(self.generator)
if saveStep >= 0 and i % saveStep == 0:
if self.w_loss:
_, c, summary = self.Session.run([self.train_optimize, self.loss, self.train_op], feed_dict = {self.img : img_train, self.gtMaps: gt_train, self.weights: weight_train})
else:
_, c, summary = self.Session.run([self.train_optimize, self.loss, self.train_op], feed_dict = {self.img : img_train, self.gtMaps: gt_train})
# Save summary (Loss + Accuracy)
self.train_summary.add_summary(summary, epoch*epochSize + i)
self.train_summary.flush()
else:
if self.w_loss:
_, c, = self.Session.run([self.train_optimize, self.loss], feed_dict = {self.img : img_train, self.gtMaps: gt_train, self.weights: weight_train})
else:
_, c, = self.Session.run([self.train_optimize, self.loss], feed_dict = {self.img : img_train, self.gtMaps: gt_train})
cost += c
avg_cost += c/epochSize
epochfinishTime = time.time()
#Save Weight (axis = epoch)
if self.w_loss:
weight_summary = self.Session.run(self.weight_op, {self.img : img_train, self.gtMaps: gt_train, self.weights: weight_train})
else :
weight_summary = self.Session.run(self.weight_op, {self.img : img_train, self.gtMaps: gt_train})
self.train_summary.add_summary(weight_summary, epoch)
self.train_summary.flush()
#self.weight_summary.add_summary(weight_summary, epoch)
#self.weight_summary.flush()
print('Epoch ' + str(epoch) + '/' + str(nEpochs) + ' done in ' + str(int(epochfinishTime-epochstartTime)) + ' sec.' + ' -avg_time/batch: ' + str(((epochfinishTime-epochstartTime)/epochSize))[:4] + ' sec.')
with tf.name_scope('save'):
self.saver.save(self.Session, os.path.join(os.getcwd(),str(self.name + '_' + str(epoch + 1))))
self.resume['loss'].append(cost)
# Validation Set
accuracy_array = np.array([0.0]*len(self.joint_accur))
for i in range(validIter):
                    img_valid, gt_valid, w_valid = next(self.valid_gen)  # draw from the validation generator, not the training one
accuracy_pred = self.Session.run(self.joint_accur, feed_dict = {self.img : img_valid, self.gtMaps: gt_valid})
accuracy_array += np.array(accuracy_pred, dtype = np.float32) / validIter
print('--Avg. Accuracy = {} %'.format((np.sum(accuracy_array) / len(accuracy_array)) * 100))
self.resume['accur'].append(accuracy_pred)
self.resume['err'].append(np.sum(accuracy_array) / len(accuracy_array))
valid_summary = self.Session.run(self.test_op, feed_dict={self.img : img_valid, self.gtMaps: gt_valid})
self.test_summary.add_summary(valid_summary, epoch)
self.test_summary.flush()
print('Training Done')
print('Resume:' + '\n' + ' Epochs: ' + str(nEpochs) + '\n' + ' n. Images: ' + str(nEpochs * epochSize * self.batchSize) )
print(' Final Loss: ' + str(cost) + '\n' + ' Relative Loss: ' + str(100*self.resume['loss'][-1]/(self.resume['loss'][0] + 0.1)) + '%' )
print(' Relative Improvement: ' + str((self.resume['err'][-1] - self.resume['err'][0]) * 100) +'%')
print(' Training Time: ' + str( datetime.timedelta(seconds=time.time() - startTime)))
def testing_init(self, nEpochs = 1, epochSize = 1000, saveStep = 0, dataset=None, load=None):
with tf.name_scope('Session'):
with tf.device(self.gpu):
self._init_weight()
self._define_saver_summary()
assert load is not None
ckpt = tf.train.get_checkpoint_state(load)
assert ckpt and ckpt.model_checkpoint_path
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.Session, os.path.join(load, ckpt_name))
self._test(nEpochs=1, epochSize=epochSize, saveStep=0, out_dir=load)
def _test(self, nEpochs = 1, epochSize = 1000, saveStep = 500, out_dir=None):
assert nEpochs == 1
assert self.w_loss is False
assert out_dir is not None
assert saveStep <= 0
tres = 2048
atex = np.zeros(shape=(tres,tres), dtype=np.float32) # accumulator texture
atex_count = np.zeros(shape=(tres,tres), dtype=np.int) # present in the input image
"""
"""
with tf.name_scope('Train'):
self.generator = self.dataset._aux_generator(self.batchSize, self.nStack, normalize = True, sample_set = 'test')
startTime = time.time()
pred = self.output[:, self.nStack - 1]
'''
Notes about profiling (on 1080 Ti, 16384 * 4 samples, '~' means ETA from progressbar):
batch size 4: 19 mins
batch size 4 w/o assignment: ~14 mins
batch size 8: ~17-18 mins (6999 MB)
batch size 8 w/o assignment: ~12 mins
batch size 16: ~13 mins (7511 MB)
batch size 16 w/o assignment: ~11 mins
batch size 32: ~12 mins (7511 MB)
batch size 32 w/o assignment: ~11 mins
batch size 32 rendering only: 3:51
'''
PROFILING2=False # w/o prediction and assignment (generation only)
PROFILING=False or PROFILING2 # w/o assignment
for epoch in range(nEpochs):
epochstartTime = time.time()
print('Epoch :' + str(epoch) + '/' + str(nEpochs) + '\n')
# Training Set
for i in progressbar.progressbar(range(epochSize)):
# DISPLAY PROGRESS BAR
# TODO : Customize Progress Bar
img_test, batch_uv, _ = next(self.generator)
if PROFILING2:
continue
[test_y] = self.Session.run([pred], feed_dict = {self.img : img_test})
if PROFILING:
continue # Profiling, check the % of time used by prediction
for uvi,labeli in zip(batch_uv, test_y):
# np.clip(labeli, 0.0, 1.0, out=labeli)
labeli = np.reshape(labeli, (64,64))
labeli = np.kron(labeli, np.ones((4,4))) # 64x64 -> 256x256
nz = np.nonzero(labeli)
scores = labeli[nz]
uvs = uvi[nz]
us = 1.0 - uvs[:,1]
us = np.array(tres * us, dtype=int)
vs = uvs[:,0]
vs = np.array(tres * vs, dtype=int)
# Filtering US and VS
us_inrange = (us >= 0)
us_inrange = np.logical_and(us < tres, us_inrange) # Not sure the effect when out=one of the input
vs_inrange = (vs >= 0)
vs_inrange = np.logical_and(vs < tres, vs_inrange) # Not sure the effect when out=one of the input
inrange = np.logical_and(us_inrange, vs_inrange)
f_us = us[inrange]
f_vs = vs[inrange]
f_sc = scores[inrange]
atex[f_us,f_vs] += f_sc
atex_count[f_us,f_vs] += 1
'''
# TODO: Better efficiency
for iu,iv,s in zip(us,vs,scores):
if iu < 0 or iu >= tres or iv < 0 or iv > tres:
continue
atex[iu,iv] += s
'''
epochfinishTime = time.time()
print('Epoch ' + str(epoch) + '/' + str(nEpochs) + ' done in ' + str(int(epochfinishTime-epochstartTime)) + ' sec.' + ' -avg_time/batch: ' + str(((epochfinishTime-epochstartTime)/epochSize))[:4] + ' sec.')
if PROFILING or PROFILING2: # Explicit better than implicit (PROFILING2 implies PROFILING)
return
npz_fn = '{}/{}-atex.npz'.format(out_dir, self.dataset_name)
png_fn = '{}/{}-atex.png'.format(out_dir, self.dataset_name)
avgnpz_fn = '{}/{}-atex-avg.npz'.format(out_dir, self.dataset_name)
avgpng_fn = '{}/{}-atex-avg.png'.format(out_dir, self.dataset_name)
print('Testing Done. Saving files to\n{}\n{}'.format(npz_fn, png_fn))
np.clip(atex_count, a_min=1, a_max=None, out=atex_count)
np.savez(npz_fn, ATEX=atex, ATEX_COUNT=atex_count)
np.savez(avgnpz_fn, ATEX=atex/atex_count)
natex = atex / np.amax(atex)
imsave(png_fn, natex)
natex = atex/atex_count
imsave(avgpng_fn, natex)
def record_training(self, record):
""" Record Training Data and Export them in CSV file
Args:
			record : record dictionary
"""
out_file = open(self.name + '_train_record.csv', 'w')
for line in range(len(record['accur'])):
out_string = ''
labels = [record['loss'][line]] + [record['err'][line]] + record['accur'][line]
for label in labels:
out_string += str(label) + ', '
out_string += '\n'
out_file.write(out_string)
out_file.close()
print('Training Record Saved')
def training_init(self, nEpochs = 10, epochSize = 1000, saveStep = 500, dataset = None, load = None):
""" Initialize the training
Args:
nEpochs : Number of Epochs to train
epochSize : Size of one Epoch
saveStep : Step to save 'train' summary (has to be lower than epochSize)
dataset : Data Generator (see generator.py)
load : Model to load (None if training from scratch) (see README for further information)
"""
with tf.name_scope('Session'):
with tf.device(self.gpu):
self._init_weight()
self._define_saver_summary()
if load is not None:
self.saver.restore(self.Session, load)
#try:
# self.saver.restore(self.Session, load)
#except Exception:
# print('Loading Failed! (Check README file for further information)')
self._train(nEpochs, epochSize, saveStep, validIter=10)
def weighted_bce_loss(self):
""" Create Weighted Loss Function
WORK IN PROGRESS
"""
self.bceloss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.output, labels= self.gtMaps), name= 'cross_entropy_loss')
e1 = tf.expand_dims(self.weights,axis = 1, name = 'expdim01')
e2 = tf.expand_dims(e1,axis = 1, name = 'expdim02')
e3 = tf.expand_dims(e2,axis = 1, name = 'expdim03')
return tf.multiply(e3,self.bceloss, name = 'lossW')
def _accuracy_computation(self):
""" Computes accuracy tensor
"""
self.joint_accur = []
for i in range(len(self.joints)):
self.joint_accur.append(self._accur(self.output[:, self.nStack - 1, :, :,i], self.gtMaps[:, self.nStack - 1, :, :, i], self.batchSize))
def _define_saver_summary(self, summary = True):
""" Create Summary and Saver
Args:
logdir_train : Path to train summary directory
logdir_test : Path to test summary directory
"""
if (self.logdir_train == None) or (self.logdir_test == None):
raise ValueError('Train/Test directory not assigned')
else:
with tf.device(self.cpu):
self.saver = tf.train.Saver()
if summary:
with tf.device(self.gpu):
self.train_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())
self.test_summary = tf.summary.FileWriter(self.logdir_test)
def _init_weight(self):
""" Initialize weights
"""
print('Session initialization')
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.Session = tf.Session(config=config)
t_start = time.time()
self.Session.run(self.init)
print('Sess initialized in ' + str(int(time.time() - t_start)) + ' sec.')
def _init_session(self):
""" Initialize Session
"""
print('Session initialization')
t_start = time.time()
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
self.Session = tf.Session(config=config)
print('Sess initialized in ' + str(int(time.time() - t_start)) + ' sec.')
def _graph_hourglass(self, inputs):
"""Create the Network
Args:
inputs : TF Tensor (placeholder) of shape (None, 256, 256, c_dim) #TODO : Create a parameter for customize size
"""
with tf.name_scope('model'):
with tf.name_scope('preprocessing'):
# Input Dim : nbImages x 256 x 256 x 3
pad1 = tf.pad(inputs, [[0,0],[2,2],[2,2],[0,0]], name='pad_1')
# Dim pad1 : nbImages x 260 x 260 x 3
conv1 = self._conv_bn_relu(pad1, filters= 64, kernel_size = 6, strides = 2, name = 'conv_256_to_128')
# Dim conv1 : nbImages x 128 x 128 x 64
r1 = self._residual(conv1, numOut = 128, name = 'r1')
# Dim pad1 : nbImages x 128 x 128 x 128
pool1 = tf.contrib.layers.max_pool2d(r1, [2,2], [2,2], padding='VALID')
# Dim pool1 : nbImages x 64 x 64 x 128
if self.tiny:
r3 = self._residual(pool1, numOut=self.nFeat, name='r3')
else:
r2 = self._residual(pool1, numOut= int(self.nFeat/2), name = 'r2')
r3 = self._residual(r2, numOut= self.nFeat, name = 'r3')
# Storage Table
hg = [None] * self.nStack
ll = [None] * self.nStack
ll_ = [None] * self.nStack
drop = [None] * self.nStack
out = [None] * self.nStack
out_ = [None] * self.nStack
sum_ = [None] * self.nStack
if self.tiny:
with tf.name_scope('stacks'):
with tf.name_scope('stage_0'):
hg[0] = self._hourglass(r3, self.nLow, self.nFeat, 'hourglass')
drop[0] = tf.layers.dropout(hg[0], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[0] = self._conv_bn_relu(drop[0], self.nFeat, 1, 1, name = 'll')
if self.modif:
# TEST OF BATCH RELU
out[0] = self._conv_bn_relu(ll[0], self.outDim, 1, 1, 'VALID', 'out')
else:
out[0] = self._conv(ll[0], self.outDim, 1, 1, 'VALID', 'out')
out_[0] = self._conv(out[0], self.nFeat, 1, 1, 'VALID', 'out_')
sum_[0] = tf.add_n([out_[0], ll[0], r3], name = 'merge')
for i in range(1, self.nStack - 1):
with tf.name_scope('stage_' + str(i)):
hg[i] = self._hourglass(sum_[i-1], self.nLow, self.nFeat, 'hourglass')
drop[i] = tf.layers.dropout(hg[i], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[i] = self._conv_bn_relu(drop[i], self.nFeat, 1, 1, name= 'll')
if self.modif:
# TEST OF BATCH RELU
out[i] = self._conv_bn_relu(ll[i], self.outDim, 1, 1, 'VALID', 'out')
else:
out[i] = self._conv(ll[i], self.outDim, 1, 1, 'VALID', 'out')
out_[i] = self._conv(out[i], self.nFeat, 1, 1, 'VALID', 'out_')
sum_[i] = tf.add_n([out_[i], ll[i], sum_[i-1]], name= 'merge')
with tf.name_scope('stage_' + str(self.nStack - 1)):
hg[self.nStack - 1] = self._hourglass(sum_[self.nStack - 2], self.nLow, self.nFeat, 'hourglass')
drop[self.nStack-1] = tf.layers.dropout(hg[self.nStack-1], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[self.nStack - 1] = self._conv_bn_relu(drop[self.nStack-1], self.nFeat,1,1, 'VALID', 'conv')
if self.modif:
out[self.nStack - 1] = self._conv_bn_relu(ll[self.nStack - 1], self.outDim, 1,1, 'VALID', 'out')
else:
out[self.nStack - 1] = self._conv(ll[self.nStack - 1], self.outDim, 1,1, 'VALID', 'out')
if self.modif:
return tf.nn.sigmoid(tf.stack(out, axis= 1 , name= 'stack_output'),name = 'final_output')
else:
return tf.stack(out, axis= 1 , name = 'final_output')
else:
with tf.name_scope('stacks'):
with tf.name_scope('stage_0'):
hg[0] = self._hourglass(r3, self.nLow, self.nFeat, 'hourglass')
drop[0] = tf.layers.dropout(hg[0], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[0] = self._conv_bn_relu(drop[0], self.nFeat, 1,1, 'VALID', name = 'conv')
ll_[0] = self._conv(ll[0], self.nFeat, 1, 1, 'VALID', 'll')
if self.modif:
# TEST OF BATCH RELU
out[0] = self._conv_bn_relu(ll[0], self.outDim, 1, 1, 'VALID', 'out')
else:
out[0] = self._conv(ll[0], self.outDim, 1, 1, 'VALID', 'out')
out_[0] = self._conv(out[0], self.nFeat, 1, 1, 'VALID', 'out_')
sum_[0] = tf.add_n([out_[0], r3, ll_[0]], name='merge')
for i in range(1, self.nStack -1):
with tf.name_scope('stage_' + str(i)):
hg[i] = self._hourglass(sum_[i-1], self.nLow, self.nFeat, 'hourglass')
drop[i] = tf.layers.dropout(hg[i], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[i] = self._conv_bn_relu(drop[i], self.nFeat, 1, 1, 'VALID', name= 'conv')
ll_[i] = self._conv(ll[i], self.nFeat, 1, 1, 'VALID', 'll')
if self.modif:
out[i] = self._conv_bn_relu(ll[i], self.outDim, 1, 1, 'VALID', 'out')
else:
out[i] = self._conv(ll[i], self.outDim, 1, 1, 'VALID', 'out')
out_[i] = self._conv(out[i], self.nFeat, 1, 1, 'VALID', 'out_')
sum_[i] = tf.add_n([out_[i], sum_[i-1], ll_[0]], name= 'merge')
with tf.name_scope('stage_' + str(self.nStack -1)):
hg[self.nStack - 1] = self._hourglass(sum_[self.nStack - 2], self.nLow, self.nFeat, 'hourglass')
drop[self.nStack-1] = tf.layers.dropout(hg[self.nStack-1], rate = self.dropout_rate, training = self.training, name = 'dropout')
ll[self.nStack - 1] = self._conv_bn_relu(drop[self.nStack-1], self.nFeat, 1, 1, 'VALID', 'conv')
if self.modif:
out[self.nStack - 1] = self._conv_bn_relu(ll[self.nStack - 1], self.outDim, 1,1, 'VALID', 'out')
else:
out[self.nStack - 1] = self._conv(ll[self.nStack - 1], self.outDim, 1,1, 'VALID', 'out')
if self.modif:
return tf.nn.sigmoid(tf.stack(out, axis= 1 , name= 'stack_output'),name = 'final_output')
else:
return tf.stack(out, axis= 1 , name = 'final_output')
def _conv(self, inputs, filters, kernel_size = 1, strides = 1, pad = 'VALID', name = 'conv'):
""" Spatial Convolution (CONV2D)
Args:
inputs : Input Tensor (Data Type : NHWC)
filters : Number of filters (channels)
kernel_size : Size of kernel
strides : Stride
pad : Padding Type (VALID/SAME) # DO NOT USE 'SAME' NETWORK BUILT FOR VALID
name : Name of the block
Returns:
conv : Output Tensor (Convolved Input)
"""
with tf.name_scope(name):
# Kernel for convolution, Xavier Initialisation
kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,kernel_size, inputs.get_shape().as_list()[3], filters]), name= 'weights')
conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding=pad, data_format='NHWC')
if self.w_summary:
with tf.device('/cpu:0'):
tf.summary.histogram('weights_summary', kernel, collections = ['weight'])
return conv
def _conv_bn_relu(self, inputs, filters, kernel_size = 1, strides = 1, pad = 'VALID', name = 'conv_bn_relu'):
""" Spatial Convolution (CONV2D) + BatchNormalization + ReLU Activation
Args:
inputs : Input Tensor (Data Type : NHWC)
filters : Number of filters (channels)
kernel_size : Size of kernel
strides : Stride
pad : Padding Type (VALID/SAME) # DO NOT USE 'SAME' NETWORK BUILT FOR VALID
name : Name of the block
Returns:
norm : Output Tensor
"""
with tf.name_scope(name):
kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,kernel_size, inputs.get_shape().as_list()[3], filters]), name= 'weights')
conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding='VALID', data_format='NHWC')
norm = tf.contrib.layers.batch_norm(conv, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
if self.w_summary:
with tf.device('/cpu:0'):
tf.summary.histogram('weights_summary', kernel, collections = ['weight'])
return norm
def _conv_block(self, inputs, numOut, name = 'conv_block'):
""" Convolutional Block
Args:
inputs : Input Tensor
numOut : Desired output number of channel
name : Name of the block
Returns:
conv_3 : Output Tensor
"""
if self.tiny:
with tf.name_scope(name):
norm = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
pad = tf.pad(norm, np.array([[0,0],[1,1],[1,1],[0,0]]), name= 'pad')
conv = self._conv(pad, int(numOut), kernel_size=3, strides=1, pad = 'VALID', name= 'conv')
return conv
else:
with tf.name_scope(name):
with tf.name_scope('norm_1'):
norm_1 = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
conv_1 = self._conv(norm_1, int(numOut/2), kernel_size=1, strides=1, pad = 'VALID', name= 'conv')
with tf.name_scope('norm_2'):
norm_2 = tf.contrib.layers.batch_norm(conv_1, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
pad = tf.pad(norm_2, np.array([[0,0],[1,1],[1,1],[0,0]]), name= 'pad')
conv_2 = self._conv(pad, int(numOut/2), kernel_size=3, strides=1, pad = 'VALID', name= 'conv')
with tf.name_scope('norm_3'):
norm_3 = tf.contrib.layers.batch_norm(conv_2, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
conv_3 = self._conv(norm_3, int(numOut), kernel_size=1, strides=1, pad = 'VALID', name= 'conv')
return conv_3
def _skip_layer(self, inputs, numOut, name = 'skip_layer'):
""" Skip Layer
Args:
inputs : Input Tensor
numOut : Desired output number of channel
name : Name of the bloc
Returns:
Tensor of shape (None, inputs.height, inputs.width, numOut)
"""
with tf.name_scope(name):
if inputs.get_shape().as_list()[3] == numOut:
return inputs
else:
conv = self._conv(inputs, numOut, kernel_size=1, strides = 1, name = 'conv')
return conv
def _residual(self, inputs, numOut, name = 'residual_block'):
""" Residual Unit
Args:
inputs : Input Tensor
numOut : Number of Output Features (channels)
name : Name of the block
"""
with tf.name_scope(name):
convb = self._conv_block(inputs, numOut)
skipl = self._skip_layer(inputs, numOut)
if self.modif:
return tf.nn.relu(tf.add_n([convb, skipl], name = 'res_block'))
else:
return tf.add_n([convb, skipl], name = 'res_block')
def _hourglass(self, inputs, n, numOut, name = 'hourglass'):
""" Hourglass Module
Args:
inputs : Input Tensor
			n : Number of downsampling steps
numOut : Number of Output Features (channels)
name : Name of the block
"""
with tf.name_scope(name):
# Upper Branch
up_1 = self._residual(inputs, numOut, name = 'up_1')
# Lower Branch
low_ = tf.contrib.layers.max_pool2d(inputs, [2,2], [2,2], padding='VALID')
low_1= self._residual(low_, numOut, name = 'low_1')
if n > 0:
low_2 = self._hourglass(low_1, n-1, numOut, name = 'low_2')
else:
low_2 = self._residual(low_1, numOut, name = 'low_2')
low_3 = self._residual(low_2, numOut, name = 'low_3')
up_2 = tf.image.resize_nearest_neighbor(low_3, tf.shape(low_3)[1:3]*2, name = 'upsampling')
if self.modif:
# Use of RELU
return tf.nn.relu(tf.add_n([up_2,up_1]), name='out_hg')
else:
return tf.add_n([up_2,up_1], name='out_hg')
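    # Informal shape sketch (assuming a square NHWC input): each call max-pools
    # by 2 and recurses until n == 0, then upsamples by 2 and adds the skip
    # branch. An hourglass with n downsampling steps therefore bottoms out at
    # 1/2**(n+1) of the input resolution and returns a tensor with the input's
    # height/width and numOut channels.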
def _argmax(self, tensor):
""" ArgMax
Args:
tensor : 2D - Tensor (Height x Width : 64x64 )
Returns:
arg : Tuple of max position
"""
resh = tf.reshape(tensor, [-1])
argmax = tf.argmax(resh, 0)
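        # Row-major flattening: row = argmax // W, col = argmax % W. Using
        # shape[0] for both only works because the heatmaps are square (64x64).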
return (argmax // tensor.get_shape().as_list()[0], argmax % tensor.get_shape().as_list()[0])
def _compute_err(self, u, v):
""" Given 2 tensors compute the euclidean distance (L2) between maxima locations
Args:
u : 2D - Tensor (Height x Width : 64x64 )
v : 2D - Tensor (Height x Width : 64x64 )
Returns:
(float) : Distance (in [0,1])
"""
u_x,u_y = self._argmax(u)
v_x,v_y = self._argmax(v)
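        # 91 ~= sqrt(2) * 64 ~= 90.51 is roughly the diagonal of the 64x64
        # heatmap, so the distance lands in [0, 1]. For example, maxima at
        # (0, 0) and (63, 63) give sqrt(63**2 + 63**2) / 91 ~= 0.98.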
return tf.divide(tf.sqrt(tf.square(tf.to_float(u_x - v_x)) + tf.square(tf.to_float(u_y - v_y))), tf.to_float(91))
def _accur(self, pred, gtMap, num_image):
""" Given a Prediction batch (pred) and a Ground Truth batch (gtMaps),
returns one minus the mean distance.
Args:
pred : Prediction Batch (shape = num_image x 64 x 64)
            gtMap : Ground Truth Batch (shape = num_image x 64 x 64)
num_image : (int) Number of images in batch
Returns:
(float)
"""
err = tf.to_float(0)
for i in range(num_image):
err = tf.add(err, self._compute_err(pred[i], gtMap[i]))
return tf.subtract(tf.to_float(1), err/num_image)
# MULTI CONTEXT ATTENTION MECHANISM
# WORK IN PROGRESS DO NOT USE THESE METHODS
# BASED ON:
# Multi-Context Attention for Human Pose Estimation
# Authors: Xiao Chu, Wei Yang, Wanli Ouyang, Cheng Ma, Alan L. Yuille, Xiaogang Wang
# Paper: https://arxiv.org/abs/1702.07432
# GitHub Torch7 Code: https://github.com/bearpaw/pose-attention
def _bn_relu(self, inputs):
norm = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.training)
return norm
def _pool_layer(self, inputs, numOut, name = 'pool_layer'):
with tf.name_scope(name):
bnr_1 = self._bn_relu(inputs)
pool = tf.contrib.layers.max_pool2d(bnr_1,[2,2],[2,2],padding='VALID')
pad_1 = tf.pad(pool, np.array([[0,0],[1,1],[1,1],[0,0]]))
conv_1 = self._conv(pad_1, numOut, kernel_size=3, strides=1, name='conv')
bnr_2 = self._bn_relu(conv_1)
pad_2 = tf.pad(bnr_2, np.array([[0,0],[1,1],[1,1],[0,0]]))
conv_2 = self._conv(pad_2, numOut, kernel_size=3, strides=1, name='conv')
upsample = tf.image.resize_nearest_neighbor(conv_2, tf.shape(conv_2)[1:3]*2, name = 'upsampling')
return upsample
def _attention_iter(self, inputs, lrnSize, itersize, name = 'attention_iter'):
with tf.name_scope(name):
numIn = inputs.get_shape().as_list()[3]
            padding = int(np.floor(lrnSize/2)) # tf.pad expects integer padding amounts
pad = tf.pad(inputs, np.array([[0,0],[1,1],[1,1],[0,0]]))
U = self._conv(pad, filters=1, kernel_size=3, strides=1)
pad_2 = tf.pad(U, np.array([[0,0],[padding,padding],[padding,padding],[0,0]]))
sharedK = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([lrnSize,lrnSize, 1, 1]), name= 'shared_weights')
Q = []
C = []
for i in range(itersize):
if i ==0:
conv = tf.nn.conv2d(pad_2, sharedK, [1,1,1,1], padding='VALID', data_format='NHWC')
else:
conv = tf.nn.conv2d(Q[i-1], sharedK, [1,1,1,1], padding='SAME', data_format='NHWC')
C.append(conv)
Q_tmp = tf.nn.sigmoid(tf.add_n([C[i], U]))
Q.append(Q_tmp)
stacks = []
for i in range(numIn):
stacks.append(Q[-1])
pfeat = tf.multiply(inputs,tf.concat(stacks, axis = 3) )
return pfeat
def _attention_part_crf(self, inputs, lrnSize, itersize, usepart, name = 'attention_part'):
with tf.name_scope(name):
if usepart == 0:
return self._attention_iter(inputs, lrnSize, itersize)
else:
partnum = self.outDim
pre = []
for i in range(partnum):
att = self._attention_iter(inputs, lrnSize, itersize)
pad = tf.pad(att, np.array([[0,0],[0,0],[0,0],[0,0]]))
s = self._conv(pad, filters=1, kernel_size=1, strides=1)
pre.append(s)
return tf.concat(pre, axis = 3)
def _residual_pool(self, inputs, numOut, name = 'residual_pool'):
with tf.name_scope(name):
return tf.add_n([self._conv_block(inputs, numOut), self._skip_layer(inputs, numOut), self._pool_layer(inputs, numOut)])
def _rep_residual(self, inputs, numOut, nRep, name = 'rep_residual'):
with tf.name_scope(name):
out = [None]*nRep
for i in range(nRep):
if i == 0:
tmpout = self._residual(inputs,numOut)
else:
tmpout = self._residual_pool(out[i-1],numOut)
out[i] = tmpout
return out[nRep-1]
def _hg_mcam(self, inputs, n, numOut, imSize, nModual, name = 'mcam_hg'):
with tf.name_scope(name):
#------------Upper Branch
pool = tf.contrib.layers.max_pool2d(inputs,[2,2],[2,2],padding='VALID')
up = []
low = []
for i in range(nModual):
if i == 0:
if n>1:
tmpup = self._rep_residual(inputs, numOut, n -1)
else:
tmpup = self._residual(inputs, numOut)
tmplow = self._residual(pool, numOut)
else:
if n>1:
tmpup = self._rep_residual(up[i-1], numOut, n-1)
else:
tmpup = self._residual_pool(up[i-1], numOut)
tmplow = self._residual(low[i-1], numOut)
up.append(tmpup)
low.append(tmplow)
#up[i] = tmpup
#low[i] = tmplow
#----------------Lower Branch
if n>1:
low2 = self._hg_mcam(low[-1], n-1, numOut, int(imSize/2), nModual)
else:
low2 = self._residual(low[-1], numOut)
low3 = self._residual(low2, numOut)
up_2 = tf.image.resize_nearest_neighbor(low3, tf.shape(low3)[1:3]*2, name = 'upsampling')
return tf.add_n([up[-1], up_2], name = 'out_hg')
def _lin(self, inputs, numOut, name = 'lin'):
l = self._conv(inputs, filters = numOut, kernel_size = 1, strides = 1)
return self._bn_relu(l)
def _graph_mcam(self, inputs):
with tf.name_scope('preprocessing'):
pad1 = tf.pad(inputs, np.array([[0,0],[3,3],[3,3],[0,0]]))
cnv1_ = self._conv(pad1, filters = 64, kernel_size = 7, strides = 1)
cnv1 = self._bn_relu(cnv1_)
r1 = self._residual(cnv1, 64)
pool1 = tf.contrib.layers.max_pool2d(r1,[2,2],[2,2],padding='VALID')
r2 = self._residual(pool1, 64)
r3 = self._residual(r2, 128)
pool2 = tf.contrib.layers.max_pool2d(r3,[2,2],[2,2],padding='VALID')
r4 = self._residual(pool2,128)
r5 = self._residual(r4, 128)
r6 = self._residual(r5, 256)
out = []
inter = []
inter.append(r6)
if self.nLow == 3:
nModual = int(16/self.nStack)
else:
nModual = int(8/self.nStack)
with tf.name_scope('stacks'):
for i in range(self.nStack):
                with tf.name_scope('hourglass_' + str(i+1)):
hg = self._hg_mcam(inter[i], self.nLow, self.nFeat, 64, nModual)
if i == self.nStack - 1:
ll1 = self._lin(hg, self.nFeat*2)
ll2 = self._lin(ll1, self.nFeat*2)
drop = tf.layers.dropout(ll2, rate=0.1, training = self.training)
att = self._attention_part_crf(drop, 1, 3, 0)
tmpOut = self._attention_part_crf(att, 1, 3, 1)
else:
ll1 = self._lin(hg, self.nFeat)
ll2 = self._lin(ll1, self.nFeat)
drop = tf.layers.dropout(ll2, rate=0.1, training = self.training)
if i > self.nStack // 2:
att = self._attention_part_crf(drop, 1, 3, 0)
tmpOut = self._attention_part_crf(att, 1, 3, 1)
else:
att = self._attention_part_crf(ll2, 1, 3, 0)
tmpOut = self._conv(att, filters = self.outDim, kernel_size = 1, strides = 1)
out.append(tmpOut)
if i < self.nStack - 1:
outmap = self._conv(tmpOut, filters = self.nFeat, kernel_size = 1, strides = 1)
ll3 = self._lin(outmap, self.nFeat)
tmointer = tf.add_n([inter[i], outmap, ll3])
inter.append(tmointer)
return tf.stack(out, axis= 1 , name = 'final_output')
|
[
"[email protected]"
] | |
ee2e28c481d7afa48b81d0bcf46feb11e033f0ac
|
61188d30cd69cfa5063b604f73ced3911e377d99
|
/tests/test_solver.py
|
c9333da956b04805e1d358204575c85e140b6173
|
[
"BSD-3-Clause"
] |
permissive
|
bjodah/odelab
|
8dc984f9bcbb7cb9397cb0d8e598ccdfb52e9301
|
ac6e2e375579fe10e8ee4d7fc89d2c937bd64f6e
|
refs/heads/master
| 2021-01-18T10:41:57.973986 | 2013-01-08T21:44:55 | 2013-01-08T21:44:55 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,460 |
py
|
# -*- coding: UTF-8 -*-
from __future__ import division
from odelab.scheme.rungekutta import *
from odelab.scheme.generallinear import *
from odelab.scheme import *
from odelab.scheme.classic import *
from odelab.scheme.exponential import *
from odelab.store import Store, PyTableStore, SimpleStore
from odelab.system.classic import *
from odelab.system.exponential import *
from odelab import *
import tempfile
import os
import numpy as np
import numpy.testing as npt
import nose.tools as nt
from nose.plugins.skip import SkipTest
import pylab as pl
pl.ioff()
Solver.catch_runtime = False
class Harness(object):
no_plot = True
def f(t,u):
return t*np.ones_like(u)
def const_f(c,t,u):
return c*np.ones_like(u)
def time_f(t,u):
return t
def test_solver_autosave():
solver = Solver(ExplicitEuler(h=.1), System(f))
solver.initialize(u0=1.)
solver.run()
nt.assert_equal(solver.guess_name(), 'System_ExplicitEuler_T1.0')
def test_duration():
"""Duration are added from run to run"""
solver = Solver(ExplicitEuler(h=.1), System(f))
solver.initialize(u0=1.,time=1.,)
solver.run()
d1 = solver.store['duration']
solver.run(time=.1)
d2 = solver.store['duration']
nt.assert_greater(d2, d1)
def test_initialize_len1():
solver = Solver(ExplicitEuler(.1),System(f))
solver.initialize(u0=1.)
nt.assert_equal(len(solver),1)
class InitializedTwiceError(ValueError):
pass
class Scheme_init_once(ExplicitEuler):
def __init__(self, *args,**kwargs):
super(Scheme_init_once,self).__init__(*args, **kwargs)
self.is_initialized = False
def initialize(self, events):
if self.is_initialized:
raise InitializedTwiceError('initialized twice!')
super(Scheme_init_once,self).initialize(events)
self.is_initialized = True
def test_start_from_two():
# check that a scheme is not initialized twice, even if we start from more than one event
dt = .1
solver = Solver(Scheme_init_once(dt), System(f))
solver.initialize(u0=1.)
solver.run(2*dt)
nt.assert_equal(len(solver),3)
solver.scheme.is_initialized = False
solver.run(1.)
print len(solver)
print solver.get_events()
@nt.raises(InitializedTwiceError)
def test_initialize_reset_scheme():
solver = Solver(Scheme_init_once(.1), System(f))
solver.initialize(u0=1., name='first')
nt.assert_is(solver.current_scheme, None)
solver.run(1.)
solver.initialize(u0=2.,name='second')
solver.run(1.)
@nt.raises(MultistepInitializationError)
def test_multistep_init_exception():
multi_scheme = AdamsBashforth2(.1)
s = Solver(scheme=multi_scheme, system=System(f))
s.initialize(u0=1.)
with s.open_store() as events:
s.set_scheme(multi_scheme, events)
class Test_Access(object):
"""
Test the Solver.get_events method.
"""
def setUp(self):
self.s = Solver(ExplicitEuler(.1), System(partial(const_f, 1.)))
self.time = 100
self.s.initialize(u0=np.array([0.]),time=self.time)
def test_access(self):
self.s.run()
sampling_rate = .5
evts = self.s.get_events(t0=0, time=50.05, sampling_rate=sampling_rate)
nt.assert_almost_equal(len(evts.T), len(self.s)*sampling_rate/2, -1) # approx 1/4 of total nb of events
## nt.assert_equal(len(evts.T), 250)
npt.assert_array_almost_equal(evts[:,-1], np.array([50.,50.]))
@nt.raises(Solver.NotRun)
def test_notrun(self):
self.s.get_events()
from functools import partial
const_r = partial(const_f, 1.)
const_c = partial(const_f, 1.j)
class Harness_Solver(Harness):
def setUp(self):
self.setup_solver()
dim = 1
def set_system(self, f):
self.solver.system = System(f)
def test_scheme_str(self):
# should not raise an exception even though h is not yet set in the underlying scheme:
print str(self.solver)
def test_initialize(self):
u0 = np.random.rand(self.dim)
self.solver.initialize(u0=u0,)
nt.assert_equal(self.solver.time, Solver.time)
nt.assert_equal(len(self.solver), 1)
@nt.raises(PyTableStore.AlreadyInitialized)
def test_initialize_twice(self):
if Store is SimpleStore:
raise SkipTest()
u0 = np.random.rand(self.dim)
self.solver.initialize(u0=u0)
self.solver.initialize(u0=u0)
def test_initialize_scheme(self):
raise SkipTest('not relevant anymore, time step is initialized directly at the scheme level')
h = 10.
self.solver.initialize(u0=np.random.rand(self.dim),)
e0 = self.solver.initial()
with self.solver.open_store() as events:
self.solver.set_scheme(self.solver.scheme, events)
self.solver.step(e0[-1], e0[:-1],)
nt.assert_equal(self.solver.scheme.h, h)
def test_quadratic(self):
print type(self).__name__
self.set_system(time_f)
self.solver.initialize(u0=1., time=1.,)
self.solver.run()
# u'(t) = t; u(0) = u0; => u(t) == u0 + t**2/2
npt.assert_array_almost_equal(self.solver.final(), np.array([3/2,1.]), decimal=1)
def check_const(self, f, u0, expected):
"""should solve the f=c exactly"""
print type(self).__name__
self.check_skip(u0,f)
self.set_system(f)
self.solver.initialize(u0=u0, time=1.,)
self.solver.run()
expected_event = np.hstack([expected, 1.])
npt.assert_almost_equal(self.solver.final(), expected_event, 1)
def check_skip(self,u0,f):
return
def test_real_const(self):
self.check_const(const_r, 1., 2.)
def test_complex_const(self):
raise SkipTest('Current nonlinear solver does not work with the complex type.')
self.check_const(const_c, 1.+0j, 1.+1.j)
def test_repr(self):
expected = '<Solver: {0}'.format(repr(self.solver.scheme))
r = repr(self.solver)
nt.assert_true(r.startswith(expected))
if self.solver.init_scheme is not None:
nt.assert_regexp_matches(r, repr(self.solver.init_scheme))
class Test_EEuler(Harness_Solver):
def setup_solver(self):
self.solver = Solver(ExplicitEuler(h=.1), System(f))
class Test_ETrapezoidal(Harness_Solver):
def setup_solver(self):
self.solver = Solver(ExplicitTrapezoidal(h=.1), System(f))
class Test_RK4(Harness_Solver):
def setup_solver(self):
self.solver = Solver(RungeKutta4(h=.1), System(f))
class Test_RK34(Harness_Solver):
def setup_solver(self):
self.solver = Solver(RungeKutta34(h=.1), System(f))
class Test_AB(Harness_Solver):
def setup_solver(self):
multi_scheme = AdamsBashforth2(.1)
self.solver = Solver(multi_scheme, System(f), init_scheme=ExplicitEuler(h=.1))
class Test_RK34Vdp(object):
def setUp(self):
time = 7.8
self.h_init = time/50
self.scheme = RungeKutta34(h=self.h_init)
self.s = Solver(self.scheme, VanderPol(mu=1.))
self.s.initialize(u0 = array([.2,1]), time=time, )
def test_run(self):
self.s.run()
nt.assert_less(self.scheme.h, self.h_init)
class Harness_Solver_NoComplex(Harness_Solver):
def check_skip(self,u0,f):
if isinstance(u0,float) and f is const_c:
raise SkipTest('Does not work with real initial conditions and complex vector fields')
class Test_ode15s(Harness_Solver_NoComplex):
def setup_solver(self):
self.solver = Solver(ode15s(h=.1), System(f))
class Test_LawsonEuler(Harness_Solver_NoComplex):
def set_system(self, f):
self.solver.system = NoLinear(f,self.dim)
def setup_solver(self):
self.solver = Solver(LawsonEuler(h=.1), NoLinear(f,self.dim))
class Test_IEuler(Harness_Solver):
def setup_solver(self):
self.solver = Solver(ImplicitEuler(h=.1), System(f))
@nt.raises(Solver.Unstable)
def test_unstable():
s = Solver(LawsonEuler(h=10.), Linear(np.array([[1.e2]])))
s.initialize(u0 = 1., time = 100,)
s.run()
def make_lin(A):
if np.isscalar(A):
def lin(t,u):
return A*u
else:
def lin(t, u):
return dot(A,u)
lin.exact = make_exp(A)
return lin
def make_exp(A):
if np.isscalar(A):
def exact(u0,t0,t):
return u0 * exp((t-t0)*A)
else:
def exact(u0, t0, t):
return dot(expm((t-t0)*A),u0)
return exact
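# Note: make_lin builds the linear vector field u' = A*u, whose exact solution
# is u(t) = exp((t-t0)*A) * u0 (a matrix exponential when A is a matrix);
# `dot` and `expm` are assumed to come from the star imports above.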
class Harness_Solver_Order(Harness):
a = -1.
u0 = 1.
time = 1.
do_plot=False
def notest_order(self):
self.solver.initialize(u0=self.u0, time=self.time)
order = self.solver.plot_error(do_plot=self.do_plot)
print order
nt.assert_true(order < self.order + .1)
class Test_ExplicitEuler(Harness_Solver_Order):
def setUp(self):
self.solver = Solver(ExplicitEuler(h=.1), System(make_lin(self.a)))
self.order = -1.
class Test_ImplicitEuler(Harness_Solver_Order):
def setUp(self):
self.solver = Solver(ImplicitEuler(h=.1), System(make_lin(self.a)))
self.order = -1.
class Test_RungeKutta4(Harness_Solver_Order):
def setUp(self):
self.solver = Solver(RungeKutta4(h=.1), System(make_lin(self.a)))
self.solver.err_kmin = 1
self.solver.err_kmax = 2.5
self.order = -4.
class DummyException(Exception):
pass
class LimitedSys(System):
def __init__(self, limit):
self.limit = limit
self.i = 0
def f(self, t, x):
if self.i < self.limit:
self.i += 1
return 0
else:
raise DummyException()
class Test_FinalTimeExceptions(object):
limit = 20
def setUp(self):
self.sys = LimitedSys(self.limit)
self.scheme = ExplicitEuler(h=.1)
self.s = Solver(self.scheme, self.sys)
self.s.catch_runtime = True
self.s.initialize(u0=0, time=10, )
@nt.raises(Solver.FinalTimeNotReached)
def test_final_time_not_reached(self):
self.s.run(max_iter = 1)
def test_max_iter(self):
try:
self.s.run()
except self.s.RuntimeError:
pass
nt.assert_greater_equal(self.s._max_iter, self.s.max_iter_factor*self.s.time/self.scheme.h)
time = 50
try:
self.s.run(50)
except self.s.RuntimeError:
pass
nt.assert_greater_equal(self.s._max_iter, self.s.max_iter_factor*time/self.scheme.h)
@nt.raises(Solver.RuntimeError)
def test_sys_exception(self):
self.s.run()
@nt.raises(DummyException)
def test_sys_no_runtime_exception(self):
self.s.catch_runtime = False
self.s.run()
def faulty_function(t,u):
raise Exception('message')
class Test_Exceptions(object):
def setUp(self):
self.s = Solver(ExplicitEuler(h=.1), Linear(np.array([[1]])))
@nt.raises(Solver.NotInitialized)
def test_no_u0(self):
self.s.initialize()
@nt.raises(Solver.NotInitialized)
def test_no_initialize(self):
self.s.run()
@nt.raises(Solver.Unstable)
def test_unstable(self):
self.s = Solver(ExplicitEuler(h=.1), Linear(np.array([[float('inf')]])))
self.s.initialize(u0=np.array([0]))
self.s.run()
@nt.raises(Solver.RuntimeError)
def test_runtime_exception(self):
self.s = Solver(ExplicitEuler(h=.1), System(faulty_function))
self.s.catch_runtime = True
self.s.initialize(u0=0)
self.s.run()
class TotSys(System):
def total(self, xt):
return np.sum(xt[:-1],axis=0)
def minus_x(t, x):
return -x
class Test_Simple(object):
def setUp(self):
sys = TotSys(minus_x)
self.s = Solver(ExplicitEuler(h=.1), sys)
def test_time(self):
sol = self.s
sol.h = Solver.time/10
sol.initialize(u0=0.)
sol.run(sol.h)
npt.assert_(sol.final_time() < Solver.time)
def test_extra_run(self):
"""test that an extra run continues from last time"""
sol = self.s
sol.initialize(u0=1.)
sol.run()
npt.assert_almost_equal(sol.final_time(),Solver.time)
sol.run()
npt.assert_almost_equal(sol.final_time(),2*Solver.time)
def test_plot_args(self):
self.s.initialize(u0=np.array([1.,1.,1.]))
self.s.run()
pl.clf()
lines = self.s.plot(0,lw=5).axis.lines
npt.assert_equal(len(lines),1)
pl.clf()
lines = self.s.plot(lw=5).axis.lines
npt.assert_equal(len(lines),3)
npt.assert_equal(lines[-1].get_linewidth(),5)
def test_plot_function(self):
self.s.initialize(u0=np.array([1.,1.,1.]))
self.s.run()
lines = self.s.plot_function('total', lw=4).axis.lines
npt.assert_equal(lines[-1].get_linewidth(), 4)
pl.ion()
|
[
"[email protected]"
] | |
a259289c6a6b232a8abc5b6e5b43ede75f107410
|
60715c9ea4c66d861708531def532814eab781fd
|
/python-programming-workshop/pythondatastructures/dateandtime/time_yesterday.py
|
e220240661773649db71af463af4edfd0954a20a
|
[] |
no_license
|
bala4rtraining/python_programming
|
6ce64d035ef04486f5dc9572cb0975dd322fcb3e
|
99a5e6cf38448f5a01b310d5f7fa95493139b631
|
refs/heads/master
| 2023-09-03T00:10:26.272124 | 2021-11-01T08:20:52 | 2021-11-01T08:20:52 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 297 |
py
|
#Python program that returns yesterday
from datetime import date
from datetime import timedelta
def yesterday():
# Get today.
today = date.today()
# Subtract timedelta of 1 day.
yesterday = today - timedelta(days=1)
return yesterday
print(date.today())
print(yesterday())
|
[
"[email protected]"
] | |
01a2714dc644c9a68e8435d2bc8bbb20cdd487fb
|
c0c8aeb5aaf08925d8c9e1d660b02c89cbc7ad71
|
/Algorithms/Medium/55. Jump Game/answer.py
|
034d33909be49fa36615b2eb76dd8385990d05ed
|
[
"Apache-2.0"
] |
permissive
|
kenwoov/PlayLeetCode
|
b2fdc43d799c37683a9efdc31c4df159cf553bf5
|
4012a2f0a099a502df1e5df2e39faa75fe6463e8
|
refs/heads/master
| 2022-12-17T05:54:22.775972 | 2020-09-26T14:08:43 | 2020-09-26T14:08:43 | 214,839,611 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 394 |
py
|
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
N = len(nums)
lastPosition = N - 1
for i in range(N-1, -1, -1):
if i + nums[i] >= lastPosition:
lastPosition = i
return lastPosition == 0
if __name__ == "__main__":
s = Solution()
result = s.canJump([3, 2, 1, 0, 4])
print(result)
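    # Extra sanity check (added example, not part of the original file): from
    # index 0 we can jump 0 -> 1 -> 4 (the last index), so this prints True.
    print(s.canJump([2, 3, 1, 1, 4]))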
|
[
"[email protected]"
] | |
d03ff966642e5d1c4511a4ac1e8024d75bf5f2e2
|
3ca67d69abd4e74b7145b340cdda65532f90053b
|
/LeetCode/Two_Pointers/Valid Palindrome/6047198844.py
|
b5bb7a7d4d16f6b7caa8c0ae6180b37f36c7b1bb
|
[] |
no_license
|
DKU-STUDY/Algorithm
|
19549516984b52a1c5cd73e1ed1e58f774d6d30e
|
6f78efdbefd8eedab24e43d74c7dae7f95c2893b
|
refs/heads/master
| 2023-02-18T06:48:39.309641 | 2023-02-09T07:16:14 | 2023-02-09T07:16:14 | 258,455,710 | 175 | 49 | null | 2023-02-09T07:16:16 | 2020-04-24T08:42:27 |
Python
|
UTF-8
|
Python
| false | false | 145 |
py
|
import re
class Solution:
def isPalindrome(self, s: str) -> bool:
s = re.sub('[^a-z0-9A-Z]','',s).lower()
return s == s[::-1]
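# Minimal usage sketch (added example; the test strings are illustrative, not
# from the original file): punctuation and case are stripped before comparing.
if __name__ == '__main__':
    sol = Solution()
    print(sol.isPalindrome('A man, a plan, a canal: Panama'))  # True
    print(sol.isPalindrome('race a car'))                      # False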
|
[
"[email protected]"
] | |
5d22821c7019c51b77ad3f13c64f5f67fa04579d
|
75f0e04c6330950a9bd225fd8b62fdc9fb0021b8
|
/103.binaryTreeZigzagLevelOrderTraversal.py
|
3511ea332bce45326491b783728bd51dc0aa9c57
|
[] |
no_license
|
Minzhe/LeetCode
|
f07bc1edce77cee778d7dc3c4a379921747637a5
|
e5cb0212cb83daac829456c14aec557e26eea68c
|
refs/heads/master
| 2020-05-04T11:25:07.084483 | 2020-03-09T02:10:17 | 2020-03-09T02:10:17 | 179,107,420 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 877 |
py
|
# Definition for a binary tree node (uncommented here so the snippet runs
# standalone; on LeetCode the judge provides TreeNode).
from typing import List


class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if root is None: return []
ans, order = [], 1
level = [root]
while level:
if order == 1:
ans.append([node.val for node in level])
elif order == -1:
ans.append([node.val for node in reversed(level)])
level = self.getnextlevel(level)
order *= -1
return ans
def getnextlevel(self, level):
nextlevel = []
for node in level:
if node.left:
nextlevel.append(node.left)
if node.right:
nextlevel.append(node.right)
return nextlevel
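# Worked example (assumed tree, not from the original file): for
#       3
#      / \
#     9  20
#        / \
#       15  7
# zigzagLevelOrder returns [[3], [20, 9], [15, 7]] -- left-to-right on the
# first level, right-to-left on the next, alternating via `order`.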
|
[
"[email protected]"
] | |
f1c240360245fffab7d3d52118d4c4bc8ff0e397
|
33890b0061d0701b32bb7d78776485c3afb9f67e
|
/pyexe/cpumon.py
|
93ce008962966b4d1ff7aff3145f366a9cc1de24
|
[] |
no_license
|
songzg/winterpy
|
677c7ec77a6923ba89d5f0b98c01d30e981336ae
|
05f8b3eb43588c452a0f76f4b6e04ee37ca0afb3
|
refs/heads/master
| 2020-05-29T11:46:26.768980 | 2016-05-19T04:08:02 | 2016-05-19T04:08:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 849 |
py
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8
'''
Monitor CPU usage and automatically run a command when usage gets too high
2010-07-17
'''
cmd = 'echo ================== >> ~/tmpfs/cpumon && top -n 1 -b | awk \'{if($4 != 0) print}\' >> ~/tmpfs/cpumon'
import os
import time
def getCPUUsage():
cpu_before = open('/proc/stat').readline().split()[1:]
time.sleep(1)
cpu_after = open('/proc/stat').readline().split()[1:]
cpu = list(map(lambda x, y: int(y)-int(x), cpu_before, cpu_after))
# print(cpu_before, cpu_after, sep='\n')
# print(cpu, sum(cpu))
return 1 - cpu[3] / sum(cpu)
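# The first line of /proc/stat lists cumulative jiffies per CPU state
# (user, nice, system, idle, iowait, ...); cpu[3] is therefore the idle delta
# over the one-second window, so usage = 1 - idle/total.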
def monitor(cmd=cmd, threshold=0.9):
while True:
usage = getCPUUsage()
print('CPU Usage: %.2f' % usage)
if usage > threshold:
os.system(cmd)
if __name__ == '__main__':
try:
monitor(threshold=.5)
except KeyboardInterrupt:
        print('Exiting')
|
[
"[email protected]"
] | |
7e9aa43fbb3cbd1f96f219c92e072a7b3b356403
|
223499de35fa67f487f332b70364ec48604fb67e
|
/spotify/migrations/0002_auto_20210326_1421.py
|
9b8029693ec2d846851e241dfd7445f52ab0f193
|
[] |
no_license
|
MichelAtieno/Music_Controller
|
961dfa9c7bbcee0df686a1281d622875eca386e4
|
059e65167e73f44f1b505184a2f41dcf209b38ed
|
refs/heads/master
| 2023-04-07T16:47:17.527827 | 2021-04-15T11:50:26 | 2021-04-15T11:50:26 | 350,689,322 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 568 |
py
|
# Generated by Django 3.1.7 on 2021-03-26 11:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('spotify', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='spotifytoken',
name='access_token',
field=models.CharField(max_length=1500),
),
migrations.AlterField(
model_name='spotifytoken',
name='refresh_token',
field=models.CharField(max_length=1500, null=True),
),
]
|
[
"[email protected]"
] | |
fb106f607a49363343562465ba2d40b67c5618fe
|
e174e13114fe96ad2a4eeb596a3d1c564ae212a8
|
/Python for Finance Analyze Big Financial Data by Y. Hilpisch/Code of Python For Finance/4375OS_06_Code/4375OS_06_23_interpolation.py
|
6866265b574971a283ffc99347e4dfdc5debddea
|
[] |
no_license
|
Kevinqian0501/python_books
|
c1a7632d66dceb46db439f7cbed86d85370aab42
|
0691e4685af03a296aafb02447e3585db55ce461
|
refs/heads/master
| 2021-08-30T19:27:03.985464 | 2017-12-19T05:56:31 | 2017-12-19T05:56:31 | 104,145,012 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 592 |
py
|
"""
Name : 4375OS_06_23_interpolation.py
Book : Python for Finance
Publisher: Packt Publishing Ltd.
Author : Yuxing Yan
Date : 12/26/2013
email : [email protected]
[email protected]
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
x = np.linspace(0, 10, 10)
y = np.exp(-x/3.0)
f = interp1d(x, y)
f2 = interp1d(x, y, kind='cubic')
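# interp1d returns a callable; the default kind is piecewise-linear, while
# kind='cubic' fits a cubic spline through the same samples.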
xnew = np.linspace(0, 10, 40)
plt.plot(x,y,'o',xnew,f(xnew),'-', xnew, f2(xnew),'--')
plt.legend(['data', 'linear', 'cubic'], loc='best')
plt.show()
|
[
"[email protected]"
] | |
dc29881f87dbe8e453f4f9473740b0d42fd07bda
|
538aec79c8039e500411647bba4a12343976bd03
|
/00 - Risorse/PYscripts/05scarica_condensatore.py
|
d4473036af722c795053b3b7c4825a9576ec40b6
|
[] |
no_license
|
Andreo95/Lab3_gruppoBF_primosemestre
|
27adff4fa1b1a9c75cf74840c55ee2653d36d07e
|
8866ca1200f95bb86712c3b896409aabc395bf81
|
refs/heads/master
| 2020-01-23T21:43:47.212214 | 2017-02-22T22:35:08 | 2017-02-22T22:35:08 | 74,684,559 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,826 |
py
|
import scipy
from scipy.optimize import curve_fit
import pylab
import numpy
## data file path and name
indirizzo_dati = '/afs/uz.sns.it/user/albord95/Scrivania/'
file_origine = 'data05scarica.txt'
## load the data
sxa, sya = pylab.loadtxt( r'%s%s' %(indirizzo_dati,file_origine), unpack = True )
## plots of the raw data
pylab.figure(1)
pylab.clf()
pylab.xlabel('time [$\mu$s]')
pylab.ylabel('arduino values [a.u.]')
pylab.grid(color = 'gray')
pylab.errorbar(sxa, sya, 1, fmt = '.')
## Fit function
def f(x, a, b):
return a*scipy.exp(-x/b)
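# For an RC discharge V(t) = V_0 * exp(-t/tau), a plays the role of V_0 and
# b the time constant tau (here in microseconds, matching the time axis).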
## best-fit
popt_a, pcov_a = curve_fit(f, sxa, sya)
a_fit, b_fit = popt_a
da_fit, db_fit = pylab.sqrt(pcov_a.diagonal())
print(' ')
print('V_0 = %f +- %f a.u.' % (a_fit, da_fit))
print('tau = %f +- %f micro s' % (b_fit, db_fit))
# chi-squared and normalized correlation coefficient
chi2 = ((sya-f(sxa,*popt_a))**2).sum()
cov_norm = pcov_a[0,1]/(numpy.sqrt(pcov_a[0,0]*pcov_a[1,1]))
print('chi-squared = %g' %(chi2))
print('dof = %g' %(len(sxa)-2))
print('normalized cov = %g' %(cov_norm))
print(' ')
print(pcov_a)
print(numpy.corrcoef(pcov_a))
## fit plot
pylab.figure(2)
pylab.clf()
pylab.title('capacitor discharge')
pylab.ylabel('arduino value [a.u.]')
pylab.xlabel('time [$\mu$s]')
pylab.grid(color = 'gray')
pylab.errorbar(sxa, sya, 1, fmt = '.')
pylab.plot(sxa, f(sxa,*popt_a), label='fit')
pylab.legend()
## residuals plot
pylab.figure(3)
pylab.clf()
pylab.xlabel('time [$\mu$s]')
pylab.title('capacitor discharge')
pylab.ylabel('normalized residuals')
pylab.grid(color = 'gray')
pylab.plot(sxa,sya-(f(sxa,*popt_a)), '.', label='data')
pylab.plot(sxa,scipy.zeros(len(sxa)) , label='ref')
media=sum((sya-f(sxa,*popt_a)))/len(sxa) # mean of the residuals
pylab.plot(sxa,scipy.ones(len(sxa))*media, label='mean')
pylab.legend()
pylab.show()
|
[
"[email protected]"
] | |
eaa89b0f66c0000c7e6c6f864c2936a2557be3e1
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/eventhub/v20210601preview/list_namespace_keys.py
|
bd36206585e0c79e0cb1b7dc610f727d641a1a59
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,240 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListNamespaceKeysResult',
'AwaitableListNamespaceKeysResult',
'list_namespace_keys',
'list_namespace_keys_output',
]
@pulumi.output_type
class ListNamespaceKeysResult:
"""
Namespace/EventHub Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the AuthorizationRule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace AuthorizationRule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace AuthorizationRule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
        A base64-encoded 256-bit secondary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListNamespaceKeysResult(ListNamespaceKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListNamespaceKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_namespace_keys(authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNamespaceKeysResult:
"""
Namespace/EventHub Connection String
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The Namespace name
:param str resource_group_name: Name of the resource group within the azure subscription.
"""
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:eventhub/v20210601preview:listNamespaceKeys', __args__, opts=opts, typ=ListNamespaceKeysResult).value
return AwaitableListNamespaceKeysResult(
alias_primary_connection_string=__ret__.alias_primary_connection_string,
alias_secondary_connection_string=__ret__.alias_secondary_connection_string,
key_name=__ret__.key_name,
primary_connection_string=__ret__.primary_connection_string,
primary_key=__ret__.primary_key,
secondary_connection_string=__ret__.secondary_connection_string,
secondary_key=__ret__.secondary_key)
@_utilities.lift_output_func(list_namespace_keys)
def list_namespace_keys_output(authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListNamespaceKeysResult]:
"""
Namespace/EventHub Connection String
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The Namespace name
:param str resource_group_name: Name of the resource group within the azure subscription.
"""
...
|
[
"[email protected]"
] | |
e0ed519f505619c172341d75c16b1d767d11c23a
|
a2d36e471988e0fae32e9a9d559204ebb065ab7f
|
/huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/event.py
|
6769cd2f23ac637c24543600a6c7e0b682fecc30
|
[
"Apache-2.0"
] |
permissive
|
zhouxy666/huaweicloud-sdk-python-v3
|
4d878a90b8e003875fc803a61414788e5e4c2c34
|
cc6f10a53205be4cb111d3ecfef8135ea804fa15
|
refs/heads/master
| 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,527 |
py
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class Event:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'metadata': 'Metadata',
'starts_at': 'object'
}
attribute_map = {
'metadata': 'metadata',
'starts_at': 'starts_at'
}
def __init__(self, metadata=None, starts_at=None):
"""Event - a model defined in huaweicloud sdk"""
self._metadata = None
self._starts_at = None
self.discriminator = None
self.metadata = metadata
self.starts_at = starts_at
@property
def metadata(self):
"""Gets the metadata of this Event.
        Alarm information
:return: The metadata of this Event.
:rtype: Metadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this Event.
        Alarm information
:param metadata: The metadata of this Event.
:type: Metadata
"""
self._metadata = metadata
@property
def starts_at(self):
"""Gets the starts_at of this Event.
        Alarm occurrence time (timestamp)
:return: The starts_at of this Event.
:rtype: object
"""
return self._starts_at
@starts_at.setter
def starts_at(self, starts_at):
"""Sets the starts_at of this Event.
        Alarm occurrence time (timestamp)
:param starts_at: The starts_at of this Event.
:type: object
"""
self._starts_at = starts_at
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Event):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
c18b4b2479e2e52a5c6fb4c2daeb6a110dac55d8
|
28ef7c65a5cb1291916c768a0c2468a91770bc12
|
/configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/mpii/cpm_mpii_368x368.py
|
eac8aef24aa08db278f9d31a79fbf5bfe6cd5ab2
|
[
"Apache-2.0"
] |
permissive
|
bit-scientist/mmpose
|
57464aae1ca87faf5a4669991ae1ea4347e41900
|
9671a12caf63ae5d15a9bebc66a9a2e7a3ce617e
|
refs/heads/master
| 2023-08-03T17:18:27.413286 | 2021-09-29T03:48:37 | 2021-09-29T03:48:37 | 411,549,076 | 0 | 0 |
Apache-2.0
| 2021-09-29T06:01:27 | 2021-09-29T06:01:26 | null |
UTF-8
|
Python
| false | false | 3,842 |
py
|
_base_ = ['../../../../_base_/datasets/mpii.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='PCKh', save_best='PCKh')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[170, 200])
total_epochs = 210
log_config = dict(
interval=50, hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=16,
dataset_joints=16,
dataset_channel=list(range(16)),
inference_channel=list(range(16)))
# model settings
model = dict(
type='TopDown',
pretrained=None,
backbone=dict(
type='CPM',
in_channels=3,
out_channels=channel_cfg['num_output_channels'],
feat_channels=128,
num_stages=6),
keypoint_head=dict(
type='TopdownHeatmapMultiStageHead',
in_channels=channel_cfg['num_output_channels'],
out_channels=channel_cfg['num_output_channels'],
num_stages=6,
num_deconv_layers=0,
extra=dict(final_conv_kernel=0, ),
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[368, 368],
heatmap_size=[46, 46],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
use_gt_bbox=True,
bbox_file=None,
)
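# Note: heatmap_size is image_size / 8 (368 / 46 = 8), the typical output
# stride for top-down heatmap heads such as CPM.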
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=30,
scale_factor=0.25),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=['img'],
meta_keys=['image_file', 'center', 'scale', 'rotation', 'flip_pairs']),
]
test_pipeline = val_pipeline
data_root = 'data/mpii'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_train.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_val.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownMpiiDataset',
ann_file=f'{data_root}/annotations/mpii_val.json',
img_prefix=f'{data_root}/images/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
)
|
[
"[email protected]"
] | |
96015b222285a8c5b5df3757e5a37985f0e8a737
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/Wyx_w_M_w_Sob_to_Wz_focus/IN_Sob_k5_EroMore/Mae_s001/pyr_Tcrop256_pad20_jit15/pyr_2s/L8/step09_2side_L8.py
|
1a23ee7f0618f17d796ffbec8ecc0f1b8c68b60b
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,295 |
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### Add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__) ### path of the script currently being executed
code_exe_path_element = code_exe_path.split("\\") ### split the path to find which level kong_model2 sits at
kong_layer = code_exe_path_element.index("kong_model2") ### index of the kong_model2 level
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### locate the kong_model2 dir
import sys ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_Wxy_w_M_to_Wz_combine import Wyx_w_M_to_Wz
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_Wyx_w_M_to_Wz
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
from step10_a1_loss import Sobel_MAE
Sob_k5_s001_erose_M = Sobel_MAE(sobel_kernel_size=5, sobel_kernel_scale=1, erose_M=True, erose_More=True)
use_gen_op = Wyx_w_M_to_Wz( focus=True, tight_crop=Tight_crop(pad_size=20, resize=(256, 256), jit_scale= 0), sobel=Sob_k5_s001_erose_M, sobel_only=True )
use_train_step = Train_step_Wyx_w_M_to_Wz( focus=True, tight_crop=Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15), sobel=Sob_k5_s001_erose_M, sobel_only=True )
import time
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
pyramid_1side_1__2side_1 = [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]
pyramid_1side_2__2side_1 = [2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2]
pyramid_1side_2__2side_2 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2]
pyramid_1side_3__2side_1 = [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2]
pyramid_1side_3__2side_2 = [2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2]
pyramid_1side_3__2side_3 = [2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2]
pyramid_1side_4__2side_1 = [2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2]
pyramid_1side_4__2side_2 = [2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2]
pyramid_1side_4__2side_3 = [2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2]
pyramid_1side_4__2side_4 = [2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2]
pyramid_1side_5__2side_1 = [2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2]
pyramid_1side_5__2side_2 = [2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2]
pyramid_1side_5__2side_3 = [2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2]
pyramid_1side_5__2side_4 = [2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2]
pyramid_1side_5__2side_5 = [2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_1 = [2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2]
pyramid_1side_6__2side_2 = [2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2]
pyramid_1side_6__2side_3 = [2, 2, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2]
pyramid_1side_6__2side_4 = [2, 2, 2, 2, 1, 1, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2]
pyramid_1side_6__2side_5 = [2, 2, 2, 2, 2, 1, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2]
pyramid_1side_6__2side_6 = [2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_1 = [2, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_7__2side_2 = [2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_7__2side_3 = [2, 2, 2, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_7__2side_4 = [2, 2, 2, 2, 1, 1, 1, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_7__2side_5 = [2, 2, 2, 2, 2, 1, 1, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_6 = [2, 2, 2, 2, 2, 2, 1, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_7__2side_7 = [2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_8__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_8__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_8__2side_4 = [2, 2, 2, 2, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_8__2side_5 = [2, 2, 2, 2, 2, 1, 1, 1, 0, 1, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_6 = [2, 2, 2, 2, 2, 2, 1, 1, 0, 1, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_7 = [2, 2, 2, 2, 2, 2, 2, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_8__2side_8 = [2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_1 = [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]
pyramid_1side_9__2side_2 = [2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2]
pyramid_1side_9__2side_3 = [2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2]
pyramid_1side_9__2side_4 = [2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2]
pyramid_1side_9__2side_5 = [2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_6 = [2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_7 = [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_8 = [2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 2, 2]
pyramid_1side_9__2side_9 = [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
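# Observed pattern in the tables above (an informal reading, not documented
# here): each list has 17 entries, matching 2*depth_level + 1 slots for the
# depth_level=8 builders below; pyramid_1side_N__2side_M keeps N nonzero
# entries at each end, with the outermost M of them set to 2 and the rest to
# 1, zeros in the middle. How conv_block_num consumes these values is defined
# by the unet3 builder, which is not shown in this file.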
#########################################################################################
ch032_pyramid_1side_1__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_1__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_2__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_2__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_2__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_3__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_3__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_4__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_4__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_4__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_4__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_4__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_5__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_5__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_5__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_5__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_5__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_5__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_6__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_7__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_8__2side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_1, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_2, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_3, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_4, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_5, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_6, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_7, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_8 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_8, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
ch032_pyramid_1side_9__2side_9 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_9__2side_9, ch_upper_bound= 2 ** 14).set_gen_op( use_gen_op ).set_train_step( use_train_step )
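# Note: the ch032_pyramid_* builders above differ only in their conv_block_num
# variant. Assuming every pyramid_1side_M__2side_N list (N <= M) exists under the
# naming shown, the same definitions could be generated in a loop — a sketch:
#
#   for m in range(1, 10):
#       for n in range(1, m + 1):
#           variant = globals()[f"pyramid_1side_{m}__2side_{n}"]
#           globals()[f"ch032_pyramid_1side_{m}__2side_{n}"] = (
#               KModel_builder().set_model_name(MODEL_NAME.flow_unet2)
#               .set_unet3(out_conv_block=True, concat_before_down=True,
#                          kernel_size=3, padding="valid", hid_ch=32,
#                          depth_level=8, out_ch=1, unet_acti="sigmoid",
#                          conv_block_num=variant, ch_upper_bound=2 ** 14)
#               .set_gen_op(use_gen_op).set_train_step(use_train_step))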
#########################################################################################
###############################################################################################################################################################################################
if(__name__ == "__main__"):
import numpy as np
print("build_model cost time:", time.time() - start_time)
data = np.zeros(shape=(1, 512, 512, 1))
use_model = ch032_pyramid_1side_4__2side_2
use_model = use_model.build()
result = use_model.generator(data)
print(result.shape)
from kong_util.tf_model_util import Show_model_weights
Show_model_weights(use_model.generator)
use_model.generator.summary()
|
[
"[email protected]"
] | |
5d608efd4cc1a7060a94a1a35e63cdfe6622a273
|
4be56098894a95da5964622fc4102b69e4530ab6
|
/题库/727.最小窗口子序列.py
|
c2e44407c2dae1162aed896872166b476694e850
|
[] |
no_license
|
ACENDER/LeetCode
|
7c7c7ecc8d0cc52215272f47ec34638637fae7ac
|
3383b09ab1246651b1d7b56ab426a456f56a4ece
|
refs/heads/master
| 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 88 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 727.最小窗口子序列.py
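# The file body is otherwise empty. A sketch of the standard two-pointer solution,
# assuming the intended problem is LeetCode 727 (Minimum Window Subsequence):
def minWindow(s1, s2):
    """Return the smallest window of s1 that contains s2 as a subsequence."""
    if not s2:
        return ""
    n, m = len(s1), len(s2)
    best = ""
    i = 0
    while i < n:
        j = 0
        # Scan forward until all of s2 has been matched as a subsequence.
        while i < n:
            if s1[i] == s2[j]:
                j += 1
                if j == m:
                    break
            i += 1
        if i == n:
            break
        end = i
        # Walk backward from the match end to find the latest possible start.
        j = m - 1
        while j >= 0:
            if s1[i] == s2[j]:
                j -= 1
            i -= 1
        i += 1
        if best == "" or end - i + 1 < len(best):
            best = s1[i:end + 1]
        i += 1  # continue searching just past this window's start
    return best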
|
[
"[email protected]"
] | |
ca598544f88391eaead44fb1a7f25af038a6a186
|
cc7bbdbb22cb6f7e7916388a5ee8218bc8ffa158
|
/Python3/Django/HelloREST/HelloREST/urls.py
|
a3c7eb05f80c131c40a6cde52f0d8a15ec715445
|
[
"MIT"
] |
permissive
|
youngqqcn/QBlockChainNotes
|
a816e067642f48a6da38b624663254b4016ec496
|
c9c143eaba6c06e3cee866669ec286e4d3cdbba8
|
refs/heads/master
| 2023-04-03T23:31:05.585545 | 2023-03-30T09:29:07 | 2023-03-30T09:29:07 | 155,657,459 | 37 | 15 |
MIT
| 2023-03-06T23:09:32 | 2018-11-01T03:33:11 |
JavaScript
|
UTF-8
|
Python
| false | false | 993 |
py
|
"""HelloREST URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
# path('admin/', admin.site.urls),
path(r'app/', include('app.urls', namespace='app'), name='app'),
path(r'api/', include('api.urls', namespace='api' ), name='api'),
path(r'cbv_demo/', include('cbv_demo.urls', namespace='cbv_demo' ), name='cbv_demo'),
]
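# Note: include('app.urls', namespace='app') only resolves if the included URLconf
# declares an application namespace. A minimal sketch of such a module
# (hypothetical app/urls.py, not part of this dump):
#
#   from django.urls import path
#   from . import views
#
#   app_name = 'app'  # required when the include above passes namespace='app'
#   urlpatterns = [
#       path('', views.index, name='index'),
#   ]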
|
[
"[email protected]"
] | |
5a4e70186cbcee334c036f14dec1ab81b6f690be
|
7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd
|
/Harshit Vashisth/Chapter-18(Working With Files)/D-File Input Output Read And Write/234.read_and_write.py
|
bcda23d9eecfb48a95e74589b9e413c42cf0ca95
|
[] |
no_license
|
satyam-seth-learnings/python_learning
|
5a7f75bb613dcd7fedc31a1567a434039b9417f8
|
7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da
|
refs/heads/main
| 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
with open('File5.txt','r') as rf:
with open('File5a.txt','w') as wf:
wf.write(rf.read())
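# rf.read() pulls the whole file into memory at once; for large files a chunked
# copy is safer. A sketch with the same file names as above:
#
#   with open('File5.txt', 'r') as rf, open('File5a.txt', 'w') as wf:
#       for chunk in iter(lambda: rf.read(64 * 1024), ''):
#           wf.write(chunk)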
|
[
"[email protected]"
] | |
d7bb46d9cff6cf7b90cb46af434aa1d1b5d00a2d
|
16b389c8dcace7f7d010c1fcf57ae0b3f10f88d3
|
/lib/jnpr/healthbot/swagger/models/groupgroupid_roles.py
|
ba247dfe7ee49ba923fd077393f66bb21f434add
|
[
"Apache-2.0"
] |
permissive
|
Juniper/healthbot-py-client
|
e4e376b074920d745f68f19e9309ede0a4173064
|
0390dc5d194df19c5845b73cb1d6a54441a263bc
|
refs/heads/master
| 2023-08-22T03:48:10.506847 | 2022-02-16T12:21:04 | 2022-02-16T12:21:04 | 210,760,509 | 10 | 5 |
Apache-2.0
| 2022-05-25T05:48:55 | 2019-09-25T05:12:35 |
Python
|
UTF-8
|
Python
| false | false | 3,796 |
py
|
# coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 3.1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GroupgroupidRoles(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'role_id': 'str',
'role_name': 'str'
}
attribute_map = {
'role_id': 'roleId',
'role_name': 'roleName'
}
def __init__(self, role_id=None, role_name=None): # noqa: E501
"""GroupgroupidRoles - a model defined in Swagger""" # noqa: E501
self._role_id = None
self._role_name = None
self.discriminator = None
if role_id is not None:
self.role_id = role_id
if role_name is not None:
self.role_name = role_name
@property
def role_id(self):
"""Gets the role_id of this GroupgroupidRoles. # noqa: E501
:return: The role_id of this GroupgroupidRoles. # noqa: E501
:rtype: str
"""
return self._role_id
@role_id.setter
def role_id(self, role_id):
"""Sets the role_id of this GroupgroupidRoles.
:param role_id: The role_id of this GroupgroupidRoles. # noqa: E501
:type: str
"""
self._role_id = role_id
@property
def role_name(self):
"""Gets the role_name of this GroupgroupidRoles. # noqa: E501
:return: The role_name of this GroupgroupidRoles. # noqa: E501
:rtype: str
"""
return self._role_name
@role_name.setter
def role_name(self, role_name):
"""Sets the role_name of this GroupgroupidRoles.
:param role_name: The role_name of this GroupgroupidRoles. # noqa: E501
:type: str
"""
self._role_name = role_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GroupgroupidRoles, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GroupgroupidRoles):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
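# Usage sketch for the generated model above (values are illustrative):
#
#   role = GroupgroupidRoles(role_id='42', role_name='admin')
#   role.to_dict()   # -> {'role_id': '42', 'role_name': 'admin'}
#   print(role)      # pprint-formatted output via to_str()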
|
[
"[email protected]"
] | |
c19c3248b62dd32e7099185d2caa553d438fe77b
|
810c479dcaf8a5685a9d5b1a35a79c27f3bf6ba1
|
/movil/models.py
|
45eba52e1416087f67c8cbbf4daf9fe80b4bd215
|
[] |
no_license
|
xangcastle/multipagos
|
1e8f9cac9033f964e54ab9b9bddb37763f6f275f
|
72d2b1b0f5494dacd413ae87520a902a413197db
|
refs/heads/master
| 2021-01-21T04:55:37.112879 | 2016-06-22T19:42:46 | 2016-06-22T19:42:46 | 50,454,659 | 0 | 1 | null | 2016-03-07T23:31:45 | 2016-01-26T19:49:09 |
Python
|
UTF-8
|
Python
| false | false | 1,694 |
py
|
from django.db import models
from django.contrib.auth.models import User
from metropolitana.models import get_media_url, Zona, Departamento
from cartera.models import TipoGestion
class UserProfile(models.Model):
'''
    This class is used to store the user profiles.
'''
    user = models.OneToOneField(User, help_text="the user who uses the mobile device")
foto = models.ImageField(upload_to=get_media_url,
null=True, blank=True)
zonas = models.ManyToManyField(Zona, null=True, blank=True)
tipo_gestion = models.ManyToManyField(TipoGestion, null=True, blank=True,
verbose_name="tipos de gestiones que realiza")
celular = models.CharField(max_length=14, null=True)
is_supervisor = models.BooleanField(default=False,
verbose_name="es un supervisor?")
departamentos = models.ManyToManyField(Departamento, null=True,
        blank=True, verbose_name="departments covered")
def __unicode__(self):
return "user " + self.user.username
class Meta:
        verbose_name = 'user'
        verbose_name_plural = "mobile app users"
from math import radians, cos, sin, asin, sqrt
def haversine(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * asin(sqrt(a))
r = 6371
# Radius of earth in kilometers. Use 3956 for miles
return c * r
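# Usage sketch: distance between two points given as (lon, lat) pairs.
# Coordinates below are approximate values for Managua and Leon, Nicaragua,
# chosen purely for illustration:
#
#   d_km = haversine(-86.2514, 12.1364, -86.8780, 12.4345)
#   # -> roughly 75 km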
|
[
"[email protected]"
] | |
ebd2465ef5377d7dbdf889989eb9d1f39004f369
|
50f2587a55fd5364c5bf8e127eeeda52df1388f1
|
/ponosen/urls.py
|
d032e955cd80adb8646dd178f813086a508c0721
|
[] |
no_license
|
OpenLuna/luna-api
|
e6cec2ea3adf63e4275f50802130d917bb840142
|
8f186e02d3b0c8939384dbbcd25bd3a1ccc276ab
|
refs/heads/master
| 2021-01-17T12:52:30.149021 | 2016-06-29T13:20:17 | 2016-06-29T13:20:17 | 56,492,804 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 365 |
py
|
from django.conf.urls import url
from ponosen.views import *
urlpatterns = [
url(r'^save_email/(?P<email>[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})', saveEmail),
url(r'^req_recover_password/(?P<email>[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})', reqRecoverPassword),
url(r'^recover_password/(?P<code>[\w ]+)/', recoverPassword),
url(r'^ping/', ping),
]
|
[
"[email protected]"
] | |
e22e842a4653476d6dcfc969b8723ce9367fd231
|
5bc10755eb843642f272b4d96812e968d1229100
|
/backend/TheFoundater/venv/bin/chardetect
|
419b4ec56688e56623a40df1766a21e189ddde6c
|
[] |
no_license
|
AhmedOmi/Rest_APi_Django_React_blog
|
5c24f90c8da4fd1654788ceab6ab32571158b3b8
|
e8d7fd23e0b27cc7f6ac9289be80db57ca2732ca
|
refs/heads/master
| 2023-04-08T17:33:18.682152 | 2021-04-21T06:12:52 | 2021-04-21T06:12:52 | 360,047,019 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 287 |
#!/Users/ahmedomarmiladi/Documents/theFoundater/backend/TheFoundater/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
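# Usage sketch (command line, assuming the venv is active; output format is
# approximate):
#
#   $ chardetect some_file.txt
#   some_file.txt: utf-8 with confidence 0.99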
|
[
"[email protected]"
] | ||
4e7a4ccb3def2f594f32a68e5a868d11b1ff7949
|
b968a344f81c78a287ecd7addba096de1e1e61c9
|
/12okt/splitter.py
|
ab0d0456d813abc1a2532b001b66f04940543a7a
|
[] |
no_license
|
TeknikhogskolanGothenburg/Python20_Python_Programming
|
c8f5dbe1ead92418589cc7a10a18658ee708db02
|
b837ddba4003842675533b87cf6209908b04c125
|
refs/heads/master
| 2023-01-22T05:15:22.502267 | 2020-10-28T10:27:33 | 2020-10-28T10:27:33 | 290,459,085 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 120 |
py
|
def main():
values = input('Enter some values: ').split()
print(values)
if __name__ == '__main__':
main()
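# split() without arguments splits on any run of whitespace; an explicit
# separator changes that behaviour, e.g.:
#
#   ' a  b '.split()       # -> ['a', 'b']
#   'a,b,,c'.split(',')    # -> ['a', 'b', '', 'c']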
|
[
"[email protected]"
] | |
e5d24182fdead8cd02d5319ff10780e76a83b905
|
439386f9097632d44d31d1f599df76ec2820d072
|
/常规项目/统一大厅常规checklist/1450/DFQP/src/cases/dfqp_mall.py
|
e7422d7ee1c2b3362eaa672fd34d80bca04b5559
|
[] |
no_license
|
YiFeng0755/testcase
|
33693f0940a6497aa40e2e51a0535c9eb6c12b29
|
edc19480c3e94cbcbf004aa9d20099ec6d1b9304
|
refs/heads/master
| 2020-04-28T04:34:28.232022 | 2019-03-11T11:13:25 | 2019-03-11T11:13:25 | 146,287,761 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,207 |
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Mall
'''
import time
from runcenter.enums import EnumPriority,EnumStatus
from runcenter.testcase import debug_run_all,TestCase
from uilib.mall_page import Mall_Page
from uilib.hall_page import Hall_Page
from common.common import Common
class C31049_DFQP_Mall(TestCase):
'''
    Mall screen display
'''
owner = "MindyZhang"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
self.common = Common()
        # Initialize the Luadriver
self.luadriver = self.common.setupdriver()
        # Every test case needs the activity pop-ups closed; do it here in setup
self.common.closeactivity(self.luadriver)
self.hall_page = Hall_Page()
self.mall_page = Mall_Page()
def run_test(self):
'''
        Test case
'''
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step("进入商城页面")
self.hall_page.wait_element("商城").click()
time.sleep(2)
self.mall_page.get_element("银币页签").click()
time.sleep(2)
self.mall_page.screenshot('Mall1.png')
self.mall_page.wait_element("金条页签").click()
time.sleep(2)
self.mall_page.screenshot('Mall2.png')
self.mall_page.wait_element("道具页签").click()
time.sleep(2)
self.mall_page.screenshot('Mall3.png')
self.mall_page.wait_element("VIP页签").click()
time.sleep(2)
self.mall_page.screenshot('Mall4.png')
def post_test(self):
'''
        Clean up the test environment after the test case finishes
'''
self.common.closedriver()
class C31056_DFQP_Mall(TestCase):
'''
    Alipay payment screen display (with Alipay installed)
'''
owner = "LucyLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
self.common = Common()
        # Initialize the Luadriver
self.luadriver = self.common.setupdriver()
        # Every test case needs the activity pop-ups closed; do it here in setup
self.common.closeactivity_switchserver(self.luadriver,"预发布")
self.hall_page = Hall_Page()
self.mall_page = Mall_Page()
def run_test(self):
'''
        Test case
'''
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step("进入商城页面")
self.hall_page.wait_element("商城").click()
time.sleep(5)
self.mall_page.get_element("金条商品").click()
time.sleep(2)
self.mall_page.screenshot('zhifu.png')
time.sleep(2)
self.mall_page.get_element("支付宝").click()
self.mall_page.screenshot('zhifubao.png')
def post_test(self):
'''
        Clean up the test environment after the test case finishes
'''
self.common.closedriver()
__qtaf_seq_tests__ = [C31056_DFQP_Mall]
if __name__ == '__main__':
# C002_DFQP_Login_GuestLogin = C002_DFQP_Login_GuestLogin()
# C002_DFQP_Login_GuestLogin.debug_run()
debug_run_all()
|
[
"[email protected]"
] | |
c70e151d43836de3933142d4f083d962c54cc40b
|
ca6fd0c9807f8811f8c023dcd9683ccf882111d1
|
/torch/_inductor/graph.py
|
d39988b7a646b18fd997109b24c3a5ae2c1d4b5f
|
[
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
jiayisunx/pytorch
|
a2a966701b1b77b33641d50b9f5fe02d4f9af440
|
d643a00efc6e1be655708d60dd2d0e702e0d20b9
|
refs/heads/master
| 2023-07-23T23:57:50.236788 | 2023-04-07T01:33:52 | 2023-04-07T06:53:31 | 205,314,147 | 1 | 1 | null | 2019-08-30T05:54:56 | 2019-08-30T05:54:56 | null |
UTF-8
|
Python
| false | false | 27,452 |
py
|
import functools
import logging
import operator
import os
import re
import sys
import time
from typing import Dict, List, Optional, Set
import sympy
import torch
import torch._logging
import torch.fx
from torch._decomp import get_decompositions
from torch._dynamo.utils import dynamo_timed
from torch.fx.experimental.symbolic_shapes import (
magic_methods,
method_to_operator,
ShapeEnv,
SymTypes,
)
from torch.utils._mode_utils import no_dispatch
from .._dynamo import config as dynamo_config
from . import config, ir
from .codegen.wrapper import CppWrapperCodeGen, CudaWrapperCodeGen, WrapperCodeGen
from .exc import (
LoweringException,
MissingOperatorWithDecomp,
MissingOperatorWithoutDecomp,
)
from .ir import Constant, FixedLayout, InputBuffer, Pointwise, Reduction, TensorBox
from .lowering import (
FALLBACK_ALLOW_LIST,
fallback_handler,
fallback_node_due_to_unsupported_type,
layout_constraints,
lowerings,
make_fallback,
needs_realized_inputs,
unsupported_output_tensor,
)
from .sizevars import SizeVarAllocator
from .utils import (
convert_shape_to_inductor,
gather_origins,
get_dtype_size,
sympy_product,
)
from .virtualized import V
log = logging.getLogger(__name__)
output_code_log = torch._logging.getArtifactLogger(__name__, "output_code")
def supported_dtype_of_cpp_wrapper(dtype, cuda):
supported_dtype = {
torch.float32,
torch.float64,
torch.int64,
torch.int32,
torch.int16,
torch.int8,
torch.uint8,
torch.bool,
torch.bfloat16,
# torch.float16, # TODO: implement this
}
if cuda:
supported_dtype.add(torch.float16)
return dtype in supported_dtype
def may_get_constant_buffer_dtype(constant_buffer):
assert isinstance(
constant_buffer, sympy.Symbol
), "get_constant_buffer_dtype only supports input of sympy.Symbol"
if constant_buffer.is_integer:
return torch.int64
elif constant_buffer.is_float:
return torch.float32
else:
return None
def is_magic_method(op):
magic_ops = {method_to_operator(m) for m in magic_methods}
return op in magic_ops
class GraphLowering(torch.fx.Interpreter):
def symbolic_sizes_strides(self, ex: torch.Tensor):
"""
Support dynamic shapes and dynamic strides by assigning variables
to each dimension. We duck-shape tensors, so if two tensors
have the same size they get assigned the same symbolic variable.
"""
if self.reuse_shape_env:
return convert_shape_to_inductor(ex.size()), convert_shape_to_inductor(
ex.stride()
)
else:
from torch._dynamo.source import ConstantSource
# TODO: this should not be needed once #93059 lands
# https://github.com/pytorch/pytorch/pull/94031#discussion_r1096044816
# TODO: make a dedicated UnknownSource for this?
# NB: This is using the legacy default behavior from
# create_symbolic_sizes_strides_storage_offset but we hope we can
# just delete this entirely
source = ConstantSource(
f"__unknown_tensor_{len(self._shape_env.var_to_val)}"
)
(
size,
stride,
_,
) = self._shape_env.create_symbolic_sizes_strides_storage_offset(
ex,
source,
)
size = [i.node.expr if isinstance(i, torch.SymInt) else i for i in size]
stride = [i.node.expr if isinstance(i, torch.SymInt) else i for i in stride]
return size, stride
def static_sizes_strides(self, ex: torch.Tensor):
"""
        Primarily used for weights
"""
size = [sympy.Integer(i) for i in ex.size()]
stride = [sympy.Integer(i) for i in ex.stride()]
return size, stride
def __init__(
self,
gm: torch.fx.GraphModule,
shape_env=None,
num_static_inputs=None,
graph_id=None,
cpp_wrapper=False,
aot_mode=False,
):
super().__init__(gm)
self.extra_traceback = False # we do our own error wrapping
        if shape_env is None:
            shape_env = ShapeEnv()
            self.reuse_shape_env = False
        else:
            self.reuse_shape_env = True
        self._shape_env = shape_env
self.sizevars = SizeVarAllocator(shape_env)
self.graph_inputs: Dict[str, TensorBox] = {}
self.graph_inputs_original: Dict[str, InputBuffer] = {}
self.graph_outputs: Optional[List[ir.IRNode]] = None
self.device_types: Set[str] = set()
self.device_idxs: Set[int] = set()
self.buffers: List[ir.ComputedBuffer] = []
self.constants: Dict[str, torch.Tensor] = {}
self.removed_buffers: Set[str] = set()
self.inplaced_to_remove: Set[str] = set()
self.wrapper_code = None
self.num_static_inputs = num_static_inputs
self.mutated_inputs: Set[str] = set()
self.unaligned_buffers: Set[str] = set()
self.randomness_offset = sympy.Integer(0)
self.randomness_seeds: List[str] = []
self.name_to_buffer: Dict[str, ir.ComputedBuffer] = {}
self.creation_time = time.time()
self.name = "GraphLowering"
self.cpp_wrapper = cpp_wrapper
self.aot_mode = aot_mode
self.graph_id = graph_id
self.scheduler = None
self._warned_fallback = {"aten.convolution_backward"}
def warn_fallback(self, name):
if name not in self._warned_fallback:
self._warned_fallback.add(name)
log.info(f"Using FallbackKernel: {name}")
def add_device_idx(self, idx: Optional[int]):
if idx is not None:
self.device_idxs.add(idx)
@property
def fake_mode(self):
return V.fake_mode
def get_buffer(self, buffer_name: str):
if buffer_name in self.name_to_buffer:
return self.name_to_buffer[buffer_name]
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name]
return None
def get_dtype(self, buffer_name: str):
if buffer_name in self.constants:
return self.constants[buffer_name].dtype
if buffer_name in self.name_to_buffer:
return self.name_to_buffer[buffer_name].get_dtype()
if buffer_name in self.graph_inputs:
return self.graph_inputs[buffer_name].get_dtype()
m = re.match(r"as_strided\(([a-zA-Z0-9_]+),", buffer_name)
if m:
return self.get_dtype(m.group(1))
raise KeyError(f"could not find {buffer_name}")
def random_seed_buffer(self, device: torch.device):
"""
Return a device-unique 1-element tensor storing our RNG seed.
This will get initialized at the start of each graph in
`wrapper.py`.
Note this is only used by cuda backends. The CPU backend handles
RNG seeds as a sizevar.
"""
name = f"seed_{device.type}_{device.index}"
if name not in self.constants:
self.constants[name] = torch.zeros((), device=device, dtype=torch.int64)
self.randomness_seeds.append(name)
return ir.RandSeedBuffer(
name=name,
layout=ir.FixedLayout(
device=device,
dtype=torch.int64,
size=[],
stride=[],
),
)
def increment_randomness_offset(self, numel):
"""
A global counter of how many random numbers we have handed out so far.
"""
offset = self.randomness_offset
self.randomness_offset = offset + numel
return offset
@dynamo_timed
def run(self, *args):
return super().run(*args)
def disable_cpp_wrapper(self, cond):
self.cpp_wrapper = False
assert not self.aot_mode, "AOT compilation failed"
log.debug("Set cpp_wrapper to False due to %s", cond)
def check_buffer_for_cpp_wrapper(self, buffer: ir.ComputedBuffer):
if isinstance(buffer, ir.ExternKernel):
if not getattr(buffer, "cpp_kernel", False):
self.disable_cpp_wrapper("ExternKernel")
def register_buffer(self, buffer: ir.ComputedBuffer):
if self.cpp_wrapper:
self.check_buffer_for_cpp_wrapper(buffer)
name = f"buf{len(self.buffers)}"
self.buffers.append(buffer)
self.name_to_buffer[name] = buffer
return name
def realize_users_of(self, name: str):
"""
When a buffer is mutated we need to make sure all the reads to
the old version are realized before the mutation happens.
"""
assert isinstance(name, str)
def visit(value):
if isinstance(value, (list, tuple)):
return [visit(x) for x in value]
if isinstance(value, ir.IRNode):
if value.is_user_of(name):
value.realize()
return value
for key, value in self.env.items():
try:
visit(value)
except Exception:
log.warning("error in realize_users_of", exc_info=True)
def add_tensor_constant(self, data):
def allocate():
for name, value in self.constants.items():
if (
data.size() == value.size()
and data.stride() == value.stride()
and data.dtype == value.dtype
and data.device == value.device
and torch.eq(data, value).all()
):
return name
name = f"constant{len(self.constants)}"
self.constants[name] = data
return name
return TensorBox.create(
ir.ConstantBuffer(
allocate(),
FixedLayout(data.device, data.dtype, *self.static_sizes_strides(data)),
)
)
def constant_name(self, name: str, device_override: torch.device):
"""
We AOT copy constants to the devices they are needed on.
If device_override doesn't match the constant's device, then
copy it and return a different name.
"""
if self.constants[name].device == device_override or device_override is None:
return name
alt_name = f"{name}_{device_override.type}{device_override.index or 0}"
if alt_name not in self.constants:
self.constants[alt_name] = self.constants[name].to(device_override)
return alt_name
def placeholder(self, target: str, args, kwargs):
example = super().placeholder(target, args, kwargs)
if isinstance(example, SymTypes):
expr = example.node.expr
self.graph_inputs[target] = expr
return expr
elif isinstance(example, (int, bool, float)):
expr = sympy.sympify(example)
self.graph_inputs[target] = expr
return expr
assert isinstance(example, torch.Tensor), example
# todo(chilli): We can remove the last check once we turn buffers into
# static shape tensors. That's a hack to workaround Inductor believing
# the buffer should be static but us passing in a fake tensor with
# symbolic shapes.
if (
config.static_weight_shapes
and (
len(self.graph_inputs) < self.num_static_inputs
or not dynamo_config.dynamic_shapes
)
and not example._has_symbolic_sizes_strides
):
# the first N inputs are weights
sizes, strides = self.static_sizes_strides(example)
else:
sizes, strides = self.symbolic_sizes_strides(example)
# TODO(jansel): handle input aliasing
tensor = TensorBox.create(
InputBuffer(
target,
FixedLayout(example.device, example.dtype, sizes, strides),
)
)
self.graph_inputs[target] = tensor
self.graph_inputs_original[target] = tensor.data.data
self.device_types.add(example.device.type)
self.add_device_idx(example.device.index)
return tensor
def call_function(self, target, args, kwargs):
if target is operator.getitem and isinstance(args[0], (list, tuple)):
return super().call_function(target, args, kwargs)
if hasattr(target, "_inductor_lowering_function"):
# passthrough lowerings from .pattern_matcher
return target(*args, **kwargs)
if target not in lowerings:
base_name = target.name().split(".")[0]
if base_name in FALLBACK_ALLOW_LIST:
make_fallback(target)
elif config.implicit_fallbacks:
error = (
MissingOperatorWithDecomp
if get_decompositions([target])
else MissingOperatorWithoutDecomp
)
log.info(
"Creating implicit fallback for:\n%s",
error.operator_str(target, args, kwargs),
)
make_fallback(target)
elif get_decompositions([target]):
# There isn't a good way to dynamically patch this in
# since AOT Autograd already ran. The error message tells
# the user how to fix it.
raise MissingOperatorWithDecomp(target, args, kwargs)
else:
raise MissingOperatorWithoutDecomp(target, args, kwargs)
try:
out = lowerings[target](*args, **kwargs)
return out
except Exception as e:
raise LoweringException(e, target, args, kwargs).with_traceback(
e.__traceback__
) from None
def get_attr(self, target, args, kwargs):
# this is a constant
value = getattr(self.module, target)
if unsupported_output_tensor(value):
return self.add_tensor_constant(value)
with no_dispatch():
if value.shape == ():
return Constant(value.item(), value.dtype, value.device)
if len(value.shape) == 1 and value.shape[0] <= 8:
# tensor lowering has constant inlining logic
from .lowering import tensor
return tensor(value.tolist(), dtype=value.dtype, device=value.device)
return self.add_tensor_constant(value)
def call_module(self, target, args, kwargs):
raise AssertionError()
def call_method(self, target, args, kwargs):
raise AssertionError()
def output(self, target, args, kwargs):
result = super().output(target, args, kwargs)
assert isinstance(result, (tuple, list)), type(result)
assert all(
isinstance(
x,
(
TensorBox,
ir.Constant,
type(None),
ir.ConstantBuffer,
sympy.Expr,
int,
),
)
for x in result
), result
self.graph_outputs = [ir.ExternKernel.realize_input(x) for x in result]
for name, value in self.graph_inputs.items():
assert isinstance(value, (TensorBox, sympy.Expr))
if not isinstance(value, TensorBox):
continue
value.realize()
assert isinstance(value, TensorBox)
value = value.data
assert isinstance(value, ir.StorageBox)
value_storage_box = value
value = value.data
if not isinstance(value, InputBuffer) or value.get_name() != name:
# one of our inputs was mutated, need to turn that into a copy
ir.MutationLayout.realize_into(value, self.graph_inputs_original[name])
# replace output with mutated input
try:
ind = self.graph_outputs.index(value_storage_box)
self.graph_outputs[ind] = self.graph_inputs_original[name]
except ValueError:
pass
self.finalize()
def finalize(self):
for buf in self.buffers:
buf.decide_layout()
def run_node(self, n: torch.fx.Node):
origins = {n}
if n.op == "call_function":
args, kwargs = self.fetch_args_kwargs_from_env(n)
origins |= gather_origins(args, kwargs)
with ir.IRNode.current_origins(origins):
if (
n.op == "call_function"
and n.target is not operator.getitem
and fallback_node_due_to_unsupported_type(n)
):
result = fallback_handler(n.target, add_to_fallback_set=False)(
*args, **kwargs
)
elif n.op == "call_function" and n.target in layout_constraints:
args, kwargs = layout_constraints[n.target](n, *args, **kwargs)
result = self.call_function(n.target, args, kwargs)
elif is_magic_method(n.target):
if isinstance(n.meta["val"], torch.SymInt):
result = n.meta["val"].node.expr
else:
result = super().run_node(n)
else:
result = super().run_node(n)
# require the same stride order for dense outputs,
# 1. user-land view() will not throw because inductor
# output different strides than eager
# long term the solution is to make view() always succeed
# with infallible strides.
# 2: as_strided ops, we need make sure its input has same size/stride with
# eager model to align with eager behavior.
as_strided_ops = [
torch.ops.aten.as_strided.default,
torch.ops.aten.as_strided_.default,
torch.ops.aten.as_strided_scatter.default,
]
if any(
user.op == "output" or user.target in as_strided_ops for user in n.users
) and isinstance(n.meta["val"], torch.Tensor):
strides = n.meta["val"].stride()
dense = torch._prims_common.is_non_overlapping_and_dense(n.meta["val"])
# requiring a stride order for a non-dense output wouldn't
# recreate the same strides, and would fail with view, defer for now.
if dense and len(strides):
result = ir.ExternKernel.require_stride_order(
result, ir.get_stride_order(strides)
)
# Realize if (1) any user need inputs realized, or (2) there is
# already too many reads and rematerializing can be bad.
num_users = len(set(n.users))
if num_users > 1 and isinstance(result, TensorBox):
for user in n.users:
if user.target in needs_realized_inputs:
result.realize_hint()
# This inclusion is somewhat controversial (from
# discussion between Horace, Natalia, and Elias).
# Currently, it's not very clear why this is helpful.
# The general idea here is that even though a node may
# have FlexibleLayout, we still often *treat* it as if
# it was contiguous. This appears to sometimes result in
# suboptimal behavior.
#
# When we do a better job selecting layout, we should
# revisit this.
need_fixed_layout = [
torch.ops.aten.convolution.default,
torch.ops.aten.convolution_backward.default,
torch.ops.aten.mm.default,
torch.ops.aten._int_mm.default,
]
if torch._C.has_mkldnn:
need_fixed_layout += [
torch.ops.mkldnn._convolution_pointwise.default,
torch.ops.mkldnn._convolution_pointwise.binary,
torch.ops.mkldnn._convolution_pointwise_.binary,
torch.ops.mkldnn._convolution_transpose_pointwise.default,
torch.ops.mkldnn._linear_pointwise.default,
torch.ops.mkldnn._linear_pointwise.binary,
]
if torch._C.has_mkl:
need_fixed_layout += [torch.ops.mkl._mkl_linear.default]
if user.target in need_fixed_layout:
result = ir.ExternKernel.require_stride_order(
result, ir.get_stride_order(n.meta["val"].stride())
)
if user.op == "output":
if isinstance(result.data.data, (Pointwise, Reduction)):
result.realize()
# TODO(jansel): introduce a store vs inline choice
result.mark_reuse(len(n.users))
# Realize if the IRNode already has accumulated lots of reads
if isinstance(result, TensorBox) and result.has_exceeded_max_reads():
# Prevent excessive accumulation in a computed buffer, when
# there are multiple branches each with small number of memory
# reads, but they converge to a user.
result.realize_hint()
return result
def check_cpp_codegen_disabled(self):
if config.disable_cpp_codegen:
self.disable_cpp_wrapper("cpp codegen disabled")
def check_platform(self):
if sys.platform != "linux":
self.disable_cpp_wrapper("platform not linux")
@functools.lru_cache(None)
def get_single_device(self):
return list(self.device_types)[0] if len(self.device_types) == 1 else None
def check_input_for_cpp_buffer(self, cuda):
for _, value in self.graph_inputs.items():
dtype = None
if isinstance(value, TensorBox):
dtype = value.get_dtype()
elif isinstance(value, sympy.Symbol):
dtype = may_get_constant_buffer_dtype(value)
if not supported_dtype_of_cpp_wrapper(dtype, cuda):
self.disable_cpp_wrapper("unsupported inputs dtype")
def check_constant_for_cpp_buffer(self):
if self.constants:
self.disable_cpp_wrapper("Constants")
def check_cpp_wrapper(self, cuda):
self.check_cpp_codegen_disabled()
self.check_platform()
self.check_input_for_cpp_buffer(cuda)
self.check_constant_for_cpp_buffer()
def init_wrapper_code(self):
if self.cpp_wrapper:
device = self.get_single_device()
assert device == "cpu" or device == "cuda"
cuda = device == "cuda"
self.check_cpp_wrapper(cuda)
# Re-check self.cpp_wrapper because it might be disabled due to failed checking
if self.cpp_wrapper:
self.wrapper_code = (
CudaWrapperCodeGen() if cuda else CppWrapperCodeGen()
)
return
self.wrapper_code = WrapperCodeGen()
def codegen(self):
from .scheduler import Scheduler
self.init_wrapper_code()
self.scheduler = Scheduler(self.buffers)
assert self.scheduler is not None # mypy can't figure this out
self.scheduler.codegen()
assert self.wrapper_code is not None
return self.wrapper_code.generate()
def count_bytes(self):
from .scheduler import FusedSchedulerNode, NopKernelSchedulerNode, Scheduler
scheduler = Scheduler(self.buffers)
def get_read_write_buffers_sizes(node):
if isinstance(node, NopKernelSchedulerNode):
return 0
reads = {dep.name for dep in node.read_writes.reads}
writes = {dep.name for dep in node.read_writes.writes}
def is_materialized(buf):
buf_uses = {user.node for user in scheduler.name_to_node[buf].users}
return len(buf_uses - set(node.snodes)) > 0
if isinstance(node, FusedSchedulerNode):
removed_buffers = {dep for dep in writes if not is_materialized(dep)}
writes = writes - removed_buffers
reads = reads - removed_buffers
node_bytes = 0
for buf in reads | writes:
if buf in self.name_to_buffer:
buf = self.name_to_buffer[buf]
elif buf in self.graph_inputs:
buf = self.graph_inputs[buf]
else:
continue
node_bytes += V.graph.sizevars.size_hint(
sympy_product(buf.get_size())
) * get_dtype_size(buf.get_dtype())
return node_bytes
total_bytes = 0
node_counts = []
for node in scheduler.nodes:
num_bytes = get_read_write_buffers_sizes(node)
node_counts.append((node, num_bytes // 4))
total_bytes += num_bytes
return total_bytes, node_counts
@dynamo_timed
def compile_to_module(self):
from .codecache import PyCodeCache
code, linemap = self.codegen()
mod = PyCodeCache.load(code, linemap=linemap)
for name, value in self.constants.items():
setattr(mod, name, value)
log.debug(f"Output code written to: {mod.__file__}")
output_code_log.debug(f"Output code: \n{code}")
if config.benchmark_kernel:
print(f"Compiled module path: {mod.__file__}", file=sys.stderr)
V.debug.output_code(mod.__file__)
V.debug.rename(os.path.splitext(mod.__file__)[0] + ".debug")
return mod
def compile_to_fn(self):
if self.aot_mode:
from .codecache import AotCodeCache
code, linemap = self.codegen()
output_code_log.debug(f"Output code: \n{code}")
libpath = AotCodeCache.compile(
code, cuda=(self.get_single_device() == "cuda")
)
return lambda dummy: libpath
else:
return self.compile_to_module().call
def get_output_names(self):
assert self.graph_outputs is not None
return [
node.get_name()
for node in self.graph_outputs
if not isinstance(node, ir.NoneAsConstantBuffer)
and not isinstance(node, ir.ShapeAsConstantBuffer)
]
def is_unspec_arg(self, name: str):
# dynamo wraps unspec variable as 0d CPU tensor,
# need to convert to scalar during codegen (triton only)
return (
name in self.graph_inputs.keys()
and self.graph_inputs[name].get_numel() == 1
and self.graph_inputs[name].get_device().type == "cpu"
)
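# Usage sketch (assumption: GraphLowering is normally driven indirectly through
# the inductor backend rather than instantiated by hand):
#
#   import torch
#
#   def f(x):
#       return (x.sin() + 1).relu()
#
#   compiled = torch.compile(f, backend="inductor")  # lowers the FX graph here
#   compiled(torch.randn(8))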
|
[
"[email protected]"
] | |
25703d130ef26252405e1f3e2b069e65fe2b38c0
|
fe70f357ac403c3ea6f859ae6648ae1aaae989f3
|
/apis_v1/documentation_source/organization_follow_doc.py
|
67287f52be6a872374bbbef485b943783bf12373
|
[
"MIT"
] |
permissive
|
nickelser/WeVoteServer
|
313242945b3b556bc602240cc767bc2e41397163
|
be2c1367e8263f2cdcf3bd2e27e6cd4a6f35af68
|
refs/heads/develop
| 2021-01-22T02:24:35.099734 | 2017-05-25T01:16:47 | 2017-05-25T01:16:47 | 92,358,884 | 1 | 0 | null | 2017-05-25T03:00:26 | 2017-05-25T03:00:25 | null |
UTF-8
|
Python
| false | false | 3,606 |
py
|
# apis_v1/documentation_source/organization_follow_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def organization_follow_doc_template_values(url_root):
"""
Show documentation about organizationFollow
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'organization_id',
'value': 'integer', # boolean, integer, long, string
'description': 'Internal database unique identifier for organization',
},
{
'name': 'organization_we_vote_id',
'value': 'string', # boolean, integer, long, string
'description': 'The unique identifier for this organization across all networks '
'(either organization_id OR organization_we_vote_id required -- not both.) '
'NOTE: In the future we '
'might support other identifiers used in the industry.',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'A valid voter_device_id parameter was not included. Cannot proceed.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'A valid voter_id was not found from voter_device_id. Cannot proceed.',
},
{
'code': 'VALID_ORGANIZATION_ID_MISSING',
'description': 'A valid organization_id was not found. Cannot proceed.',
},
{
'code': 'ORGANIZATION_NOT_FOUND_ON_CREATE FOLLOWING',
'description': 'An organization with that organization_id was not found. Cannot proceed.',
},
{
'code': 'FOLLOWING',
'description': 'Successfully following this organization',
},
]
try_now_link_variables_dict = {
'organization_id': '1',
}
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "organization_id": integer,\n' \
' "organization_we_vote_id": string,\n' \
'}'
template_values = {
'api_name': 'organizationFollow',
'api_slug': 'organizationFollow',
'api_introduction':
"Call this to save that the voter is following this organization.",
'try_now_link': 'apis_v1:organizationFollowView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
[
"[email protected]"
] | |
84c206d459224db516ac7058f7b83b45e5b62ea3
|
e17ecf9839e8a4d450f6376f7b79d90ae6b365d8
|
/tlpipe/map/fmmode/util/safeeval.py
|
e899d2991d593fa20a92291beacf6764c22b5146
|
[] |
no_license
|
wufq/tlpipe
|
4f490ac47c351085841d8a2bd7a34765511c49e3
|
62928c7fdd16680d56d76c8e1db5d3b863e23d16
|
refs/heads/master
| 2021-05-12T06:55:50.711254 | 2017-08-02T06:36:00 | 2017-08-02T06:36:00 | 117,227,941 | 0 | 0 | null | 2018-01-12T10:30:00 | 2018-01-12T10:30:00 | null |
UTF-8
|
Python
| false | false | 15,348 |
py
|
#----------------------------------------------------------------------
# I, Babar K. Zafar, the author of this code, dedicate any and all
# copyright interest in this code to the public domain. I make this
# dedication for the benefit of the public at large and to the
# detriment of my heirs and successors. I intend this dedication to
# be an overt act of relinquishment in perpetuity of all present and
# future rights to this code under copyright law.
#
# Version 0.1 / May 27 2006
#----------------------------------------------------------------------
import __builtin__
import inspect, compiler.ast
import thread, time
#----------------------------------------------------------------------
# Module globals.
#----------------------------------------------------------------------
# Toggle module level debugging mode.
DEBUG = False
# List of all AST node classes in compiler/ast.py.
all_ast_nodes = \
[name for (name, obj) in inspect.getmembers(compiler.ast)
if inspect.isclass(obj) and issubclass(obj, compiler.ast.Node)]
# List of all builtin functions and types (ignoring exception classes).
# all_builtins = \
# [name for (name, obj) in inspect.getmembers(__builtins__)
# if inspect.isbuiltin(obj) or (inspect.isclass(obj) and \
# not issubclass(obj, Exception))]
all_builtins = \
[name for (name, obj) in inspect.getmembers(__builtin__)
if inspect.isbuiltin(obj) or (inspect.isclass(obj) and \
not issubclass(obj, Exception))]
#----------------------------------------------------------------------
# Utilities.
#----------------------------------------------------------------------
def classname(obj):
return obj.__class__.__name__
def is_valid_ast_node(name):
return name in all_ast_nodes
def is_valid_builtin(name):
return name in all_builtins
def get_node_lineno(node):
return (node.lineno) and node.lineno or 0
#----------------------------------------------------------------------
# Restricted AST nodes & builtins.
#----------------------------------------------------------------------
# Deny evaluation of code if the AST contain any of the following nodes:
unallowed_ast_nodes = [
# 'Add', 'And',
# 'AssAttr', 'AssList', 'AssName', 'AssTuple',
# 'Assert', 'Assign', 'AugAssign',
'Backquote',
# 'Bitand', 'Bitor', 'Bitxor', 'Break',
# 'CallFunc', 'Class', 'Compare', 'Const', 'Continue',
# 'Decorators', 'Dict', 'Discard', 'Div',
# 'Ellipsis', 'EmptyNode',
'Exec',
# 'Expression', 'FloorDiv',
# 'For',
'From',
# 'Function',
# 'GenExpr', 'GenExprFor', 'GenExprIf', 'GenExprInner',
# 'Getattr', 'Global', 'If',
'Import',
# 'Invert',
# 'Keyword', 'Lambda', 'LeftShift',
# 'List', 'ListComp', 'ListCompFor', 'ListCompIf', 'Mod',
# 'Module',
# 'Mul', 'Name', 'Node', 'Not', 'Or', 'Pass', 'Power',
# 'Print', 'Printnl',
'Raise',
# 'Return', 'RightShift', 'Slice', 'Sliceobj',
# 'Stmt', 'Sub', 'Subscript',
'TryExcept', 'TryFinally',
# 'Tuple', 'UnaryAdd', 'UnarySub',
# 'While','Yield'
]
# Deny evaluation of code if it tries to access any of the following builtins:
unallowed_builtins = [
'__import__',
# 'abs', 'apply', 'basestring', 'bool', 'buffer',
# 'callable', 'chr', 'classmethod', 'cmp', 'coerce',
'compile',
# 'complex',
'delattr',
# 'dict',
'dir',
# 'divmod', 'enumerate',
'eval', 'execfile', 'file',
# 'filter', 'float', 'frozenset',
'getattr', 'globals', 'hasattr',
# 'hash', 'hex', 'id',
'input',
# 'int', 'intern', 'isinstance', 'issubclass', 'iter',
# 'len', 'list',
'locals',
# 'long', 'map', 'max', 'min', 'object', 'oct',
'open',
# 'ord', 'pow', 'property', 'range',
'raw_input',
# 'reduce',
'reload',
# 'repr', 'reversed', 'round', 'set',
'setattr',
# 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super',
# 'tuple', 'type', 'unichr', 'unicode',
'vars',
# 'xrange', 'zip'
]
for ast_name in unallowed_ast_nodes:
assert(is_valid_ast_node(ast_name))
for name in unallowed_builtins:
assert(is_valid_builtin(name))
def is_unallowed_ast_node(kind):
return kind in unallowed_ast_nodes
def is_unallowed_builtin(name):
return name in unallowed_builtins
#----------------------------------------------------------------------
# Restricted attributes.
#----------------------------------------------------------------------
# In addition to these we deny access to all lowlevel attrs (__xxx__).
unallowed_attr = [
'im_class', 'im_func', 'im_self',
'func_code', 'func_defaults', 'func_globals', 'func_name',
'tb_frame', 'tb_next',
'f_back', 'f_builtins', 'f_code', 'f_exc_traceback',
'f_exc_type', 'f_exc_value', 'f_globals', 'f_locals']
def is_unallowed_attr(name):
return (name[:2] == '__' and name[-2:] == '__') or \
(name in unallowed_attr)
#----------------------------------------------------------------------
# SafeEvalVisitor.
#----------------------------------------------------------------------
class SafeEvalError(object):
"""
    Base class for all errors which occur while walking the AST.
Attributes:
      errmsg = short description about the nature of the error
      lineno = line offset to where the error occurred in source code
"""
def __init__(self, errmsg, lineno):
self.errmsg, self.lineno = errmsg, lineno
def __str__(self):
return "line %d : %s" % (self.lineno, self.errmsg)
class SafeEvalASTNodeError(SafeEvalError):
"Expression/statement in AST evaluates to a restricted AST node type."
pass
class SafeEvalBuiltinError(SafeEvalError):
"Expression/statement in tried to access a restricted builtin."
pass
class SafeEvalAttrError(SafeEvalError):
"Expression/statement in tried to access a restricted attribute."
pass
class SafeEvalVisitor(object):
"""
Data-driven visitor which walks the AST for some code and makes
sure it doesn't contain any expression/statements which are
declared as restricted in 'unallowed_ast_nodes'. We'll also make
sure that there aren't any attempts to access/lookup restricted
builtin declared in 'unallowed_builtins'. By default we also won't
allow access to lowlevel stuff which can be used to dynamically
    access non-local environments.
Interface:
walk(ast) = validate AST and return True if AST is 'safe'
Attributes:
errors = list of SafeEvalError if walk() returned False
Implementation:
The visitor will automatically generate methods for all of the
available AST node types and redirect them to self.ok or self.fail
reflecting the configuration in 'unallowed_ast_nodes'. While
walking the AST we simply forward the validating step to each of
node callbacks which take care of reporting errors.
"""
def __init__(self):
"Initialize visitor by generating callbacks for all AST node types."
self.errors = []
for ast_name in all_ast_nodes:
# Don't reset any overridden callbacks.
if getattr(self, 'visit' + ast_name, None): continue
if is_unallowed_ast_node(ast_name):
setattr(self, 'visit' + ast_name, self.fail)
else:
setattr(self, 'visit' + ast_name, self.ok)
def walk(self, ast):
"Validate each node in AST and return True if AST is 'safe'."
self.visit(ast)
return self.errors == []
def visit(self, node, *args):
"Recursively validate node and all of its children."
fn = getattr(self, 'visit' + classname(node))
if DEBUG: self.trace(node)
fn(node, *args)
for child in node.getChildNodes():
self.visit(child, *args)
def visitName(self, node, *args):
"Disallow any attempts to access a restricted builtin/attr."
name = node.getChildren()[0]
lineno = get_node_lineno(node)
if is_unallowed_builtin(name):
self.errors.append(SafeEvalBuiltinError( \
"access to builtin '%s' is denied" % name, lineno))
elif is_unallowed_attr(name):
self.errors.append(SafeEvalAttrError( \
"access to attribute '%s' is denied" % name, lineno))
def visitGetattr(self, node, *args):
"Disallow any attempts to access a restricted attribute."
name = node.attrname
lineno = get_node_lineno(node)
if is_unallowed_attr(name):
self.errors.append(SafeEvalAttrError( \
"access to attribute '%s' is denied" % name, lineno))
def ok(self, node, *args):
"Default callback for 'harmless' AST nodes."
pass
def fail(self, node, *args):
"Default callback for unallowed AST nodes."
lineno = get_node_lineno(node)
self.errors.append(SafeEvalASTNodeError( \
"execution of '%s' statements is denied" % classname(node),
lineno))
def trace(self, node):
"Debugging utility for tracing the validation of AST nodes."
print classname(node)
for attr in dir(node):
if attr[:2] != '__':
print ' ' * 4, "%-15.15s" % attr, getattr(node, attr)
#----------------------------------------------------------------------
# Safe 'eval' replacement.
#----------------------------------------------------------------------
class SafeEvalException(Exception):
"Base class for all safe-eval related errors."
pass
class SafeEvalCodeException(SafeEvalException):
"""
    Exception class for reporting all errors which occurred while
validating AST for source code in safe_eval().
Attributes:
code = raw source code which failed to validate
errors = list of SafeEvalError
"""
def __init__(self, code, errors):
self.code, self.errors = code, errors
def __str__(self):
return '\n'.join([str(err) for err in self.errors])
class SafeEvalContextException(SafeEvalException):
"""
Exception class for reporting unallowed objects found in the dict
    intended to be used as the local environment in safe_eval().
Attributes:
keys = list of keys of the unallowed objects
errors = list of strings describing the nature of the error
for each key in 'keys'
"""
def __init__(self, keys, errors):
self.keys, self.errors = keys, errors
def __str__(self):
return '\n'.join([str(err) for err in self.errors])
class SafeEvalTimeoutException(SafeEvalException):
"""
    Exception class for reporting that code evaluation exceeded
    the given time limit.
Attributes:
timeout = time limit in seconds
"""
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return "Timeout limit execeeded (%s secs) during exec" % self.timeout
def exec_timed(code, context, timeout_secs):
"""
    Dynamically execute 'code' using 'context' as the global environment.
    SafeEvalTimeoutException is raised if execution does not finish within
    the given time limit.
"""
assert(timeout_secs > 0)
signal_finished = False
def alarm(secs):
def wait(secs):
            for n in xrange(secs):
time.sleep(1)
if signal_finished: break
else:
thread.interrupt_main()
thread.start_new_thread(wait, (secs,))
try:
alarm(timeout_secs)
exec code in context
signal_finished = True
except KeyboardInterrupt:
raise SafeEvalTimeoutException(timeout_secs)
def safe_eval(code, context = {}, timeout_secs = 5):
"""
Validate source code and make sure it contains no unauthorized
expression/statements as configured via 'unallowed_ast_nodes' and
'unallowed_builtins'. By default this means that code is not
    allowed to import modules or access dangerous builtins like 'open' or
'eval'. If code is considered 'safe' it will be executed via
'exec' using 'context' as the global environment. More details on
how code is executed can be found in the Python Reference Manual
section 6.14 (ignore the remark on '__builtins__'). The 'context'
enviroment is also validated and is not allowed to contain modules
or builtins. The following exception will be raised on errors:
if 'context' contains unallowed objects =
SafeEvalContextException
if code is didn't validate and is considered 'unsafe' =
SafeEvalCodeException
if code did not execute within the given timelimit =
SafeEvalTimeoutException
"""
ctx_errkeys, ctx_errors = [], []
for (key, obj) in context.items():
if inspect.isbuiltin(obj):
ctx_errkeys.append(key)
ctx_errors.append("key '%s' : unallowed builtin %s" % (key, obj))
if inspect.ismodule(obj):
ctx_errkeys.append(key)
ctx_errors.append("key '%s' : unallowed module %s" % (key, obj))
if ctx_errors:
raise SafeEvalContextException(ctx_errkeys, ctx_errors)
ast = compiler.parse(code)
checker = SafeEvalVisitor()
if checker.walk(ast):
exec_timed(code, context, timeout_secs)
else:
raise SafeEvalCodeException(code, checker.errors)
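#----------------------------------------------------------------------
# A minimal usage sketch (assuming this module's imports are in scope):
#
#   try:
#       safe_eval("result = 1 + 1", {})
#   except SafeEvalException, err:
#       print err
#
# Unsafe code such as "open('test.txt', 'w')" raises SafeEvalCodeException
# before anything is executed.
#----------------------------------------------------------------------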
#----------------------------------------------------------------------
# Basic tests.
#----------------------------------------------------------------------
import unittest
class TestSafeEval(unittest.TestCase):
def test_builtin(self):
        # attempt to access an unsafe builtin
self.assertRaises(SafeEvalException,
safe_eval, "open('test.txt', 'w')")
def test_getattr(self):
        # attempt to get around direct attribute access
self.assertRaises(SafeEvalException, \
safe_eval, "getattr(int, '__abs__')")
def test_func_globals(self):
        # attempt to access the global environment where the function was defined
self.assertRaises(SafeEvalException, \
safe_eval, "def x(): pass; print x.func_globals")
def test_lowlevel(self):
        # low-level tricks to access 'object'
self.assertRaises(SafeEvalException, \
safe_eval, "().__class__.mro()[1].__subclasses__()")
def test_timeout_ok(self):
        # attempt to execute 'slow' code which finishes within the time limit
def test(): time.sleep(2)
env = {'test':test}
safe_eval("test()", env, timeout_secs = 5)
def test_timeout_exceed(self):
        # attempt to execute code which never terminates
self.assertRaises(SafeEvalException, \
safe_eval, "while 1: pass")
def test_invalid_context(self):
        # can't pass an environment with modules or builtins
env = {'f' : __builtins__.open, 'g' : time}
self.assertRaises(SafeEvalException, \
safe_eval, "print 1", env)
def test_callback(self):
# modify local variable via callback
self.value = 0
def test(): self.value = 1
env = {'test':test}
safe_eval("test()", env)
self.assertEqual(self.value, 1)
if __name__ == "__main__":
unittest.main()
#----------------------------------------------------------------------
# The End.
#----------------------------------------------------------------------
|
[
"[email protected]"
] | |
cd0d381a1c80726c12b3a0929446246ddd281b45
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4413/codes/1671_1104.py
|
72209cb1de809196d3880e615f97612ea59aa388
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 124 |
py
|
a = float(input("numero 1: "))
b = float(input("numero 2: "))
c = float(input("numero 3: "))
d = float(input("numero 4: "))
|
[
"[email protected]"
] | |
575b7eebc7449a5c217a0894b3470af53972e4bf
|
1dad74c0d1b050e4a66026599f9f883d82dabd60
|
/practices/pil_merge.py
|
ffd3726d223788c952f5145f00a3506c2fd90c2a
|
[] |
no_license
|
DingZiming/python-api-tesing
|
0af0f9963f00f6de38e2c7bf7ebe8d85afb9e286
|
efd220ce3eca5d2728eea5110d8169c767e3eff2
|
refs/heads/master
| 2020-04-09T13:27:23.924448 | 2018-12-04T08:43:32 | 2018-12-04T08:43:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,131 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Technical support: https://www.jianshu.com/u/69f40328d4f0
# Technical support: https://china-testing.github.io/
# https://github.com/china-testing/python-api-tesing/blob/master/practices/pil_merge.py
# Project discussion QQ groups: 630011153 144081101
# CreateDate: 2018-12-04
import math
from PIL import Image
column = 2          # number of columns in the merged grid
width = 802         # width of each source image in pixels
height = 286        # height of each source image in pixels
size = (802, 286)   # (unused)
list_im = [r'd:\code.jpg', r'd:\code.jpg', r'd:\code.jpg', r'd:\code.jpg',
r'd:\code.jpg', r'd:\code.jpg', r'd:\code.jpg', r'd:\code.jpg',
r'd:\code.jpg', r'd:\code.jpg', r'd:\code.jpg']
list_im = list_im*11
imgs = [Image.open(i) for i in list_im]
row_num = math.ceil(len(imgs)/column)
target = Image.new('RGB', (width*column, height*row_num))
for i in range(len(list_im)):
    if i % column == 0:
        # last index of this row; the slice clamps at the end of the list
        end = len(list_im) if i + column > len(list_im) else i + column
        for col, image in enumerate(imgs[i:end]):
            # the 4-tuple paste box must match the source image size exactly
            target.paste(image, (width*col, height*(i//column),
                width*(col + 1), height*(i//column + 1)))
target.show()
target.save(r'd:\code2.jpg')
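# Note (a sketch): Image.paste also accepts a plain 2-tuple upper-left
# corner, so each tile could equivalently be placed with:
#   target.paste(image, (width*col, height*(i//column)))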
|
[
"[email protected]"
] | |
34fd125ce84e0a0bc0a8960ea874107dbbf28dea
|
59afd4e7c4a16838afb11f69ba2f908695c17794
|
/atcoder006/c.py
|
da243ca226b10fdc54e9628581b70bd0187a486f
|
[] |
no_license
|
mugenen/AtCoder-Solution
|
ca29437abdcb90177eca6ff59f499235cb046bfc
|
a25c54a365f41e7a61abc9d4f7a90657c39fdd46
|
refs/heads/master
| 2021-01-01T17:57:49.228749 | 2013-10-12T15:25:10 | 2013-10-12T15:25:10 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 349 |
py
|
# -*- coding: utf-8 -*-
# Greedy: place each weight on the first pile whose top is at least as
# large; otherwise start a new pile. The number of piles is the answer.
import math
N = int(raw_input())
w = int(raw_input())
stack = [[w]]
for i in xrange(N - 1):
w = int(raw_input())
for j in stack:
if j[-1] >= w:
j.append(w)
break
else:
stack.append([w])
stack.sort(lambda x, y: cmp(x[-1], y[-1]))
# print stack
print len(stack)
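# Worked example: N = 3 with weights 4, 1, 3 evolves as
# [[4]] -> [[4, 1]] -> [[4, 1], [3]], so the program prints 2.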
|
[
"[email protected]"
] | |
fac680958628294515257aa44df4dceddd5d0101
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03378/s856989995.py
|
f7d9d1fe3966811a933ff601b0728cdcd2a7c4ea
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 197 |
py
|
n, m, x = map(int, input().split())  # n is read but not used below
a=list(map(int,input().split()))
cnt1, cnt2 = 0, 0
for i in range(m):
if 0 <= a[i] < x:
cnt1 += 1
if x < a[i] <= a[-1]:
cnt2 += 1
print(min(cnt1, cnt2))
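# Worked example: input "10 2 5" and "3 7" gives cnt1 = 1 (values below x)
# and cnt2 = 1 (values above x), so the program prints 1.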
|
[
"[email protected]"
] | |
507054441fa8c8cb81c76f7c51336ccd96dddea1
|
f4ccf2f4bd7f806d32ef7121d949af2b9d45f159
|
/pytorch/scripts/compile_results_pytorch_bs.py
|
fbe7d1bd2e0856abeba20a8c837e1a4f57ff3b53
|
[] |
no_license
|
shadowkun/deeplearning-benchmark
|
77d6089faa89025248c6794f3dcadcb441399dd3
|
85a7854440d74bce2f07b407077c7892f5c7537b
|
refs/heads/master
| 2023-06-19T03:44:40.088762 | 2021-07-13T05:03:54 | 2021-07-13T05:03:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,088 |
py
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import argparse
import pandas as pd
# You need to add your own experiments here so they can be included in the generated csv files
# naming convention
# key: config name
# value: renamed system label that is easier to read
list_system_single = [
('V100', 'V100 32GB'),
('QuadroRTX8000', 'RTX 8000'),
('QuadroRTX6000', 'RTX 6000'),
('QuadroRTX5000', 'RTX 5000'),
('TitanRTX', 'Titan RTX'),
('2080Ti', 'RTX 2080Ti'),
('1080Ti', 'GTX 1080Ti'),
('2080SuperMaxQ', 'RTX 2080 SUPER MAX-Q'),
('2080MaxQ', 'RTX 2080 MAX-Q'),
('2070MaxQ', 'RTX 2070 MAX-Q'),
('3070', 'RTX 3070'),
('3080', 'RTX 3080'),
('3090', 'RTX 3090'),
('A100_PCIe', 'A100 40GB PCIe'),
('A100_SXM4', 'A100 40GB SXM4'),
('A6000', 'RTX A6000'),
('A5000', 'RTX A5000'),
('LambdaCloud_A6000', 'Lambda Cloud — RTX A6000'),
('3080Max-Q', 'RTX 3080 Max-Q'),
('A40', 'RTX A40'),
('A4000', 'RTX A4000'),
]
list_system_multiple = [
('2x2080TiNVlink_trt', '2x RTX 2080Ti NVLink'),
('2x2080Ti_trt', '2x RTX 2080Ti'),
('4x2080TiNVlink_trt', '4x RTX 2080Ti NVLink'),
('4x2080Ti_trt', '4x RTX 2080Ti'),
('8x2080TiNVlink_trt', '8x RTX 2080Ti NVLink'),
('8x2080Ti_trt', '8x RTX 2080Ti'),
('2xQuadroRTX8000NVlink_trt2', '2x RTX 8000 NVLink'),
('2xQuadroRTX8000_trt2', '2x RTX 8000'),
('4xQuadroRTX8000NVlink_trt2', '4x RTX 8000 NVLink'),
('4xQuadroRTX8000_trt2', '4x RTX 8000'),
('8xQuadroRTX8000NVlink_trt2', '8x RTX 8000 NVLink'),
('8xQuadroRTX8000_trt2', '8x RTX 8000'),
('2xV100', '2x V100 32GB'),
('4xV100', '4x V100 32GB'),
('8xV100', '8x V100 32GB'),
('LambdaCloud_4x1080Ti', 'Lambda Cloud — 4x GTX 1080Ti'),
('LambdaCloud_2xQuadroRTX6000', 'Lambda Cloud — 2x RTX 6000'),
('LambdaCloud_4xQuadroRTX6000', 'Lambda Cloud — 4x RTX 6000'),
('LambdaCloud_8xV10016G', 'Lambda Cloud — 8x V100 16GB'),
('Linode_2xQuadroRTX6000', 'Linode Cloud — 2x RTX 6000'),
('p3.16xlarge', 'p3.16xlarge'),
('p3.8xlarge', 'p3.8xlarge'),
('2x3070', '2x RTX 3070'),
('2x3080', '2x RTX 3080'),
('2x3090', '2x RTX 3090'),
('3x3090', '3x RTX 3090'),
('4x3070', '4x RTX 3070'),
('4x3090', '4x RTX 3090'),
('8x3070', '8x RTX 3070'),
('8x3090', '8x RTX 3090'),
('2xA100_PCIe', '2x A100 40GB PCIe'),
('4xA100_PCIe', '4x A100 40GB PCIe'),
('8xA100_PCIe', '8x A100 40GB PCIe'),
('2xA100_SXM4', '2x A100 40GB SXM4'),
('4xA100_SXM4', '4x A100 40GB SXM4'),
('8xA100_SXM4', '8x A100 40GB SXM4'),
('8xA6000', '8x RTX A6000'),
('4xA6000', '4x RTX A6000'),
('2xA6000', '2x RTX A6000'),
('4xA5000', '4x RTX A5000'),
('2xA5000', '2x RTX A5000'),
('8xA100_p4', 'p4d.24xlarge'),
('LambdaCloud_2xA6000', 'Lambda Cloud — 2x RTX A6000'),
('LambdaCloud_4xA6000', 'Lambda Cloud — 4x RTX A6000'),
('8xA40', '8x RTX A40'),
('4xA40', '4x RTX A40'),
('2xA40', '2x RTX A40'),
('8xA4000', '8x RTX A4000'),
('4xA4000', '4x RTX A4000'),
('2xA4000', '2x RTX A4000'),
]
# These are the rules to extract batch sizes from config files.
# Each value is a tuple of:
#   (line offset of the batch-size entry inside the *_PARAMS block,
#    token index of the batch size on that line,
#    whether to multiply by the number of GPUs,
#    output column name)
list_test_fp32 = {
'PyTorch_SSD_FP32': (4, -1, 1, 'ssd'),
'PyTorch_resnet50_FP32': (7, -1, 1, 'resnet50'),
'PyTorch_maskrcnn_FP32': (4, -1, 0, 'maskrcnn'),
'PyTorch_gnmt_FP32': (4, -1, 1, 'gnmt'),
'PyTorch_ncf_FP32': (5, -1, 0, 'ncf'),
'PyTorch_transformerxlbase_FP32': (5, -1, 0, 'transformerxlbase'),
'PyTorch_transformerxllarge_FP32': (5, -1, 0, 'transformerxllarge'),
'PyTorch_tacotron2_FP32': (7, -1, 1, 'tacotron2'),
'PyTorch_waveglow_FP32': (8, -1, 1, 'waveglow'),
'PyTorch_bert_large_squad_FP32': (5, -1, 1, 'bert_large_squad'),
'PyTorch_bert_base_squad_FP32': (5, -1, 1, 'bert_base_squad'),
}
list_test_fp16 = {
'PyTorch_SSD_AMP': (4, -1, 1, 'ssd'),
'PyTorch_resnet50_FP16': (9, -1, 1, 'resnet50'),
'PyTorch_maskrcnn_FP16': (4, -1, 0, 'maskrcnn'),
'PyTorch_gnmt_FP16': (4, -1, 1, 'gnmt'),
'PyTorch_ncf_FP16': (5, -1, 0, 'ncf'),
'PyTorch_transformerxlbase_FP16': (5, -1, 0, 'transformerxlbase'),
'PyTorch_transformerxllarge_FP16': (5, -1, 0, 'transformerxllarge'),
'PyTorch_tacotron2_FP16': (7, -1, 1, 'tacotron2'),
'PyTorch_waveglow_FP16': (8, -1, 1, 'waveglow'),
'PyTorch_bert_large_squad_FP16': (5, -1, 1, 'bert_large_squad'),
'PyTorch_bert_base_squad_FP16': (5, -1, 1, 'bert_base_squad'),
}
def gather(list_test, key, name, df, path_config):
    """Extract the batch sizes for one system from its config file into df."""
    f_name = os.path.join(path_config, 'config_pytorch_' + key + '.sh')
    with open(f_name, 'r') as f:
        lines = f.readlines()
    # The GPU count is declared on a NUM_GPU=<n> line in the config
    idx_gpu = [i for i, s in enumerate(lines) if 'NUM_GPU=' in s]
    num_gpu = int(lines[idx_gpu[0]].rstrip().split("=")[1])
    for test_name, value in sorted(list_test.items()):
        idx = lines.index(test_name + "_PARAMS=(\n")
        line = lines[idx + value[0]].rstrip().split(" ")
        line = list(filter(lambda a: a != "", line))
        # strip the first and last character (quotes) from the batch-size token
        bs = int(line[value[1]][1:-1]) * (num_gpu if value[2] else 1)
        if bs == 1:
            bs = 0
        df.at[name, value[3]] = bs
    df.at[name, 'num_gpu'] = num_gpu
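# For reference, a hypothetical sketch of the config layout this parser
# assumes (inferred from the parsing above; real config_pytorch_*.sh files
# may differ):
#
#   NUM_GPU=4
#   PyTorch_SSD_FP32_PARAMS=(
#       ...
#       --batch_size "64"
#   )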
def main():
parser = argparse.ArgumentParser(description='Gather benchmark results.')
parser.add_argument('--path', type=str, default='scripts/config',
help='path that has the results')
parser.add_argument('--precision', type=str, default='fp32',
choices=['fp32', 'fp16'],
                        help='Choose benchmark precision')
parser.add_argument('--system', type=str, default='all',
choices=['single', 'multiple', 'all'],
help='Choose system type (single or multiple GPUs)')
args = parser.parse_args()
list_test_all = list_test_fp32.copy()
for key, value in list_test_fp16.items():
list_test_all[key] = value
if args.precision == 'fp32':
list_test = list_test_fp32
elif args.precision == 'fp16':
list_test = list_test_fp16
else:
sys.exit("Wrong precision: " + args.precision + ', choose between fp32 and fp16')
if args.system == 'single':
list_system = list_system_single
elif args.system == 'multiple':
list_system = list_system_multiple
else:
list_system = list_system_single + list_system_multiple
columns = []
columns.append('num_gpu')
for test_name, value in sorted(list_test.items()):
columns.append(value[3])
df = pd.DataFrame(index=[i[1] for i in list_system], columns=columns)
for s in list_system:
key = s[0]
s_name = s[1]
gather(list_test, key, s_name, df, args.path)
df.index.name = 'name_gpu'
df.to_csv('pytorch-train-bs-' + args.precision + '.csv')
if __name__ == "__main__":
main()
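# Example invocation (a sketch; the config path depends on your checkout):
#   python compile_results_pytorch_bs.py --path scripts/config --precision fp16 --system single
# This writes pytorch-train-bs-fp16.csv to the current directory.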
|
[
"[email protected]"
] | |
a42805b8e555c430f0797491343fdda4821e6518
|
7d0fdfeba2f6b1b2c4c2ac519ce31a2e0adc8d5d
|
/loan_prediction/__manifest__.py
|
f3789ad0338609af1dd51b9ccba57270464d51fd
|
[] |
no_license
|
Inoxevious/malin_erp-custom-addons
|
54806cdf2b4083ad3f1833ba2d2ab9a65701e17c
|
11951bbad1df51347c2169da93a6ab21b5554e3c
|
refs/heads/main
| 2023-03-27T23:16:57.396133 | 2021-03-29T14:17:28 | 2021-03-29T14:17:28 | 344,091,174 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,108 |
py
|
# -*- coding: utf-8 -*-
{
'name': "loan_prediction",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/colossal/colossal/blob/14.0/colossal/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'views/views.xml',
'views/templates.xml',
'views/loan.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
'images': [],
'license': 'AGPL-3',
'installable': True,
'application': True,
'auto_install': False,
}
|
[
"[email protected]"
] | |
5fc865d16ff6b2687f32bb164813ffafd1e684be
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/coverage-big-4885.py
|
3edf480f7df952263fa6bc691c5e9064090b4063
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,348 |
py
|
count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
        def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
|
[
"[email protected]"
] |