max_stars_repo_path (string, len 4–245) | max_stars_repo_name (string, len 7–115) | max_stars_count (int64, 101–368k) | id (string, len 2–8) | content (string, len 6–1.03M)
---|---|---|---|---|
DIYgod/0006/a.py | saurabh896/python-1 | 3,976 | 12784965 | <gh_stars>1000+
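# Approach 1: read every line into memory at once with readlines().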
f = open('a.txt', 'rb')
lines = f.readlines()
for line in lines:
pass
f.close()
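# Approach 2: iterate over the file object directly, one line at a time.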
f = open('a.txt', 'rb')
for line in f:
pass
f.close()
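# Approach 3: read lines explicitly with readline() until EOF.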
f = open('a.txt', 'rb')
while True:
    line = f.readline()
    if not line:
        break
    pass
f.close()
|
harvester/server/expiring_queue.py | Nisthar/CaptchaHarvester | 545 | 12784971 | <reponame>Nisthar/CaptchaHarvester
from queue import Empty, Queue, SimpleQueue
from threading import Timer
from typing import Any
class ExpiringQueue(Queue):
def __init__(self, timeout: int, maxsize=0):
super().__init__(maxsize)
self.timeout = timeout
self.timers: 'SimpleQueue[Timer]' = SimpleQueue()
def put(self, item: Any) -> None:
thread = Timer(self.timeout, self.expire)
thread.start()
self.timers.put(thread)
super().put(item)
def get(self, block=True, timeout=None) -> Any:
thread = self.timers.get(block, timeout)
thread.cancel()
return super().get(block, timeout)
def expire(self):
self.get()
def to_list(self):
with self.mutex:
return list(self.queue)
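# Demo: item 1 is enqueued at t=0 and expires at t=1.0s; by t≈1.1s only item 2
# remains, so the second get_nowait() below raises queue.Empty.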
if __name__ == '__main__':
import time
eq = ExpiringQueue(timeout=1)
print(eq)
eq.put(1)
time.sleep(.5)
eq.put(2)
print(eq.to_list())
time.sleep(.6)
print(eq.get_nowait())
print(eq.get_nowait())
|
ctc_fast/swbd-utils/score_frag_utts.py | SrikarSiddarth/stanford-ctc | 268 | 12784986 | <filename>ctc_fast/swbd-utils/score_frag_utts.py<gh_stars>100-1000
'''
Look at error rates only scoring utterances that contain frags
'''
FRAG_FILE = '/deep/u/zxie/ctc_clm_transcripts/frags.txt'
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('hyp')
parser.add_argument('stm')
parser.add_argument('hyp_out')
parser.add_argument('stm_out')
args = parser.parse_args()
# Read in utt keys with frags
frag_utts = set()
with open(FRAG_FILE, 'r') as fin:
lines = fin.read().strip().splitlines()
for l in lines:
utt_key, frag_word = l.split(' ', 1)
frag_utts.add(utt_key)
# Read in hyps
with open(args.hyp, 'r') as fin:
lines = fin.read().splitlines()
# Write the filtered hyp file
frag_utt_count = 0
tot_utt_count = 0
fout = open(args.hyp_out, 'w')
for l in lines:
parts = l.split(' ', 1)
if len(parts) == 1:
assert False
utt_key, utt = parts
if utt_key not in frag_utts:
tot_utt_count += 1
continue
fout.write(l + '\n')
tot_utt_count += 1
frag_utt_count += 1
fout.close()
# Sanity check
print '%d/%d utts contain frags' % (frag_utt_count, tot_utt_count)
# Read in stm reference file
stm_frag_utts = 0
frag_utt_starts = set()
for ou in frag_utts:
ou_start = '-'.join(ou.split('-')[0:2])
frag_utt_starts.add(ou_start)
with open(args.stm, 'r') as fin:
lines = fin.read().strip().splitlines()
fout = open(args.stm_out, 'w')
for l in lines:
# Handle comments
if l.startswith(';;'):
fout.write(l+'\n')
continue
parts = l.split(' ', 6)
utt_key_part, channel, utt_key, t_start, t_end, metadata, utt = parts
stm_utt_key = '%s-%s_%06d' % (utt_key_part, channel.lower(), int(float(t_start) * 100))
print stm_utt_key
if stm_utt_key not in frag_utt_starts:
continue
fout.write(l + '\n')
stm_frag_utts += 1
fout.close()
# Sanity check
print '%d/%d stm utts contain frags' % (stm_frag_utts, tot_utt_count)
|
samples/python/40.timex-resolution/ambiguity.py | Aliacf21/BotBuilder-Samples | 1,998 | 12784998 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from recognizers_date_time import recognize_datetime, Culture
class Ambiguity:
"""
    TIMEX expressions are designed to represent ambiguous rather than definite dates. For
    example: "Monday" could be any Monday ever, and "May 5th" could be any one of the
    possible May 5ths in the past or the future. TIMEX does not represent ambiguous times,
    so if the natural language mentioned 4 o'clock it could be either 4AM or 4PM; for that,
    the recognizer (and by extension LUIS) would return two TIMEX expressions. A TIMEX
    expression can include both date and time parts, so date ambiguity can be combined with
    multiple results. Code that deals with TIMEX expressions is frequently dealing with
    sets of TIMEX expressions.
"""
@staticmethod
def date_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"Either Saturday or Sunday would work.", Culture.English
)
# We should find two results in this example.
for result in results:
# The resolution includes two example values: going backwards and forwards from NOW in the calendar.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
# There is also either a "value" property on each value or "start" and "end".
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
@staticmethod
def time_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"We would like to arrive at 4 o'clock or 5 o'clock.", Culture.English
)
# We should find two results in this example.
for result in results:
# The resolution includes two example values: one for AM and one for PM.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
# TIMEX expressions don't capture time ambiguity so there will be two distinct expressions for each result.
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
@staticmethod
def date_time_ambiguity():
# Run the recognizer.
results = recognize_datetime(
"It will be ready Wednesday at 5 o'clock.", Culture.English
)
# We should find a single result in this example.
for result in results:
# The resolution includes four example values: backwards and forward in the calendar and then AM and PM.
# Each result includes a TIMEX expression that captures the inherent date but not time ambiguity.
# We are interested in the distinct set of TIMEX expressions.
distinct_timex_expressions = {
value["timex"]
for value in result.resolution["values"]
if "timex" in value
}
# TIMEX expressions don't capture time ambiguity so there will be two distinct expressions for each result.
print(f"{result.text} ({','.join(distinct_timex_expressions)})")
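# Minimal driver (a sketch, not part of the original sample): run each
# demonstration in turn. Assumes the recognizers-date-time package is installed.
if __name__ == "__main__":
    Ambiguity.date_ambiguity()
    Ambiguity.time_ambiguity()
    Ambiguity.date_time_ambiguity()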
|
tests/test_gosubdag_relationships.py | flying-sheep/goatools | 477 | 12785033 | #!/usr/bin/env python
"""Plot both the standard 'is_a' field and the optional 'part_of' relationship."""
from __future__ import print_function
__copyright__ = "Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved."
import os
import sys
import timeit
import datetime
from goatools.base import download_go_basic_obo
from goatools.obo_parser import GODag
from goatools.gosubdag.gosubdag import GoSubDag
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")
def test_gosubdag_relationships(prt=sys.stdout):
"""Plot both the standard 'is_a' field and the 'part_of' relationship."""
goids = set([
"GO:0032501",
"GO:0044707", # alt_id: GO:0032501 # BP 1011 L01 D01 B multicellular organismal process
"GO:0050874",
"GO:0007608", # sensory perception of smell
"GO:0050911"]) # detection of chemical stimulus involved in sensory perception of smell
# Load GO-DAG: Load optional 'relationship'
fin_obo = os.path.join(REPO, "go-basic.obo")
download_go_basic_obo(fin_obo, prt, loading_bar=None)
go2obj_plain = GODag(fin_obo)
go2obj_relat = GODag(fin_obo, optional_attrs=['relationship'])
print("\nCreate GoSubDag with GO DAG containing no relationships.")
tic = timeit.default_timer()
# Create Plot object; Plot both 'is_a' and optional 'part_of' relationship
gosubdag = GoSubDag(goids, go2obj_plain, relationships=False, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_plain = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
print("\nCreate GoSubDag while IGNORING relationships")
# Create Plot object; Plot both 'is_a' and optional 'part_of' relationship
gosubdag = GoSubDag(goids, go2obj_relat, relationships=False, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_false = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_plain == goids_false
print("\nCreate GoSubDag while loading only the 'part_of' relationship")
gosubdag = GoSubDag(goids, go2obj_relat, relationships=['part_of'], prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_part_of = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_plain.intersection(goids_part_of) == goids_plain
assert len(goids_part_of) > len(goids_plain)
print("\nCreate GoSubDag while loading all relationships")
gosubdag = GoSubDag(goids, go2obj_relat, relationships=True, prt=prt)
# gosubdag.prt_goids(gosubdag.go2obj)
goids_true = set(gosubdag.go2obj)
tic = _rpt_hms(tic, len(gosubdag.go2obj))
assert goids_part_of.intersection(goids_true) == goids_part_of
assert len(goids_true) >= len(goids_part_of)
def _rpt_hms(tic, num_goids):
"""Report the elapsed time for particular events."""
elapsed_time = str(datetime.timedelta(seconds=(timeit.default_timer()-tic)))
print("Elapsed HMS: {HMS} {N} GO IDs".format(HMS=elapsed_time, N=num_goids))
return timeit.default_timer()
if __name__ == '__main__':
test_gosubdag_relationships()
# Copyright (C) 2016-2018, <NAME>, <NAME>, All rights reserved.
|
flash/video/classification/input_transform.py | Actis92/lightning-flash | 1,457 | 12785058 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Callable
import torch
from flash.core.data.io.input_transform import InputTransform
from flash.core.data.transforms import ApplyToKeys
from flash.core.utilities.imports import _KORNIA_AVAILABLE, _PYTORCHVIDEO_AVAILABLE, requires
if _KORNIA_AVAILABLE:
import kornia.augmentation as K
if _PYTORCHVIDEO_AVAILABLE:
from pytorchvideo.transforms import UniformTemporalSubsample
from torchvision.transforms import CenterCrop, Compose, RandomCrop
else:
ClipSampler, LabeledVideoDataset, EncodedVideo, ApplyTransformToKey = None, None, None, None
@requires("video")
@dataclass
class VideoClassificationInputTransform(InputTransform):
image_size: int = 244
temporal_sub_sample: int = 8
mean: torch.Tensor = torch.tensor([0.45, 0.45, 0.45])
std: torch.Tensor = torch.tensor([0.225, 0.225, 0.225])
data_format: str = "BCTHW"
same_on_frame: bool = False
def per_sample_transform(self) -> Callable:
if self.training:
per_sample_transform = [RandomCrop(self.image_size, pad_if_needed=True)]
else:
per_sample_transform = [CenterCrop(self.image_size)]
return ApplyToKeys(
"video", Compose([UniformTemporalSubsample(self.temporal_sub_sample)] + per_sample_transform)
)
def per_batch_transform_on_device(self) -> Callable:
return ApplyToKeys(
"video",
K.VideoSequential(
K.Normalize(self.mean, self.std),
data_format=self.data_format,
same_on_frame=self.same_on_frame,
),
)
|
tensorflow_graphics/datasets/features/camera_feature_test.py | Liang813/graphics | 2,759 | 12785120 | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for tensorflow_graphics.datasets.features.camera_feature."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow_graphics.datasets.features import camera_feature
class CameraFeatureTest(tfds.testing.FeatureExpectationsTestCase):
"""Test Cases for Camera FeatureConnector."""
def __get_camera_params(self):
pose = {'R': np.eye(3).astype(np.float32),
't': np.zeros(3).astype(np.float32)}
f = 35.
optical_center = (640 / 2, 480 / 2)
return pose, f, optical_center
def test_simple_camera(self):
"""Tests camera parameters with fixed focal length, no skew and no aspect ratio."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_intrinsics = np.asarray([[expected_f, 0, expected_center[0]],
[0, expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f, 'optical_center': expected_center,
'pose': expected_pose}
lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'look_at': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'position': np.array([0, 0, 0], dtype=np.float32)
}
}
raising_pose_entry = {
'f': expected_f,
'optical_center': expected_center,
'pose': np.eye(4)
}
raising_pose_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {'rot': np.eye(3), 'trans': np.zeros(3)}
}
raising_lookat_inputs = {
'f': expected_f,
'optical_center': expected_center,
'pose': {
'l': np.array([0, 0, -1], dtype=np.float32),
'up': np.array([0, 1, 0], dtype=np.float32),
'C': np.array([0, 0, 0], dtype=np.float32)
}
}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=lookat_inputs,
expected=expected_camera
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_lookat_inputs,
raise_cls=ValueError,
raise_msg='Wrong keys for pose feature provided'
),
tfds.testing.FeatureExpectationItem(
value=raising_pose_entry,
raise_cls=ValueError,
raise_msg='Pose needs to be a dictionary'
),
],
)
def test_camera_with_aspect_ratio_and_skew(self):
"""Tests camera parameters with fixed focal length, aspect_ratio and skew."""
expected_pose, expected_f, expected_center = self.__get_camera_params()
expected_aspect_ratio = expected_center[0] / expected_center[1]
expected_skew = 0.6
expected_intrinsics = np.asarray(
[[expected_f, expected_skew, expected_center[0]],
[0, expected_aspect_ratio * expected_f, expected_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_center,
'skew': expected_skew,
'aspect_ratio': expected_aspect_ratio,
'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
],
)
def test_full_camera_calibration_matrix(self):
"""Tests camera parameters with different focal length per camera axis and skew."""
expected_pose, _, expected_optical_center = self.__get_camera_params()
expected_skew = 0.6
expected_f = (35., 40.)
expected_intrinsics = np.array(
[[expected_f[0], expected_skew, expected_optical_center[0]],
[0, expected_f[1], expected_optical_center[1]],
[0, 0, 1]], dtype=np.float32)
expected_camera = {'pose': expected_pose, 'intrinsics': expected_intrinsics}
inputs = {'f': expected_f,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
raising_inputs = {'f': expected_f,
'aspect_ratio': 1.5,
'optical_center': expected_optical_center,
'skew': expected_skew, 'pose': expected_pose}
self.assertFeature(
feature=camera_feature.Camera(),
shape={
'pose': {
'R': (3, 3),
't': (3,)
},
'intrinsics': (3, 3)
},
dtype={
'pose': {
'R': tf.float32,
't': tf.float32
},
'intrinsics': tf.float32
},
tests=[
tfds.testing.FeatureExpectationItem(
value=inputs,
expected=expected_camera,
),
tfds.testing.FeatureExpectationItem(
value=raising_inputs,
raise_cls=ValueError,
raise_msg='If aspect ratio is provided, f needs to '
'be a single float',
),
],
)
if __name__ == '__main__':
tfds.testing.test_main()
|
__scraping__/investing.com - request, BS/main.py | whitmans-max/python-examples | 140 | 12785164 |
# date: 2020.09.11
# author: Bartłomiej "furas" Burek (https://blog.furas.pl)
# https://stackoverflow.com/questions/63840415/how-to-scrape-website-tables-where-the-value-can-be-different-as-we-chose-but-th
import requests
from bs4 import BeautifulSoup
import csv
url = 'https://id.investing.com/instruments/HistoricalDataAjax'
payload = {
"curr_id": "8830",
"smlID": "300004",
"header": "Data+Historis+Emas+Berjangka",
"st_date": "01/30/2020",
"end_date": "12/31/2020",
"interval_sec": "Daily",
"sort_col": "date",
"sort_ord": "DESC",
"action":"historical_data"
}
headers = {
#"Referer": "https://id.investing.com/commodities/gold-historical-data",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0",
"X-Requested-With": "XMLHttpRequest"
}
fh = open('output.csv', 'w')
csv_writer = csv.writer(fh)
for year in range(2010, 2021):
print('year:', year)
payload["st_date"] = f"01/01/{year}"
payload["end_date"] = f"12/31/{year}"
r = requests.post(url, data=payload, headers=headers)
#print(r.text)
soup = BeautifulSoup(r.text, 'lxml')
table = soup.find('table')
for row in table.find_all('tr')[1:]: # [1:] to skip header
row_data = [item.text for item in row.find_all('td')]
print(row_data)
csv_writer.writerow(row_data)
fh.close()
|
lte/gateway/python/integ_tests/s1aptests/test_send_error_ind_for_dl_nas_with_auth_req.py | Aitend/magma | 849 | 12785175 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import s1ap_types
import s1ap_wrapper
class TestSendErrorIndForDlNasWithAuthReq(unittest.TestCase):
"""Test sending of error indication for DL NAS message
carrying authentication request
"""
def setUp(self):
"""Initialize"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
def tearDown(self):
"""Cleanup"""
self._s1ap_wrapper.cleanup()
def test_send_error_ind_for_dl_nas_with_auth_req(self):
"""Send error indication after receiving authentication request"""
self._s1ap_wrapper.configIpBlock()
self._s1ap_wrapper.configUEDevice(1)
req = self._s1ap_wrapper.ue_req
attach_req = s1ap_types.ueAttachRequest_t()
attach_req.ue_Id = req.ue_id
sec_ctxt = s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT
id_type = s1ap_types.TFW_MID_TYPE_IMSI
eps_type = s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH
attach_req.mIdType = id_type
attach_req.epsAttachType = eps_type
attach_req.useOldSecCtxt = sec_ctxt
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ATTACH_REQUEST, attach_req,
)
print("************************* Sent attach request")
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_AUTH_REQ_IND.value,
)
print("************************* Received authentication request")
# Send error indication
error_ind = s1ap_types.fwNbErrIndMsg_t()
# isUeAssoc flag to include optional MME_UE_S1AP_ID and eNB_UE_S1AP_ID
error_ind.isUeAssoc = True
error_ind.ue_Id = req.ue_id
error_ind.cause.pres = True
# Radio network causeType = 0
error_ind.cause.causeType = 0
# causeVal - Unknown-pair-ue-s1ap-id
error_ind.cause.causeVal = 15
print("*** Sending error indication ***")
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.ENB_ERR_IND_MSG, error_ind,
)
# Context release
response = self._s1ap_wrapper.s1_util.get_response()
self.assertEqual(
response.msg_type, s1ap_types.tfwCmd.UE_CTX_REL_IND.value,
)
print("************************* Received UE_CTX_REL_IND")
if __name__ == "__main__":
unittest.main()
|
gerapy/pipelines/mongodb.py | hantmac/Gerapy | 2,888 | 12785176 | <filename>gerapy/pipelines/mongodb.py<gh_stars>1000+
import pymongo
from twisted.internet.threads import deferToThread
class MongoDBPipeline(object):
def __init__(self, mongodb_uri, mongodb_database):
self.mongodb_uri = mongodb_uri
self.mongodb_database = mongodb_database
@classmethod
def from_crawler(cls, crawler):
return cls(
mongodb_uri=crawler.settings.get('MONGODB_URI'),
mongodb_database=crawler.settings.get('MONGODB_DATABASE')
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongodb_uri)
self.database = self.client[self.mongodb_database]
def _process_item(self, item, spider):
allowed_spiders = item.mongodb_spiders
allowed_collections = item.mongodb_collections
if allowed_spiders and spider.name in allowed_spiders:
for allowed_collection in allowed_collections:
self.database[allowed_collection].insert(dict(item))
return item
def close_spider(self, spider):
self.client.close()
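    # Defer the blocking insert to a thread pool so the Twisted reactor
    # driving Scrapy is never blocked by MongoDB I/O.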
def process_item(self, item, spider):
        return deferToThread(self._process_item, item, spider)
|
examples/pybullet/examples/frictionCone.py | stolk/bullet3 | 158 | 12785184 | import pybullet as p
import time
import math
p.connect(p.GUI)
useMaximalCoordinates = False
p.setGravity(0, 0, -10)
plane = p.loadURDF("plane.urdf", [0, 0, -1], useMaximalCoordinates=useMaximalCoordinates)
p.setRealTimeSimulation(0)
velocity = 1
num = 40
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1) #disable this to make it faster
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, 0)
p.setPhysicsEngineParameter(enableConeFriction=1)
for i in range(num):
print("progress:", i, num)
x = velocity * math.sin(2. * 3.1415 * float(i) / num)
y = velocity * math.cos(2. * 3.1415 * float(i) / num)
print("velocity=", x, y)
sphere = p.loadURDF("sphere_small_zeroinertia.urdf",
flags=p.URDF_USE_INERTIA_FROM_FILE,
useMaximalCoordinates=useMaximalCoordinates)
p.changeDynamics(sphere, -1, lateralFriction=0.02)
#p.changeDynamics(sphere,-1,rollingFriction=10)
p.changeDynamics(sphere, -1, linearDamping=0)
p.changeDynamics(sphere, -1, angularDamping=0)
p.resetBaseVelocity(sphere, linearVelocity=[x, y, 0])
prevPos = [0, 0, 0]
for i in range(2048):
p.stepSimulation()
pos = p.getBasePositionAndOrientation(sphere)[0]
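    # Draw the trajectory only while bit 6 of the step counter is set,
    # i.e. during alternating 64-step windows, which thins out the debug lines.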
if (i & 64):
p.addUserDebugLine(prevPos, pos, [1, 0, 0], 1)
prevPos = pos
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
while (1):
time.sleep(0.01)
|
tools/third_party/pywebsocket3/example/cgi-bin/hi.py | meyerweb/wpt | 2,479 | 12785192 | <gh_stars>1000+
#!/usr/bin/env python
print('Content-Type: text/plain')
print('')
print('Hi from hi.py')
|
scripts/visualize/match.py | facebookresearch/banmo | 201 | 12785246 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# TODO: pass ft_cse to use fine-tuned feature
# TODO: pass fine_steps -1 to use fine samples
from absl import flags, app
import sys
sys.path.insert(0,'')
sys.path.insert(0,'third_party')
import numpy as np
from matplotlib import pyplot as plt
import matplotlib
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones, draw_lines, vis_match
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3, obj_to_cam,\
Kmatinv, K2mat, K2inv, sample_xy, resample_dp,\
raycast
from nnutils.loss_utils import kp_reproj, feat_match, kp_reproj_loss
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
def construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far, flip=True):
device = dp_feats_rsmp.device
bs,nsample,_ =xys.shape
opts = model.opts
embedid=model.embedid
embedid = embedid.long().to(device)[:,None]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
rtk_vec = rays['rtk_vec']
del rays
feats_at_samp = [dp_feats_rsmp[i].view(model.num_feat,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
# TODO implement for se3
if opts.lbs and model.num_bone_used>0:
bone_rts = model.nerf_body_rts(embedid)
bone_rts = bone_rts.repeat(1,nsample,1)
# TODO rearrange inputs
feats_at_samp = feats_at_samp.view(-1, model.num_feat)
xys = xys.view(-1,1,2)
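    # Swap each consecutive pair of batch entries so that rays sampled in one
    # frame are evaluated against the camera/bone transforms of its partner frame.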
if flip:
rtk_vec = rtk_vec.view(bs//2,2,-1).flip(1).view(rtk_vec.shape)
bone_rts = bone_rts.view(bs//2,2,-1).flip(1).view(bone_rts.shape)
rays = {'rtk_vec': rtk_vec,
'bone_rts': bone_rts}
return rays, feats_at_samp, xys
def match_frames(trainer, idxs, nsample=200):
idxs = [int(i) for i in idxs.split(' ')]
bs = len(idxs)
opts = trainer.opts
device = trainer.device
model = trainer.model
model.eval()
# load frames and aux data
for dataset in trainer.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idxs:
batch.append( trainer.evalloader.dataset[i] )
batch = trainer.evalloader.collate_fn(batch)
model.set_input(batch)
rtk = model.rtk
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
kaug = model.kaug # according to cropping, p = Kaug Kmat P
Kaug = K2inv(kaug)
Kinv = Kmatinv(Kaug.matmul(Kmat))
near_far = model.near_far[model.frameid.long()]
dp_feats_rsmp = model.dp_feats
# construct rays for sampled pixels
rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=False)
rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
Rmat, Tmat, Kinv, near_far)
model.update_delta_rts(rays)
# re-project
with torch.no_grad():
pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
model.latest_vars['obj_bound'],grid_size=20,is_training=False)
pts_pred = pts_pred.view(bs,nsample,3)
xy_reproj = kp_reproj(pts_pred, model.nerf_models, model.embedding_xyz, rays)
# draw
imgs_trg = model.imgs.view(bs//2,2,-1).flip(1).view(model.imgs.shape)
xy_reproj = xy_reproj.view(bs,nsample,2)
xys = xys.view(bs,nsample, 2)
sil_at_samp = torch.stack([model.masks[i].view(-1,1)[rand_inds[i]] \
for i in range(bs)],0) # bs,ns,1
for i in range(bs):
img1 = model.imgs[i]
img2 = imgs_trg[i]
img = torch.cat([img1, img2],2)
valid_idx = sil_at_samp[i].bool()[...,0]
p1s = xys[i][valid_idx]
p2s = xy_reproj[i][valid_idx]
p2s[...,0] = p2s[...,0] + img1.shape[2]
img = draw_lines(img, p1s,p2s)
cv2.imwrite('tmp/match_%04d.png'%i, img)
# visualize matching error
if opts.render_size<=128:
with torch.no_grad():
rendered, rand_inds = model.nerf_render(rtk, kaug, model.embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
xyz_camera = rendered['xyz_camera_vis'][0].reshape(opts.render_size**2,-1)
xyz_canonical = rendered['xyz_canonical_vis'][0].reshape(opts.render_size**2,-1)
skip_idx = len(xyz_camera)//50 # vis 50 rays
trimesh.Trimesh(xyz_camera[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_camera_pts.obj')
trimesh.Trimesh(xyz_canonical[0::skip_idx].reshape(-1,3).cpu()).\
export('tmp/match_canonical_pts.obj')
vis_match(rendered, model.masks, model.imgs,
bs,opts.img_size, opts.ndepth)
## construct rays for all pixels
#rand_inds, xys = sample_xy(opts.img_size, bs, nsample, device,return_all=True)
#rays, feats_at_samp, xys = construct_rays(dp_feats_rsmp, model, xys, rand_inds,
# Rmat, Tmat, Kinv, near_far, flip=False)
#with torch.no_grad():
# pts_pred = feat_match(model.nerf_feat, model.embedding_xyz, feats_at_samp,
# model.latest_vars['obj_bound'],grid_size=20,is_training=False)
# pts_pred = pts_pred.view(bs,opts.render_size**2,3)
# proj_err = kp_reproj_loss(pts_pred, xys, model.nerf_models,
# model.embedding_xyz, rays)
# proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
# proj_err = proj_err/opts.img_size * 2
# results = {}
# results['proj_err'] = proj_err
## visualize current error stats
#feat_err=model.latest_vars['fp_err'][:,0]
#proj_err=model.latest_vars['fp_err'][:,1]
#feat_err = feat_err[feat_err>0]
#proj_err = proj_err[proj_err>0]
#print('feat-med: %f'%(np.median(feat_err)))
#print('proj-med: %f'%(np.median(proj_err)))
#plt.hist(feat_err,bins=100)
#plt.savefig('tmp/viser_feat_err.jpg')
#plt.clf()
#plt.hist(proj_err,bins=100)
#plt.savefig('tmp/viser_proj_err.jpg')
# visualize codes
with torch.no_grad():
fid = torch.Tensor(range(0,len(model.impath))).cuda().long()
D=model.pose_code(fid)
D = D.view(len(fid),-1)
##TODO
#px = torch.Tensor(range(len(D))).cuda()
#py = px*2
#pz = px*5+1
#D = torch.stack([px,py,pz],-1)
D = D-D.mean(0)[None]
A = D.T.matmul(D)/D.shape[0] # fxf
U,S,V=torch.svd(A) #
code_proj_3d=D.matmul(V[:,:3])
cmap = matplotlib.cm.get_cmap('cool')
time = np.asarray(range(len(model.impath)))
time = time/time.max()
code_proj_3d=code_proj_3d.detach().cpu().numpy()
trimesh.Trimesh(code_proj_3d, vertex_colors=cmap(time)).export('tmp/0.obj')
#plt.figure(figsize=(16,16))
plot_stack = []
weight_dir = opts.model_path.rsplit('/',1)[0]
bne_path = sorted(glob.glob('%s/%s-*bne-mrender*.jpg'%\
(weight_dir, opts.seqname)))
img_path = model.impath.copy()
## remove the last img for each video to make shape consistent with bone renders
#for i in model.data_offset[1:][::-1]:
# img_path.remove(img_path[i-1])
# code_proj_3d = np.delete(code_proj_3d, i-1,0)
# plot the first video
img_path = img_path [:model.data_offset[1]-2]
code_proj_3d = code_proj_3d[:model.data_offset[1]-2]
try:
bne_path = bne_path [:model.data_offset[1]-2]
except:
pass
for i in range(len(code_proj_3d)):
plt.plot(code_proj_3d[i,0], code_proj_3d[i,1], color=cmap(time[i]), marker='o')
plt.annotate(str(i), (code_proj_3d[i,0], code_proj_3d[i,1]))
plt.xlim(code_proj_3d[:,0].min(), code_proj_3d[:,0].max())
plt.ylim(code_proj_3d[:,1].min(), code_proj_3d[:,1].max())
fig = plt.gcf()
fig.canvas.draw()
plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
plot = plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
print('plot pose code of frame id:%03d'%i)
if len(bne_path) == len(code_proj_3d):
bneimg = cv2.imread(bne_path[i])
bneimg = cv2.resize(bneimg,\
(bneimg.shape[1]*plot.shape[0]//bneimg.shape[0], plot.shape[0]))
img=cv2.imread(img_path[i])[:,:,::-1]
img = cv2.resize(img,\
(img.shape[1]*plot.shape[0]//img.shape[0], plot.shape[0]))
plot = np.hstack([img, bneimg, plot])
plot_stack.append(plot)
save_vid('tmp/code', plot_stack, suffix='.mp4',
upsample_frame=150.,fps=30)
save_vid('tmp/code', plot_stack, suffix='.gif',
upsample_frame=150.,fps=30)
# vis dps
cv2.imwrite('tmp/match_dpc.png', model.dp_vis[model.dps[0].long()].cpu().numpy()*255)
def main(_):
opts.img_size=opts.render_size
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
#write matching function
img_match = match_frames(trainer, opts.match_frames)
if __name__ == '__main__':
app.run(main)
|
extras/api/urls.py | maznu/peering-manager | 127 | 12785248 | from peering_manager.api import OrderedDefaultRouter
from . import views
router = OrderedDefaultRouter()
router.APIRootView = views.ExtrasRootView
router.register("ix-api", views.IXAPIViewSet)
router.register("job-results", views.JobResultViewSet)
router.register("webhooks", views.WebhookViewSet)
app_name = "extras-api"
urlpatterns = router.urls
|
pyinfra/facts/yum.py | blarghmatey/pyinfra | 1,532 | 12785286 | from pyinfra.api import FactBase
from .util import make_cat_files_command
from .util.packaging import parse_yum_repositories
class YumRepositories(FactBase):
'''
Returns a list of installed yum repositories:
.. code:: python
[
{
'name': 'CentOS-$releasever - Apps',
'baseurl': 'http://mirror.centos.org/$contentdir/$releasever/Apps/$basearch/os/',
'gpgcheck': '1',
'enabled': '1',
'gpgkey': 'file:///<KEY>',
},
]
'''
command = make_cat_files_command(
'/etc/yum.conf',
'/etc/yum.repos.d/*.repo',
)
requires_command = 'yum'
default = list
def process(self, output):
return parse_yum_repositories(output)
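# Usage sketch (assumes a pyinfra deploy context; the import path mirrors this
# module's location in the repo):
#   from pyinfra import host
#   from pyinfra.facts.yum import YumRepositories
#   repos = host.get_fact(YumRepositories)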
|
codegeneration/code_manip/file_utils.py | sacceus/BabylonCpp | 277 | 12785308 | <gh_stars>100-1000
import os
import os.path
import codecs
from typing import *
from bab_types import *
def files_with_extension(folder: Folder, extension: Extension) -> List[FileFullPath]:
r = []
ext_len = len(extension)
if not os.path.isdir(folder):
print("ouch")
for root, _, files in os.walk(folder):
for file in files:
if file[-ext_len:] == extension:
full_file = root + "/" + file
full_file = full_file.replace("\\", "/")
r.append(full_file)
return r
def file_has_utf8_bom(file: FileFullPath) -> bool:
bom_marker = codecs.BOM_UTF8
with open(file, "rb") as f:
content = f.read()
start = content[:3]
if start == bom_marker: #u'\ufeff':
return True
return False
def file_has_windows_crlf(file: FileFullPath) -> bool:
with open(file, "rb") as f:
content = f.read()
nb_lf = 0
nb_crlf = 0
was_last_char_cr = False
for i, b in enumerate(content):
if b == b'\n'[0]:
if not was_last_char_cr:
nb_lf = nb_lf + 1
if b == b'\r'[0]:
nb_crlf = nb_crlf + 1
was_last_char_cr = True
else:
was_last_char_cr = False
if nb_lf > 0 and nb_crlf > 0:
raise Exception("Mixed CR CRLF!")
return nb_crlf > nb_lf
def read_file_lines_no_eol(file_full_path: FileFullPath) -> List[CodeLine]:
with open(file_full_path, "r") as f:
content = f.read()
lines = content.split("\n")
return lines
def write_file_lines_no_eol(file_full_path: FileFullPath, lines: List[CodeLine]):
content = "\n".join(lines)
with open(file_full_path, "w") as f:
f.write(content)
def write_file_lines_no_eol_formatted(
file: FileFullPath,
lines: List[CodeLine],
has_utf8_bom: bool,
has_windows_crlf: bool
):
bom_marker = codecs.BOM_UTF8
if has_windows_crlf:
content = "\r\n".join(lines)
else:
content ="\n".join(lines)
with open(file, "wb") as f:
if has_utf8_bom:
f.write(bom_marker)
bytes_content = content.encode('utf-8')
f.write(bytes_content)
def is_cpp(file: FileFullPath) -> bool:
return file.endswith(".cpp")
def h_file_from_cpp(cpp_file: FileFullPath, all_h_files: List[FileFullPath]) -> Optional[FileFullPath]:
items = cpp_file.split("/")
file_with_parent_folder = "/".join(items[-2:])
basename_with_parent_folder = file_with_parent_folder.replace(".cpp", "")
found_h_files = list(filter(lambda f: basename_with_parent_folder + ".h" in f, all_h_files))
assert(len(found_h_files) <= 1)
if len(found_h_files) == 1:
return found_h_files[0]
else:
return None
def make_babylon_include_path(h_file: FileFullPath):
include = h_file
idx = include.find("include/babylon")
if idx < 0:
return None
include = include[idx + 8:]
return include
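# Usage sketch (the folder and extension below are illustrative assumptions):
# report the encoding traits of every header under a source tree.
if __name__ == "__main__":
    for h_file in files_with_extension("src", ".h"):
        print(h_file,
              "utf8-bom:", file_has_utf8_bom(h_file),
              "crlf:", file_has_windows_crlf(h_file))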
|
doc/examples/skeleton_behaviour.py | andrewbest-tri/py_trees | 201 | 12785316 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import py_trees
import random
class Foo(py_trees.behaviour.Behaviour):
def __init__(self, name):
"""
Minimal one-time initialisation. A good rule of thumb is
to only include the initialisation relevant for being able
to insert this behaviour in a tree for offline rendering to
dot graphs.
Other one-time initialisation requirements should be met via
the setup() method.
"""
super(Foo, self).__init__(name)
def setup(self):
"""
When is this called?
This function should be either manually called by your program
to setup this behaviour alone, or more commonly, via
:meth:`~py_trees.behaviour.Behaviour.setup_with_descendants`
or :meth:`~py_trees.trees.BehaviourTree.setup`, both of which
will iterate over this behaviour, it's children (it's children's
children ...) calling :meth:`~py_trees.behaviour.Behaviour.setup`
on each in turn.
If you have vital initialisation necessary to the success
execution of your behaviour, put a guard in your
:meth:`~py_trees.behaviour.Behaviour.initialise` method
to protect against entry without having been setup.
What to do here?
Delayed one-time initialisation that would otherwise interfere
with offline rendering of this behaviour in a tree to dot graph
or validation of the behaviour's configuration.
Good examples include:
- Hardware or driver initialisation
- Middleware initialisation (e.g. ROS pubs/subs/services)
- A parallel checking for a valid policy configuration after
children have been added or removed
"""
self.logger.debug(" %s [Foo::setup()]" % self.name)
def initialise(self):
"""
When is this called?
The first time your behaviour is ticked and anytime the
status is not RUNNING thereafter.
What to do here?
Any initialisation you need before putting your behaviour
to work.
"""
self.logger.debug(" %s [Foo::initialise()]" % self.name)
def update(self):
"""
When is this called?
Every time your behaviour is ticked.
What to do here?
- Triggering, checking, monitoring. Anything...but do not block!
- Set a feedback message
- return a py_trees.common.Status.[RUNNING, SUCCESS, FAILURE]
"""
self.logger.debug(" %s [Foo::update()]" % self.name)
ready_to_make_a_decision = random.choice([True, False])
decision = random.choice([True, False])
if not ready_to_make_a_decision:
return py_trees.common.Status.RUNNING
elif decision:
self.feedback_message = "We are not bar!"
return py_trees.common.Status.SUCCESS
else:
self.feedback_message = "Uh oh"
return py_trees.common.Status.FAILURE
def terminate(self, new_status):
"""
When is this called?
Whenever your behaviour switches to a non-running state.
- SUCCESS || FAILURE : your behaviour's work cycle has finished
- INVALID : a higher priority branch has interrupted, or shutting down
"""
self.logger.debug(" %s [Foo::terminate().terminate()][%s->%s]" % (self.name, self.status, new_status))
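# Minimal driver (a sketch, not part of the original skeleton): tick the
# behaviour a few times and watch the lifecycle methods fire in the debug log.
if __name__ == '__main__':
    py_trees.logging.level = py_trees.logging.Level.DEBUG
    foo = Foo(name="Foo")
    foo.setup()
    for _ in range(3):
        foo.tick_once()
        print("Status: {}".format(foo.status))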
|
dfirtrack_artifacts/admin.py | cclauss/dfirtrack | 273 | 12785349 | from django.contrib import admin
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
# Register your models here.
admin.site.register(Artifact)
admin.site.register(Artifactpriority)
admin.site.register(Artifactstatus)
admin.site.register(Artifacttype)
|
packages/python/pyfora/Connection.py | ufora/ufora | 571 | 12785350 | <reponame>ufora/ufora<gh_stars>100-1000
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Connection
Manages a connection to a pyfora cluster
"""
import pyfora.Exceptions as Exceptions
import pyfora.Executor as Executor
import pyfora.ObjectConverter as ObjectConverter
import pyfora.RemotePythonObject as RemotePythonObject
import pyfora.SocketIoJsonInterface as SocketIoJsonInterface
import pyfora.ModuleDirectoryStructure as ModuleDirectoryStructure
import threading
# We defer importing SubscribableWebObjects.py to support auto doc generation
# on readthedocs.org without running a full build.
#import pyfora.SubscribableWebObjects as SubscribableWebObjects
import pyfora
import os
class Connection(object):
"""A live connection to a pyfora cluster that can execute submitted Python code.
Note:
This is an internal implementation class that is primarily used by
:class:`~pyfora.Executor.Executor`.
Args:
webObjectFactory (SubscribableWebObjects.WebObjectFactory): A factory
for subscribable web objects.
converter (Optional ObjectConverter.ObjectConverter): an optional object
converter or None for the default converter.
"""
def __init__(self, webObjectFactory, converter):
self.objectConverter = converter
self.webObjectFactory = webObjectFactory
self.closed = False
self.viewOfEntireSystem = self.webObjectFactory.ViewOfEntireCumulusSystem({})
self.subscribeToMessages()
self.logMessageHandler = None
def subscribeToMessages(self):
def onSuccess(messages):
if self.closed:
return
self.pullAllMessages()
def onChanged(messages):
if self.closed:
return
self.pullAllMessages()
self.subscribeToMessages()
def onFailure(err):
pass
self.viewOfEntireSystem.subscribe_totalMessagesEver({
'onSuccess': onSuccess,
'onFailure': onFailure,
'onChanged': onChanged
})
def pullAllMessages(self):
processed = threading.Event()
def onSuccess(messages):
try:
for m in messages:
if self.logMessageHandler:
self.logMessageHandler(m)
else:
if not m['isDeveloperFacing']:
print m['message'],
finally:
processed.set()
def onFailure(err):
processed.set()
self.viewOfEntireSystem.clearAndReturnMostRecentMessages({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
return processed
def pullAllMessagesAndProcess(self):
self.pullAllMessages().wait()
def triggerS3DatasetExport(self,
valueAsString,
bucketname,
keyname,
onCompletedCallback):
if not isinstance(valueAsString, RemotePythonObject.ComputedRemotePythonObject):
onCompletedCallback(
Exceptions.PyforaError(
"The argument to triggerS3DatasetExport should be a ComputedRemotePythonObject"
)
)
return
import pyfora.SubscribableWebObjects as SubscribableWebObjects
if not isinstance(valueAsString.computedValue, SubscribableWebObjects.PyforaComputedValue):
onCompletedCallback(
Exceptions.PyforaError(
"The object handle in the object passed to triggerS3DatasetExport should be a ComputedValue"
)
)
return
#first, ensure that the value itself resolves
computedValue = valueAsString.computedValue
computedValueToCalculate = self.webObjectFactory.ComputedValueForMember(
{
'baseComputedValue': computedValue,
'memberName': '@pyfora_string_as_paged_vec_of_char'
})
def onFailure(err):
if not self.closed:
onCompletedCallback(Exceptions.PyforaError(err['message']))
def isFinishedChanged(isFinished):
if not self.closed and isFinished:
self.triggerS3DatasetExportOnFinishedCalculation(
computedValueToCalculate,
bucketname,
keyname,
onCompletedCallback
)
def subscribeToFinished(result):
computedValueToCalculate.subscribe_isFinished({
'onSuccess': isFinishedChanged,
'onFailure': onFailure,
'onChanged': isFinishedChanged
})
computedValueToCalculate.increaseRequestCount(
{},
{'onSuccess':subscribeToFinished, 'onFailure':onFailure}
)
def getClusterStatus(self, onCompletedCallback):
clusterStatus = self.webObjectFactory.PyforaCluster({})
def onSuccess(clusterStatus):
onCompletedCallback(clusterStatus)
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
clusterStatus.getClusterStatus({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
def triggerS3DatasetExportOnFinishedCalculation(self,
computedValue,
bucketname,
keyname,
onCompletedCallback):
def onSuccess(writeToS3TaskObject):
#we have received a WriteToS3Task computed graph location
self.subscribeToWriteToS3TaskResultAndCallCallback(writeToS3TaskObject,
onCompletedCallback)
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
computedValue.writeToS3(
{'bucketname': bucketname, 'keyname': keyname},
{'onSuccess': onSuccess, 'onFailure': onFailure}
)
def subscribeToWriteToS3TaskResultAndCallCallback(self,
writeToS3TaskObject,
onCompletedCallback):
def onSuccess(result):
if not self.closed and result is not None:
if result['success']:
onCompletedCallback(None)
else:
onCompletedCallback(Exceptions.PyforaError(result['message']))
def onFailure(err):
onCompletedCallback(Exceptions.PyforaError(err['message']))
writeToS3TaskObject.subscribe_successOrError({
'onSuccess': onSuccess,
'onChanged': onSuccess,
'onFailure': onFailure
})
def convertObject(self, objectId, binaryObjectRegistry, callback):
def wrapper(*args, **kwargs):
if not self.closed:
callback(*args, **kwargs)
self.objectConverter.convert(objectId, binaryObjectRegistry, wrapper)
def createComputation(self, fn, args, onCreatedCallback):
"""Create a computation representing fn(*args).
onCreatedCallback - called after defining the object.
called with an Exception.PyforaError if there is an error,
otherwise, called with a ComputedValue object representing the computation
"""
assert isinstance(fn, RemotePythonObject.RemotePythonObject)
assert all([isinstance(arg, RemotePythonObject.RemotePythonObject) for arg in args])
computedValue = self.webObjectFactory.PyforaComputedValue({
'argIds': (fn._pyforaComputedValueArg(),) + tuple(
arg._pyforaComputedValueArg() for arg in args
)
})
def onFailure(err):
if not self.closed:
onCreatedCallback(Exceptions.PyforaError(err))
def onSuccess(computationId):
if not self.closed:
onCreatedCallback(computedValue)
def onChanged(computationId):
pass
computedValue.subscribe_submittedComputationId({
'onSuccess': onSuccess,
'onFailure': onFailure,
'onChanged': onChanged
})
def prioritizeComputation(self,
computedValue,
onPrioritizedCallback,
onCompletedCallback,
onFailedCallback):
"""Prioritize a given computation.
computedValue - the callback result of creating a computation.
onPrioritizedCallback - called with either an error or None on success of the prioritization
onCompletedCallback - called with the "jsonStatus" if the computation finishes with a value
onFailedCallback - called with a pyfora exception if the computation fails
or throws an exception for some reason
"""
def onFailure(err):
if not self.closed:
onPrioritizedCallback(Exceptions.PyforaError(err))
def onSuccess(result):
if not self.closed:
onPrioritizedCallback(None)
self._subscribeToComputationStatus(computedValue,
onCompletedCallback,
onFailedCallback)
computedValue.increaseRequestCount({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
def triggerCompilationOnComputation(self, computedValue, onCompleted):
"""Trigger compilation of the code underlying a computation.
This is exclusively used for testing purposes, as it only works when
there is a single in-process cumulus node.
Returns True on success, False on failure.
"""
def onFailure(err):
onCompleted()
def onSuccess(result):
onCompleted()
computedValue.triggerCompilation({}, {
'onSuccess': onSuccess,
'onFailure': onFailure
})
@staticmethod
def cancelComputation(computedValue):
"""Cancel a computation."""
def completed(_):
pass
computedValue.cancel({}, {
'onSuccess': completed,
'onFailure': completed
})
def expandComputedValueToDictOfAssignedVarsToProxyValues(self, computedValue, onExpanded):
"""Given a computedValue that should represent a dictionary,
expand it to a dictionary of ComputedValues.
If it's not a dictionary, or something else happens, this will resolve to a PyforaError.
"""
def onResult(result):
if result is not None and not self.closed:
onExpanded(result)
def onFailure(result):
if isinstance(result, Exception):
onExpanded(result)
else:
onExpanded(
Exceptions.PyforaError(
"Unknown error translating to dictionary of proxies: %s" + str(result)
)
)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
computedValue.subscribe_pyforaDictToAssignedVarsToComputedValues({
'onSuccess': onResult,
'onFailure': onFailure,
'onChanged': onResult
})
def expandComputedValueToTupleOfProxies(self, computedValue, onExpanded):
def onResult(result):
if result is not None and not self.closed:
onExpanded(result)
def onFailure(result):
if isinstance(result, Exception):
onExpanded(result)
else:
onExpanded(
Exceptions.PyforaError(
"Unknown error translating to dictionary of proxies: %s" + str(result)
)
)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
computedValue.subscribe_pyforaTupleToTupleOfComputedValues({
'onSuccess': onResult,
'onFailure': onFailure,
'onChanged': onResult
})
def _subscribeToComputationStatus(self, computedValue, onCompletedCallback, onFailedCallback):
def statusChanged(jsonStatus):
if not self.closed:
if jsonStatus is not None:
if jsonStatus['status'] == 'failure':
onFailedCallback(Exceptions.PyforaError(jsonStatus['message']))
else:
onCompletedCallback(jsonStatus)
def onFailure(err):
if not self.closed:
onFailedCallback(Exceptions.PyforaError(err))
computedValue.subscribe_jsonStatusRepresentation({
'onSuccess': statusChanged,
'onFailure': onFailure,
'onChanged': statusChanged
})
def downloadComputation(self, computedValue, onResultCallback, maxBytecount=None):
"""download the result of a computation as json.
onResultCallback - called with a PyforaError if there is a problem, or
the json representation of the computation's result or exception otherwise.
"""
def onFailure(err):
if not self.closed:
onResultCallback(Exceptions.PyforaError(err['message']))
def resultChanged(jsonStatus):
if not self.closed and jsonStatus is not None:
onResultCallback(jsonStatus)
computedValue.increaseRequestCount(
{},
{'onSuccess': lambda *args: None, 'onFailure': lambda *args: None}
)
def resultStatusChanged(populated):
if not self.closed and populated:
resultComputer.getResultAsJson({}, {
'onSuccess': resultChanged,
'onFailure': onFailure
})
resultComputer = self.webObjectFactory.PyforaResultAsJson(
{'computedValue': computedValue, 'maxBytecount': maxBytecount}
)
resultComputer.subscribe_resultIsPopulated({
'onSuccess': resultStatusChanged,
'onFailure': onFailure,
'onChanged': resultStatusChanged
})
def close(self):
self.closed = True
self.webObjectFactory.getJsonInterface().close()
def createObjectConverter(webObjectFactory):
path = os.path.join(os.path.abspath(os.path.split(pyfora.__file__)[0]), "fora")
moduleTree = ModuleDirectoryStructure.ModuleDirectoryStructure.read(path, "purePython", "fora")
return ObjectConverter.ObjectConverter(webObjectFactory, moduleTree.toJson())
def connect(url, timeout=30.0):
"""Opens a connection to a pyfora cluster
Args:
url (str): The HTTP URL of the cluster's manager (e.g. ``http://192.168.1.200:30000``)
timeout (Optional float): A timeout for the operation in seconds, or None
to wait indefinitely.
Returns:
An :class:`~pyfora.Executor.Executor` that can be used to submit work
to the cluster.
"""
socketIoInterface = SocketIoJsonInterface.SocketIoJsonInterface(
url,
'/subscribableWebObjects'
)
socketIoInterface.connect(timeout=timeout)
return connectGivenSocketIo(socketIoInterface)
def connectGivenSocketIo(socketIoInterface):
import pyfora.SubscribableWebObjects as SubscribableWebObjects
webObjectFactory = SubscribableWebObjects.WebObjectFactory(socketIoInterface)
return Executor.Executor(Connection(webObjectFactory, createObjectConverter(webObjectFactory)))
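# Usage sketch (URL is illustrative; see the connect() docstring above):
#   executor = connect('http://192.168.1.200:30000', timeout=30.0)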
|
contrib/report_builders/json_report_builder.py | berndonline/flan | 3,711 | 12785384 | import json
from typing import Any, Dict, List
from contrib.descriptions import VulnDescriptionProvider
from contrib.internal_types import ScanResult
from contrib.report_builders import ReportBuilder
class JsonReportBuilder(ReportBuilder):
def __init__(self, description_provider: VulnDescriptionProvider):
self.description_provider = description_provider
self._buffer = {'ips': [], 'vulnerable': {}, 'not_vulnerable': {}}
def init_report(self, start_date: str, nmap_command: str):
self._buffer['start_date'] = start_date
self._buffer['nmap_command'] = nmap_command
def build(self) -> Any:
return json.dumps(self._buffer)
def add_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['vulnerable'][app_name] = {
'vulnerabilities': [],
'locations': self._serialize_locations(result.locations)
}
for v in result.vulns:
data = v.to_dict()
description = self.description_provider.get_description(v.name, v.vuln_type)
data['description'], data['url'] = description.text, description.url
self._buffer['vulnerable'][app_name]['vulnerabilities'].append(data)
def add_non_vulnerable_services(self, scan_results: Dict[str, ScanResult]):
for app_name, result in scan_results.items():
self._buffer['not_vulnerable'][app_name] = {
'locations': self._serialize_locations(result.locations)
}
def add_ip_address(self, ip: str):
self._buffer['ips'].append(ip)
@staticmethod
def _serialize_locations(locations: Dict[str, List[str]]):
return {loc: [int(port) for port in ports] for loc, ports in locations.items()}
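# Usage sketch (the description provider below is a stand-in for any
# VulnDescriptionProvider implementation):
#   builder = JsonReportBuilder(description_provider)
#   builder.init_report('2020-01-01', 'nmap -sV 10.0.0.0/24')
#   builder.add_ip_address('10.0.0.1')
#   print(builder.build())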
|
ros_compatibility/src/ros_compatibility/exceptions.py | SebastianHuch/ros-bridge | 314 | 12785404 | #!/usr/bin/env python
#
# Copyright (c) 2021 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
from ros_compatibility.core import get_ros_version
ROS_VERSION = get_ros_version()
if ROS_VERSION == 1:
import rospy
class ROSException(rospy.ROSException):
pass
class ROSInterruptException(rospy.ROSInterruptException):
pass
class ServiceException(rospy.ServiceException):
pass
elif ROS_VERSION == 2:
import rclpy.exceptions
class ROSException(Exception):
pass
class ROSInterruptException(rclpy.exceptions.ROSInterruptException):
pass
class ServiceException(Exception):
pass
|
tests/unit/cartography/intel/gsuite/test_api.py | sckevmit/cartography | 2,322 | 12785440 | from unittest import mock
from unittest.mock import patch
from cartography.intel.gsuite import api
def test_get_all_users():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
user1 = {'primaryEmail': '<EMAIL>'}
user2 = {'primaryEmail': '<EMAIL>'}
user3 = {'primaryEmail': '<EMAIL>'}
client.users().list.return_value = raw_request_1
client.users().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'users': [user1, user2]}
raw_request_2.execute.return_value = {'users': [user3]}
result = api.get_all_users(client)
emails = [user['primaryEmail'] for response_object in result for user in response_object['users']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
def test_get_all_groups():
client = mock.MagicMock()
raw_request_1 = mock.MagicMock()
raw_request_2 = mock.MagicMock()
group1 = {'email': '<EMAIL>'}
group2 = {'email': '<EMAIL>'}
group3 = {'email': '<EMAIL>'}
client.groups().list.return_value = raw_request_1
client.groups().list_next.side_effect = [raw_request_2, None]
raw_request_1.execute.return_value = {'groups': [group1, group2]}
raw_request_2.execute.return_value = {'groups': [group3]}
result = api.get_all_groups(client)
emails = [group['email'] for response_object in result for group in response_object['groups']]
expected = [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
]
assert sorted(emails) == sorted(expected)
@patch('cartography.intel.gsuite.api.cleanup_gsuite_users')
@patch('cartography.intel.gsuite.api.load_gsuite_users')
@patch(
'cartography.intel.gsuite.api.get_all_users', return_value=[
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
],
)
def test_sync_gsuite_users(get_all_users, load_gsuite_users, cleanup_gsuite_users):
client = mock.MagicMock()
gsuite_update_tag = 1
session = mock.MagicMock()
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_users(session, client, gsuite_update_tag, common_job_param)
users = api.transform_users(get_all_users())
load_gsuite_users.assert_called_with(
session, users, gsuite_update_tag,
)
cleanup_gsuite_users.assert_called_once()
@patch('cartography.intel.gsuite.api.sync_gsuite_members')
@patch('cartography.intel.gsuite.api.cleanup_gsuite_groups')
@patch('cartography.intel.gsuite.api.load_gsuite_groups')
@patch(
'cartography.intel.gsuite.api.get_all_groups', return_value=[
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
],
)
def test_sync_gsuite_groups(all_groups, load_gsuite_groups, cleanup_gsuite_groups, sync_gsuite_members):
admin_client = mock.MagicMock()
session = mock.MagicMock()
gsuite_update_tag = 1
common_job_param = {
"UPDATE_TAG": gsuite_update_tag,
}
api.sync_gsuite_groups(session, admin_client, gsuite_update_tag, common_job_param)
groups = api.transform_groups(all_groups())
load_gsuite_groups.assert_called_with(session, groups, gsuite_update_tag)
cleanup_gsuite_groups.assert_called_once()
sync_gsuite_members.assert_called_with(groups, session, admin_client, gsuite_update_tag)
def test_load_gsuite_groups():
ingestion_qry = """
UNWIND {GroupData} as group
MERGE (g:GSuiteGroup{id: group.id})
ON CREATE SET
g.firstseen = {UpdateTag}
ON MATCH SET
g.group_id = group.id,
g.admin_created = group.adminCreated,
g.description = group.description,
g.direct_members_count = group.directMembersCount,
g.email = group.email,
g.etag = group.etag,
g.kind = group.kind,
g.name = group.name,
g.lastupdated = {UpdateTag}
"""
groups = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_groups(session, groups, update_tag)
session.run.assert_called_with(
ingestion_qry,
GroupData=groups,
UpdateTag=update_tag,
)
def test_load_gsuite_users():
ingestion_qry = """
UNWIND {UserData} as user
MERGE (u:GSuiteUser{id: user.id})
ON CREATE SET
u.firstseen = {UpdateTag}
ON MATCH SET
u.user_id = user.id,
u.agreed_to_terms = user.agreedToTerms,
u.archived = user.archived,
u.change_password_at_next_login = user.changePasswordAtNextLogin,
u.creation_time = user.creationTime,
u.customer_id = user.customerId,
u.etag = user.etag,
u.include_in_global_address_list = user.includeInGlobalAddressList,
u.ip_whitelisted = user.ipWhitelisted,
u.is_admin = user.isAdmin,
u.is_delegated_admin = user.isDelegatedAdmin,
u.is_enforced_in_2_sv = user.isEnforcedIn2Sv,
u.is_enrolled_in_2_sv = user.isEnrolledIn2Sv,
u.is_mailbox_setup = user.isMailboxSetup,
u.kind = user.kind,
u.last_login_time = user.lastLoginTime,
u.name = user.name.fullName,
u.family_name = user.name.familyName,
u.given_name = user.name.givenName,
u.org_unit_path = user.orgUnitPath,
u.primary_email = user.primaryEmail,
u.email = user.primaryEmail,
u.suspended = user.suspended,
u.thumbnail_photo_etag = user.thumbnailPhotoEtag,
u.thumbnail_photo_url = user.thumbnailPhotoUrl,
u.lastupdated = {UpdateTag}
"""
users = []
update_tag = 1
session = mock.MagicMock()
api.load_gsuite_users(session, users, update_tag)
session.run.assert_called_with(
ingestion_qry,
UserData=users,
UpdateTag=update_tag,
)
def test_transform_groups():
param = [
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
{'groups': [{'email': '<EMAIL>'}, {'email': '<EMAIL>'}]},
]
expected = [
        {'email': '<EMAIL>'}, {'email': '<EMAIL>'},
{'email': '<EMAIL>'}, {'email': '<EMAIL>'},
]
result = api.transform_groups(param)
assert result == expected
def test_transform_users():
param = [
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
{'users': [{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'}]},
]
expected = [
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
{'primaryEmail': '<EMAIL>'}, {'primaryEmail': '<EMAIL>'},
]
result = api.transform_users(param)
assert result == expected
|
mseg_semantic/utils/normalization_utils.py | weblucas/mseg-semantic | 391 | 12785453 | <reponame>weblucas/mseg-semantic<gh_stars>100-1000
#!/usr/bin/python3
import numpy as np
import torch
from typing import Optional, Tuple
def get_imagenet_mean_std() -> Tuple[Tuple[float,float,float], Tuple[float,float,float]]:
""" See use here in Pytorch ImageNet script:
https://github.com/pytorch/examples/blob/master/imagenet/main.py#L197
Returns:
- mean: Tuple[float,float,float],
- std: Tuple[float,float,float] = None
"""
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
return mean, std
def normalize_img(input: torch.Tensor,
                  mean: Tuple[float,float,float],
                  std: Optional[Tuple[float,float,float]] = None) -> None:
    """ Normalize a Torch tensor in place (the argument is modified by reference).
    Args:
    -   input: Torch tensor of shape (3,M,N); must be in this channel-first
        order and of float dtype (necessary for the in-place ops).
    -   mean: mean values for each RGB channel
    -   std: standard deviation values for each RGB channel
    Returns:
    -   None
    """
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
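if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): normalize a
    # random 3-channel image tensor in place with the ImageNet statistics.
    img = torch.rand(3, 120, 208) * 255
    mean, std = get_imagenet_mean_std()
    normalize_img(img, mean, std)
    print(img.mean(dim=(1, 2)))  # per-channel means end up roughly centered near zero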
|
algorithms/dynamic_programming/longest_consecutive_subsequence.py | ruler30cm/python-ds | 1,723 | 12785464 | <filename>algorithms/dynamic_programming/longest_consecutive_subsequence.py
"""
Given an array of integers, find the length of the longest sub-sequence
such that elements in the subsequence are consecutive integers, the
consecutive numbers can be in any order.
The idea is to first store all the elements in a set. Then, while iterating
over the array, we check two things:
1. a number x can start a sequence only if x-1 is not present in the set. If
that is the case, loop upward and count how many consecutive elements
x, x+1, ..., x+j are in the set
2. if x-1 is in the set, do nothing, as x is not a starting element and must
already have been counted as part of a different sequence
"""
def find_seq(arr, n):
s = set()
for num in arr:
s.add(num)
ans = 0
elements = []
for i in range(n):
temp = []
if arr[i] - 1 not in s:
j = arr[i]
while j in s:
temp.append(j)
j += 1
if j - arr[i] > ans:
ans = j - arr[i]
elements = temp.copy()
return ans, elements
arr = [36, 41, 56, 35, 44, 33, 34, 92, 43, 32, 42]
ans, elements = find_seq(arr, len(arr))
print('Length - ', ans)
print('Elements - ', elements)
|
tracardi/service/wf/domain/error_debug_info.py | bytepl/tracardi | 153 | 12785473 | <filename>tracardi/service/wf/domain/error_debug_info.py
from pydantic import BaseModel
class ErrorDebugInfo(BaseModel):
msg: str
line: int
file: str
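# Minimal usage sketch (field values are illustrative):
# ErrorDebugInfo(msg="division by zero", line=42, file="plugin.py")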
|
tests/test_context.py | agronholm/asphalt | 226 | 12785537 | <reponame>agronholm/asphalt<filename>tests/test_context.py<gh_stars>100-1000
from __future__ import annotations
import asyncio
import sys
from collections.abc import Callable
from concurrent.futures import Executor, ThreadPoolExecutor
from inspect import isawaitable
from itertools import count
from threading import Thread, current_thread
from typing import AsyncGenerator, AsyncIterator, Dict, NoReturn, Optional, Tuple, Union
from unittest.mock import patch
import pytest
import pytest_asyncio
from async_generator import yield_
from asphalt.core import (
Context,
Dependency,
NoCurrentContext,
ResourceConflict,
ResourceNotFound,
TeardownError,
callable_name,
context_teardown,
current_context,
executor,
get_resource,
inject,
resource,
)
from asphalt.core.context import ResourceContainer, require_resource
@pytest.fixture
def context() -> Context:
return Context()
@pytest_asyncio.fixture
async def special_executor(context: Context) -> AsyncIterator[ThreadPoolExecutor]:
executor = ThreadPoolExecutor(1)
context.add_resource(executor, "special", types=[Executor])
yield executor
executor.shutdown()
class TestResourceContainer:
@pytest.mark.parametrize("thread", [False, True], ids=["eventloop", "worker"])
@pytest.mark.parametrize(
"context_attr", [None, "attrname"], ids=["no_attr", "has_attr"]
)
@pytest.mark.asyncio
async def test_generate_value(self, thread: bool, context_attr: str | None) -> None:
container = ResourceContainer(
lambda ctx: "foo", (str,), "default", context_attr, True
)
context = Context()
if thread:
value = await context.call_in_executor(container.generate_value, context)
else:
value = container.generate_value(context)
assert value == "foo"
assert context.get_resource(str) == "foo"
if context_attr:
assert getattr(context, context_attr) == "foo"
def test_repr(self) -> None:
container = ResourceContainer("foo", (str,), "default", "attrname", False)
assert repr(container) == (
"ResourceContainer(value='foo', types=[str], name='default', "
"context_attr='attrname')"
)
def test_repr_factory(self) -> None:
container = ResourceContainer(
lambda ctx: "foo", (str,), "default", "attrname", True
)
assert repr(container) == (
"ResourceContainer(factory=test_context.TestResourceContainer"
".test_repr_factory.<locals>.<lambda>, types=[str], name='default', "
"context_attr='attrname')"
)
class TestContext:
@pytest.mark.asyncio
async def test_parent(self) -> None:
"""Test that the parent property points to the parent context instance, if any."""
async with Context() as parent:
async with Context() as child:
assert parent.parent is None
assert child.parent is parent
@pytest.mark.parametrize(
"exception", [None, Exception("foo")], ids=["noexception", "exception"]
)
@pytest.mark.asyncio
async def test_close(self, context: Context, exception: Exception | None) -> None:
"""
Test that teardown callbacks are called in reverse order when a context is closed.
"""
def callback(exception=None):
called_functions.append((callback, exception))
async def async_callback(exception=None):
called_functions.append((async_callback, exception))
called_functions: list[tuple[Callable, BaseException | None]] = []
context.add_teardown_callback(callback, pass_exception=True)
context.add_teardown_callback(async_callback, pass_exception=True)
await context.close(exception)
assert called_functions == [(async_callback, exception), (callback, exception)]
@pytest.mark.asyncio
async def test_close_while_running_teardown(self, context: Context) -> None:
"""
Test that trying to close the context from a teardown callback raises a
RuntimeError.
"""
async def try_close_context() -> None:
with pytest.raises(RuntimeError, match="this context is already closing"):
await context.close()
context.add_teardown_callback(try_close_context)
await context.close()
@pytest.mark.asyncio
async def test_teardown_callback_exception(self, context: Context) -> None:
"""
Test that all callbacks are called even when some teardown callbacks raise
exceptions, and that a TeardownError is raised in such a case, containing the
exception objects.
"""
def callback1() -> None:
items.append(1)
def callback2() -> NoReturn:
raise Exception("foo")
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
context.add_teardown_callback(callback1)
context.add_teardown_callback(callback2)
items: list[int] = []
with pytest.raises(TeardownError) as exc:
await context.close()
assert "foo" in str(exc.value)
assert items == [1, 1]
assert len(exc.value.exceptions) == 2
@pytest.mark.asyncio
async def test_close_closed(self, context: Context) -> None:
"""Test that closing an already closed context raises a RuntimeError."""
assert not context.closed
await context.close()
assert context.closed
with pytest.raises(RuntimeError) as exc:
await context.close()
exc.match("this context has already been closed")
def test_contextmanager_exception(self, context, event_loop):
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception("foo")
with patch.object(context, "close", return_value=close_future):
with pytest.raises(Exception) as exc, pytest.deprecated_call():
with context:
raise exception
# close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.asyncio
async def test_async_contextmanager_exception(self, event_loop, context):
"""Test that "async with context:" calls close() with the exception raised in the block."""
close_future = event_loop.create_future()
close_future.set_result(None)
exception = Exception("foo")
with patch.object(context, "close", return_value=close_future) as close:
with pytest.raises(Exception) as exc:
async with context:
raise exception
close.assert_called_once_with(exception)
assert exc.value is exception
@pytest.mark.parametrize("types", [int, (int,), ()], ids=["type", "tuple", "empty"])
@pytest.mark.asyncio
async def test_add_resource(self, context, event_loop, types):
"""Test that a resource is properly added in the context and listeners are notified."""
event_loop.call_soon(context.add_resource, 6, "foo", None, types)
event = await context.resource_added.wait_event()
assert event.resource_types == (int,)
assert event.resource_name == "foo"
assert not event.is_factory
assert context.get_resource(int, "foo") == 6
@pytest.mark.asyncio
async def test_add_resource_name_conflict(self, context: Context) -> None:
"""Test that adding a resource won't replace any existing resources."""
context.add_resource(5, "foo")
with pytest.raises(ResourceConflict) as exc:
context.add_resource(4, "foo")
exc.match(
"this context already contains a resource of type int using the name 'foo'"
)
@pytest.mark.asyncio
async def test_add_resource_none_value(self, context: Context) -> None:
"""Test that None is not accepted as a resource value."""
exc = pytest.raises(ValueError, context.add_resource, None)
exc.match('"value" must not be None')
@pytest.mark.asyncio
async def test_add_resource_context_attr(self, context: Context) -> None:
"""Test that when resources are added, they are also set as properties of the context."""
with pytest.deprecated_call():
context.add_resource(1, context_attr="foo")
assert context.foo == 1
def test_add_resource_context_attr_conflict(self, context: Context) -> None:
"""
Test that the context won't allow adding a resource with an attribute name that conflicts
with an existing attribute.
"""
context.a = 2
with pytest.raises(ResourceConflict) as exc, pytest.deprecated_call():
context.add_resource(2, context_attr="a")
exc.match("this context already has an attribute 'a'")
assert context.get_resource(int) is None
@pytest.mark.asyncio
async def test_add_resource_type_conflict(self, context: Context) -> None:
context.add_resource(5)
with pytest.raises(ResourceConflict) as exc:
context.add_resource(6)
exc.match(
"this context already contains a resource of type int using the name 'default'"
)
@pytest.mark.parametrize(
"name", ["a.b", "a:b", "a b"], ids=["dot", "colon", "space"]
)
@pytest.mark.asyncio
async def test_add_resource_bad_name(self, context, name):
with pytest.raises(ValueError) as exc:
context.add_resource(1, name)
exc.match(
'"name" must be a nonempty string consisting only of alphanumeric characters '
"and underscores"
)
@pytest.mark.asyncio
async def test_add_resource_parametrized_generic_type(
self, context: Context
) -> None:
resource = {"a": 1}
resource_type = Dict[str, int]
context.add_resource(resource, types=[resource_type])
assert context.require_resource(resource_type) is resource
assert context.get_resource(resource_type) is resource
assert await context.request_resource(resource_type) is resource
assert context.get_resource(Dict) is None
assert context.get_resource(dict) is None
@pytest.mark.asyncio
async def test_add_resource_factory(self, context: Context) -> None:
"""Test that resources factory callbacks are only called once for each context."""
def factory(ctx):
assert ctx is context
return next(counter)
counter = count(1)
with pytest.deprecated_call():
context.add_resource_factory(factory, int, context_attr="foo")
assert context.foo == 1
assert context.foo == 1
assert context.__dict__["foo"] == 1
@pytest.mark.asyncio
async def test_add_resource_factory_parametrized_generic_type(
self, context: Context
) -> None:
resource = {"a": 1}
resource_type = Dict[str, int]
context.add_resource_factory(lambda ctx: resource, types=[resource_type])
assert context.require_resource(resource_type) is resource
assert context.get_resource(resource_type) is resource
assert await context.request_resource(resource_type) is resource
assert context.get_resource(Dict) is None
assert context.get_resource(dict) is None
@pytest.mark.parametrize(
"name", ["a.b", "a:b", "a b"], ids=["dot", "colon", "space"]
)
@pytest.mark.asyncio
async def test_add_resource_factory_bad_name(self, context, name):
with pytest.raises(ValueError) as exc:
context.add_resource_factory(lambda ctx: 1, int, name)
exc.match(
'"name" must be a nonempty string consisting only of alphanumeric characters '
"and underscores"
)
@pytest.mark.asyncio
async def test_add_resource_factory_coroutine_callback(
self, context: Context
) -> None:
async def factory(ctx):
return 1
with pytest.raises(TypeError) as exc:
context.add_resource_factory(factory, int)
exc.match('"factory_callback" must not be a coroutine function')
@pytest.mark.asyncio
async def test_add_resource_factory_empty_types(self, context: Context) -> None:
with pytest.raises(ValueError) as exc:
context.add_resource_factory(lambda ctx: 1, ())
exc.match("no resource types were specified")
@pytest.mark.asyncio
async def test_add_resource_factory_context_attr_conflict(
self, context: Context
) -> None:
with pytest.deprecated_call():
context.add_resource_factory(lambda ctx: None, str, context_attr="foo")
with pytest.raises(ResourceConflict) as exc, pytest.deprecated_call():
await context.add_resource_factory(
lambda ctx: None, str, context_attr="foo"
)
exc.match(
"this context already contains a resource factory for the context attribute 'foo'"
)
@pytest.mark.asyncio
async def test_add_resource_factory_type_conflict(self, context: Context) -> None:
context.add_resource_factory(lambda ctx: None, (str, int))
with pytest.raises(ResourceConflict) as exc:
await context.add_resource_factory(lambda ctx: None, int)
exc.match("this context already contains a resource factory for the type int")
@pytest.mark.asyncio
async def test_add_resource_factory_no_inherit(self, context: Context) -> None:
"""
Test that a subcontext gets its own version of a factory-generated resource even if a
parent context has one already.
"""
with pytest.deprecated_call():
context.add_resource_factory(id, int, context_attr="foo")
async with context, Context() as subcontext:
assert context.foo == id(context)
assert subcontext.foo == id(subcontext)
@pytest.mark.asyncio
async def test_add_resource_return_type_single(self, context: Context) -> None:
def factory(ctx: Context) -> str:
return "foo"
async with context:
context.add_resource_factory(factory)
assert context.require_resource(str) == "foo"
@pytest.mark.asyncio
async def test_add_resource_return_type_union(self, context: Context) -> None:
def factory(ctx: Context) -> Union[int, float]:
return 5
async with context:
context.add_resource_factory(factory)
assert context.require_resource(int) == 5
assert context.require_resource(float) == 5
@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10+")
@pytest.mark.asyncio
async def test_add_resource_return_type_uniontype(self, context: Context) -> None:
def factory(ctx: Context) -> int | float:
return 5
async with context:
context.add_resource_factory(factory)
assert context.require_resource(int) == 5
assert context.require_resource(float) == 5
@pytest.mark.asyncio
async def test_add_resource_return_type_optional(self, context: Context) -> None:
def factory(ctx: Context) -> Optional[str]:
return "foo"
async with context:
context.add_resource_factory(factory)
assert context.require_resource(str) == "foo"
@pytest.mark.asyncio
async def test_getattr_attribute_error(self, context: Context) -> None:
async with context, Context() as child_context:
pytest.raises(AttributeError, getattr, child_context, "foo").match(
"no such context variable: foo"
)
@pytest.mark.asyncio
async def test_getattr_parent(self, context: Context) -> None:
"""
Test that accessing a nonexistent attribute on a context retrieves the value from parent.
"""
async with context, Context() as child_context:
context.a = 2
assert child_context.a == 2
@pytest.mark.asyncio
async def test_get_resources(self, context: Context) -> None:
context.add_resource(9, "foo")
context.add_resource_factory(lambda ctx: len(ctx.context_chain), int, "bar")
context.require_resource(int, "bar")
async with context, Context() as subctx:
subctx.add_resource(4, "foo")
assert subctx.get_resources(int) == {1, 4}
@pytest.mark.asyncio
async def test_require_resource(self, context: Context) -> None:
context.add_resource(1)
assert context.require_resource(int) == 1
def test_require_resource_not_found(self, context: Context) -> None:
"""Test that ResourceNotFound is raised when a required resource is not found."""
exc = pytest.raises(ResourceNotFound, context.require_resource, int, "foo")
exc.match("no matching resource was found for type=int name='foo'")
assert exc.value.type == int
assert exc.value.name == "foo"
@pytest.mark.asyncio
async def test_request_resource_parent_add(self, context, event_loop):
"""
Test that adding a resource to the parent context will satisfy a resource request in a
child context.
"""
async with context, Context() as child_context:
task = event_loop.create_task(child_context.request_resource(int))
event_loop.call_soon(context.add_resource, 6)
resource = await task
assert resource == 6
@pytest.mark.asyncio
async def test_request_resource_factory_context_attr(
self, context: Context
) -> None:
"""Test that requesting a factory-generated resource also sets the context variable."""
with pytest.deprecated_call():
context.add_resource_factory(lambda ctx: 6, int, context_attr="foo")
await context.request_resource(int)
assert context.__dict__["foo"] == 6
@pytest.mark.asyncio
async def test_call_async_plain(self, context: Context) -> None:
def runs_in_event_loop(worker_thread: Thread, x: int, y: int) -> int:
assert current_thread() is not worker_thread
return x + y
def runs_in_worker_thread() -> int:
worker_thread = current_thread()
return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)
assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_coroutine(self, context: Context) -> None:
async def runs_in_event_loop(worker_thread, x, y):
assert current_thread() is not worker_thread
await asyncio.sleep(0.1)
return x + y
def runs_in_worker_thread() -> int:
worker_thread = current_thread()
return context.call_async(runs_in_event_loop, worker_thread, 1, y=2)
assert await context.call_in_executor(runs_in_worker_thread) == 3
@pytest.mark.asyncio
async def test_call_async_exception(self, context: Context) -> None:
def runs_in_event_loop() -> NoReturn:
raise ValueError("foo")
with pytest.raises(ValueError) as exc:
await context.call_in_executor(context.call_async, runs_in_event_loop)
assert exc.match("foo")
@pytest.mark.asyncio
async def test_call_in_executor(self, context: Context) -> None:
"""Test that call_in_executor actually runs the target in a worker thread."""
worker_thread = await context.call_in_executor(current_thread)
assert worker_thread is not current_thread()
@pytest.mark.parametrize(
"use_resource_name", [True, False], ids=["direct", "resource"]
)
@pytest.mark.asyncio
async def test_call_in_executor_explicit(self, context, use_resource_name):
executor = ThreadPoolExecutor(1)
context.add_resource(executor, types=[Executor])
context.add_teardown_callback(executor.shutdown)
executor_arg = "default" if use_resource_name else executor
worker_thread = await context.call_in_executor(
current_thread, executor=executor_arg
)
assert worker_thread is not current_thread()
@pytest.mark.asyncio
async def test_call_in_executor_context_preserved(self, context: Context) -> None:
"""
Test that call_in_executor runs the callable in a copy of the current (PEP 567)
context.
"""
async with Context() as ctx:
assert await context.call_in_executor(current_context) is ctx
@pytest.mark.asyncio
async def test_threadpool(self, context: Context) -> None:
event_loop_thread = current_thread()
async with context.threadpool():
assert current_thread() is not event_loop_thread
@pytest.mark.asyncio
async def test_threadpool_named_executor(
self, context: Context, special_executor: Executor
) -> None:
special_executor_thread = special_executor.submit(current_thread).result()
async with context.threadpool("special"):
assert current_thread() is special_executor_thread
class TestExecutor:
@pytest.mark.asyncio
async def test_no_arguments(self, context: Context) -> None:
@executor
def runs_in_default_worker() -> None:
assert current_thread() is not event_loop_thread
current_context()
event_loop_thread = current_thread()
async with context:
await runs_in_default_worker()
@pytest.mark.asyncio
async def test_named_executor(
self, context: Context, special_executor: Executor
) -> None:
@executor("special")
def runs_in_default_worker(ctx: Context) -> None:
assert current_thread() is special_executor_thread
assert current_context() is ctx
special_executor_thread = special_executor.submit(current_thread).result()
async with context:
await runs_in_default_worker(context)
@pytest.mark.asyncio
async def test_executor_missing_context(self, context: Context):
@executor("special")
def runs_in_default_worker() -> None:
current_context()
with pytest.raises(RuntimeError) as exc:
async with context:
await runs_in_default_worker()
exc.match(
r"the first positional argument to %s\(\) has to be a Context instance"
% callable_name(runs_in_default_worker)
)
class TestContextTeardown:
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_function(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_method(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
class SomeComponent:
@context_teardown
async def start(self, ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await SomeComponent().start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
def test_plain_function(self) -> None:
def start(ctx) -> None:
pass
pytest.raises(TypeError, context_teardown, start).match(
" must be an async generator function"
)
@pytest.mark.asyncio
async def test_bad_args(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
pass
with pytest.raises(RuntimeError) as exc:
await start(None)
exc.match(
r"the first positional argument to %s\(\) has to be a Context instance"
% callable_name(start)
)
@pytest.mark.asyncio
async def test_exception(self) -> None:
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
raise Exception("dummy error")
yield
context = Context()
with pytest.raises(Exception) as exc_info:
await start(context)
exc_info.match("dummy error")
@pytest.mark.asyncio
async def test_missing_yield(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
pass
await start(Context())
@pytest.mark.asyncio
async def test_py35_generator(self) -> None:
with pytest.deprecated_call():
@context_teardown
async def start(ctx: Context) -> None:
await yield_()
await start(Context())
@pytest.mark.parametrize(
"resource_func",
[
pytest.param(Context.get_resource, id="get_resource"),
pytest.param(Context.require_resource, id="require_resource"),
pytest.param(Context.request_resource, id="request_resource"),
],
)
@pytest.mark.asyncio
async def test_get_resource_at_teardown(self, resource_func) -> None:
resource: str
async def teardown_callback() -> None:
nonlocal resource
resource = resource_func(ctx, str)
if isawaitable(resource):
resource = await resource
async with Context() as ctx:
ctx.add_resource("blah")
ctx.add_teardown_callback(teardown_callback)
assert resource == "blah"
@pytest.mark.parametrize(
"resource_func",
[
pytest.param(Context.get_resource, id="get_resource"),
pytest.param(Context.require_resource, id="require_resource"),
pytest.param(Context.request_resource, id="request_resource"),
],
)
@pytest.mark.asyncio
async def test_generate_resource_at_teardown(self, resource_func) -> None:
resource: str
async def teardown_callback() -> None:
nonlocal resource
resource = resource_func(ctx, str)
if isawaitable(resource):
resource = await resource
async with Context() as ctx:
ctx.add_resource_factory(lambda context: "blah", [str])
ctx.add_teardown_callback(teardown_callback)
assert resource == "blah"
class TestContextFinisher:
@pytest.mark.parametrize(
"expected_exc", [None, Exception("foo")], ids=["no_exception", "exception"]
)
@pytest.mark.asyncio
async def test_context_teardown(self, expected_exc: Exception | None) -> None:
phase = received_exception = None
@context_teardown
async def start(ctx: Context) -> AsyncIterator[None]:
nonlocal phase, received_exception
phase = "started"
exc = yield
phase = "finished"
received_exception = exc
context = Context()
await start(context)
assert phase == "started"
await context.close(expected_exc)
assert phase == "finished"
assert received_exception == expected_exc
@pytest.mark.asyncio
async def test_current_context() -> None:
pytest.raises(NoCurrentContext, current_context)
async with Context() as parent_ctx:
assert current_context() is parent_ctx
async with Context() as child_ctx:
assert current_context() is child_ctx
assert current_context() is parent_ctx
pytest.raises(NoCurrentContext, current_context)
@pytest.mark.asyncio
async def test_get_resource() -> None:
async with Context() as ctx:
ctx.add_resource("foo")
assert get_resource(str) == "foo"
assert get_resource(int) is None
@pytest.mark.asyncio
async def test_require_resource() -> None:
async with Context() as ctx:
ctx.add_resource("foo")
assert require_resource(str) == "foo"
pytest.raises(ResourceNotFound, require_resource, int)
def test_explicit_parent_deprecation() -> None:
parent_ctx = Context()
pytest.warns(DeprecationWarning, Context, parent_ctx)
@pytest.mark.asyncio
async def test_context_stack_corruption(event_loop):
async def generator() -> AsyncGenerator:
async with Context():
yield
gen = generator()
await event_loop.create_task(gen.asend(None))
async with Context() as ctx:
with pytest.warns(
UserWarning, match="Potential context stack corruption detected"
):
try:
await event_loop.create_task(gen.asend(None))
except StopAsyncIteration:
pass
assert current_context() is ctx
pytest.raises(NoCurrentContext, current_context)
class TestDependencyInjection:
@pytest.mark.asyncio
async def test_static_resources(self) -> None:
@inject
async def injected(
foo: int, bar: str = resource(), *, baz: str = resource("alt")
) -> Tuple[int, str, str]:
return foo, bar, baz
async with Context() as ctx:
ctx.add_resource("bar_test")
ctx.add_resource("baz_test", "alt")
foo, bar, baz = await injected(2)
assert foo == 2
assert bar == "bar_test"
assert baz == "baz_test"
@pytest.mark.asyncio
async def test_sync_injection(self) -> None:
@inject
def injected(
foo: int, bar: str = resource(), *, baz: str = resource("alt")
) -> Tuple[int, str, str]:
return foo, bar, baz
async with Context() as ctx:
ctx.add_resource("bar_test")
ctx.add_resource("baz_test", "alt")
foo, bar, baz = injected(2)
assert foo == 2
assert bar == "bar_test"
assert baz == "baz_test"
@pytest.mark.asyncio
async def test_missing_annotation(self) -> None:
async def injected(
foo: int, bar: str = resource(), *, baz=resource("alt")
) -> None:
pass
pytest.raises(TypeError, inject, injected).match(
f"Dependency for parameter 'baz' of function "
f"'{__name__}.{self.__class__.__name__}.test_missing_annotation.<locals>"
f".injected' is missing the type annotation"
)
@pytest.mark.asyncio
async def test_missing_resource(self) -> None:
@inject
async def injected(foo: int, bar: str = resource()) -> None:
pass
with pytest.raises(ResourceNotFound) as exc:
async with Context():
await injected(2)
exc.match("no matching resource was found for type=str name='default'")
@pytest.mark.parametrize(
"annotation",
[
pytest.param(Optional[str], id="optional"),
# pytest.param(Union[str, int, None], id="union"),
pytest.param(
"str | None",
id="uniontype.10",
marks=[
pytest.mark.skipif(
sys.version_info < (3, 10), reason="Requires Python 3.10+"
)
],
),
],
)
@pytest.mark.parametrize(
"sync",
[
pytest.param(True, id="sync"),
pytest.param(False, id="async"),
],
)
@pytest.mark.asyncio
async def test_inject_optional_resource_async(
self, annotation: type, sync: bool
) -> None:
if sync:
@inject
def injected(
res: annotation = resource(), # type: ignore[valid-type]
) -> annotation: # type: ignore[valid-type]
return res
else:
@inject
async def injected(
res: annotation = resource(), # type: ignore[valid-type]
) -> annotation: # type: ignore[valid-type]
return res
async with Context() as ctx:
retval = injected() if sync else (await injected())
assert retval is None
ctx.add_resource("hello")
retval = injected() if sync else (await injected())
assert retval == "hello"
def test_resource_function_not_called(self) -> None:
async def injected(foo: int, bar: str = resource) -> None:
pass
with pytest.raises(TypeError) as exc:
inject(injected)
exc.match(
f"Default value for parameter 'bar' of function "
f"{__name__}.{self.__class__.__name__}.test_resource_function_not_called"
f".<locals>.injected was the 'resource' function – did you forget to add "
f"the parentheses at the end\\?"
)
def test_missing_inject(self) -> None:
def injected(foo: int, bar: str = resource()) -> None:
bar.lower()
with pytest.raises(AttributeError) as exc:
injected(1)
exc.match(
r"Attempted to access an attribute in a resource\(\) marker – did you "
r"forget to add the @inject decorator\?"
)
def test_no_resources_declared(self) -> None:
def injected(foo: int) -> None:
pass
match = (
f"{__name__}.{self.__class__.__name__}.test_no_resources_declared.<locals>"
f".injected does not have any injectable resources declared"
)
with pytest.warns(UserWarning, match=match):
func = inject(injected)
assert func is injected
def test_dependency_deprecated() -> None:
with pytest.deprecated_call():
async def foo(res: str = Dependency()) -> None:
pass
|
video_from_lv.py | pengzhou93/dancenet | 499 | 12785539 | import tensorflow as tf
import numpy as np
from model import decoder,vae
import cv2
vae.load_weights("vae_cnn.h5")  # trained VAE weights
lv = np.load("lv.npy")  # saved latent vectors, one 128-dim vector per frame
fourcc = cv2.VideoWriter_fourcc(*'XVID')
video = cv2.VideoWriter("output.avi", fourcc, 30.0, (208, 120))  # (width, height)
for i in range(1000):
    data = lv[i].reshape(1, 128)
    img = decoder.predict(data)
    # decoder output is assumed to lie in [0, 1]; rescale to 8-bit grayscale
    img = np.array(img).reshape(120, 208, 1)
    img = img * 255
    img = np.array(img).astype("uint8")
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    video.write(img)
video.release()
|
alipay/aop/api/domain/MedicalHospitalDeptInfo.py | snowxmas/alipay-sdk-python-all | 213 | 12785558 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MedicalHospitalDeptInfo(object):
def __init__(self):
self._code = None
self._location = None
self._name = None
self._parent_name = None
self._partner_code = None
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def location(self):
return self._location
@location.setter
def location(self, value):
self._location = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def parent_name(self):
return self._parent_name
@parent_name.setter
def parent_name(self, value):
self._parent_name = value
@property
def partner_code(self):
return self._partner_code
@partner_code.setter
def partner_code(self, value):
self._partner_code = value
def to_alipay_dict(self):
params = dict()
if self.code:
if hasattr(self.code, 'to_alipay_dict'):
params['code'] = self.code.to_alipay_dict()
else:
params['code'] = self.code
if self.location:
if hasattr(self.location, 'to_alipay_dict'):
params['location'] = self.location.to_alipay_dict()
else:
params['location'] = self.location
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
if self.parent_name:
if hasattr(self.parent_name, 'to_alipay_dict'):
params['parent_name'] = self.parent_name.to_alipay_dict()
else:
params['parent_name'] = self.parent_name
if self.partner_code:
if hasattr(self.partner_code, 'to_alipay_dict'):
params['partner_code'] = self.partner_code.to_alipay_dict()
else:
params['partner_code'] = self.partner_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MedicalHospitalDeptInfo()
if 'code' in d:
o.code = d['code']
if 'location' in d:
o.location = d['location']
if 'name' in d:
o.name = d['name']
if 'parent_name' in d:
o.parent_name = d['parent_name']
if 'partner_code' in d:
o.partner_code = d['partner_code']
return o
|
idaes/power_generation/costing/power_plant_costing.py | carldlaird/idaes-pse | 112 | 12785559 | <gh_stars>100-1000
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Power Plant costing library
This method leverages NETL costing capabilities. Two main methods have been
developed to calculate the capital cost of power generation plants:
1.- Fossil fueled power plants (from SCPC to IGCC) (get_PP_costing)
2.- supercritical CO2 power cycles (direct and indirect) (get_sCO2_unit_cost)
other methods:
* get_ASU_cost() to cost air separation units
* costing_initialization() to initialize costing blocks
* display_total_plant_costs() to display total plant cost
* display_bare_erected_costs() to display BEC costs
* build_flowsheet_cost_constraint() to compute the total cost of the entire
  flowsheet
* display_flowsheet_cost() to display flowsheet cost
* check_sCO2_costing_bounds() to display a warning if a costing model has been
  used outside the range it was designed for
"""
__author__ = "Costing Team (<NAME> and <NAME>)"
__version__ = "1.0.0"
from pyomo.environ import Param, Var, Block, Constraint, Expression, value, \
Expr_if
import idaes.core.util.scaling as iscale
from idaes.power_generation.costing.costing_dictionaries import \
BB_costing_exponents, BB_costing_params, sCO2_costing_params
from pyomo.util.calc_var_value import calculate_variable_from_constraint
# -----------------------------------------------------------------------------
# Power Plant Costing Library
# -----------------------------------------------------------------------------
def get_PP_costing(self, cost_accounts,
scaled_param, units, tech, ccs='B'):
'''
Power Plant Costing Method
This method relies on the capital cost scaling methodologies developed
by NETL. Report #DOE/NETL-341/013113
    Multiple vendor quotes have been used to determine the cost of several
    plant equipment items (e.g. boilers, pumps, heat exchangers) and of other
    costs incurred during plant operation (e.g. solids handling).
    The scaling approach uses one main equation:
SC = RC*(SP/RP)^Exp
where:
SC is the scaled cost
RC is the reference cost
SP is the scaled operational parameter
RP is the reference operational parameter
Exp is the scaling exponent
    The scaled cost is computed using reference values for different
    technologies.
    Categories:
1 - Supercritical PC, air-fired, with and without CO2 capture,
Illinois No. 6 coal
2 - Subcritical PC, air-fired, with and without CO2 capture,
Illinois No. 6 coal
3 - Two-stage, slurry-feed, oxygen-blown gasifier with and without
CO2 capture, Illinois No. 6 coal
4 - Single-stage, slurry-feed, oxygen-blown gasifier with and without
CO2 capture, Illinois No. 6 coal
5 - Single-stage, dry-feed, oxygen-blown, up-flow gasifier with
and without CO2 capture, Illinois No. 6 coal
6 - Natural gas, air-fired, with and without CO2 capture
7 - Advanced Ultrasupercritical PC
This method computes the capital cost of units and main components of the
power plant, and requires a few arguments to build a constraint as part of
your main model.
Args:
* self: A block or unit model where costing constraints can be added to
    * cost_accounts: A list of accounts to be included in the total cost;
        they should all use the same reference parameter
* scaled_param: the process parameter for the system(s) being costed
* units: the units of the scaled_param, used for verification
    * tech: int 1-7 representing the above categories
* ccs: 'A' or 'B' representing no CCS or CCS
The appropriate scaling parameters for various cost accounts can be found
in the QGESS on capital cost scaling (Report #DOE/NETL-341/013113).
The correct units for the reference parameters are found in the BBR4 COE
spreadsheet.
'''
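    # Worked example of the scaling equation above (illustrative numbers, not
    # taken from the NETL report): with RC = 10.0 $MM, RP = 500 MW,
    # SP = 650 MW and Exp = 0.6, SC = 10.0 * (650/500)**0.6, i.e. about
    # 11.7 $MM before engineering fees and contingencies are applied.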
# ------------------------ Power Plant Cost ------------------------
# check to see if a costing block already exists
if hasattr(self, 'costing'):
raise AttributeError("{} already has an attribute costing. "
"Check that you are not calling get_costing"
" twice on the same model".format(self.name))
# create a costing Block
self.costing = Block()
self.costing.library = 'PP'
# find flowsheet block to create global costing parameters
try:
fs = self.flowsheet()
except AttributeError:
fs = self.parent_block()
# build flowsheet level parameters CE_index = year
if not hasattr(fs, 'costing'):
fs.get_costing(year='2018')
CE_index = fs.costing.CE_index
# define preloaded accounts
PC_preloaded_accounts = {'Coal Handling': ['1.1', '1.2',
'1.3', '1.4', '1.9a'],
'Sorbent Handling': ['1.5', '1.6',
'1.7', '1.8', '1.9b'],
'Coal Feed': ['2.1', '2.2', '2.9a'],
'Sorbent Feed': ['2.5', '2.6', '2.9b'],
'Feedwater System': ['3.1', '3.3'],
'PC Boiler': ['4.9'],
'Steam Turbine': ['8.1'],
'Condenser': ['8.3'],
'Cooling Tower': ['9.1'],
'Circulating Water System': ['9.2', '9.3',
'9.4', '9.6', '9.7'],
'Ash Handling': ['10.6', '10.7', '10.9']}
IGCC_preloaded_accounts = {'Coal Handling': ['1.1', '1.2',
'1.3', '1.4', '1.9'],
'Coal Feed': ['2.1', '2.2',
'2.3', '2.4', '2.9'],
'Feedwater System': ['3.1', '3.3'],
'Gasifier': ['4.1'],
'Syngas Cooler': ['4.2'],
'ASU': ['4.3a'],
'ASU Oxidant Compression': ['4.3b'],
'Combustion Turbine': ['6.1', '6.3'],
'Syngas Expander': ['6.2'],
'HRSG': ['7.1', '7.2'],
'Steam Turbine': ['8.1'],
'Condenser': ['8.3'],
'Cooling Tower': ['9.1'],
'Circulating Water System': ['9.2', '9.3',
'9.4', '9.6',
'9.7'],
'Slag Handling': ['10.1', '10.2',
'10.3', '10.6',
'10.7', '10.8',
'10.9']}
NGCC_preloaded_accounts = {'Feedwater System': ['3.1', '3.3'],
'Combustion Turbine': ['6.1', '6.3'],
'HRSG': ['7.1', '7.2'],
'Steam Turbine': ['8.1'],
'Condenser': ['8.3'],
'Cooling Tower': ['9.1'],
'Circulating Water System': ['9.2', '9.3',
'9.4', '9.6',
'9.7']}
AUSC_preloaded_accounts = {'PC Boiler': ['4.9'],
'Steam Turbine': ['8.1'],
'Steam Piping': ['8.4']}
# preloaded account handling
if type(cost_accounts) == str:
if tech in [1, 2]:
cost_accounts = PC_preloaded_accounts[cost_accounts]
elif tech in [3, 4, 5]:
cost_accounts = IGCC_preloaded_accounts[cost_accounts]
elif tech == 6:
cost_accounts = NGCC_preloaded_accounts[cost_accounts]
elif tech == 7:
cost_accounts = AUSC_preloaded_accounts[cost_accounts]
else:
AttributeError("{} technology not supported".format(self.name))
# check that all accounts use the same process parameter
param_check = None
for account in cost_accounts:
param = BB_costing_exponents[str(tech)][account]['Process Parameter']
if param_check is None:
param_check = param
elif param != param_check:
raise ValueError("{} cost accounts selected do not use "
" the same process parameter".format(self.name))
# check that the user passed the correct units
ref_units = BB_costing_params[str(tech)][ccs][cost_accounts[0]]['Units']
if units != ref_units:
raise ValueError('Account %s uses units of %s. '
'Units of %s were passed.'
% (cost_accounts[0], ref_units, units))
# construct dictionaries
account_names = {}
exponents = {}
reference_costs = {}
reference_params = {}
engineering_fees = {}
process_contingencies = {}
project_contingencies = {}
for account in cost_accounts:
account_names[account] = BB_costing_exponents[str(
tech)][account]['Account Name']
exponents[account] = float(
BB_costing_exponents[str(tech)][account]['Exponent'])
reference_costs[account] = BB_costing_params[str(
tech)][ccs][account]['BEC']
reference_params[account] = BB_costing_params[str(
tech)][ccs][account]['RP Value']
engineering_fees[account] = BB_costing_params[str(
tech)][ccs][account]['Eng Fee']
process_contingencies[account] = BB_costing_params[str(
tech)][ccs][account]['Process Contingency']
project_contingencies[account] = BB_costing_params[str(
tech)][ccs][account]['Project Contingency']
# Used by other functions for reporting results
self.costing.account_names = account_names
# define parameters
self.costing.exp = Param(cost_accounts,
mutable=True,
initialize=exponents,
doc='exponential parameter for account')
self.costing.ref_cost = Param(cost_accounts,
mutable=True,
initialize=reference_costs,
doc='reference cost for account')
self.costing.ref_param = Param(cost_accounts,
mutable=True,
initialize=reference_params,
doc='reference parameter for account')
self.costing.eng_fee = Param(cost_accounts,
mutable=True,
initialize=engineering_fees,
doc='engineering fee percentage')
self.costing.process_conting = Param(cost_accounts,
mutable=True,
initialize=process_contingencies,
doc='process contingency percentage')
self.costing.project_conting = Param(cost_accounts,
mutable=True,
initialize=project_contingencies,
doc='project contingency percentage')
# define variables
self.costing.bare_erected_cost = Var(cost_accounts,
initialize=reference_costs,
bounds=(0, 1e4),
doc='scaled bare erected cost in $MM')
self.costing.total_plant_cost = Var(cost_accounts,
initialize=reference_costs,
bounds=(0, 1e4),
doc='total plant cost in $MM')
# rule for scaling BEC
# reference cost is in 2018 dollars, 671.1 is CE index for 2018
def bare_erected_cost_rule(costing, i):
return (costing.bare_erected_cost[i]*1e3 ==
(CE_index/671.1)*costing.ref_cost[i] *
(scaled_param/costing.ref_param[i])**costing.exp[i])
self.costing.bare_erected_cost_eq = Constraint(
cost_accounts, rule=bare_erected_cost_rule)
# rule for calculating TPC
def total_plant_cost_rule(costing, i):
return (costing.total_plant_cost[i] == costing.bare_erected_cost[i] *
(1 + costing.eng_fee[i] + costing.process_conting[i]) *
(1 + costing.project_conting[i]))
self.costing.total_plant_cost_eq = Constraint(
cost_accounts, rule=total_plant_cost_rule)
# rule for sum of BEC
def BEC_sum_rule(costing):
return sum(costing.bare_erected_cost[i] for i in cost_accounts)
self.costing.bare_erected_cost_sum = Expression(rule=BEC_sum_rule)
# rule for sum of TPC
def TPC_sum_rule(costing):
return sum(costing.total_plant_cost[i] for i in cost_accounts)
self.costing.total_plant_cost_sum = Expression(rule=TPC_sum_rule)
# # add variable and constraint scaling
for i in cost_accounts:
iscale.set_scaling_factor(self.costing.bare_erected_cost[i], 1)
iscale.set_scaling_factor(self.costing.total_plant_cost[i], 1)
iscale.constraint_scaling_transform(self.
costing.bare_erected_cost_eq[i],
1e-3,
overwrite=False)
iscale.constraint_scaling_transform(self.
costing.total_plant_cost_eq[i],
1,
overwrite=False)
# -----------------------------------------------------------------------------
# Supercritical CO2 Costing Library
# -----------------------------------------------------------------------------
def get_sCO2_unit_cost(self, equipment, scaled_param, temp_C=None, n_equip=1):
'''
Args:
self - pyomo Block where constraints will be made
        equipment - the name of the sCO2 equipment to cost
        scaled_param - the scaling parameter (in appropriate units) for the
            selected equipment
        temp_C - the maximum temperature of the equipment in deg C. Not all types of
equipment use a temperature correction factor, so it is optional
n_equip - the number of pieces of equipment to cost
Cost is in M$
'''
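    # Illustrative note: for equipment types with a temperature correction
    # (see temp_correction_rule below), e.g. a turbine rated at 600 C, the
    # cost is scaled by 1 + c*(600 - 550) + d*(600 - 550)**2; at or below
    # 550 C the factor stays at ~1.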
# check to see if a costing block already exists
if hasattr(self, 'costing'):
raise AttributeError("{} already has an attribute costing. "
"Check that you are not calling get_costing"
" twice on the same model".format(self.name))
# create a costing Block
self.costing = Block()
self.costing.library = 'sCO2'
self.costing.equipment = equipment
# find flowsheet block to create global costing parameters
try:
fs = self.flowsheet()
except AttributeError:
fs = self.parent_block()
# build flowsheet level parameters CE_index = year
if not hasattr(fs, 'costing'):
fs.get_costing(year='2017')
CE_index = fs.costing.CE_index
param_dict = sCO2_costing_params[equipment]
# define parameters
self.costing.ref_cost = Param(mutable=True,
initialize=param_dict['a'],
doc='Reference cost')
self.costing.exp = Param(mutable=True,
initialize=param_dict['b'],
doc='Scaling exponent')
self.costing.c = Param(mutable=True,
initialize=param_dict['c'],
doc='coefficient for temperature correction')
self.costing.d = Param(mutable=True,
initialize=param_dict['d'],
doc='coefficient for temperature correction')
self.costing.material_cost = Param(mutable=True,
doc='material installation cost',
initialize=param_dict['Material Cost'])
self.costing.labor_cost = Param(mutable=True,
initialize=param_dict['Labor Cost'],
doc='labor installation cost')
# estimates for the percentages of TPC will be added later
self.costing.eng_fee = Param(mutable=True,
initialize=0,
doc='engineering fee percentage')
self.costing.process_conting = Param(mutable=True,
initialize=0,
doc='process contingency percentage')
self.costing.project_conting = Param(mutable=True,
initialize=0,
doc='project contingency percentage')
# define variables
# n_equip is left as a fixed variable to support MINLP optimization
self.costing.n_equip = Var(initialize=n_equip,
doc='number of pieces of equipment')
self.costing.n_equip.fix(n_equip)
self.costing.scaled_param = Var(initialize=scaled_param,
bounds=(0, 1e12),
doc='scaled parameter')
self.costing.temp_factor = Var(initialize=1,
bounds=(0.9, 100),
doc='temperature correction factor')
self.costing.equipment_cost = Var(initialize=self.costing.ref_cost,
bounds=(0, 1e4),
doc='equipment cost of sCO2 unit in $MM')
self.costing.bare_erected_cost = Var(initialize=self.costing.ref_cost,
bounds=(0, 1e4),
doc='bare erected cost of sCO2 unit'
'in $MM')
self.costing.total_plant_cost = Var(initialize=self.costing.ref_cost,
bounds=(0, 1e4),
doc='total plant cost of sCO2 unit'
'in $MM')
# divides the scaled parameter by the number of pieces of equipment
def scaled_param_rule(costing):
return costing.scaled_param*costing.n_equip == scaled_param
self.costing.scaled_param_eq = Constraint(rule=scaled_param_rule)
# check if equipment requires a temperature correction factor
if equipment in ['Axial turbine', 'Radial turbine', 'Coal-fired heater',
'Natural gas-fired heater', 'Recuperator']:
if temp_C is None:
raise ValueError('Temperature argument is '
'required to cost %s equipment' % equipment)
else:
self.costing.temperature = Var(initialize=500,
bounds=(0, 1e6),
doc='dummy var for temperature')
self.costing.temp_eq = Constraint(expr=(self.costing.temperature
== temp_C))
def temp_correction_rule(costing): # rule for temp correction
return (Expr_if(costing.temperature < 550,
1e-6*costing.temperature + 1,
1 + costing.c*(costing.temperature - 550)
+ costing.d*(costing.temperature - 550)**2) ==
costing.temp_factor)
self.costing.temp_correction_eq = Constraint(
rule=temp_correction_rule)
else:
self.costing.temp_factor.fix(1)
# rule for equipment cost
def equipment_cost_rule(costing):
return (costing.equipment_cost*1e6 ==
(CE_index/567.5) * costing.n_equip * costing.ref_cost *
(costing.scaled_param**costing.exp) * costing.temp_factor)
self.costing.equipment_cost_eq = Constraint(rule=equipment_cost_rule)
# rule for bare erected cost
def bare_erected_cost_rule(costing):
return (costing.bare_erected_cost == costing.equipment_cost *
(1 + costing.material_cost + costing.labor_cost))
self.costing.bare_erected_cost_eq = Constraint(rule=bare_erected_cost_rule)
# rule for calculating total plant cost
def total_plant_cost_rule(costing):
return (costing.total_plant_cost == costing.bare_erected_cost *
(1 + costing.eng_fee + costing.process_conting
+ costing.project_conting))
self.costing.total_plant_cost_eq = Constraint(rule=total_plant_cost_rule)
# add variable and constraint scaling
if equipment in ["Recuperator", "Direct air cooler"]:
iscale.set_scaling_factor(self.costing.scaled_param, 1e-5)
else:
iscale.set_scaling_factor(self.costing.scaled_param, 1)
iscale.set_scaling_factor(self.costing.equipment_cost, 1e3)
iscale.set_scaling_factor(self.costing.bare_erected_cost, 1e3)
iscale.set_scaling_factor(self.costing.total_plant_cost, 1e3)
iscale.constraint_scaling_transform(
self.costing.equipment_cost_eq, 1e-6, overwrite=False)
iscale.constraint_scaling_transform(
self.costing.bare_erected_cost_eq, 1e3, overwrite=False)
    iscale.constraint_scaling_transform(
        self.costing.total_plant_cost_eq, 1e3, overwrite=False)
# -----------------------------------------------------------------------------
# Air Separation Unit Costing Library
# -----------------------------------------------------------------------------
def get_ASU_cost(self, scaled_param):
# scaled parameter is O2 flowrate in TPD
params = {'Reference Cost': 3.26e6,
'Reference Parameter': 13078,
'Exponent': 0.7,
'Eng Fee': 0.097,
'Process': 0,
'Project': 0.110}
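    # Illustrative scaling example using the reference values above: costing a
    # 20000 TPD oxygen flow scales the reference cost by (20000/13078)**0.7,
    # a factor of roughly 1.35, before the CE-index ratio and contingencies.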
# check to see if a costing block already exists
if hasattr(self, 'costing'):
raise AttributeError("{} already has an attribute costing. "
"Check that you are not calling get_costing"
" twice on the same model".format(self.name))
# create a costing Block
self.costing = Block()
self.costing.library = 'ASU'
# find flowsheet block to create global costing parameters
try:
fs = self.flowsheet()
except AttributeError:
fs = self.parent_block()
# build flowsheet level parameters CE_index = year
if not hasattr(fs, 'costing'):
fs.get_costing(year='2017')
CE_index = fs.costing.CE_index
# define parameters
self.costing.ref_cost = Param(initialize=params['Reference Cost'],
mutable=True,
doc='ASU reference cost')
self.costing.ref_param = Param(initialize=params['Reference Parameter'],
mutable=True,
doc='ASU reference parameter value')
self.costing.exp = Param(initialize=params['Exponent'],
mutable=True,
doc='ASU scaling exponent')
self.costing.eng_fee = Param(mutable=True,
initialize=params['Eng Fee'],
doc='engineering fee percentage')
self.costing.process_conting = Param(mutable=True,
initialize=params['Process'],
doc='process contingency percentage')
self.costing.project_conting = Param(mutable=True,
initialize=params['Project'],
doc='project contingency percentage')
# define variables
self.costing.bare_erected_cost = Var(initialize=params['Reference Cost'],
bounds=(0, 1e4),
doc='scaled bare erected cost in $MM')
self.costing.total_plant_cost = Var(initialize=params['Reference Cost'],
bounds=(0, 1e4),
doc='total plant cost in $MM')
# rule for scaling BEC
# reference cost is in 2008 dollars, 566.2 is CE index for Nov 2008
def bare_erected_cost_rule(costing):
return (costing.bare_erected_cost*1e3 ==
(CE_index/566.2)*costing.ref_cost *
(scaled_param/costing.ref_param)**costing.exp)
self.costing.bare_erected_cost_eq = Constraint(rule=bare_erected_cost_rule)
# rule for calculating TPC
def total_plant_cost_rule(costing):
return (costing.total_plant_cost == costing.bare_erected_cost *
(1 + costing.eng_fee + costing.process_conting
+ costing.project_conting))
self.costing.total_plant_cost_eq = Constraint(rule=total_plant_cost_rule)
# add variable and constraint scaling
iscale.set_scaling_factor(self.costing.bare_erected_cost, 1)
iscale.set_scaling_factor(self.costing.total_plant_cost, 1)
iscale.constraint_scaling_transform(
self.costing.bare_erected_cost_eq, 1e-3, overwrite=False)
iscale.constraint_scaling_transform(
self.costing.total_plant_cost_eq, 1, overwrite=False)
# -----------------------------------------------------------------------------
# Costing Library Utility Functions
# -----------------------------------------------------------------------------
def costing_initialization(fs):
for o in fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing'):
if o.costing.library == 'sCO2':
if o.costing.equipment in ['Axial turbine',
'Radial turbine',
'Coal-fired heater',
'Natural gas-fired heater',
                                       'Recuperator']:
calculate_variable_from_constraint(o.costing.temperature,
o.costing.temp_eq)
calculate_variable_from_constraint(o.costing.temp_factor,
o.costing.
temp_correction_eq)
calculate_variable_from_constraint(o.costing.scaled_param,
o.costing.scaled_param_eq)
calculate_variable_from_constraint(o.costing.equipment_cost,
o.costing.equipment_cost_eq)
calculate_variable_from_constraint(o.costing.bare_erected_cost,
o.costing.
bare_erected_cost_eq)
calculate_variable_from_constraint(o.costing.total_plant_cost,
o.costing.
total_plant_cost_eq)
elif o.costing.library in ['PP', 'ASU']:
for key in o.costing.bare_erected_cost.keys():
calculate_variable_from_constraint(o.costing.
bare_erected_cost[key],
o.costing.
bare_erected_cost_eq[
key])
calculate_variable_from_constraint(o.costing.
total_plant_cost[key],
o.costing.
total_plant_cost_eq[
key])
def display_total_plant_costs(fs):
print('-----Total Plant Costs-----')
for o in fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing') and hasattr(o.costing, 'total_plant_cost'):
print('%s: $%.2f Million' % (value(o.name),
                                         value(o.costing.total_plant_cost)))
def display_bare_erected_costs(fs):
print('-----Bare Erected Costs-----')
for o in fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing') and hasattr(o.costing, 'bare_erected_cost'):
print('%s: $%.2f Million' % (value(o.name),
value(o.costing.bare_erected_cost)))
def display_equipment_costs(fs):
print('-----Equipment Costs-----')
for o in fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing') and hasattr(o.costing, 'equipment_cost'):
print('%s: $%.2f Million' % (value(o.name),
value(o.costing.equipment_cost)))
def build_flowsheet_cost_constraint(m):
total_cost_list = []
for o in m.fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing'):
for key in o.costing.total_plant_cost.keys():
total_cost_list.append(o.costing.total_plant_cost[key])
m.fs.flowsheet_cost = Var(initialize=0,
bounds=(0, 1e12),
doc='cost of entire process')
def flowsheet_cost_rule(fs):
return fs.flowsheet_cost == sum(total_cost_list)
m.fs.flowsheet_cost_eq = Constraint(rule=flowsheet_cost_rule)
def display_flowsheet_cost(m):
print('\n')
print('Total flowsheet cost: $%.3f Million' %
          value(m.fs.flowsheet_cost))
def check_sCO2_costing_bounds(fs):
    # iterate through the children of the flowsheet
for o in fs.component_objects(descend_into=False):
# look for costing blocks
if hasattr(o, 'costing'):
costing = o.costing
if costing.library == 'sCO2':
equipment = costing.equipment
lower_bound = sCO2_costing_params[equipment]['Lower Bound']
upper_bound = sCO2_costing_params[equipment]['Upper Bound']
if value(costing.scaled_param) < lower_bound:
print('''%s: The scaled parameter (%f) is below the lower
bound (%f).''' % (value(o.name),
value(costing.scaled_param),
lower_bound))
elif value(costing.scaled_param) > upper_bound:
print('''%s: The scaled parameter (%f) is above the upper
bound (%f).''' % (value(o.name),
value(costing.scaled_param),
upper_bound))
else:
print('''%s: The scaled parameter is within the bounds.'''
% value(o.name))
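# -----------------------------------------------------------------------------
# Illustrative call order (a sketch; assumes `m` is a ConcreteModel whose
# flowsheet `m.fs` contains units that already built costing blocks):
#   costing_initialization(m.fs)        # seed costing vars from constraints
#   build_flowsheet_cost_constraint(m)  # aggregate TPC into m.fs.flowsheet_cost
#   # ... solve the model ...
#   display_total_plant_costs(m.fs)
#   display_flowsheet_cost(m)
#   check_sCO2_costing_bounds(m.fs)
# -----------------------------------------------------------------------------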
|
src/masonite/commands/__init__.py | cercos/masonite | 1,816 | 12785561 | <reponame>cercos/masonite<filename>src/masonite/commands/__init__.py
from .CommandCapsule import CommandCapsule
from .AuthCommand import AuthCommand
from .TinkerCommand import TinkerCommand
from .KeyCommand import KeyCommand
from .ServeCommand import ServeCommand
from .QueueWorkCommand import QueueWorkCommand
from .QueueRetryCommand import QueueRetryCommand
from .QueueTableCommand import QueueTableCommand
from .QueueFailedCommand import QueueFailedCommand
from .MakeControllerCommand import MakeControllerCommand
from .MakeJobCommand import MakeJobCommand
from .MakeMailableCommand import MakeMailableCommand
from .MakeProviderCommand import MakeProviderCommand
from .PublishPackageCommand import PublishPackageCommand
from .MakePolicyCommand import MakePolicyCommand
from .MakeTestCommand import MakeTestCommand
from .DownCommand import DownCommand
from .UpCommand import UpCommand
from .MakeCommandCommand import MakeCommandCommand
from .MakeViewCommand import MakeViewCommand
from .MakeMiddlewareCommand import MakeMiddlewareCommand
from .PresetCommand import PresetCommand
from .Command import Command
|
calvinextras/calvinsys/web/pushbullet/Pushbullet.py | gabrielcercel/calvin-base | 334 | 12785574 | <reponame>gabrielcercel/calvin-base<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pushbullet
from calvin.runtime.south.async import threads, async
from calvin.utilities.calvinlogger import get_logger
from calvin.runtime.south.calvinsys import base_calvinsys_object
_log = get_logger(__name__)
class Pushbullet(base_calvinsys_object.BaseCalvinsysObject):
"""
Pushbullet - Post messages to pushbullet channel
Requires pushbullet.py (pip install pushbullet.py)
"""
init_schema = {
"type": "object",
"properties": {
"api_key": {
"description": "API key, see https://www.pushbullet.com/account",
"type": "string"
},
"channel_tag": {
"description": "Pushbullet to post to, see http://www.pushbullet.com",
"type": "string"
}
},
"required": ["api_key", "channel_tag"],
"description": "Setup up api key and tag of channel to use for pushbullet messages"
}
can_write_schema = {
"description": "Returns True if data can be posted, otherwise False",
"type": "boolean"
}
write_schema = {
"description": "Post update to configured pushbullet channel",
"type": ["object", "string"],
"properties": {
"title": {"type": "string", "description": "title of message"},
"message": {"type": "string", "description": "message to post to channel"}
}
}
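    # Illustrative payloads accepted by write(), per the schema above (values
    # are hypothetical): either a plain string, e.g. "sensor 7 offline", or a
    # dict, e.g. {"title": "Alert", "message": "sensor 7 offline"}.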
def init(self, api_key, channel_tag, title=None):
def init_pb():
try:
# pushbullet = pbullet.Pushbullet({"api_key": api_key})
pb_api = pushbullet.PushBullet(api_key)
ch = pb_api.get_channel(channel_tag)
return (pb_api, ch)
except Exception as e:
_log.error("Failed to initialize pushbullet: {}".format(e))
def done(pb_chan):
self.pushbullet, self.channel = pb_chan
self.busy = False
self.title = title
self.busy = True
in_progress = threads.defer_to_thread(init_pb)
in_progress.addCallback(done)
def can_write(self):
return not self.busy
def write(self, data):
def send():
try:
self.channel.push_note(title, message)
except Exception as e:
_log.error("Failed to send pushbullet: {}".format(e))
done()
def done(*args, **kwargs):
self.busy = False
if isinstance(data, basestring):
message = data
title = self.title
        else:
message = data.get("message")
title = data.get("title")
self.busy = True
in_progress = threads.defer_to_thread(send)
in_progress.addBoth(done)
def close(self):
del self.channel
self.channel = None
del self.pushbullet
self.pushbullet = None
|
home.admin/BlitzTUI/blitztui/ui/qcode.py | PatrickScheich/raspiblitz | 1,908 | 12785590 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/qcode.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_DialogShowQrCode(object):
def setupUi(self, DialogShowQrCode):
DialogShowQrCode.setObjectName("DialogShowQrCode")
DialogShowQrCode.resize(480, 320)
self.buttonBox = QtWidgets.QDialogButtonBox(DialogShowQrCode)
self.buttonBox.setGeometry(QtCore.QRect(326, 268, 150, 50))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.buttonBox.sizePolicy().hasHeightForWidth())
self.buttonBox.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(24)
self.buttonBox.setFont(font)
self.buttonBox.setStyleSheet("background-color: lightgrey;\n"
"font: 24pt \"Arial\";")
self.buttonBox.setOrientation(QtCore.Qt.Vertical)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.top_right_logo = QtWidgets.QLabel(DialogShowQrCode)
self.top_right_logo.setGeometry(QtCore.QRect(430, 2, 40, 60))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.top_right_logo.sizePolicy().hasHeightForWidth())
self.top_right_logo.setSizePolicy(sizePolicy)
self.top_right_logo.setText("")
self.top_right_logo.setPixmap(QtGui.QPixmap(":/RaspiBlitz/images/RaspiBlitz_Logo_Berry.png"))
self.top_right_logo.setScaledContents(True)
self.top_right_logo.setAlignment(QtCore.Qt.AlignCenter)
self.top_right_logo.setObjectName("top_right_logo")
self.frame = QtWidgets.QFrame(DialogShowQrCode)
self.frame.setGeometry(QtCore.QRect(0, 0, 320, 320))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setStyleSheet("background-color: rgb(255, 255, 255);")
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.qcode = QtWidgets.QLabel(self.frame)
self.qcode.setGeometry(QtCore.QRect(1, 1, 318, 318))
self.qcode.setStyleSheet("background-color: white")
self.qcode.setText("")
self.qcode.setPixmap(QtGui.QPixmap(":/RaspiBlitz/images/RaspiBlitz_Logo_Stacked.png"))
self.qcode.setScaledContents(True)
self.qcode.setAlignment(QtCore.Qt.AlignCenter)
self.qcode.setObjectName("qcode")
self.label = QtWidgets.QLabel(DialogShowQrCode)
self.label.setGeometry(QtCore.QRect(330, 4, 88, 60))
self.label.setText("")
self.label.setPixmap(QtGui.QPixmap(":/RaspiBlitz/images/RaspiBlitz_Logo_Stacked.png"))
self.label.setScaledContents(True)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.horizontalLayoutWidget = QtWidgets.QWidget(DialogShowQrCode)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(320, 70, 161, 191))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.horizontalLayoutWidget)
self.verticalLayout.setContentsMargins(6, 0, 6, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.line = QtWidgets.QFrame(self.horizontalLayoutWidget)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout.addWidget(self.line)
self.memo_key = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setItalic(False)
font.setUnderline(False)
font.setWeight(75)
self.memo_key.setFont(font)
self.memo_key.setScaledContents(False)
self.memo_key.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.memo_key.setWordWrap(True)
self.memo_key.setObjectName("memo_key")
self.verticalLayout.addWidget(self.memo_key)
self.memo_value = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
self.memo_value.setFont(font)
self.memo_value.setScaledContents(False)
self.memo_value.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.memo_value.setWordWrap(True)
self.memo_value.setObjectName("memo_value")
self.verticalLayout.addWidget(self.memo_value)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.status_key = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setUnderline(False)
font.setWeight(75)
self.status_key.setFont(font)
self.status_key.setScaledContents(False)
self.status_key.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.status_key.setWordWrap(True)
self.status_key.setObjectName("status_key")
self.horizontalLayout.addWidget(self.status_key)
self.status_value = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
self.status_value.setFont(font)
self.status_value.setScaledContents(False)
self.status_value.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing)
self.status_value.setWordWrap(True)
self.status_value.setObjectName("status_value")
self.horizontalLayout.addWidget(self.status_value)
self.verticalLayout.addLayout(self.horizontalLayout)
self.inv_amt_key = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.inv_amt_key.setFont(font)
self.inv_amt_key.setObjectName("inv_amt_key")
self.verticalLayout.addWidget(self.inv_amt_key)
self.inv_amt_value = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.inv_amt_value.setFont(font)
self.inv_amt_value.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.inv_amt_value.setObjectName("inv_amt_value")
self.verticalLayout.addWidget(self.inv_amt_value)
self.amt_paid_key = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
font.setBold(True)
font.setWeight(75)
self.amt_paid_key.setFont(font)
self.amt_paid_key.setObjectName("amt_paid_key")
self.verticalLayout.addWidget(self.amt_paid_key)
self.amt_paid_value = QtWidgets.QLabel(self.horizontalLayoutWidget)
font = QtGui.QFont()
font.setFamily("Arial")
font.setPointSize(11)
self.amt_paid_value.setFont(font)
self.amt_paid_value.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.amt_paid_value.setObjectName("amt_paid_value")
self.verticalLayout.addWidget(self.amt_paid_value)
self.spinner = QtWidgets.QWidget(DialogShowQrCode)
self.spinner.setGeometry(QtCore.QRect(440, 0, 40, 40))
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.spinner.sizePolicy().hasHeightForWidth())
self.spinner.setSizePolicy(sizePolicy)
self.spinner.setObjectName("spinner")
self.spinner.raise_()
self.buttonBox.raise_()
self.top_right_logo.raise_()
self.frame.raise_()
self.label.raise_()
self.horizontalLayoutWidget.raise_()
self.retranslateUi(DialogShowQrCode)
self.buttonBox.accepted.connect(DialogShowQrCode.accept)
QtCore.QMetaObject.connectSlotsByName(DialogShowQrCode)
def retranslateUi(self, DialogShowQrCode):
_translate = QtCore.QCoreApplication.translate
DialogShowQrCode.setWindowTitle(_translate("DialogShowQrCode", "Dialog"))
self.memo_key.setText(_translate("DialogShowQrCode", "Memo"))
self.memo_value.setText(_translate("DialogShowQrCode", "RB-Vivid-Badger"))
self.status_key.setText(_translate("DialogShowQrCode", "Status"))
self.status_value.setText(_translate("DialogShowQrCode", "Open/Paid"))
self.inv_amt_key.setText(_translate("DialogShowQrCode", "Invoice Amount"))
self.inv_amt_value.setText(_translate("DialogShowQrCode", "123456798"))
self.amt_paid_key.setText(_translate("DialogShowQrCode", "Amount Paid"))
self.amt_paid_value.setText(_translate("DialogShowQrCode", "N/A"))
from . import resources_rc
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
DialogShowQrCode = QtWidgets.QDialog()
ui = Ui_DialogShowQrCode()
ui.setupUi(DialogShowQrCode)
DialogShowQrCode.show()
sys.exit(app.exec_())
|
recipes/Python/473781_mthreadpy_version_2/recipe-473781.py | tdiprima/code | 2,023 | 12785601 | <reponame>tdiprima/code
# #include <windows.h>
import thread
# #include <math.h>
import math
# #include <stdio.h>
import sys
# #include <stdlib.h>
import time
# static int runFlag = TRUE;
runFlag = True
# void main(int argc, char *argv[]) {
def main(argc, argv):
global runFlag
# unsigned int runTime
# PYTHON: NO CODE
# SYSTEMTIME now;
# PYTHON: NO CODE
# WORD stopTimeMinute, stopTimeSecond;
# PYTHON: NO CODE
# // Get command line argument, N
try:
N = abs(int(argv[1]))
except:
sys.exit(1)
# // Get the time the threads should run, runtime
try:
runTime = abs(int(argv[2]))
except:
sys.exit(1)
# // Calculate time to halt (learn better ways to do this later)
# GetSystemTime(&now);
now = time.localtime()
# printf("mthread: Suite starting at system time
# %d:%d:%d\n", now.wHour, now.wMinute, now.wSecond);
sys.stdout.write('mthread: Suite starting at system time %d:%d:%d\n' \
% (now.tm_hour, now.tm_min, now.tm_sec))
# stopTimeSecond = (now.wSecond + (WORD) runTime) % 60;
stopTimeSecond = (now.tm_sec + runTime) % 60
# stopTimeMinute = now.wMinute + (now.wSecond +
# (WORD) runTime) / 60;
stopTimeMinute = now.tm_min + (now.tm_sec + runTime) / 60
# // For 1 to N
# for (i = 0; i < N; i++) {
for i in range(N):
# // Create a new thread to execute simulated word
thread.start_new_thread(threadWork, ())
# Sleep(100); // Let newly created thread run
time.sleep(0.1)
# }
# PYTHON: NO CODE
# // Cycle while children work ...
# while (runFlag) {
while runFlag:
# GetSystemTime(&now);
now = time.localtime()
# if ((now.wMinute >= stopTimeMinute)
# &&
# (now.wSecond >= stopTimeSecond)
# )
if now.tm_min >= stopTimeMinute \
and now.tm_sec >= stopTimeSecond:
# runFlag = FALSE;
runFlag = False
# Sleep(1000);
time.sleep(1)
# }
# PYTHON: NO CODE
# Sleep(5000);
time.sleep(5)
# }
# PYTHON: NO CODE
# // The code executed by each worker thread (simulated work)
# DWORD WINAPI threadWork(LPVOID threadNo) {
def threadWork():
threadNo = thread.get_ident()
# // Local variables
# double y;
# PYTHON: NO CODE
# const double x = 3.14159;
x = 3.14159
# const double e = 2.7183;
e = 2.7183
# int i;
# PYTHON: NO CODE
# const int napTime = 1000; // in milliseconds
napTime = 1000
# const int busyTime = 40000;
busyTime = 40000
# DWORD result = 0;
result = 0
# // Create load
# while (runFlag) {
while runFlag:
# // Parameterized processor burst phase
# for (i = 0; i < busyTime; i++)
for i in range(busyTime):
# y = pow(x, e);
y = math.pow(x, e)
# // Parameterized sleep phase
# Sleep(napTime);
time.sleep(napTime / 1000.0)
# // Write message to stdout
sys.stdout.write('Thread %s just woke up.\n' % threadNo)
# }
# PYTHON: NO CODE
# // Terminating
# return result;
return result
# }
# PYTHON: NO CODE
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
|
atest/testdata/keywords/PositionalOnly.py | bhirsz/robotframework | 7,073 | 12785671 | def one_argument(arg, /):
return arg.upper()
def three_arguments(a, b, c, /):
return '-'.join([a, b, c])
def with_normal(posonly, /, normal):
return posonly + '-' + normal
def defaults(required, optional='default', /):
return required + '-' + optional
def types(first: int, second: float, /):
return first + second
def kwargs(x, /, **y):
return '%s, %s' % (x, ', '.join('%s: %s' % item for item in y.items()))
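# Illustrative semantics check (not part of the test data): parameters before
# '/' are positional-only, so keyword calls to them raise TypeError.
#   one_argument('abc')        # -> 'ABC'
#   one_argument(arg='abc')    # TypeError: positional-only argument passed as keyword
#   defaults('a')              # -> 'a-default'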
|
rx/internal/basic.py | mmpio/RxPY | 4,342 | 12785672 | from typing import Any
from datetime import datetime
# Defaults
def noop(*args, **kw):
"""No operation. Returns nothing"""
pass
def identity(x: Any) -> Any:
"""Returns argument x"""
return x
def default_now() -> datetime:
return datetime.utcnow()
def default_comparer(x: Any, y: Any) -> bool:
return x == y
def default_sub_comparer(x, y):
return x - y
def default_key_serializer(x: Any) -> str:
return str(x)
def default_error(err) -> Exception:
if isinstance(err, BaseException):
raise err
else:
raise Exception(err)
|
src/transformer_deploy/backends/pytorch_utils.py | dumpmemory/transformer-deploy | 698 | 12785687 | <filename>src/transformer_deploy/backends/pytorch_utils.py
# Copyright 2022, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utils related to Pytorch inference.
"""
from typing import Callable, Dict, Tuple
import torch
from torch.onnx import TrainingMode
from transformers import AutoConfig, PreTrainedModel
def infer_classification_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for classification task
:param model: Pytorch model (transformers)
    :param run_on_cuda: True if inference should run on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).logits.detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer
def infer_feature_extraction_pytorch(
model: PreTrainedModel, run_on_cuda: bool
) -> Callable[[Dict[str, torch.Tensor]], torch.Tensor]:
"""
Perform Pytorch inference for feature extraction task
:param model: Pytorch model (sentence-transformers)
    :param run_on_cuda: True if inference should run on GPU
:return: a function to perform inference
"""
def infer(inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
model_output = model(**inputs).detach() # noqa: F821
if run_on_cuda:
torch.cuda.synchronize()
return model_output
return infer
def get_model_size(path: str) -> Tuple[int, int]:
"""
Find number of attention heads and hidden layer size of a model
:param path: path to model
:return: tupple of # of attention heads and hidden layer size (0 if not found)
"""
config = AutoConfig.from_pretrained(pretrained_model_name_or_path=path)
num_attention_heads = getattr(config, "num_attention_heads", 0)
hidden_size = getattr(config, "hidden_size", 0)
return num_attention_heads, hidden_size
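# Illustrative result (checkpoint-dependent): get_model_size("bert-base-uncased")
# returns (12, 768), since that config has 12 attention heads and hidden size 768.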
# TODO manage encoder / decoder architecture + cache
def convert_to_onnx(
model_pytorch: torch.nn.Module,
output_path: str,
inputs_pytorch: Dict[str, torch.Tensor],
quantization: bool,
var_output_seq: bool,
) -> None:
"""
Convert a Pytorch model to an ONNX graph by tracing the provided input inside the Pytorch code.
    Pytorch sometimes fails to infer the output tensor shape of models.
    In the ONNX graph, some axis names may be marked like "Divoutput_dim_1", which is a generated name,
    and there may be a warning:
** "WARNING: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference
for the exported graph. Please consider adding it in symbolic function." **
ex.: https://discuss.pytorch.org/t/bidirectional-lstm-and-onnx-runtime-warnings/136374
:param model_pytorch: Pytorch model (transformers)
:param output_path: where to save ONNX file
:param inputs_pytorch: Tensor, can be dummy data, shape is not important as we declare all axes as dynamic.
    Should be on the same device as the model (CPU or GPU)
:param quantization: model is quantized
:param var_output_seq: variable size sequence
"""
if quantization:
try:
from pytorch_quantization.nn import TensorQuantizer
except ImportError:
raise ImportError(
"It seems that pytorch-quantization is not yet installed. "
"It is required when you enable the quantization flag and use CUDA device."
"Please find installation instructions on "
"https://github.com/NVIDIA/TensorRT/tree/main/tools/pytorch-quantization or use:\n"
"pip3 install git+ssh://[email protected]/NVIDIA/TensorRT#egg=pytorch-quantization\\&"
"subdirectory=tools/pytorch-quantization/"
)
TensorQuantizer.use_fb_fake_quant = True
if hasattr(model_pytorch, "config") and hasattr(model_pytorch.config, "use_cache"):
use_cache = getattr(model_pytorch.config, "use_cache")
setattr(model_pytorch.config, "use_cache", False)
# dynamic axis == variable length axis
dynamic_axis = dict()
for k in inputs_pytorch.keys():
if var_output_seq:
            # the seq axis name is fixed so it matches the output seq axis name (for output shape prediction)
dynamic_axis[k] = {0: "batch_size", 1: "sequence"}
else:
            # if there is no specific requirement, each axis name is unique; this fixes some issues with the T5 model
dynamic_axis[k] = {0: "batch_size", 1: f"sequence-{k}"}
dynamic_axis["output"] = {0: "batch_size"}
if var_output_seq:
dynamic_axis["output"][1] = "sequence"
# replace int64 input tensors by int32 -> for ONNX Runtime binding API and expected by TensorRT engine
for k, v in inputs_pytorch.items():
if not isinstance(v, torch.Tensor):
continue
if v.dtype in [torch.long, torch.int64]:
inputs_pytorch[k] = v.type(torch.int32)
with torch.no_grad():
torch.onnx.export(
model_pytorch, # model to optimize
args=tuple(inputs_pytorch.values()), # tuple of multiple inputs
f=output_path, # output path / file object
opset_version=13, # the ONNX version to use, >= 13 supports channel quantized model
do_constant_folding=True, # simplify model (replace constant expressions)
input_names=list(inputs_pytorch.keys()), # input names
output_names=["output"], # output axis name, hard coded so only 1 output supported
            dynamic_axes=dynamic_axis,  # declare dynamic axes for each input / output
training=TrainingMode.EVAL, # always put the model in evaluation mode
verbose=False,
)
if quantization:
TensorQuantizer.use_fb_fake_quant = False
if hasattr(model_pytorch, "config") and hasattr(model_pytorch.config, "use_cache"):
setattr(model_pytorch.config, "use_cache", use_cache)
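# Illustrative call (a sketch; the tokenizer, model and output path below are
# hypothetical, not prescribed by this module):
#
#   tokens = tokenizer("some text", return_tensors="pt")
#   convert_to_onnx(model_pytorch=model, output_path="model.onnx",
#                   inputs_pytorch=dict(tokens), quantization=False,
#                   var_output_seq=False)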
|
runtime/stdlib/jitlog.py | cheery/lever | 136 | 12785693 | <gh_stars>100-1000
# There is a convenient PYPYLOG=jit-log-opt:logfile
# to enable jit logging from outside.
# But I like having the option to
# enable it from the inside.
from rpython.rtyper.lltypesystem import rffi, lltype, llmemory
from rpython.rlib.rjitlog import rjitlog
from rpython.rlib import jit
from space import *
import fs
module = Module(u'jitlog', {}, frozen=True)
def builtin(fn):
name = fn.__name__.rstrip('_').decode('utf-8')
module.setattr_force(name, Builtin(fn, name))
return fn
@builtin
@signature(fs.File)
def enable(fileobj):
try:
rjitlog.enable_jitlog(rffi.r_long(fileobj.fd))
except rjitlog.JitlogError as error:
raise unwind(LError(
error.msg.decode('utf-8')))
return null
@builtin
@signature()
@jit.dont_look_inside
def disable():
rjitlog.disable_jitlog()
return null
|
packages/core/minos-microservice-common/minos/common/datetime.py | sorasful/minos-python | 247 | 12785700 | from datetime import (
datetime,
timezone,
)
def current_datetime() -> datetime:
"""Get current datetime in `UTC`.
:return: A ``datetime`` instance.
"""
return datetime.now(tz=timezone.utc)
NULL_DATETIME = datetime.max.replace(tzinfo=timezone.utc)
|
wpca/tests/test_utils.py | radicamc/wpca | 123 | 12785717 | from itertools import chain, combinations
import numpy as np
from numpy.testing import assert_allclose
from wpca.tests.tools import assert_allclose_upto_sign
from wpca.utils import orthonormalize, random_orthonormal, weighted_mean
def test_orthonormalize():
rand = np.random.RandomState(42)
X = rand.randn(3, 4)
X2 = orthonormalize(X)
assert_allclose_upto_sign(X[0] / np.linalg.norm(X[0]), X2[0])
assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape[0]), atol=1E-15)
def test_random_orthonormal():
def check_random_orthonormal(N, M, rows):
X = random_orthonormal(N, M, rows=rows, random_state=42)
assert X.shape == (N, M)
if rows:
C = np.dot(X, X.T)
else:
C = np.dot(X.T, X)
assert_allclose(C, np.eye(C.shape[0]), atol=1E-15)
for M in [5]:
for N in range(1, M + 1):
yield check_random_orthonormal, N, M, True
yield check_random_orthonormal, M, N, False
def test_weighted_mean():
def check_weighted_mean(shape, axis):
rand = np.random.RandomState(0)
x = rand.rand(*shape)
w = rand.rand(*shape)
wm = weighted_mean(x, w, axis)
assert_allclose(wm, np.average(x, axis, w))
assert_allclose(wm, (w * x).sum(axis) / w.sum(axis))
for ndim in range(1, 5):
shape = tuple(range(3, 3 + ndim))
axis_tuples = chain(*(combinations(range(ndim), nax)
for nax in range(ndim + 1)))
for axis in chain([None], range(ndim), axis_tuples):
yield check_weighted_mean, shape, axis
|
xc/xc7/tests/serdes/generate_tests.py | bl0x/symbiflow-arch-defs | 183 | 12785746 | #!/usr/bin/env python3
"""
Creates the header file for the OSERDES test with the correct configuration
of the DATA_WIDTH and DATA_RATE
"""
import argparse
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
        '--input', required=True, help="Input top file to read"
)
parser.add_argument(
'--output', required=True, help="Output top file to be generated"
)
parser.add_argument(
'--data_width', required=True, help="Data width of the OSERDES"
)
parser.add_argument(
'--data_rate', required=True, help="Data rate of the OSERDES"
)
args = parser.parse_args()
with open(args.input, "r") as f:
lines = f.read().splitlines()
with open(args.output, 'w') as f:
print('`define DATA_WIDTH_DEFINE {}'.format(args.data_width), file=f)
print('`define DATA_RATE_DEFINE \"{}\"'.format(args.data_rate), file=f)
for line in lines:
print(line, file=f)
if __name__ == "__main__":
main()
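# Illustrative invocation (file names are hypothetical; DATA_RATE is typically
# "SDR" or "DDR" for a 7-series OSERDES):
#   python3 generate_tests.py --input top.v.template --output top.v \
#       --data_width 4 --data_rate SDR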
|
src/bitmessageqt/bitmessage_icons_rc.py | coffeedogs/PyBitmessage | 1,583 | 12785761 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Sa 21. Sep 13:45:58 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x03\x66\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x03\x08\x49\x44\x41\x54\x78\xda\x84\
\x53\x6d\x48\x53\x51\x18\x7e\xce\xfd\xd8\x75\x9b\x8e\xdc\x2c\xdd\
\x4c\x5d\x4e\xa7\xc9\xe6\xb7\xf6\x61\x61\x11\x14\x52\x16\xf5\xc7\
\x0a\x0b\xa2\x3f\x41\x51\x41\x61\x7f\x0a\x84\xa2\x9f\xfd\xeb\x67\
\x7f\xfa\x51\x44\x50\x91\x14\x15\x5a\x14\x41\x25\x6d\x44\x59\x68\
\x69\xd9\x74\xa6\x6d\xd7\x7d\x38\xb6\xdd\x6d\x77\xa7\x73\x2d\x4d\
\x84\xe8\x81\x87\xf7\x7d\xef\x7b\xde\xe7\xbe\xe7\x9c\xf7\x10\x30\
\x48\x84\x20\x4f\xb3\xf8\x8b\xb3\x1b\xe9\xbc\xe5\x38\x14\xb3\x74\
\x2f\x73\xab\x18\x47\x28\x45\x6f\x36\x0b\xff\xc2\x3a\xde\xc6\xb2\
\x06\xcd\x61\x24\x4b\x04\xbe\x87\x09\x48\x82\x89\x8a\xb8\x62\xaf\
\x76\x75\x5a\x4a\xcb\x9d\x31\x85\xae\x9d\x0d\xce\x15\x7c\xf1\xa3\
\xef\x67\x18\xd0\xc8\xe1\x1f\xf0\xcf\x01\x43\x53\xc4\xf1\x33\x04\
\x57\x20\x12\x29\xcc\x31\x5b\x84\x4d\x7b\xf6\x18\xb5\x78\xcc\x0f\
\x07\x23\x34\x0a\xcb\xea\x0a\x19\x4f\x32\xda\x19\xc7\x53\x04\x91\
\x99\x10\xc4\xde\xd3\xa7\x61\x30\x1a\xa1\xb2\xde\xb5\x98\xe7\xb0\
\x85\xe5\xc7\xb4\x02\x81\x2e\xa9\x66\xfe\xb9\x86\xd6\xd6\xfd\xee\
\xba\x3a\xcb\x3b\x8f\x47\x9e\x78\xe7\x8d\xc5\x13\x88\x4a\x3a\x1d\
\x94\x78\x1c\x82\x28\x22\xae\x6d\x8b\x47\x23\x5b\x7e\x6d\x5e\xa0\
\xdd\xf9\x77\xe7\xcf\x3e\xd3\x0d\xbd\xa7\x3a\xac\x2e\xa7\x15\x43\
\x9f\x6d\xd6\xae\x43\xde\xb0\x51\x44\x74\x6c\x78\x18\xf6\x8a\x0a\
\x68\x96\xc5\x1a\x4a\x16\x6a\x84\xad\xce\xc5\xfa\xae\xc1\x69\x53\
\x65\xbd\xdb\x8e\x74\x32\x09\xcd\xea\xf2\x4c\xb9\x0e\x5b\x94\x0c\
\xdc\xba\xe9\x6d\xda\xbe\xa3\xd1\xf3\xe4\xb1\x37\xf7\xb7\x40\xc1\
\xa2\x40\x26\xbb\x28\xc0\x75\xd5\x29\x23\xc9\xb9\xb9\x8d\x99\x74\
\x1a\x2a\xe3\xae\xfa\xf4\xc7\xf1\x92\xa2\x60\xce\xc4\x0f\x4b\x85\
\xb3\x0a\xcf\xfb\x6e\xd2\x57\xdd\x35\x1f\x73\x43\xc9\x47\x33\x25\
\x26\x4c\x15\xe7\x82\x27\xb5\x07\x41\x09\x87\x7c\x75\x66\xc8\x28\
\x66\xaa\x4b\x2a\xdd\x4d\xec\x42\x85\xf0\x6c\x20\xf5\x32\x3c\xfa\
\x4d\x3a\xd1\xe3\xd4\xd7\xb4\x54\xa5\x14\x17\xa6\xdb\xaa\x6d\x85\
\x5b\xda\x0b\x9e\xe6\x04\x12\xe1\x3c\xc1\x8e\x2c\xfd\xc2\x7f\x6d\
\xba\x8c\x41\x7d\x07\x1e\x99\x8e\x40\xa5\x24\xc0\x7d\xb8\xb1\x3e\
\x96\x26\xb6\x57\xaf\x07\xfc\x74\x77\x77\x45\xc1\x6a\x87\x79\x2a\
\x91\xc0\xd9\x8e\xa3\xb8\x3d\xe5\x41\xe9\xaa\x62\x93\xcb\x5c\x5e\
\x6b\xa0\xba\x35\xdf\x02\x93\xe2\x92\x39\xa0\xcd\xfd\xa6\xc3\x3b\
\x83\xf2\x2c\x69\x6c\x6e\x41\x24\x1a\x13\xef\x8f\xb4\xbe\x1f\xf7\
\x49\x93\x49\x76\x26\xb2\x2c\x43\xb3\x1a\xd4\x54\x46\xaa\x36\x97\
\xb9\x69\x54\x69\x23\x7c\x77\xdf\x0a\x70\xe2\x7e\x83\x24\xd4\x1c\
\xeb\x74\xef\x5b\x19\x19\x2a\xb6\x4b\x32\xc6\x15\x0b\x82\xf9\x95\
\xa1\xab\x0f\xfb\x3d\x49\xce\x17\x6b\x19\xf6\x0e\x0c\x6e\xf0\x6f\
\xa3\x69\x55\x0f\x45\x35\xd0\x74\x36\x07\xa3\xd1\x27\x84\x3f\x70\
\xe7\x4c\xe7\xfa\xf2\xee\xa6\x2a\xeb\x5a\x4b\x7e\x9e\xe4\xf3\x4d\
\xe3\xd2\xde\x52\x9c\xbf\xeb\x43\x59\x99\x15\x72\x28\x9a\x7a\xfb\
\xe9\xfb\x68\x5f\xff\xeb\x7b\xea\x83\x93\xd7\x97\x0d\x9e\xcc\x41\
\x89\x36\xd7\xda\xcd\xf5\xd9\x4c\x76\xfe\x2d\x2d\x6f\x97\xaa\xd0\
\xd5\x39\xac\x35\x90\x4c\xe5\xfc\xe6\x9e\x11\xed\x41\x2d\x61\x90\
\xf0\xf5\x87\x2e\xc0\xda\xd0\x4e\x79\x29\x41\x05\x7d\x0c\x82\x3e\
\xde\x36\x7d\xf5\xcd\xcb\xa2\xe3\xeb\x48\x26\x69\x20\x99\x84\x91\
\xa8\x8a\x1e\x3f\xbc\x2f\xe8\xec\xe8\x45\x1a\x99\x04\x8d\x4c\x2c\
\xb6\x40\xfe\x0c\x85\x05\xff\x87\xac\xfd\x71\xf9\xc7\x5f\x02\x0c\
\x00\x00\x31\x44\x70\x94\xe4\x6d\xa8\x00\x00\x00\x00\x49\x45\x4e\
\x44\xae\x42\x60\x82\
\x00\x00\x02\xaf\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x51\x49\x44\x41\x54\x78\xda\x9c\
\x53\xcf\x6b\x13\x51\x10\xfe\x36\xfb\x62\x8d\x69\x48\x62\x9b\x18\
\x8d\xab\x5d\xd3\x84\xa2\x10\xbd\x48\x0f\x62\xa9\xd8\x93\xe0\x49\
\x0f\xa5\x20\x52\xb0\xe0\x7f\xe0\x45\x3c\xf5\xa6\x77\x7b\xe8\x55\
\x28\x41\x3d\x78\x28\x7a\xf0\x47\x51\xa4\xbd\x58\x0d\xa8\x60\x5a\
\x13\x51\xd0\x43\x89\x69\xf3\x63\xb3\xc9\xee\x3e\x67\x9e\xd9\xa2\
\x08\x4a\x1d\x18\xde\xdb\x99\xf9\xbe\xf7\xcd\x7b\xb3\xda\x8d\x85\
\x05\xb0\x69\x9a\x76\x9e\x96\xfd\xf8\xbb\x3d\x71\x1c\x67\xad\xdb\
\xe9\xe0\xdc\xf3\x19\x48\x0a\x08\xd7\x75\xfd\xe4\x81\xeb\x93\x93\
\x73\x0e\x7d\x73\xc2\x95\x12\x5d\xda\x77\x3d\x4f\xed\x2b\x95\x0a\
\x1e\x15\x8b\x57\xa5\x94\x1a\xa5\x4b\x3e\x28\x30\xf1\xf8\x32\xcc\
\xb5\x7b\x20\x56\x4d\x72\xb1\xe3\xc0\xe9\x76\xe1\xf6\xbc\xd3\x6e\
\xc3\x6a\x36\xd1\x68\x34\x30\x3b\x35\x35\x47\xb9\xb3\x44\x92\xf5\
\x09\x04\xfb\xf0\xa7\x07\x57\x5a\x32\x78\x41\xd3\x2e\xe1\xc5\xea\
\x2a\x3c\x22\x8a\xc5\x62\x68\xb5\x5a\x38\x3e\x32\xa2\x0a\xab\xd5\
\x2a\xee\x2c\x2e\x22\x9f\x4c\xde\x5e\x29\xcc\x3e\x85\x8e\x02\x85\
\xe7\x05\xa9\x1b\x44\x40\xcf\x65\x8f\x9e\x9c\x60\x6d\x99\x4c\x06\
\x74\x82\x22\x89\xc7\xe3\x08\xea\xba\x22\x38\x35\x3a\x8a\x0e\xa9\
\x0b\x85\xc3\x18\x68\x5d\x3c\x23\x1f\xbe\x7a\x2d\x3d\x77\x50\xb8\
\x12\xc6\x5e\xe3\xd8\xf0\x26\x5d\x4c\x40\xd3\x54\xaf\xd1\x68\x54\
\x9d\xc8\x24\x1f\x89\x8c\x09\x39\xc6\x8a\x4e\xe4\xf3\xb0\x6d\x1b\
\x49\xc2\x54\x2b\x45\x43\xb8\x1e\x0e\xed\x8e\x26\xf7\x59\x56\x1b\
\xbf\x2a\xe0\xd3\x7d\x25\xb2\x47\xe2\x2b\xe2\x5a\xc6\x30\x96\x14\
\xc8\xa1\x60\x38\x16\x6a\x12\x3b\x3d\x25\xca\xe5\xf2\x36\xc0\x57\
\xc2\x2b\x7f\xb3\x82\xc3\xa9\x14\xb8\x96\x31\x8c\x15\x8e\x87\x5c\
\x24\x65\x26\xac\xf7\x75\x94\x0b\xd7\x30\x40\xb7\xde\x97\x1b\x47\
\x5f\x76\xec\x37\x25\xf6\x87\x25\x04\x4b\x4b\xf8\xba\xbe\x07\x56\
\xdb\x46\xc4\x34\x13\x8c\xe5\x16\x44\x24\x91\x4e\x4d\x27\x7e\x3e\
\x0b\x4f\xd2\xca\xf2\x7d\x38\xc2\x50\x40\x7e\x0d\x6e\x63\x73\xf9\
\x2e\x4e\x8f\x8d\xab\x9a\x69\x53\x2d\x29\xc6\xb2\x02\xb1\xb5\xb1\
\x41\x7d\x59\x2a\xda\x4f\x00\x23\x9d\xc6\x97\x67\x37\x15\x41\x93\
\x62\x3c\x58\xe6\x90\x89\x66\xbd\x8e\x46\xad\xa6\xea\x42\xa1\x10\
\x1c\x45\xe0\x4a\xe1\xf0\xf0\x90\xb3\xd5\x88\xcc\xc8\x66\x71\xd0\
\x3c\xf2\xc7\x1c\x7f\x2e\x6d\x0f\xa0\xaa\x67\xac\xe8\x7a\x08\x76\
\x3a\x34\x71\xe4\xbe\xad\xbf\x7d\x87\x7f\x99\xae\x0b\x30\x56\x34\
\x6c\xf4\x4b\xc9\x5a\x74\xec\xc4\x18\xc3\x58\xf1\xe6\x9b\xac\x6c\
\xcd\xdf\x7a\x89\xff\xb0\xf2\x77\x54\x78\x76\x76\x91\xc7\x7a\xff\
\xc5\x4e\x8c\x2f\xad\xf6\x43\x80\x01\x00\xc1\x52\x4e\xcc\x97\x5f\
\x6c\x5a\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xcc\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x02\x6c\x49\x44\x41\x54\x38\
\xcb\x4d\xd2\xbd\x6b\xdd\x65\x14\x07\xf0\xcf\xaf\xb9\x49\x9a\x10\
\x69\x08\x06\x6a\x2c\x82\xf4\x25\x2e\xed\x50\x87\x4e\x85\x2e\x42\
\xb2\x34\xe0\xe4\x58\x10\xea\x58\x69\x57\x47\x57\x83\xd2\x49\xc1\
\xad\xfe\x01\xba\x28\xb8\xa8\x93\x83\xa0\x4b\x41\x89\x10\x1b\x43\
\x20\x37\x37\xf7\xe6\xbe\xfe\xee\xef\x2d\xc7\xe1\xb9\x6a\x1f\x38\
\x1c\x9e\xf3\x7c\xbf\xe7\x7c\x9f\x73\x4e\x16\xb7\x6e\x71\xf5\x6a\
\x2d\xae\x5f\x2f\x82\x10\x0f\x1e\x6c\xc6\x8d\x1b\xc4\xc5\x8b\xf1\
\x17\x21\xee\xdf\xbf\x19\x77\xee\x44\xac\xad\x45\xcc\xcf\x47\x36\
\xc4\x11\x91\xe3\x67\x64\xb1\xb3\xc3\xa5\x4b\xbf\xd8\xdf\xff\xd1\
\xf3\xe7\x4f\xc4\xce\x4e\xc4\x95\x2b\x11\xab\xab\x31\xa0\x16\x1b\
\x1b\x11\x44\x45\xfc\x40\x64\x07\xc4\x18\x2f\xb0\xc7\x6e\x16\xdb\
\xdb\xac\xaf\x7f\xab\x69\xb6\x74\x3a\x9c\x9d\x31\x1e\x27\xdf\xed\
\x2e\xb6\x8c\xc7\x7b\x8e\x8f\xaf\x39\x3c\xe4\xf4\x94\xf3\x73\xd0\
\x8f\xd0\xa6\x10\xcb\xcb\x4f\x83\x28\x67\x56\x10\x6d\xe2\x27\xe2\
\x19\x91\x75\x92\x6e\x6d\x86\x7d\x56\x06\xe8\xa2\xe6\x83\xd7\xf9\
\x22\x8b\x7b\xf7\xd8\xd8\x60\x71\xf1\x13\x79\xfe\xc8\xd9\xd9\x6f\
\xda\xed\xf7\xb4\xdb\x7f\xea\xf5\xb4\x5c\xbe\xbc\x60\x7e\xbe\xd0\
\xef\xd3\xe9\x30\x1a\xbd\x6d\x30\xd8\x33\x99\x7c\xa7\x28\xb6\x5b\
\xca\x72\xa2\xdb\xe5\xe0\x20\x89\xac\x6b\xea\x5a\x33\x1c\x6e\x9d\
\xb1\xd9\x72\x7c\x3c\xa7\xdd\xe6\xf0\x90\xe9\x14\x54\x11\x4e\xd0\
\xe1\xab\x96\xa3\x23\xfa\xfd\xf4\x18\x21\x90\xe3\x24\x89\x7f\x23\
\x8b\x56\x2b\x9a\xba\x56\x63\x0e\x25\xfe\xc6\xef\x18\xf0\x59\xd6\
\xe6\xd3\x21\x8f\x4a\x34\x29\xe8\x45\xfa\xb6\x55\xb2\xd6\x84\x0f\
\x8f\xd9\xef\x26\xa0\x5e\x02\x8d\x96\x79\xe5\x35\x64\x71\xf7\x2e\
\x6b\x6b\xac\xac\xb0\xb0\xf0\x58\x96\x7d\xac\xae\x97\x14\x45\xd2\
\x35\x9d\x52\x14\xe4\x39\x93\x49\x6e\x32\xf9\xc8\x64\xb2\x2b\xcf\
\x29\xcb\xd9\x42\x2c\x2d\x7d\xee\xc2\x85\x87\xaa\x2a\x01\x87\x43\
\x46\xa3\x44\x2e\x4b\x9a\x26\x59\x59\xa6\x58\x9e\x8b\xe9\x74\xb7\
\xe2\x49\x4b\x51\x3c\x55\x96\x0f\x4d\xa7\x89\xd8\xeb\xa5\x4d\xc8\
\x73\xaa\x8a\x08\x20\xcb\xa8\x6b\x65\x84\x1c\x13\x1e\x17\xcc\x65\
\x71\xfb\x76\xa1\xae\x17\xe4\x79\x5a\xa3\xe1\x30\x91\x9b\xe6\x7f\
\x32\xff\xb5\x77\x34\x6b\xd4\x20\x25\x39\x69\x39\x3a\x3a\x50\x55\
\xd7\x54\x55\xaa\x58\x96\xa2\x69\xbc\x7c\xce\x67\xed\x1f\xa6\xe1\
\xe9\xe2\x0c\x05\x07\x59\xc3\xcd\x29\xbf\x56\xcc\xd5\xb3\x4a\x90\
\xcd\x7c\x83\xe9\x4b\xe4\x53\xf4\x53\xc2\x66\x81\xb7\xb2\x6e\x92\
\xb5\x30\xe6\xeb\x9c\xad\x7f\xe7\xd9\xa0\x4a\x55\xe4\x33\xc9\xa3\
\xd9\x1d\xdf\x2c\xf3\xee\xab\x34\x59\x0f\xe3\x19\xa0\x9f\xfc\x9b\
\x23\xde\x1f\xf1\x4e\xce\x66\x91\x12\xfd\xd1\xf0\xfd\x1c\x5f\x2e\
\xb1\x7f\x09\xeb\x33\xfb\x07\x6a\x4f\x76\xe7\x35\x05\x41\x4b\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\x24\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x01\xc6\x49\x44\x41\x54\x78\xda\x8c\
\x53\x3d\x4b\x03\x41\x10\x7d\xd9\x3b\xf5\x44\x45\x8b\x44\x34\xa2\
\xc4\x42\x49\x2b\xe8\xcf\xb0\xf3\x07\x28\xd6\x82\x9d\x20\x58\x88\
\x0a\x36\x82\x8d\x95\xa0\x58\x0b\x16\xda\x8b\xd8\x08\x09\x44\x0b\
\x8b\x54\x22\x7e\x44\x10\x41\x42\xce\xe4\x2e\xb7\x1f\xce\x9c\xe6\
\xcb\x5c\xc0\xe5\x06\x6e\xdf\xbe\xf7\x66\x76\x76\x37\x96\xdd\x05\
\x62\xc0\x12\x80\x14\xfe\x3f\x1e\x0d\x70\x3c\xbb\x66\x60\x1b\xfa\
\xa3\x6f\x72\x6e\xf5\x62\x03\x5a\x03\x4a\x75\x96\x59\x16\x20\x04\
\xb2\xfb\xf3\x5b\x35\x48\x84\x06\x06\xe2\x25\x93\x01\x1b\x18\x29\
\x61\xaa\x7e\x7b\x10\xce\xeb\xcc\x63\x3e\xeb\x42\x03\xc5\x49\x35\
\x44\x6f\x3c\x8e\xfb\xcb\x4b\xca\x22\x60\x44\x7b\x30\xce\xeb\xcc\
\x63\x3e\xeb\x78\xd8\xfa\xc7\xc9\x1a\x1a\x4e\xa0\xe2\x96\x70\x73\
\x7e\x51\xaf\xd8\xf3\x3c\x38\x8e\x53\x9f\x4f\x4c\x4f\x81\x79\xa4\
\xb1\x6a\x98\xfd\xeb\x24\x0c\xed\x7d\x38\x39\x1a\x46\x08\x74\x75\
\xe3\x29\x9f\xc7\x44\x3a\x0d\x1d\x54\xeb\x26\xcc\xe3\x0a\xfe\x1a\
\x58\x5a\x05\x50\x32\x68\x34\x4c\xc4\x30\xd0\xd7\x87\x28\x9c\x34\
\x56\xbb\x81\x54\xd0\xdc\xa8\xdf\x11\x13\x16\x1d\x08\x63\x11\x78\
\x94\x81\x51\x92\xb2\x35\x88\x42\x59\x90\x94\x39\x0a\xef\x50\x41\
\x00\xdd\x54\xaa\x1f\x28\x2c\xf6\x6c\xa2\xfa\xa6\xa8\x99\x92\x22\
\x80\xef\x2b\x64\xa6\x8f\x5a\x0d\xa4\xaa\x19\x48\xda\x6b\x23\x53\
\xd9\xf5\x70\x32\x53\x6e\xba\x45\x22\x0c\xf7\xae\x04\xd2\x44\x54\
\x10\x96\xda\xa8\xc0\xfd\x2c\xc2\xae\x54\x90\xcb\xe5\x90\x48\x24\
\xc2\x7e\xa4\x52\x29\xe8\x62\xa9\x53\x0f\xa8\x59\x4d\xd7\xd8\x25\
\x62\x77\xb9\x8c\x34\x1d\x63\xbd\x2a\x9a\xeb\xd2\x57\xab\xc1\xdd\
\x23\x90\x4e\xc2\x79\x79\x7a\xa5\x9b\xaa\x9a\x7a\xe0\xe3\xe3\x74\
\xa5\xed\x39\x0c\xc6\x87\xe0\x55\xe1\xe4\x0b\xc0\x02\x1b\xec\x9c\
\x61\xf0\x60\x19\xfd\xe3\xe3\xc9\xd6\xf3\x1e\x1b\x89\x7e\x4f\x76\
\x17\x6e\xaf\xd1\xcf\xba\x6d\xa0\x68\xb3\xe9\xfd\x33\x0a\x87\x7b\
\xeb\x57\xff\x7d\xcb\x0f\xef\x28\xb0\x8e\xa2\xf8\x2d\xc0\x00\x14\
\x2c\x1a\x71\xde\xeb\xd2\x54\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x06\xe3\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x06\x85\x49\x44\x41\x54\x78\x9c\xed\
\x9d\x4b\x72\xe3\x38\x10\x44\xc1\x8e\x5e\xf8\x3c\x3a\x8e\x0f\xa8\
\xe3\xf8\x3c\xda\x79\x16\x0e\x4c\xd3\x10\x89\x6f\x7d\xb2\x0a\xf5\
\x96\x0e\x59\x2c\x24\x12\x29\x90\x20\xc1\xe3\xf3\xe3\xeb\x3b\xa5\
\x94\x9e\xaf\xc7\x91\x0c\x92\xeb\xb7\x8c\x65\xed\x8f\xb2\x03\x2c\
\x37\x46\xbb\x86\x59\xac\x69\x7e\xd6\xfa\x28\xff\x90\xb1\xd6\xa8\
\x8c\x45\x23\x59\xd1\xfa\x4a\xdb\x5b\x03\x65\xac\x34\xae\xc4\x92\
\x91\xd0\x35\xbe\xd3\xf2\xf9\x7a\x1c\x47\xeb\x43\xe7\x0f\x53\x17\
\x26\x81\x05\x23\xa1\x6a\xdb\xe3\x89\x6e\x03\x9d\xff\x69\xb5\x30\
\x0d\x90\x8d\x84\xa6\x69\x8f\x56\xb9\xe6\x5f\x85\x8f\x88\x8c\xd6\
\xe8\x5e\x10\x8d\x84\xa2\xe5\x4c\xff\x4f\x1b\xa8\xfc\x22\x6b\x20\
\x19\x49\x5b\xc3\x51\x2d\xce\xf5\xbe\x15\x3e\x2b\xac\xb6\x08\xb3\
\x20\x18\x49\x4b\x3b\x8a\xbe\x26\x33\xd0\xd5\x97\x5b\x42\xd3\x48\
\xd2\x9a\xad\xb4\xb5\xac\xf5\xb2\x70\x0a\x31\xc3\x48\xfd\x48\x69\
\xc5\xd1\xaf\x6c\x06\xba\x3b\xa0\x15\x24\x8d\xc4\xad\x11\x55\x5b\
\xae\xea\xbc\x2d\x9c\x5a\x40\x8b\x46\x92\x32\x11\x97\x36\x12\x7d\
\xf8\x97\xf2\x00\x35\x2c\x2d\xda\x5e\xad\x0f\x22\x4c\xb6\x7b\xe1\
\xa8\xf5\xae\xdf\xaa\x9d\xc9\x29\x1a\xa2\x91\x6a\x97\xec\x5b\x9f\
\x59\x81\x4a\x0b\x8d\xfe\x12\x4b\xa0\x12\xa4\x44\x9a\xb9\x80\x86\
\x94\x48\xdc\xb5\xd4\xfa\xa8\xd9\x79\xd6\xe7\x01\x35\x28\x96\x6f\
\x34\xcf\x58\x11\xfa\x46\x2d\x81\x4a\x24\x13\x89\xe3\x2c\x53\x32\
\x91\x90\xce\x10\xbb\x3a\xcb\xcb\xb5\x11\x89\xab\xec\x9c\xcb\x41\
\x88\xfd\x00\x93\x40\x25\x94\x89\xa4\x31\x62\x29\x8f\xa9\x35\xdf\
\xea\xd1\x9e\x75\x64\x51\x32\x63\x24\xce\x0b\x68\x94\x35\xdc\x7d\
\xbf\x05\xcd\x61\x13\xa8\x64\x24\x91\xb4\x85\x3f\x33\x93\x48\x08\
\xf5\xf7\x0e\x9a\xa1\x91\x85\xd0\xb0\xcc\x55\x03\xb9\xea\xa3\x9c\
\x8f\xd5\xee\x3f\x47\xd7\xf7\x0a\xb3\x06\xca\x48\x5c\x25\x46\x9a\
\xd0\x4b\x30\xd2\xde\x3f\x5c\x5f\x2c\x05\x72\x47\xd4\x40\xd4\x72\
\x86\x21\x03\xa1\x62\xad\x33\x3e\x3f\xbe\xbe\x51\x8d\x3f\xaa\xe5\
\xb0\x81\x50\x3b\xeb\xf9\x7a\x1c\xa8\xb5\x65\x90\x8d\x33\x8b\x99\
\xb3\xb0\x5e\x10\x27\xa4\x48\xb5\xd4\x98\x19\x80\x53\x3f\x61\xe8\
\x23\x3d\x25\x8c\x44\xf2\x98\x38\x25\xee\x12\xa8\xc4\xfb\x5a\x15\
\x15\xb3\x83\x6d\x7a\x12\xad\x3d\xba\x47\x91\x48\xa4\x1d\x12\xa7\
\xc4\x7d\x02\x95\x78\x5a\xab\xa2\x62\x65\x60\x2d\x9d\xc6\x5b\x4b\
\xa1\x33\x14\x89\xb4\x63\xe2\x94\x6c\x97\x40\x25\x56\xd7\xaa\xa8\
\x58\x1d\x44\xcb\x17\x12\x2d\xa7\xd0\x99\x9e\x44\x8a\xc4\x79\x07\
\xfe\x66\xee\x1e\x76\x5b\xab\xa2\x82\x42\x37\x92\xa5\x0c\x2f\x29\
\x74\xc6\x63\x9b\x38\xd8\x7e\x0e\x74\x45\xa4\x4f\x3f\x64\x8b\xa9\
\x1e\x46\x6c\xcc\x71\xc6\x89\x04\x4a\x7b\x24\xce\x19\xca\xc1\x4e\
\x7a\x3b\x87\xb5\x14\x8a\xc4\x59\x67\xcb\x04\xda\xd9\x34\xd4\x83\
\x9c\xfc\x86\x32\xe4\x14\x8a\xc4\xa1\x67\x8b\x04\x0a\xd3\xfc\xc0\
\x31\xb8\x59\x6e\x69\x45\x49\xa1\x48\x1c\x7e\x5c\x26\x50\x98\xe6\
\x1d\xae\x41\xcd\x76\x53\xbd\xd6\x6e\x1b\x61\x1e\x59\x4c\xec\xcd\
\x17\xac\xc1\x39\x98\xff\x46\x27\x07\x2b\xb8\x9c\x03\x59\xc3\xca\
\x26\x9b\x57\xdf\xcf\xfe\x60\x21\xca\x19\x19\x32\x12\x1d\xcd\xf5\
\xdd\xac\x06\x0a\xf3\xe8\x21\x65\x4a\x91\x47\x9b\xc3\x48\x6d\xac\
\xa6\x90\xab\xd3\xf8\xe0\x07\x49\x33\x8a\x6d\xae\x10\x86\x6a\x63\
\x31\x85\x5c\x2f\x65\xec\x88\xb4\x09\x45\xb7\x77\x09\x63\xb5\xb1\
\x96\x42\x5b\xdd\xce\xe1\x1d\x0d\xf3\x89\x6f\x30\x15\x06\x6b\x63\
\x29\x85\xb6\xbe\xa5\xd5\x13\x5a\xa6\x53\xd9\xe2\x2e\x8c\xd6\xc6\
\x4a\x0a\xc5\x63\x3d\x0e\xd0\x34\x9b\xda\x26\x9b\x61\xb8\x36\x16\
\x52\x28\x1e\x6d\x36\x8e\xb6\xc9\x54\xb7\xf9\x0d\xe3\xb5\xd1\x36\
\x48\x8b\xd8\xde\xc5\x30\x08\xe6\x52\xdf\x68\x3c\x0c\xd8\x06\xc1\
\x28\x77\x6c\xbb\xc5\x9d\x75\x50\x4c\xa5\x9e\x40\x29\x85\x11\x7b\
\x40\x31\x4c\xc9\x36\xdb\xfc\x7a\x02\xc9\x4c\x10\x09\x94\x52\x18\
\xb2\x07\x24\xe3\x64\xa6\xde\xb5\x65\xf5\x29\x82\x80\x1e\xf6\x97\
\xb5\x05\xbe\x89\xe7\xc2\x0c\x82\xf4\x0b\x00\xf7\xbe\xb0\x98\x0b\
\xb5\x41\xfa\xd5\x80\x7a\xe5\x65\x98\x47\x0f\xd1\xd3\xf8\x30\x92\
\x3e\x28\x29\xd4\x6d\xa0\x30\x8d\x5f\x54\x96\x32\xc2\x50\xfa\x20\
\xa4\x50\x97\x81\xc2\x2c\x7e\x51\xbd\x9d\x23\x8c\xa5\x8f\x76\x0a\
\x35\x0d\x14\x26\xf1\x0b\xc4\x2d\xad\x61\x30\x7d\x34\x53\xa8\x6a\
\xa0\x30\x87\x5f\xa0\x1e\xeb\x09\xa3\xe9\xa3\x95\x42\xb7\x06\x0a\
\x53\xf8\x05\xf2\xd1\xe6\x30\x9c\x3e\x1a\x29\x74\x69\xa0\x30\x83\
\x5f\xa0\xb7\x77\x09\xe3\xe9\x23\x9d\x42\x6f\x06\x0a\x13\xf8\xc5\
\xc4\x16\x77\x61\x40\x7d\x24\x53\xe8\x97\x81\xa2\xf3\xfd\x62\x6a\
\x9b\xdf\x30\xa2\x3e\x52\x29\xf4\xbf\x81\xa2\xd3\xfd\x62\xf2\x55\
\x07\x61\x48\x7d\x24\x52\xe8\x4f\x4a\xd1\xd9\x9e\xe1\x36\xd1\xf1\
\xf9\xf1\xf5\xcd\xd9\xc1\xda\xf7\xab\x04\xbc\xc4\x1b\x0b\x15\x79\
\xbe\x1e\x22\x0f\x76\x72\x06\x04\xcc\xb3\xf1\x3b\xf1\x7c\x3d\x0e\
\xc9\x9f\x75\x4e\x93\xb2\x3d\x99\x1a\xe9\xf3\x8e\xc7\xb9\x60\x24\
\x90\x00\xd2\x89\x73\x05\xd7\x80\x66\x49\xa0\x48\x9f\x1f\xb4\x4d\
\x23\x41\x24\x10\x03\x08\x89\x73\x05\xc7\xc0\x26\x4f\xa0\x9d\xd3\
\x07\xd1\x34\xdc\x44\x02\x11\x80\x9a\x38\x57\x50\x0f\x70\xd2\x04\
\xda\x2d\x7d\xac\x98\x86\x93\x48\xa0\x09\x2c\x25\xce\x15\x94\x03\
\x9d\x2c\x81\x76\x48\x1f\xcb\xa6\xe1\x22\x12\xa8\x13\x6f\xe6\x81\
\x7a\xb0\xd0\x6b\xfa\x9c\x4d\xf3\xf9\xf1\xf5\xed\xb5\x9d\x2b\x44\
\x02\x5d\x50\x9b\xe3\x78\x32\x12\x45\x3b\x96\xe7\x40\x5e\xc4\x4c\
\x69\xec\x67\x2a\xb7\xdb\xdb\x4f\xdb\x28\x91\x40\x69\xed\xac\xca\
\x7a\x22\xad\xd6\xbe\x94\x40\x96\x85\x4b\x89\x36\x3d\x76\x4d\xa4\
\x2d\x13\x88\xf3\x3a\x8e\xc5\x44\x5a\xa9\x77\x3a\x81\xac\x89\x94\
\x92\x6c\x3a\xec\x92\x48\x5b\x24\x90\xe6\x95\x63\x2b\x89\x34\x5b\
\xe3\x54\x02\x59\x10\x24\x25\xac\xd1\xef\x35\x91\x5c\x26\x10\xf2\
\x5a\x15\x72\x22\xcd\xd4\x35\x9c\x40\xa8\x8d\x4f\xc9\xd6\xe8\x46\
\xd6\x71\x04\x37\x09\x64\xc9\x3c\xc8\x8c\x1a\x7b\xc8\x40\x68\xa3\
\xc6\xfa\x5a\x55\xfe\xa9\xb5\x6c\xfe\xa1\xc2\x51\x3a\xa8\x34\x4e\
\xeb\x33\x2b\x70\xb4\xb9\x56\x1b\xa2\xc6\x35\xba\xe7\x40\x08\x0d\
\xb3\xbe\x56\xd5\x53\x4b\xfe\x0c\x82\xde\x3d\x0c\x77\x88\x06\x14\
\x23\x76\x65\xad\x6b\xe6\xff\x28\x8e\x4d\x75\xfc\x59\x7a\xea\xee\
\x4a\x20\xad\x46\x58\x5f\xab\xa2\x38\x16\x7a\x22\x75\x35\x50\xba\
\xf8\x99\x9f\x2a\xae\x63\x20\xbd\x16\x3d\x25\xbc\xbe\x68\x26\x90\
\x64\xc1\xd6\xd7\xaa\x24\xea\x47\x4b\xa4\x66\x83\xd1\xb7\x1f\xa1\
\xaa\xaf\x76\x07\xe2\xec\xff\x4a\xa0\xdd\x3f\xd5\x04\xe2\x2e\x0e\
\xe9\x0c\x69\x26\x91\x10\xea\xd7\x4e\xa4\xaa\x00\x5c\x45\x71\x4c\
\x8e\xa9\xa9\x75\x0c\x82\x71\xee\x90\xee\x33\xd1\x0b\x5a\x1c\xc2\
\x7b\x9d\xa3\xad\x42\xad\x8b\xaa\x81\x3c\x9c\x95\x58\x32\xcf\x19\
\xee\x7e\x9c\x9e\x38\xce\x1e\x90\x1a\xb4\xd3\x5a\x54\xb8\x2e\x88\
\xb2\x18\xc8\xcb\xfe\x7f\x35\x76\x35\x52\xd9\xee\x37\x11\x56\x0e\
\xa0\x21\xaa\xf6\xf5\x90\xdd\x8c\xc4\x62\x20\xef\xd7\x41\x7a\xd8\
\xc9\x48\xe7\xb6\xfe\x6a\xf4\xe8\x97\x21\x88\x86\x62\xa0\x0c\x82\
\x26\x33\x8c\xe8\xb8\x6c\x20\x24\x91\xd0\x0c\x94\x41\xd2\x68\x84\
\x51\x0f\x34\x6f\xcc\xba\xfa\x27\x24\x50\x0d\x94\x41\xd4\xac\x87\
\x96\xae\x43\x06\x42\x16\x01\xdd\x40\x19\x64\x0d\x6b\xb4\x7c\x51\
\x5d\x47\xb1\xd0\x68\x2b\x06\xca\x58\xd0\xf4\x8a\xbb\x25\x9d\x4b\
\x03\x59\x6a\xa4\x35\x03\x65\x2c\x69\x7c\xa6\xd4\xfb\xd7\xdb\x62\
\x2c\x36\xca\xaa\x81\x32\x16\x35\x4f\xe9\x9f\xee\xff\x01\x8b\x65\
\xc9\x17\x1c\x9e\xef\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\
\x60\x82\
\x00\x00\x02\xf0\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x92\x49\x44\x41\x54\x78\xda\x84\
\x53\x5f\x48\x53\x61\x14\x3f\xf7\xee\x6e\x22\x0b\x4c\x6b\xac\x60\
\x6a\xb5\x39\xdd\x06\x36\xd9\x70\x6b\xb8\x31\xd4\xb2\x35\x82\x3d\
\x4c\x50\x50\x08\x1f\x24\x84\x24\x56\xea\xc3\xdc\x2a\x63\x36\x1d\
\x4d\xa1\xb0\xf0\xc1\x17\x1f\x7c\xb0\x3f\x0f\x51\x96\x12\xe8\xa6\
\x84\xa6\xeb\x45\xa9\x84\x22\x25\x08\xb7\x97\xec\x45\xe6\xee\xdd\
\xed\x7c\x97\xb5\x36\x1a\x74\xe0\xc7\xe1\xfb\x9d\xf3\xfb\x9d\xf3\
\xdd\x8f\x4b\x41\x4e\xd0\xc5\xc5\x20\xa9\xa8\x80\xc3\xdd\x5d\x48\
\x1f\x1c\x40\x75\x43\x03\x68\x6d\x36\xa0\x45\x22\xa0\x69\x5a\x85\
\x2d\x8d\x90\x1f\x3f\x18\x28\x10\x4a\xa3\x11\xaa\x4d\x26\x41\x98\
\x89\xaa\x74\x3a\xdd\x38\x3b\x34\xf4\xf8\x0f\x41\x21\xdc\x7e\xff\
\xd5\x3c\x83\x53\x7a\xbd\x20\x16\x31\x79\x74\x55\x9a\xe3\x9a\x66\
\x03\x81\x47\xd1\xf5\x75\xf8\xb0\xb5\x05\x75\x3a\x1d\x58\xb1\x0f\
\x79\x4a\xe8\x2c\xaf\xab\x83\xd3\x48\x30\xcc\x3f\x0b\x55\x71\x1c\
\xd7\xfc\x34\x18\x9c\xc0\x0c\x89\xfd\x7d\x28\x2d\x2b\xa3\x30\xf3\
\xa4\xc8\x11\x03\x53\x47\x07\x88\xc4\xe2\x42\x37\x51\xe3\x84\xf3\
\xcf\x42\xa1\x87\xc9\x64\x12\x44\x78\x1d\x8d\x52\x09\xdf\xe3\x71\
\xbe\x5c\x2e\x17\x1a\xb0\x4e\xd3\x50\x38\xd4\x2c\xc7\x5d\x78\x82\
\xe2\x58\x2c\x06\x57\x70\xc8\xd6\xe6\x26\x9c\x51\x28\xc0\x6e\x30\
\x80\xba\xb2\x12\x2e\x79\x3c\xd7\x70\x83\x85\x42\x06\xd5\x1c\xcb\
\xb6\x3c\x0f\x87\x1f\xbc\x5f\x5b\x83\xbb\x7e\x3f\x1c\xe0\x8b\xdc\
\x1a\x1c\x24\x2b\x0b\x1f\xd6\xd1\xdb\xdb\x8b\xd3\x17\xf0\x1e\xdb\
\x4c\x01\xf1\xc5\x17\x13\x13\xe3\xef\x56\x56\xe0\x8e\xd7\x9b\x2d\
\x04\x46\x47\x41\x52\x54\x04\x2d\x3d\x3d\xd7\x29\x8a\x9a\x47\xa3\
\xcf\x84\xcf\x35\xa8\x61\x59\xd6\xf1\x6a\x72\x32\xbc\xbc\xb4\x04\
\xbe\xfe\xfe\x6c\x61\x64\x6c\x0c\x8c\xf5\xf5\xd0\xdc\xdd\xed\x41\
\xf1\x1b\x51\x46\x9c\x6b\xa0\x21\xe2\xd7\x53\x53\xf7\x23\x8b\x8b\
\xe0\xef\xeb\xcb\x8a\xef\x85\xc3\x60\xb6\x58\xa0\xb1\xab\xeb\x06\
\x8a\xe7\x50\xfc\x29\x77\x65\x62\xa0\xe1\x52\x29\xe7\xfc\xf4\x74\
\x28\x8a\xe2\x00\xae\x2d\x91\x48\x84\xe2\xed\x60\x10\x2c\x56\x2b\
\xd8\x3b\x3b\xfb\x80\x88\x19\x26\x2b\xfe\x8a\xdf\xe7\xcb\xea\x2a\
\x30\x38\xf9\xf2\xdb\x99\x99\x91\xbd\x78\x1c\xc6\x87\x87\x41\x2a\
\x95\x0a\x0d\x37\x7d\x3e\x41\x6c\x6b\x6f\x1f\xc0\xc9\x2f\x71\xf2\
\x47\xc2\xef\xe0\xab\xec\x6c\x6c\xfc\x5d\x41\xdf\xda\xaa\x3d\xeb\
\x76\x0f\xfc\xe4\x79\x7e\x2e\x12\xe1\x5d\x2e\x17\x3f\x1f\x8d\xf2\
\xbf\xf0\x4c\x78\x52\x37\xb4\xb5\x81\xa2\xb6\xb6\xf0\x83\x9f\xd0\
\x6a\xa1\xc6\xe9\xd4\xa9\x1d\x0e\x2f\x31\xd9\xde\xdb\xe3\xf7\x31\
\x93\x33\xe1\x49\xfd\x7f\x41\xfe\x98\x92\x12\x95\xaa\x49\x6e\x36\
\x0f\x11\x13\x92\x8f\xaa\x54\x76\xe4\x8f\x21\x4a\x49\x1d\x71\x04\
\x51\x8c\x28\x42\x88\x33\x3a\x8a\xca\x1c\x4e\x22\x8e\x4b\x64\x32\
\x85\x58\x26\x3b\x97\x4a\x24\x96\x0f\x13\x89\x6f\xc8\xa5\x10\x6c\
\x26\x13\x1c\xe6\x70\x04\xdc\x6f\x01\x06\x00\x2d\x06\x04\x62\x7f\
\xe8\x51\x71\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\x37\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xd9\x49\x44\x41\x54\x78\xda\x6c\
\x93\x5d\x48\x14\x51\x14\xc7\xff\xbb\xb3\xbb\xb6\xab\x95\x5b\x46\
\xae\x99\xba\x7e\x40\x42\x46\x12\xa5\x4b\x49\xe8\x83\x60\x22\x11\
\x3d\x94\xf6\xd6\x43\xe9\x6b\x81\x6f\x46\xd1\x43\x54\xd0\xf6\x49\
\xf6\x24\x44\x81\x6f\x3d\x04\x8b\x08\x9a\x60\x59\xb6\x18\x65\xe9\
\x22\xbb\xce\xb8\xea\xfa\xb1\x8b\xdf\xe3\x3a\xbb\xb3\xce\xed\xcc\
\x75\x35\xad\x2e\x73\xb9\x77\xe6\x9c\xf3\xbf\xbf\x73\xee\x19\xc3\
\xad\xf6\x76\x18\x0c\x06\x3e\x35\x4d\x83\x3e\x68\x5f\x47\x8b\x03\
\xff\x1f\xdd\xeb\x89\x44\x40\x4d\x24\xc0\x18\x83\x69\xbb\xc5\x48\
\x22\x09\x32\xd0\xc8\x6a\xa9\xaf\x6f\x9d\x8e\x44\x61\xb7\x5b\xb8\
\xb0\x46\xce\x43\x81\x00\x3a\x07\x07\x1b\xf5\x33\x68\xfa\x79\xcc\
\x0e\x6d\x12\x30\x0a\x02\x74\xf5\xc8\x9c\x02\x8a\x44\x24\xb2\x86\
\x99\xd9\x28\xa6\x66\x56\x21\xcb\x32\xee\x36\x34\xb4\x92\xbd\x8a\
\xbc\x8b\xf4\x90\x4d\x82\x3a\xc2\x71\x24\xf1\x21\x08\x42\xc5\xe4\
\x62\x08\xcb\xb2\x8a\xe2\x9c\x83\xc8\xb0\x5a\xa1\xae\xaf\xe3\xc7\
\xf0\x3c\xde\x7a\x3c\x28\xc9\xc8\x68\x7d\xd2\xde\x7e\x83\xdc\xdd\
\x26\x8d\x0c\x9b\xc8\x23\x81\x15\xe4\xe6\x59\x77\x20\x0f\x07\xa7\
\x91\x99\xbe\x1f\xa9\x29\x36\x9c\x38\xea\x42\x82\x6c\x66\xb3\x19\
\xe5\xc1\xa0\xc2\x09\xd4\x8d\x9c\xe1\x17\x65\x3d\x03\x04\xc7\xd6\
\x78\x71\xf4\x7a\xea\xc8\x35\xe5\xe5\xf8\xe8\xf3\xc1\xbe\xc7\x8c\
\x0c\xbb\x8d\x93\x08\x24\x10\x8b\xc5\x0c\x1b\x02\xaa\xca\xc9\x8b\
\x9c\xa9\xf0\x4b\xab\x70\x1e\xb6\xf0\x53\x74\xc7\x21\x71\x03\x59\
\x1f\x83\xbf\xfc\xa8\xad\xa8\x24\x1b\xa3\xca\xa9\x88\x93\xc0\xc9\
\xee\x6e\x12\x88\xc7\xb9\x80\x38\x1e\x85\xd1\x68\xc0\xd8\x64\x9c\
\x13\xd0\x83\x92\xc2\xd3\x9c\x44\x7f\x5f\x54\xc7\x71\x60\x5f\x0a\
\xdf\xc7\x07\x06\xd0\xe8\x76\x5f\xd3\xc2\xe1\x21\x23\xa1\x70\x9c\
\xc2\x1c\x1b\x4f\xa1\x20\x67\x17\xf2\xf9\x4c\x41\x2e\xd1\x64\x67\
\x0b\xc8\xcb\xb7\x52\x41\xe3\x98\x5f\x4a\x60\xc4\x1f\x42\xaf\xf7\
\x3b\xca\x9a\x9a\x8e\x45\x80\x3b\x26\x42\xe1\x04\x52\x68\x8d\xdf\
\xc0\x58\x28\xc6\x4f\xd7\x34\xb6\x45\xc2\x98\x02\x9b\x05\xb0\xa8\
\xfd\x08\x8e\x2e\xa3\xe6\xfa\x55\xd4\xb9\x5c\x3d\x17\x19\xbb\x67\
\x8a\x25\x05\x0a\xb2\x6d\x18\x9d\x8c\x22\x2f\xcb\xca\x6f\x80\x17\
\x32\xb9\x1a\xa8\x37\xc4\x2e\x2f\x7c\xa1\xf7\xa8\x39\x75\x1c\xee\
\xa7\x12\x66\x9d\xce\xaf\xdf\x04\xa1\xd3\xa4\x28\xca\x06\xc1\x54\
\x92\x60\x4a\xd9\xca\x7b\x93\x24\xb6\xf8\x09\xc6\xb9\x37\x28\xab\
\x3c\x8b\x8e\x8e\x7e\x5c\xba\xd0\x82\xd7\x7d\x3d\xe1\xb6\x89\x09\
\xfc\x21\x38\x44\x04\xa1\x28\xf2\x75\x02\x60\x8b\x60\x61\xb6\x17\
\xe2\xec\x73\x54\x53\xf0\xc3\x47\xee\xd1\x8e\x61\xc7\x87\xa1\x97\
\xcd\x7e\x4d\x96\x97\xe5\x70\x38\xdf\x34\x23\x8a\xd8\xeb\x70\x18\
\x44\x22\xd0\x5b\x5c\x9a\x56\x92\x79\x33\x44\x17\x46\x11\x09\x3c\
\xc0\x19\x57\x29\x1e\x3f\x7b\x21\x05\x25\xa5\xb9\xcf\x23\x7d\x01\
\xa4\xcd\xe6\xd7\x83\x90\x7e\xe4\xfc\xf9\x9b\x14\xc0\x88\x40\x5f\
\x18\x9d\xcc\xd6\x89\xfd\xb3\xe7\x36\x63\xf2\x08\x7b\xd7\x56\xc5\
\x6a\xaf\x94\xbe\x22\x5f\xfb\xdf\xbf\xa6\xde\x4d\xb9\xbb\x8b\x8b\
\xab\x8d\x69\x69\xff\x18\x97\xbc\xde\xfb\x97\xcf\xa5\xfe\x1c\x5e\
\xcd\xec\x93\xc2\x96\x81\x15\x9f\xaf\x8b\x3e\x8b\xdb\x7d\x7e\x0b\
\x30\x00\x66\x8d\xa1\xfd\x87\x65\xe6\x27\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x02\xa1\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x43\x49\x44\x41\x54\x78\xda\xa4\
\x93\xcf\x6b\x13\x51\x10\xc7\xbf\xfb\x23\x6b\xd3\x4d\x63\x53\x51\
\x69\x93\x2d\x52\x62\xc5\xb6\x54\x08\x52\x28\x28\xb4\xa8\x08\xc5\
\x5e\x84\xfe\x01\x3d\x49\x2f\x9e\x7a\x29\x96\x9e\xf2\x07\xf4\xa4\
\x88\x17\x7f\x1c\x82\x22\x2a\xb9\x05\x5a\xe8\x49\x94\x0a\xb1\x58\
\x24\x97\x68\x49\xa5\x31\x35\x90\xd2\xcd\x26\xd9\xcd\x6e\xd6\x99\
\xad\x5b\x82\xb4\x50\x71\x60\xf6\xbd\x37\xcc\x7c\xdf\x7c\xde\xdb\
\x27\xb8\xae\x8b\xff\x31\x79\x66\x69\xe9\x70\x21\x08\xc2\x34\x0d\
\x7d\xff\x50\xbf\x23\xf3\xd7\x69\xb5\xfc\x40\xf4\x4d\x32\xf9\xe8\
\x24\x3d\x09\xe4\x77\x17\x17\xe7\x64\xda\x15\x92\x28\xc2\x34\x4d\
\x8e\x8b\x0e\x21\x7d\x2e\xba\xa8\x14\xbe\x42\x8b\x0f\x63\x20\xd2\
\x3a\x52\x40\xa4\x1a\xbb\xd9\x14\xbd\x0e\x04\x5a\x28\x8a\x82\x9a\
\x61\x88\x36\x09\xec\x15\xbe\xa0\x98\xdf\x84\x08\x07\x5a\xe2\x32\
\x0a\xa5\x12\x82\xc1\x20\x6c\xdb\x46\x6f\x4f\x8f\x27\x20\xd1\xc6\
\xbe\xc0\x34\x5c\xb7\x8f\x15\x03\x8a\x72\x6d\x65\x7d\x1d\xdb\xbb\
\x3a\x8a\xe5\x32\x6a\xe1\x5f\xa8\x7e\x32\xd0\xa0\x42\xdf\xce\x77\
\x77\xe3\x4a\x3c\x8e\x00\xe5\x37\x2d\x4b\x94\x6d\xc7\x39\x86\xfb\
\xe6\x91\xdc\x4f\x33\x19\x9c\x56\x55\x5c\xd0\x34\x58\x96\x25\xc9\
\xdc\x06\x73\x3f\xcb\xba\xf8\xfe\xfe\x35\xc6\x6f\xcf\xe0\xd6\xc0\
\xf1\xdc\x6a\x67\x27\x62\xd1\x28\x6c\x3a\x78\xcb\x34\x45\x91\x05\
\x98\xfb\xe7\x87\x57\xd8\x5c\x4d\x61\x63\xe5\x25\x9a\x8e\x83\xb5\
\x6c\x16\x1b\x5b\x5b\xf8\x98\xcb\x79\x6b\x76\xce\x4b\x2e\x2f\xa7\
\x9f\xa4\x52\xab\xcd\x03\x01\x49\x66\x0e\x56\x3b\xa3\x0d\xa1\x5a\
\xad\xe2\x5c\xff\x10\x2c\x62\x8e\xc5\x62\xde\xae\x2a\xb5\x6b\xfd\
\x39\x03\xe6\x56\x43\x21\x69\x6e\x76\xf6\x06\xd5\xc1\xd0\xf5\x80\
\xcc\x1c\xac\xf6\xee\x6d\x1a\x86\x61\x60\x2d\x93\xc6\x9d\xeb\xf7\
\x91\xa3\x9d\x7d\x2b\x45\x22\xa8\xd7\xeb\x18\x4f\x24\x50\xd3\xf5\
\xca\xd9\x78\x7c\x21\x14\x0e\x77\x39\x86\x51\x96\x99\x83\x3b\x78\
\xf1\x70\x9e\x52\xe7\xbd\x82\x7a\xad\x86\xab\xa3\xa3\xde\x3c\x48\
\xcc\xbe\x71\x9e\x24\x49\xdf\xec\x7c\xfe\xf9\x1e\xc0\xe7\x5e\x11\
\x99\x83\x3b\x60\xae\xde\x91\x91\x05\x1e\x2d\xe2\xf5\xbd\x3d\xce\
\x79\xa4\x60\x5c\x9c\x9c\xdc\xa1\xe2\x22\x79\x03\x97\xa6\xa6\x1e\
\xec\x9a\xa6\x5b\xa1\x57\xc5\x73\x1e\x7f\xe8\xfa\xa1\xb7\xc7\x39\
\x8f\xe7\xe4\x88\x8d\x8d\x1d\x5c\x6d\xd7\xe0\xe0\x3d\x49\x55\xfb\
\xab\xfb\xfb\xba\xd2\x68\x6c\x5b\x1d\x1d\x1a\xf3\xf9\x6d\xff\x1d\
\x27\xee\x02\xf9\xe3\xf6\x7f\xe3\x14\x79\x84\xaf\xf9\x04\x6f\xc8\
\xe3\xf6\x5a\x27\x1b\x9e\x98\xc0\x6f\x01\x06\x00\x48\xae\x45\x78\
\x60\x4e\x1f\xe2\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x02\x75\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x17\x49\x44\x41\x54\x78\xda\xa4\
\x93\x4f\x6b\x13\x51\x14\xc5\xcf\x9b\x37\x93\x49\xc6\xa9\x25\xa3\
\x15\x63\x16\x2e\x02\x06\xad\x0b\x45\xc1\x95\x5f\xc0\x3f\x0b\x21\
\x1b\xa5\x20\xb8\x31\xea\x7c\x82\xac\xf4\x2b\x54\x5b\x23\x88\x1b\
\x4b\xb3\x70\xe1\x42\xb7\x82\x20\xe8\x42\xbb\x70\xa1\x62\x36\xa2\
\xa0\x51\xda\x1a\xc2\x4c\xa6\x26\x93\xcc\x78\xef\x75\x5a\xb4\x2a\
\x54\x7c\x70\xb8\x2f\x93\x7b\x7e\xf7\xbc\x37\x89\xaa\xd5\x6a\x50\
\x4a\x9d\x06\xb0\x07\xff\xb6\x3e\xa5\x69\xfa\xc0\x4c\x92\x84\x3f\
\x94\x9b\xcd\xe6\xcd\x38\x8e\xb7\xe4\xb4\x2c\x0b\xf5\x7a\xfd\x12\
\xef\xcd\xd1\x68\xc4\xd5\x18\x0c\x06\xf0\x7d\x1f\x0c\x64\x11\x5d\
\xea\x78\x3c\x46\x18\xf6\xa9\xa6\x62\xf4\x3c\x0f\xf3\xf3\xd7\x41\
\x3e\xe3\x67\x80\xe2\xca\x86\x6a\xb5\x0a\x4e\xf2\xed\x68\x05\xa3\
\xc7\x2f\xb1\xb2\xf2\x95\x9e\x6b\x32\xdb\xb0\xed\x3c\xa2\x28\x60\
\x33\x4b\x09\x20\x8b\x6d\xf0\x43\x9e\xc6\x49\x58\x69\x79\x07\x56\
\x57\xbb\x64\x88\x91\xcf\x6f\x13\xb3\x65\xe5\xa9\x27\x16\x00\xf9\
\x8c\x0d\x23\x49\x33\x48\x00\x34\x39\x8a\x22\xa8\xbd\xbb\x08\x94\
\x60\xf2\x60\x05\xe5\xd9\x3a\x26\x4f\x1c\x13\x90\x69\xda\x92\x90\
\x3d\xec\x35\x86\xc3\x21\x48\x3f\x40\xa5\x22\x9d\x37\x84\x73\xed\
\xbc\x5c\xd6\xf6\xe9\x0a\x3c\xff\x14\x7a\x8d\x16\x01\x8b\xa8\x35\
\xaf\x12\xc0\x94\x04\xec\x61\xef\xc6\x11\xb8\x26\xef\xbf\xa0\xdf\
\x5d\x43\xf7\xe1\x53\xb8\x07\xf6\xa1\x78\xf9\x24\xfa\xb7\x1f\xc1\
\x75\x8b\x48\x5b\x4b\xb8\x77\xf7\x19\xbf\x72\x49\xb0\x7e\x04\x93\
\x29\xb4\x24\x8e\xe3\x38\xe8\xf5\x7a\x30\x0c\x0b\xfa\xed\x07\x84\
\xfe\x2c\x0a\x85\x09\x0c\x0c\x2d\x46\x5e\xb6\xad\xd7\x13\x68\x01\
\xb4\xdb\x6d\x94\x4a\xa5\x1c\x37\x34\x1a\x8d\x2d\xfd\x0e\xb8\x37\
\x08\x82\x5c\xa7\xd3\x01\x63\x3d\xd7\x75\x67\xb4\xd6\xbb\x37\x37\
\xd2\xa4\xb2\x4c\x31\xcd\x8f\x9b\xbf\xa3\x0b\xff\x4c\xf7\xb5\xc0\
\x80\x02\x69\x82\xfb\x7e\xe9\x98\xce\x01\xaf\x86\x7e\xb6\xbf\x41\
\xfb\xdf\xf8\xa4\x40\xfd\x35\xe7\xe2\xd4\x2d\xbc\x89\x8f\xc8\x7e\
\xbf\xb5\x84\x73\xcb\x17\xff\xd4\xc6\x53\x77\x92\x2a\xa4\x43\xa4\
\xc3\xa4\xaa\x24\x5a\x0c\x71\xe6\xce\x59\x01\xdc\xbf\xd0\xe2\xf2\
\x82\x93\x93\xde\x65\xfb\xe7\xa4\xd7\x9c\xc0\xca\x8e\xe1\x66\x72\
\xf8\xad\xe0\x8a\x33\x83\x29\x75\x5c\xc6\x2c\xa7\x4f\x30\x17\x2d\
\x64\x43\xd7\x38\x7a\xa6\x48\xf1\x9f\xe6\x7f\xd6\x77\x01\x06\x00\
\xf9\x1f\x11\xa0\x42\x25\x9c\x34\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x07\x62\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x02\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x5d\x55\x15\xc7\x7f\xfb\x3c\xee\x3d\xe7\x9e\
\x7b\x93\xfb\x68\x4a\x93\x86\xb6\x69\x4b\xaa\x38\x16\x06\x3a\x16\
\x4b\x81\x22\x4c\xad\x55\xa9\x83\xce\xe0\xa3\x55\xa1\x0a\xc3\x07\
\xf0\x05\x0c\x33\x38\xea\x28\xc2\xe0\x03\x44\x1c\x18\x64\x8a\x03\
\xa3\x32\x08\x88\xa0\x0c\x8f\x3e\xd2\x34\x84\x16\xab\x96\x3e\xd2\
\x34\x49\x9b\x26\x6d\x92\x9b\xc7\xcd\x4d\x9a\xe6\xde\xdc\x7b\xf6\
\x39\x67\xf9\x21\x24\xc0\xfa\xb8\x67\xd6\xfa\xed\xb5\xd6\xff\xbf\
\xb7\x12\x11\x82\x20\xa0\xe2\x57\x2f\x41\x44\x78\xef\xf0\x51\x29\
\x57\x66\x04\x11\xa1\xa9\xa9\x59\x0e\x1f\x3d\x22\x68\xad\x69\x6b\
\x6b\x7f\x48\x6b\x8d\xaa\x56\xab\x4c\x97\x4b\x72\xf8\xbd\xa3\x18\
\x57\xac\xbd\x4a\x9c\xb8\xc3\xe2\xc6\x06\xc5\xfb\xd5\x6a\x47\xc7\
\x0a\xd7\x74\xf5\x74\x8b\xef\xfb\xf2\xce\x81\x03\x62\x00\xb4\xee\
\x6b\x9b\x0c\x42\xbd\x77\xf9\xb2\x26\x76\xed\xde\x8d\x6d\x59\xeb\
\x0d\x80\x96\xdd\xfb\x9e\xfc\xfa\x4d\xdf\xe4\xca\x75\x1b\x48\x67\
\xd2\x6a\xcd\xe5\x6b\xde\x26\x08\x02\x82\x20\x20\x0c\x43\x0e\x1d\
\x3a\xfc\x42\x10\x04\x68\xad\x21\x08\x02\xa5\xb5\x66\x4f\x4b\xab\
\xe4\x47\x86\x45\x6b\x2d\x22\x82\xa1\x94\x92\xb6\xb6\xf6\xb1\x65\
\x4d\x4b\x58\x90\xcd\xd1\xd6\xde\xce\xc0\xc0\xd0\x76\xd5\xdd\xd3\
\x43\x32\x99\x94\x64\x32\x89\x48\xc4\xa1\x43\x47\x58\x50\x97\x55\
\x6a\xed\xa7\xd6\xcb\xe4\xe4\x24\x7e\xe0\xf3\xf8\xe3\x7f\xc0\x30\
\x0d\xe3\xd2\x4b\x57\x8b\x71\xff\x03\x3f\x57\xb9\x5c\x8e\x86\x0b\
\xea\x19\x2f\x14\xd8\x78\xfd\x17\xa2\xa1\x81\x85\xa2\x44\x84\x30\
\x0c\x11\x11\xa5\x94\x32\x80\xb0\xe3\x78\x67\x7b\xa9\x34\xb3\xae\
\x7e\xf1\x42\x1a\x16\x35\x30\x75\xee\x5c\x21\x9b\xcd\xd6\x45\x51\
\x88\xc1\xfb\xa1\x94\x12\xd3\x34\xc3\x7f\xbd\xf6\xba\x80\x5a\x77\
\xe1\x92\x06\xea\x72\x75\xd8\x96\x05\x8a\x05\xbb\x5b\xf6\x4a\x7f\
\xff\xd9\x9b\x95\x88\x70\xac\xe3\x04\x41\xa8\x1f\x9d\x3a\x37\x75\
\x67\x43\x43\x3d\xa9\x9a\x24\xe9\x54\x2d\x96\x6d\xd1\xd5\xdd\xcd\
\xe0\x60\x9e\xfa\x86\x45\xac\x6a\x6e\x56\x4a\x44\xb8\x62\xed\xd5\
\x92\x1f\x1e\x22\x0c\x23\x1e\x7c\xf0\x97\x6c\xdb\xf6\x35\xb4\xd6\
\xb4\xef\xdf\x8f\x5f\xd5\xd4\xd6\xa6\x4e\x15\x27\x8a\x2b\x2f\xbb\
\xec\x32\x94\x88\xf0\xd8\x63\x4f\x7c\xef\xaf\x7f\x79\xee\x77\x5a\
\x6b\xb4\xd6\x84\x51\x44\xa9\x34\xcd\x8b\x7f\x7f\x9e\x6a\xb5\xf2\
\xd6\xba\x2b\xee\xdb\x08\x1d\x84\xe1\xc4\x6c\xc2\xce\xb7\x5a\xf0\
\xb5\xaf\x06\x06\x07\x6b\x0e\x1e\x3c\xf8\x8f\xb3\x67\x07\x36\x44\
\x41\xc8\x8f\xee\xfe\xfe\x77\x1a\x16\xd7\xef\x08\xfc\xd5\xfe\xd8\
\x18\x76\x47\x07\xcf\xcf\xef\x6e\x6e\x67\x5a\x6b\x46\x47\x47\xd7\
\xb6\xb4\xec\xdb\x7b\xba\xaf\xef\xae\x48\xa2\xb9\x73\x25\x22\x1f\
\x4c\x09\xb0\x94\x52\x18\x86\x61\xff\xef\xd0\x91\x03\x0b\xea\x72\
\xd7\x78\xa9\xe4\xaf\xc3\x20\x94\x62\xb1\xf8\x25\x40\x01\xf3\x09\
\x1e\x10\x98\xa6\xc9\x1b\x6f\xee\x2a\xcd\x4d\x2a\x1e\x8b\x61\x18\
\x06\x75\x75\x75\x2f\x9f\x3c\xd5\x7b\xe7\x87\x13\x4a\xb3\xe3\x3d\
\xfe\x70\x26\x93\xb6\x1d\x37\x46\x22\x91\xc0\x75\x5c\x82\x20\xe0\
\xc0\xbf\xdf\xa5\xb7\xb7\xef\x11\x00\x43\x29\x85\x52\x8a\x99\x4a\
\xf5\xaa\xfe\xfe\x33\x3f\x48\x67\xd2\x78\x9e\x47\xca\xf3\x30\x0d\
\x83\xa1\xfc\x10\xe5\x52\x85\x85\x0b\xeb\x66\xef\x7d\xbc\xb3\x1b\
\xa5\x14\x3d\x3d\xdd\xfb\x56\x35\x37\xe3\x79\x2e\x5e\xc2\xc3\xb2\
\x6c\x2a\x95\x0a\xdd\x3d\x27\xb1\x4c\x9b\x8b\x9a\x57\xdc\x00\x60\
\x84\x91\x06\x09\x5f\x6a\x6e\xbe\x88\x4c\x2e\x4d\x22\x91\x20\xee\
\xc4\x51\x4a\xd1\xd9\x75\x82\x28\x8c\x48\x26\x93\xd5\x7c\x3e\xff\
\x4f\x00\xa3\xab\xb3\xfb\x8e\x2f\xdf\x78\xd3\x8d\xd7\x6e\xb8\x9e\
\xad\xdf\xb8\x99\x94\x97\xc2\x89\xc5\x19\x2f\x8e\x33\x38\x90\x27\
\x9b\xcd\x31\x31\x39\xbe\xb4\x50\x28\x00\x60\x8c\x0c\x8f\xd6\x64\
\xb3\x59\xea\xea\xea\xe8\xeb\xeb\x63\x71\x63\x13\xcf\x3d\xf7\x3c\
\xa7\x7a\x7b\xc9\xe6\x72\x04\x81\xde\xa7\x94\x39\x72\xf1\xc5\x1f\
\x9f\x15\xe9\xce\xdd\x2d\xfc\xf4\xc7\x3f\x93\x30\x0c\xf1\x7d\x1f\
\xad\x03\x7c\x5d\xe5\xf6\xdb\x6f\x63\xd3\xe6\x8d\x7c\xac\xf9\xbe\
\x5e\x38\xb4\x1c\x26\x98\x98\x98\xd8\x6c\x48\x14\x71\xf7\x3d\x77\
\x27\x52\xa9\x14\xa9\x54\x8a\x4c\x26\x4d\x36\x93\xe5\xe9\x1d\xcf\
\x50\xa9\x54\xfe\x08\xef\x2c\x87\x5e\xca\xe5\x09\xda\xdb\xb9\xd5\
\x22\x52\xa4\x92\x5e\x75\xfb\x77\xb7\xab\xe3\xc7\x8f\xdf\x72\xf4\
\xe8\xb1\x27\xa7\xcf\x4f\x5b\xcb\x96\x37\x95\x6d\xdb\xbe\xad\xbf\
\x3f\x7f\x6c\x70\x90\xdf\x77\x76\x82\xd6\x6c\x9d\x77\xdc\x5c\x88\
\x08\x4a\x29\x00\x25\x22\x02\x60\x9a\x26\x85\xc2\xf8\xe7\x8e\x1c\
\x39\xfa\xd2\x74\xa9\xe4\xe6\x72\x59\x72\x0b\x72\x24\x93\x1e\xa6\
\x61\xcc\x58\xa6\xf5\x94\xeb\xba\x8f\x24\x12\x89\x3e\x80\x28\x12\
\x40\xb0\x6d\x9b\x8f\x00\x44\xc4\x02\x16\x29\xa5\xf2\x40\x38\x57\
\xbc\xf7\x74\xdf\xfd\x9d\x27\xba\xef\x4b\xd7\xd6\x90\x4c\x26\xf1\
\xbc\x04\xb1\xb8\x8d\xeb\xba\x24\x13\x1e\xf1\x78\x1c\xc3\x30\x98\
\x9a\x9a\x92\xf3\xe7\xcf\x3f\x5c\x5f\x5f\x7f\x97\x42\x50\x86\xf9\
\x11\x80\xfa\xa0\x89\xd9\x2e\xb4\xd6\xa9\xfd\xfb\xdf\x7d\xbb\xea\
\xfb\xab\x73\xb9\x1c\x6e\xc2\xc1\xf3\x12\xc4\xe3\x71\x1c\xc7\xc1\
\x4b\x24\xb0\x2d\x1b\x11\xc1\xf7\x7d\x86\x47\x46\xe8\xeb\xef\xa7\
\x5c\x9a\x29\x7c\xe6\xda\x0d\x97\xbb\xae\x73\xe6\xc3\xee\x11\x40\
\x94\x52\x58\x96\x45\xb1\x38\xb1\xe9\x8d\x37\x77\x4e\x99\x96\xb5\
\xba\xb1\xb1\x91\x74\x3a\x4d\x2a\x95\xc4\x71\x1d\x3c\xcf\x23\x99\
\x48\x60\x9b\x16\x22\x42\xb9\x5c\xa6\xb7\xef\x34\xbd\xa7\xfb\x08\
\x83\x88\x54\x2a\xb5\xa0\xab\xab\xfb\x16\x00\x0b\x60\x64\x74\xf4\
\x43\x14\x89\x9d\xe9\x1f\x78\x75\x64\x64\xec\xb3\x8d\x8d\x8d\x78\
\x09\x8f\xb8\x13\x23\xe6\xd8\x38\xb1\x38\xae\xe3\x12\x8b\xc5\x30\
\x0d\x83\x50\x84\xa9\xa9\x29\x7a\x7a\x4e\x52\x2a\x95\x09\xc3\x90\
\x78\x3c\xc6\xd2\x65\x4b\x06\x16\xd7\xd7\xef\x98\x07\x14\xc6\xc6\
\x67\x8d\x64\x59\x6b\xf2\xc3\xc3\xed\x5a\xeb\x58\xd3\xb2\xa5\xc4\
\xdd\x38\x8e\x13\x23\x16\x8b\x11\x77\x1c\xdc\x78\x1c\xdb\xb2\x31\
\x0c\x83\x20\x0c\x29\x8c\x17\xe8\xea\xea\x41\xeb\x00\x11\xa1\xb6\
\xb6\x96\x4c\x26\xbd\x2f\x1e\x8f\x5d\x33\x5e\x2c\x72\xc1\xc2\x85\
\xb3\x00\xc7\x71\x38\x71\xa2\xfb\xde\xd6\xd6\xd6\x07\xc3\x28\x62\
\xe9\x92\x0b\xb1\x6c\x93\x35\x6b\x2e\xa7\x26\x55\x83\x44\x11\x22\
\xb3\x1f\x9f\xa1\x0c\xb4\xd6\x0c\x0e\xe7\xe9\xe9\xea\x41\x04\x1c\
\xc7\x25\x93\x49\x33\x75\xfe\xdc\x43\xf9\xe1\xa1\x7b\x87\xf2\x83\
\xb8\xae\xfb\x01\xe0\x85\xbf\xbd\xf8\xea\xcb\x2f\xbf\xf2\x45\xa5\
\x14\x95\x4a\x05\x5f\xfb\xf8\x5a\x53\x2a\x95\xa8\x56\xab\xac\x5f\
\x7f\x25\x0f\x3c\xf0\x0b\x2e\x59\xfd\x49\x44\x84\x9e\x13\x27\x39\
\x75\xaa\x77\xbe\xb0\x9b\x70\x28\x14\x0a\xb7\x06\x81\x7e\x2a\x16\
\x8b\xb1\x72\xe5\x4a\x5c\xd7\x65\x4e\xeb\xbc\xf2\xca\x6b\x8d\xcf\
\x3e\xf3\xec\x7f\x06\x07\x87\x2e\x98\x83\x84\x61\x48\x18\x86\x44\
\x51\x84\x20\x54\x7c\x9f\xca\xcc\x0c\xdf\xfa\xf6\x56\xb6\x6c\xb9\
\x61\xf6\xf9\x4f\xd7\x00\x42\x6d\x4d\xcb\x1d\xcb\x9b\x8e\x6c\x87\
\x73\x97\x42\x11\x98\x28\xc0\xd0\x4f\x44\xa6\x9f\x50\x22\xc2\xae\
\xdd\x2d\xa0\x14\xad\x7b\x5a\x7f\xdb\xb2\x67\xef\x0f\xc3\x28\xc4\
\x34\x4d\xa2\x28\x9a\x87\x44\x51\x84\x44\xc2\x4c\x65\x86\xfa\x86\
\x7a\x1e\x79\xf4\x37\xd8\xb6\xe5\x17\x0a\x85\xeb\xae\x5e\x7f\x4f\
\x1b\x9c\x01\x86\x81\x88\x28\x82\xb1\x31\x18\x1b\xe3\xcf\xb3\xdf\
\xc8\xce\x96\x59\x05\x45\xc2\xe4\xe4\x24\xbd\xa7\x4f\x7f\xf5\xd8\
\xb1\x63\xbf\x1a\x19\x1e\xb9\x30\x08\x02\x4c\xd3\x44\x29\x35\x6f\
\xc8\x72\x79\x86\x1b\xbf\xb2\xe5\xfc\xe6\xcf\x6f\xfa\x74\x14\x45\
\x1d\xab\x2e\xba\xa4\xec\xfb\xe2\x2a\x05\x61\x08\x03\x03\xd0\xd1\
\x01\xa5\x12\x77\xcc\x76\xb0\x6b\xcf\x9c\x4a\x4d\x11\xe1\xdc\xd4\
\x74\x18\x86\x21\x95\x4a\x85\x62\xb1\x48\xb5\xea\xaf\x31\x4d\x63\
\xb3\x32\x68\x4e\xd7\xa6\x27\x57\xac\x58\xfe\xa7\x74\xa6\xf6\xbf\
\x86\xa9\xb0\x4c\x8b\xb8\xfd\x09\xfa\xfb\xb9\x6e\x68\x88\x6d\xc5\
\x22\xb5\x33\x33\xbc\x6e\xdb\x3c\x9d\x48\x10\xfc\x1f\x86\x93\xb9\
\x1a\xfd\x43\x9a\xa3\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
\x00\x00\x02\x77\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\x19\x49\x44\x41\x54\x78\xda\x8c\
\x93\xbd\xaf\xa1\x41\x14\xc6\xcf\xcb\x2b\xe2\x26\x16\x05\xb1\x42\
\x43\xa2\x11\xbd\x66\x23\xa2\x22\xb9\xff\xc1\xaa\xf7\xd6\x1a\x0a\
\x09\x89\x46\xb7\xd1\xdd\x68\x97\xbf\x60\x2b\x8a\x0d\xb1\x85\x8f\
\x42\x50\xae\x50\x21\x44\x04\xb1\xbe\x3f\x76\x9e\xb3\x5e\xb9\x37\
\x5b\xac\x93\x8c\x31\x73\xcc\xef\x79\xce\x99\x21\x25\x93\x49\xba\
\x5e\xaf\x24\x49\xd2\x33\x11\x7d\xa4\xff\xc7\x8f\xf3\xf9\xdc\x3b\
\x1e\x8f\x94\xc9\x64\x48\xc6\x8e\x4a\xa5\xa2\xd3\xe9\x64\x4b\x24\
\x12\xaf\x22\xc9\x40\x0c\xb1\x77\x1f\x58\xf7\x7a\x3d\x2a\x95\x4a\
\x2f\x37\x50\x0f\x1f\x00\x3c\x8b\x24\x94\x3f\xb5\x5a\x2d\x9a\x4e\
\xa7\xa4\x56\xab\xc9\x60\x30\xd0\x78\x3c\x26\x9d\x4e\x47\xbb\xdd\
\x8e\xdc\x6e\x37\xad\xd7\x6b\x4a\xa7\xd3\xaf\xf1\x78\x1c\x10\x49\
\x8c\x5f\x92\x50\xfd\xf2\x88\x32\x20\xc3\xe1\x90\x34\x1a\x0d\xcb\
\x67\xb3\xd9\x68\xa3\xd1\xf8\x2a\xa3\x16\xfc\xe8\x72\xb9\xfc\x33\
\x2b\x10\x28\x87\x42\x21\x2a\x16\x8b\x64\xb5\x5a\xc9\x62\xb1\x50\
\xbd\x5e\xdf\x71\xf9\x02\x20\xfa\x27\x51\xb3\xd9\xa4\x6e\xb7\xcb\
\xfd\xa8\x56\xab\xd4\xef\xf7\xa9\xdd\x6e\x93\x2c\xcb\x34\x9f\xcf\
\xa9\x50\x28\xd0\x6c\x36\xa3\x72\xb9\x8c\x86\xd3\x7e\xbf\x97\xb8\
\x07\x0a\xc0\xe7\xf3\xdd\x95\x03\x81\x00\xcf\x18\x70\xe8\xf7\xfb\
\xd9\x89\x56\xab\xa5\x5a\xad\xc6\xd0\xc3\xe1\xf0\x17\x00\x12\x00\
\xb9\x5c\x8e\xed\x79\xbd\x5e\x4a\xa5\x52\x64\xb3\xd9\xc8\xe9\x74\
\x52\x24\x12\xa1\x7c\x3e\x4f\x66\xb3\x99\x3c\x1e\x0f\x94\x19\x70\
\x77\x00\x12\x00\xe1\x70\x98\x1d\x38\x1c\x0e\xee\xbc\xc9\x64\xe2\
\x32\x50\x52\x30\x18\x64\x37\xc8\x0d\x06\x03\x06\x88\xa6\x32\x40\
\xa5\x38\xc0\x95\x2d\x16\x0b\xae\x0f\xca\x88\xd1\x68\xc4\x80\xc9\
\x64\x42\xcb\xe5\x92\x0f\xe2\xb6\xde\xf5\x00\x24\x6c\xc0\x32\x1c\
\xe0\x40\x2c\x16\xbb\x5f\x29\x94\xed\x76\x3b\xcf\x6f\x01\xdb\xed\
\xf6\xbd\x03\x58\x53\x1c\x54\x2a\x15\xea\x74\x3a\x3c\x03\x88\x1c\
\xe6\xdb\x41\x5a\xad\x56\x70\xcc\xaf\x58\x56\x48\x8a\xed\xb7\x25\
\xa0\x0f\x58\xbb\x5c\x2e\xe5\xff\x82\x07\xf4\x3d\x1a\x8d\xfe\x14\
\x8e\x26\x0c\x00\x49\x51\xc7\x7d\x23\xb0\x36\x1a\x8d\xac\x86\x40\
\x0e\x00\xc4\x66\xb3\x69\x89\x7e\x7c\x13\x5f\x57\x2c\xa8\xd7\xeb\
\x3f\x0b\x7b\x36\x7a\x30\x84\xf2\x48\xf4\x2d\xaf\xbc\x60\xd8\x7f\
\x12\xe3\x03\xfa\xf1\xc0\xf9\xeb\x4d\xf9\x37\x2f\x04\xe0\x8f\x00\
\x03\x00\xe7\xe3\x7a\x6e\x30\xbb\xf3\xb7\x00\x00\x00\x00\x49\x45\
\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\x71\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x02\x00\x00\x00\x68\x24\x75\xef\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\
\x95\x2b\x0e\x1b\x00\x00\x06\x14\x49\x44\x41\x54\x78\x9c\xed\x9d\
\x4b\x76\xa3\x30\x10\x45\x71\x8e\x07\xc9\x72\xb2\x9d\x64\x7d\xde\
\x4e\xb2\x1c\x0f\xe9\x01\x69\x2c\x0b\xa1\x6f\x7d\x5e\x95\xf5\x66\
\xdd\xb1\x41\xd4\xd5\xe5\x23\x0b\xb8\x7c\xbd\xff\x2c\xcb\x72\xbb\
\x7f\x2e\x18\xf9\xfe\xf8\xd5\x6e\x42\x1c\xa8\xe2\x5c\x36\x60\x5b\
\xa0\x5a\xa6\xdd\x84\x47\x10\xca\xb2\x17\xe4\xb2\xae\x6b\x54\x1d\
\x84\xf6\x6d\x01\xc1\xa6\x5b\x90\xa8\x08\xd7\xb3\x4f\x20\x60\xdb\
\xda\x00\x82\x4d\x3e\xc7\x0d\xbf\xdd\x3f\x2f\xeb\xba\x26\xff\xb6\
\x7f\x82\xbd\x5d\x75\x51\xc4\x26\x5f\x84\x0c\x8e\x84\x61\xc7\x6f\
\x22\x60\x7b\x11\xdb\x32\x1b\xb8\x55\xe0\xcf\xb0\xfc\x47\xc3\x2f\
\x20\x44\x18\x9b\xcc\x86\x57\xd6\xbf\x60\xd8\x71\x89\x08\xd8\x9c\
\xd9\x56\xb3\x21\x7b\xd9\x1f\x86\x55\x7e\x33\xfa\xbe\x7a\x04\xb0\
\xf1\x6d\x6c\x47\xc1\x1b\x0c\x3b\xae\x09\x01\x9b\x51\xdb\x9a\x1a\
\x1c\xd6\xf9\xc9\xb0\xd6\x05\x1d\x17\xa7\x1b\x26\x6c\xb4\x1b\x38\
\x58\xe1\x4e\xc3\x8e\x2d\x40\xc0\x06\x6e\x5b\x5f\xc3\xa2\xc2\xbe\
\xe5\xff\xdc\xd4\x1a\x90\x4a\x21\x74\x9d\x28\x84\xc5\x21\x30\x2c\
\x8c\xba\x6d\x61\x5d\x6e\xf7\x4f\xf5\x3e\x34\xd8\x80\x63\x25\x13\
\xc0\xc6\xb7\x53\x05\x5b\xb2\xcd\x8a\x3b\x49\xa6\x95\x12\x1b\x16\
\x46\x0c\x5b\xe5\x25\xa7\x18\x36\xaa\x15\x25\x4b\x97\x06\x46\xb8\
\x33\x61\xc5\xd6\x71\x72\xcc\x8a\x4d\xa0\x4f\x30\x1a\x16\x86\x1c\
\x5b\x77\x69\x98\xb0\x91\x2f\xf0\xac\x56\xa7\xc0\x38\x8e\xd8\x24\
\xd8\x48\x5a\x45\x88\x4d\xf8\x00\x29\x64\x58\x98\x6e\x6c\x4c\xbd\
\xb8\x7b\xb1\x7c\xa8\x32\xc5\xc9\x01\x63\x3d\x2d\x6e\xc2\xc6\xda\
\x8b\x3b\xb0\x29\x5e\x2d\x28\x18\x16\xa6\x88\x4d\xac\x34\x95\xd8\
\xd4\xc7\x9a\x0b\xc0\x64\xae\x3d\x93\xd8\x54\x7a\x71\x06\x9b\xfa\
\x35\xf8\x96\x78\xf0\xf7\x18\xf9\x5f\x0b\x59\xaf\x63\xea\xa3\xd8\
\x63\x32\x89\xc7\x12\x3b\x16\x41\x1b\x90\x8e\xbc\x40\x8e\x49\x2e\
\x35\xc0\xe4\x83\x50\x29\x95\xb1\xec\x9a\x0d\xaf\x02\x26\x5f\xc1\
\xdb\xfd\x53\x0b\x1b\xce\xcf\x0e\xc9\x28\x9f\x25\xe6\x63\x74\x0c\
\xb0\x2f\x95\x1d\xb4\x76\x97\xa8\xb8\x9b\x12\xb0\x0d\xdc\xaa\x30\
\xd0\x86\x85\xb1\x32\x06\xd8\x97\xfa\x1e\xd9\x70\xd2\x81\x70\x2e\
\x40\x68\x9b\x21\xab\xc2\x98\x31\x2c\x0c\xec\x18\x60\x5f\x9a\xba\
\x60\xdb\x69\x3d\x82\x64\x7b\x3a\x6c\x33\x6a\x55\x18\x93\x86\x85\
\xc1\x19\x03\xec\x4b\x6b\x9f\x6b\xbe\x70\x86\x92\x6c\x4f\xc6\x36\
\x07\x56\x85\x31\x6f\x58\x98\xc8\x36\x7c\x4e\x1d\xbd\xbf\x67\x68\
\x0a\x53\xb2\x3d\xe0\xcd\x1b\x8c\x2b\xc3\x16\x0b\x56\xed\xe9\xeb\
\x58\x9d\x83\xbf\x80\xbd\xd8\xd9\xb1\xea\x2c\x1e\x0c\xb3\xc8\xa9\
\xbb\xc7\xf7\xff\xbc\x82\x20\xd9\x8b\x58\x15\xc6\xaa\x61\xa6\x39\
\x8d\xf4\xf5\xa1\x1f\x30\x55\x24\x7b\x41\xab\xc2\x58\x32\xcc\x07\
\xa7\xc1\x5e\x3e\x3a\x45\x40\xec\x0e\x7b\x1f\xb4\xc6\x83\x6e\x98\
\x33\x4e\xe3\xfd\x9b\x60\x12\x0e\xdf\x9d\x29\xce\x68\x91\x04\xd1\
\x30\xaf\x9c\x48\x7a\xf6\xd5\x6b\x75\xbc\x06\xd1\x30\xb4\x8c\x9b\
\x41\x78\x77\x24\xd9\x44\x52\x84\x81\x0f\xa6\xd0\xde\x8c\x3a\x18\
\x1a\x60\x8e\x69\x8d\x87\x96\x37\xe5\x54\x6d\xc7\xd8\x70\x24\xc3\
\x3d\xad\xf7\x11\x72\xd2\xc4\x37\x43\x38\x86\x07\x22\x99\x8d\xa1\
\x29\xa3\xe1\x60\x4c\x7f\xbb\x91\x63\x84\x08\x92\xd9\xfb\x79\xc5\
\x4a\x98\xe8\xb2\xdc\xd0\xe7\x18\xa4\xba\x64\xb6\xa7\x08\xc0\x86\
\x8f\x2b\xd7\x2d\xb3\x8e\x71\xea\x4a\xe6\x67\x9a\x1b\x4e\x58\x89\
\x32\xde\x94\xee\x18\xaa\xa2\x64\x0e\xa7\x6a\xeb\x86\x9b\x25\xef\
\x63\x1f\x1c\xa3\xd5\x92\xcc\xc9\xed\x46\x20\x11\xa0\xc8\xfe\x60\
\x15\xc7\x80\x55\x24\x33\x7c\xcb\x2c\x5a\x64\xf8\x49\x3c\xba\xc8\
\x31\x66\x79\xc9\x8c\x3d\xf6\x01\x36\x62\xe4\x84\x1e\x0e\xe6\x18\
\xb6\xb0\x64\x4f\x6f\x99\xcd\x04\x67\xe6\xd0\x8b\xa7\x16\xd8\x0c\
\x48\xe6\xbc\x44\x9a\x88\xed\x81\x44\x9f\x97\x38\x8f\x64\xe3\x91\
\x7b\x84\xac\x63\x5a\xe3\xa1\x3f\xad\x9f\xd8\x8a\x91\x91\xac\x00\
\x6c\x72\x12\x08\xd7\xd0\xd4\x84\x57\x8c\x80\x64\x39\x60\x93\x90\
\x40\x78\x7f\x5e\x99\x08\x8b\xe1\x96\xec\x14\xd8\x64\x23\x10\x89\
\x29\x02\x13\x64\x31\xac\x92\xa5\x81\x4d\x2a\x02\x91\x9b\xe6\x36\
\x71\x16\xc3\x27\x59\x02\xd8\xe4\x21\x10\xe9\xa9\xda\x13\x6a\x31\
\x4c\x92\x91\xbd\xda\x9e\x69\x39\x2e\xa3\x73\xbb\xd1\x44\x5b\x0c\
\x87\x64\x4f\xc0\x26\x03\x81\x68\xde\x32\x3b\x01\x17\x43\x2e\xd9\
\x03\xd8\xac\xbe\x40\xf4\x1f\xfb\x30\x31\x17\x43\x2b\xd9\x1f\xb0\
\x59\x77\x81\xa0\x3c\xba\x68\xc2\x2e\x86\x50\xb2\xb7\x65\x56\x5c\
\x24\x54\xcc\x2e\x5f\xef\x3f\x24\x85\x9e\xf3\x44\x65\x52\x7e\x53\
\x7a\x4d\xbc\xd2\x22\x7c\x6d\xfb\x42\xb4\x07\x42\x7c\xf1\x36\x42\
\x38\x5e\x6d\x4b\xc2\x9e\x60\xe6\xaf\x33\xbd\xc0\x8f\xc4\xd3\xb0\
\x47\x64\x5e\x18\x3d\xb8\x84\x51\xc3\x7c\xe8\x05\x6e\x55\x98\x57\
\x37\x4c\xc0\xaa\x28\x83\x5d\x7c\xc8\x30\xd3\x7a\x19\xb2\x2a\xcc\
\x2b\x1a\x26\x6f\x55\x94\x91\x8e\xde\x6f\x98\x45\xbd\x8c\x5a\x15\
\xe6\x55\x0c\x53\xb7\x2a\x4a\x77\x77\xef\x34\xcc\x90\x5e\x50\x9c\
\xc6\xe3\xdc\x30\x64\x5a\x72\x13\x49\xf1\xf5\xda\x39\xf9\x7b\xa7\
\x95\x37\xc3\x92\xc7\x2a\x58\x6c\x1d\xad\x6a\x3e\x86\x61\x6e\xf9\
\x52\xb1\xf7\xdb\x5a\x8e\xbc\x93\xac\x89\x07\xc3\x9a\xce\x00\xd1\
\x6c\x6b\x6d\x4c\x9b\x61\x50\x9b\xba\x0c\xe8\x62\xd7\x36\xab\x86\
\x91\x5c\x57\x81\xd8\xd6\xd4\x86\x06\xc3\x10\xb6\x6d\x61\xd0\xc2\
\x96\x6d\x96\x0c\x63\x1d\xad\xd0\xb5\xad\x7e\xd5\xb5\x86\xe9\xea\
\x25\xd6\xfd\xf1\x6d\x43\x37\x4c\x65\x0c\x50\xc5\xb6\xca\x35\x56\
\x19\xa6\xa2\x97\x7a\x37\x07\x39\x66\x47\x01\x35\x4c\x9d\x96\x4a\
\x6a\xba\x48\x19\x98\x64\x47\x43\x1b\x03\xdc\x76\xc8\x50\xbd\x07\
\xe5\x01\x97\xc9\xa2\x28\x9e\x02\x44\x2b\xdd\xfe\x29\xd0\x87\xbe\
\x3f\x7e\xf3\xdb\x5b\x00\x26\xd0\x44\xb4\x31\xc0\xcc\x8a\xc4\xb0\
\x65\xa2\x69\x58\x13\x03\x01\x6c\x95\x0b\xe7\xc6\x96\x97\x2c\x07\
\x8c\xaf\x4d\x68\x63\x80\x1d\x0b\xd4\xb2\x4d\xda\x30\xc2\x3b\x65\
\x48\x16\x35\xb8\x10\x26\x6c\x19\xc9\x4e\x81\x91\x37\x02\x6d\x0c\
\x90\xb0\x3d\x92\xb6\x49\x18\xc6\x7a\xe0\xe9\xc0\xc6\xd4\x1e\x5a\
\x6c\x67\x92\xa5\x81\x51\xad\x15\x6d\x0c\x50\xa0\x3d\xdc\xb6\x71\
\x19\xa6\x72\xf1\x94\xc1\x26\xdc\x1e\x12\x6c\x49\xc9\x12\xc0\x06\
\x57\xa3\x3e\x2e\x10\xb5\x5f\xb1\x3d\x1c\xb6\x51\x8e\x25\xa2\x8d\
\xe2\x2c\x00\xbd\x67\x19\x2b\xcb\x11\x76\x6c\x58\x5f\x77\x40\xa8\
\x4b\x32\x38\xbf\x6f\x51\xd9\x16\xdf\x94\xde\xba\x44\xcc\x1b\x81\
\x93\x41\xc0\xb6\x65\xa4\xc8\x4f\x86\x35\x2d\x08\x67\xfb\x2b\xe3\
\xc3\xb6\x27\xc3\x2a\x17\x21\x70\x5d\xc5\x1d\x04\x6c\x5b\x5a\x6b\
\xfe\x30\xac\xe6\x9b\x38\xdb\x39\x18\xbb\xb6\x3d\x0c\xcb\x7f\x47\
\xf8\x12\x58\x32\x08\xd8\xb6\xd4\x20\xb8\x16\x3f\x8a\xb3\x3d\x4c\
\xb1\x65\xdb\x9f\x61\xc9\x0f\x29\x8e\x56\x68\x05\x01\xdb\x96\x33\
\x22\xd7\xe4\xdf\x70\xda\x2d\x1c\x7c\xdb\x2e\xeb\xba\x86\xff\xab\
\xde\x56\x84\xb9\x37\x5b\xd4\x4b\xb1\x27\xac\xc9\xe3\xb5\xc0\x20\
\xed\xc3\x01\xb6\x05\xa4\x2c\xcb\xff\xca\xfc\x03\x0c\x3a\xb7\xd7\
\x9d\x1e\xca\x90\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\
\x00\x00\x07\x3c\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x06\xdc\x49\x44\x41\x54\x48\
\xc7\x55\x95\x59\x6c\x54\xd7\x19\xc7\xff\xe7\x2e\x73\x77\xcf\x6e\
\x7b\xc6\x0e\x60\x30\x13\xb1\x04\x12\x07\x95\x94\x90\x1a\xda\x4a\
\x49\x53\x35\x54\xb4\x52\x9a\x86\xb4\x4d\xe9\xf2\xd4\x28\x6d\x43\
\x15\xa9\x2f\x51\x93\x80\xd2\x2c\x28\x52\x15\xa9\x8d\x68\x95\xbc\
\x44\x6d\x1a\x51\xd2\x46\x4d\xd8\x8c\xed\xb8\x40\x69\x4b\xd8\xed\
\x19\xb0\xc7\x60\x7b\x6c\x33\x1e\xcc\xc0\x8c\x67\xee\xb9\xe7\x7c\
\x7d\x18\x6c\xc8\x79\x3c\xba\xff\xef\xf7\x2d\xff\xef\x1e\x46\x44\
\x08\x82\x00\x35\xbf\xbe\x16\x44\x84\xcf\x4e\x9d\xa1\x6a\x6d\x8e\
\x40\x44\xe8\xe8\xc8\xd0\xa9\x33\xa7\x09\x9c\x73\xf4\xf7\x0f\xbc\
\xc2\x39\x07\xab\xd7\xeb\xb8\x59\xad\xd0\xa9\xcf\xce\x40\x79\x60\
\xfd\x43\x64\x1a\x26\xda\xda\xd3\x0c\xb7\xa2\x85\xa7\xaf\x16\xbb\
\x87\x72\x59\xf2\x7d\x9f\xfe\x75\xec\x18\x29\x00\xd0\xdb\xd7\x3f\
\x1b\x08\x7e\x64\xe9\x92\x0e\x1c\x3c\x74\x08\xba\xa6\x6d\x54\x00\
\xa0\xe7\x50\xdf\xef\xbf\xfb\xf8\xf7\xf0\xe0\x86\x4d\x88\x44\x23\
\x6c\x5d\x57\xd7\x00\x82\x20\x40\x10\x04\x10\x42\xe0\xe4\xc9\x53\
\xef\x07\x41\x00\xce\x39\x10\x04\x01\xe3\x9c\xe3\x70\x4f\x2f\x15\
\xa6\x26\x89\x73\x4e\x44\x04\x85\x31\x46\xfd\xfd\x03\x57\x97\x74\
\x2c\x42\x22\x16\x47\xff\xc0\x00\xc6\xc6\x26\xb6\xb3\x6c\x2e\x07\
\xd7\x75\xc9\x75\x5d\x10\x49\x9c\x3c\x79\x1a\x89\x64\x8c\xb1\xf5\
\x5f\xd8\x48\xb3\xb3\xb3\xf0\x03\x1f\x6f\xbd\xf5\x3b\x28\xaa\xa2\
\xdc\x7b\xef\x1a\x52\x5e\xda\xf9\x1b\x16\x8f\xc7\x91\x6e\x49\x61\
\xa6\x58\x44\x24\xd2\x44\xc9\x78\x02\x8c\x88\x20\x84\x00\x11\x31\
\xc6\x98\x02\x40\x9c\x3b\x7f\x61\xa0\x52\x99\xdb\x90\x6a\x6b\x46\
\xba\x35\x8d\xf2\xf5\xeb\xc5\x58\x2c\x96\x94\x52\x40\xc1\xad\xc3\
\x18\x23\x55\x55\xc5\x3f\x3e\xfa\x27\x01\x6c\xc3\x5d\x8b\xd2\x48\
\xc6\x93\xd0\x35\x0d\x60\x48\x1c\xea\x39\x42\xa3\xa3\x57\x9e\x66\
\x44\x84\xb3\xe7\x06\x11\x08\xfe\x66\xf9\x7a\xf9\x99\x74\x3a\x05\
\xaf\xc9\x45\xc4\x0b\x43\xd3\x35\x0c\x65\xb3\x18\x1f\x2f\x20\x95\
\x6e\xc5\xdd\x99\x0c\x63\x44\x84\x07\xd6\x7f\x89\x0a\x93\x13\x10\
\x42\x62\xd7\xae\x97\xf1\xd4\x53\x4f\x80\x73\x8e\x81\xa3\x47\xe1\
\xd7\x39\xc2\x61\xef\x52\xe9\x5a\xa9\xb3\xab\xab\xab\x91\xd2\x93\
\xdb\x9e\x78\x36\x9d\x4a\xa3\xb5\xa5\x05\xaf\xbd\xfa\x3a\x56\xaf\
\xee\x42\x26\xb3\x0a\x9e\xe7\xc1\xf5\xec\xfd\x3e\xf7\x3b\xd3\xe9\
\x74\x23\x75\x22\xc2\x81\xfd\x3d\xf0\xb9\xcf\xc6\xc6\xc7\x9b\x4e\
\x9c\x38\xf1\xb7\x2b\x57\xc6\x36\xc9\x40\xe0\x97\x3b\x9e\xfd\x51\
\xba\x2d\xb5\x67\xd5\x8a\x95\x60\x8c\x35\x8a\x9d\x9f\xdd\xfc\xcc\
\x38\xe7\x98\x9e\x9e\x5e\xdf\xd3\xd3\x77\x64\x24\x9f\x7f\x4e\x92\
\x9c\xbf\x67\x44\x74\xbb\x4b\x00\x34\xc6\x18\x14\x45\xd1\xff\x77\
\xf2\xf4\xb1\x44\x32\xde\xed\x78\xee\xab\x22\x10\x54\x2a\x95\xbe\
\x09\x80\x01\x58\x10\x38\x00\x02\x55\x55\xf1\xf1\x27\x07\x2b\xf3\
\x9d\x32\x42\x21\x28\x8a\x82\x64\x32\xb9\xf7\xe2\xa5\xe1\x67\xee\
\x14\x54\x1a\xed\x3d\xff\x46\x34\x1a\xd1\x4d\x2b\x04\xdb\xb6\x61\
\x99\x16\x82\x20\xc0\xb1\x7f\x1f\xc7\xf0\x70\x7e\x37\x00\x28\x8c\
\x31\x30\xc6\x30\x57\xab\x3f\x34\x3a\x7a\xf9\xe7\x91\x68\x04\x8e\
\xe3\xc0\x73\x1c\xa8\x8a\x82\x89\xc2\x04\xaa\x95\x1a\x9a\x9b\x93\
\x8d\xbc\xcf\x5f\xc8\x82\x31\x86\x5c\x2e\xdb\x77\x77\x26\x03\xc7\
\xb1\xe0\xd8\x0e\x34\x4d\x47\xad\x56\x43\x36\x77\x11\x9a\xaa\x63\
\x79\x66\xd9\x63\x00\xa0\x08\xc9\x01\x12\x1f\x64\x32\xcb\x11\x8d\
\x47\x60\xdb\x36\x0c\xd3\x00\x63\x0c\x17\x86\x06\x21\x85\x84\xeb\
\xba\xf5\x42\xa1\xf0\x77\x00\x50\x86\x2e\x64\x7f\xf6\xad\xad\x8f\
\x6f\xdd\xbc\xe9\xab\xd8\xf6\xe4\xd3\xf0\x1c\x0f\x66\xc8\xc0\x4c\
\x69\x06\xe3\x63\x05\xc4\x62\x71\x5c\x9b\x9d\x59\x5c\x2c\x16\x01\
\x00\xca\xd4\xe4\x74\x53\x2c\x16\x43\x32\x99\x44\x3e\x9f\x47\x5b\
\x7b\x07\xde\x7b\xef\xcf\xb8\x34\x3c\x8c\x58\x3c\x8e\x20\xe0\x7d\
\x8c\xa9\x53\x2b\x57\xae\x68\x08\x56\xac\x5e\xf1\x32\x63\x0c\xae\
\xe3\xa2\xc9\xf5\x90\x6a\x69\xc5\x0b\x2f\xbc\x88\xe3\x47\x4f\x20\
\x91\x88\x22\x10\xbc\xdb\x30\x34\x08\x21\x1b\x02\x92\x12\x3b\x7e\
\xb5\xc3\xf6\x3c\x0f\x9e\xe7\x21\x1a\x8d\x20\x16\x8d\xe1\x8f\x7b\
\xde\x41\xad\x56\xfb\x83\xae\xeb\xb8\x6f\xed\x7d\x48\xc4\xe2\x0d\
\x01\x24\x83\xe7\x3a\xf5\xed\x3f\xde\xce\xba\x37\x77\x6f\x8f\x27\
\x13\x81\xeb\x79\x58\x75\xcf\xaa\xaa\xae\xeb\x3f\x4d\xb5\xa6\xe0\
\x79\x1e\x88\xe8\xb6\xf9\x84\x10\x0b\xfe\x20\xa2\x79\xa3\x31\xba\
\xf5\x95\xaa\xaa\x28\x16\x67\xbe\x76\xfa\xf4\x99\x0f\x6e\x56\x2a\
\x56\x3c\x1e\x43\x3c\x11\x87\xeb\x3a\x50\x15\x65\x4e\x53\xb5\xb7\
\x2d\xcb\xda\x6d\xdb\x76\x1e\x00\xa4\x24\x00\x04\x5d\xd7\x3f\x0f\
\x20\x22\x0d\x40\x2b\x63\xac\x00\x40\xcc\x07\x1f\x1e\xc9\xbf\x74\
\x61\x30\xfb\xeb\x48\xb8\x09\xae\xeb\xc2\x71\x6c\x84\x0c\x1d\x96\
\x65\xc1\xb5\x1d\x18\x86\x01\x45\x51\x50\x2e\x97\xe9\xc6\x8d\x1b\
\x6f\xa4\x52\xa9\xe7\x18\x08\x4c\x51\x3f\x07\x60\xb7\x8b\x68\x54\
\xc1\x39\xf7\x8e\x1e\x3d\xfe\x69\xdd\xf7\xd7\xc4\xe3\x71\x58\xb6\
\x09\xc7\xb1\x61\x18\x06\x4c\xd3\x84\x63\xdb\xd0\x35\x1d\x44\x04\
\xdf\xf7\x31\x39\x35\x85\xfc\xe8\x28\xaa\x95\xb9\xe2\x97\x37\x6f\
\xba\xdf\xb2\xcc\xcb\x77\x6e\x0f\x01\x20\xc6\x18\x34\x4d\x43\xa9\
\x74\xed\x91\x8f\x3f\x39\x50\x56\x35\x6d\x4d\x7b\x7b\x3b\x22\x91\
\x08\x3c\xcf\x85\x69\x99\x70\x1c\x07\xae\x6d\x43\x57\x35\x10\x11\
\xaa\xd5\x2a\x86\xf3\x23\x18\x1e\xc9\x43\x04\x12\x9e\xe7\x25\x86\
\x86\xb2\x3f\x04\x00\x0d\x00\xa6\xa6\xa7\xef\xa0\x50\xe8\xf2\xe8\
\xd8\x87\x53\x53\x57\x1f\x6e\x6f\x6f\x87\x63\x3b\x30\xcc\x10\x42\
\xa6\x0e\x33\x64\xc0\x32\x2d\x84\x42\x21\xa8\x8a\x02\x41\x84\x72\
\xb9\x8c\x5c\xee\x22\x2a\x95\x2a\x84\x10\x30\x8c\x10\x16\x2f\x59\
\x34\xd6\x96\x4a\xed\x59\x00\x14\xaf\xce\x34\x16\x49\xd3\xd6\x15\
\x26\x27\x07\x38\xe7\xa1\x8e\x25\x8b\x61\x58\x06\x4c\x33\x84\x50\
\x28\x04\xc3\x34\x61\x19\x06\x74\x4d\x87\xa2\x28\x08\x84\x40\x71\
\xa6\x88\xa1\xa1\x1c\x38\x0f\x40\x44\x08\x87\xc3\x88\x46\x23\x7d\
\x86\x11\xea\x9e\x29\x95\xd0\xd2\xdc\xdc\x00\x98\xa6\x89\xc1\xc1\
\xec\xf3\xbd\xbd\xbd\xbb\x84\x94\x58\xbc\xe8\x2e\x68\xba\x8a\x75\
\xeb\xee\x47\x93\xd7\x04\x92\x12\x44\x8d\x87\x4f\x61\x0a\x38\xe7\
\x18\x9f\x2c\x20\x37\x94\x03\x11\x60\x9a\x16\xa2\xd1\x08\xca\x37\
\xae\xbf\x52\x98\x9c\x78\x7e\xa2\x30\x0e\xcb\xb2\x6e\x03\xde\xff\
\xcb\x5f\x3f\xdc\xbb\x77\xdf\x37\x18\x63\xa8\xd5\x6a\xf0\xb9\x0f\
\x9f\x73\x54\x2a\x15\xd4\xeb\x75\x6c\xdc\xf8\x20\x76\xee\x7c\x11\
\x6b\xd7\xdc\x03\x22\x42\x6e\xf0\x22\x2e\x5d\x1a\x5e\x08\x6c\xd9\
\x26\x8a\xc5\xe2\x4f\x82\x80\xbf\x1d\x0a\x85\xd0\xd9\xd9\x09\xcb\
\xb2\x6e\xef\xc1\xbe\x7d\x1f\xb5\xbf\xfb\xce\xbb\xff\x19\x1f\x9f\
\x68\x99\x87\x08\x21\x20\x84\x80\x94\x12\x04\x42\xcd\xf7\x51\x9b\
\x9b\xc3\xf7\x7f\xb0\x0d\x5b\xb6\x3c\xd6\xf8\xfd\x47\x9a\x00\x10\
\x4a\xa5\x6b\x0f\x83\xb1\xfd\x52\x4a\x2c\x5d\xda\x81\x70\x53\x13\
\xa4\x94\x68\x4e\x24\x1b\x80\x83\x87\x7a\x00\xc6\xd0\x7b\xb8\xf7\
\xf5\x9e\xc3\x47\x7e\x21\xa4\x80\xaa\xaa\x90\x52\x2e\x40\xa4\x94\
\x20\x49\x98\xab\xcd\x21\x95\x4e\x61\xf7\x9b\xaf\x41\xd7\x35\xbf\
\x58\x2c\x7e\xc5\x34\xcd\x4f\x89\x08\xaa\xaa\xc2\x73\x3d\xa4\xd3\
\x69\xa8\xaa\x0a\xdb\xb2\x6e\x3d\x23\x07\x7a\x1a\x0e\x92\x84\xd9\
\xd9\x59\x0c\x8f\x8c\x7c\xe7\xec\xd9\xb3\xbf\x9d\x9a\x9c\xba\x2b\
\x08\x02\xa8\xaa\x0a\xc6\xd8\xc2\x42\x56\xab\x73\xd8\xfa\xed\x2d\
\x37\x1e\xfd\xfa\x23\x5f\x94\x52\x9e\xe3\x9c\xa3\xb5\xa5\x15\x6d\
\xe9\xf4\xc2\xac\x00\x40\x51\x94\x5b\x15\x1c\x3c\x3c\xef\x52\x95\
\x88\x70\xbd\x7c\x53\x08\x21\x50\xab\xd5\x50\x2a\x95\x50\xaf\xfb\
\xeb\x54\x55\x79\x94\x29\xc8\x44\xc2\x91\xd9\x65\xcb\x96\xfe\x29\
\x12\x0d\xff\x57\x51\x19\x34\x55\x43\xa6\x73\x39\x74\x5d\x87\x94\
\x12\x77\x1e\x45\x51\xf0\x7f\x60\x84\x69\x65\x48\xcf\xfa\x14\x00\
\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x65\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x05\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x54\xe5\x19\xc7\x7f\xef\x39\x67\x77\xcf\xd9\
\x73\x36\xd9\x4b\x16\xd8\x10\xb9\x08\xc6\x82\x8a\x8a\x99\x62\x11\
\x05\x6a\x47\xad\x9d\x8a\x83\x9d\xb1\x17\x6c\x6b\x69\xeb\x27\xed\
\x4d\x3a\x76\xec\x4c\x6d\x55\x1c\xab\x15\x1d\x3b\xf6\xc2\xd0\x8e\
\x8e\x2d\xb5\x6a\x29\xb6\x4e\xbd\x00\x21\x89\x29\x58\x6c\xb9\x5f\
\xb2\x81\x64\x03\x49\x36\x09\x9b\x4d\x08\x6c\xb2\xbb\xe7\xf2\xf4\
\x43\x48\xd0\xe7\xe3\x3b\xf3\x3c\xbf\xf7\x79\x9e\xff\xff\x7d\x95\
\x88\xe0\x79\x1e\xe5\x6a\xe5\x5a\x44\x84\x03\x07\x0f\xcb\x78\x79\
\x42\x10\x11\xe6\xcf\x6f\x94\x83\x87\x0f\x09\xae\xeb\xd2\xd6\xd6\
\xfe\xb4\xeb\xba\xa8\x4a\xa5\xc2\x85\xf1\x92\x1c\x3c\x70\x18\xed\
\xc6\x65\x37\x8b\x19\x31\x99\xdd\x50\xaf\xb8\x58\xad\x76\xe8\x6c\
\x61\x65\x47\x67\x56\xaa\xd5\xaa\xfc\x7b\xef\x5e\xd1\x00\x5a\x5a\
\xdb\x46\x3d\xdf\xdd\x7d\xf9\xbc\xf9\xec\xd8\xb9\x93\x90\x61\xac\
\xd0\x00\x9a\x77\xb6\xfe\xee\xab\xf7\x7e\x9d\x9b\x96\xaf\x22\x9e\
\x88\xab\xa6\xa6\xa6\x0f\xf0\x3c\x0f\xcf\xf3\xf0\x7d\x9f\xfd\xfb\
\x0f\xbe\xee\x79\x1e\xae\xeb\x82\xe7\x79\xca\x75\x5d\x76\x35\xb7\
\x48\x7e\x70\x40\x5c\xd7\x15\x11\x41\x53\x4a\x49\x5b\x5b\xfb\xd9\
\x79\xf3\xe7\x50\x97\x4c\xd1\xd6\xde\x4e\x6f\x6f\xff\x7a\x95\xed\
\xec\xc4\x71\x1c\x71\x1c\x07\x91\x80\xfd\xfb\x0f\x51\x97\x4e\x2a\
\xb5\xec\xd3\x2b\x64\x74\x74\x94\xaa\x57\xe5\xa5\x97\x7e\x8d\xa6\
\x6b\xda\x75\xd7\x2d\x11\xed\x89\x8d\xbf\x50\xa9\x54\x8a\xfa\x99\
\x19\x86\x0b\x05\x6e\xbb\x67\x6d\xd0\x9b\x9e\x21\x4a\x44\xf0\x7d\
\x1f\x11\x51\x4a\x29\x0d\xf0\x8f\x1e\x3b\xde\x5e\x2a\x4d\x2c\xcf\
\xcc\x9e\x41\xfd\xac\x7a\xc6\xce\x9d\x2b\x24\x93\xc9\x74\x10\xf8\
\x68\x5c\x0c\xa5\x94\xe8\xba\xee\xff\xf3\xed\x7f\x09\xa8\xe5\x97\
\xcd\xa9\x27\x9d\x4a\x13\x32\x0c\x50\xd4\xed\x6c\xde\x2d\x3d\x3d\
\x67\xee\x57\x22\xc2\x91\xa3\x27\xf0\x7c\xf7\x85\xb1\x73\x63\x0f\
\xd5\xd7\x67\x88\xd5\x38\xc4\x63\xb5\x18\x21\x83\x8e\x6c\x96\xbe\
\xbe\x3c\x99\xfa\x59\x5c\xd9\xd8\xa8\x94\x88\x70\xe3\xb2\x5b\x24\
\x3f\xd0\x8f\xef\x07\x3c\xf5\xd4\x93\xdc\x77\xdf\x57\x70\x5d\x97\
\xf6\x3d\x7b\xa8\x56\x5c\x6a\x6b\x63\xa7\x8a\x23\xc5\x85\x4b\x97\
\x2e\x45\x89\x08\x2f\xbe\xf8\x9b\xef\xfd\xf9\x4f\x5b\x9f\x77\x5d\
\x17\xd7\x75\xf1\x83\x80\x52\xe9\x02\x6f\xfc\xed\x35\x2a\x95\xf2\
\x7b\xcb\x9f\x79\xf6\x36\x0e\x1c\x40\xfa\xfb\xb3\x4a\x44\x78\xff\
\xbd\x66\xaa\x6e\x55\xf5\xf6\xf5\xd5\xec\xdb\xb7\xef\xef\x67\xce\
\xf4\xae\x0a\x3c\x9f\x1f\x6d\xf8\xfe\xb7\xeb\x67\x67\xb6\x04\xd7\
\x2c\x39\xd8\x07\x4b\xb2\xf0\xc4\xf4\xee\xa6\x76\xe6\xba\x2e\x43\
\x43\x43\xcb\x9a\x9b\x5b\x77\x77\xe7\x72\x0f\x07\x12\x4c\x9d\x2b\
\x11\xb9\x34\x25\xc0\x50\x4a\xa1\x69\x5a\xe8\x7f\xfb\x0f\xed\xad\
\x4b\xa7\x56\xda\x31\xe7\x19\xdf\xf3\xa5\x58\x2c\xde\x0d\x28\x60\
\x3a\xc1\x06\x3c\x5d\xd7\x79\xe7\xdd\x1d\xa5\xa9\x49\x45\xc2\x61\
\x34\x4d\x23\x9d\x4e\x6f\x3b\x79\xaa\xeb\xa1\x8f\x27\x94\x26\xc7\
\x7b\xec\xb9\x44\x22\x1e\x32\xad\x30\xd1\x68\x14\xcb\xb4\xf0\x3c\
\x8f\xbd\xff\xf9\x90\xae\xae\xdc\x26\x00\x4d\x29\x85\x52\x8a\x89\
\x72\xe5\xe6\x9e\x9e\xd3\x3f\x88\x27\xe2\xd8\xb6\x4d\xcc\xb6\xd1\
\x35\x8d\xfe\x7c\x3f\xe3\xa5\x32\x33\x66\xa4\x27\xef\x7d\xec\x78\
\x16\xa5\x14\x9d\x9d\xd9\xd6\x2b\x1b\x1b\xb1\x6d\x0b\x3b\x6a\x63\
\x18\x21\xca\xe5\x32\xd9\xce\x93\x18\x7a\x88\x2b\x1a\x17\xdc\x05\
\xa0\xf9\x81\x0b\xe2\xbf\xd9\xd8\x78\x05\x89\x54\x9c\x68\x34\x4a\
\xc4\x8c\xa0\x94\xe2\x78\xc7\x09\x02\x3f\xc0\x71\x9c\x4a\x3e\x9f\
\xff\x07\x80\xd6\x71\x3c\xfb\xe0\x3d\x6b\xef\x5d\xbb\x7a\xd5\xe7\
\x58\xf7\xb5\xfb\x89\xd9\x31\xcc\x70\x84\xe1\xe2\x30\x7d\xbd\x79\
\x92\xc9\x14\x23\xa3\xc3\x73\x0b\x85\x02\x00\xda\xe0\xc0\x50\x4d\
\x32\x99\x24\x9d\x4e\x93\xcb\xe5\x98\xdd\x30\x9f\xad\x5b\x5f\xe3\
\x54\x57\x17\xc9\x54\x0a\xcf\x73\x5b\x95\xd2\x07\x17\x2f\x5e\x34\
\x99\xb0\xe8\xea\x45\x4f\x2a\xa5\x70\x6c\x87\x1a\x27\x46\x66\xe6\
\x2c\x1e\x7b\xec\x71\x3e\xdc\xb3\x8f\xba\xba\x04\xcb\x9f\xdf\xf4\
\x97\xdb\x7e\xf2\x88\x24\x17\x5f\x2d\x25\x5d\xbf\x5e\x93\x20\x60\
\xc3\x8f\x37\x44\x63\xb1\x18\xb1\x58\x8c\x44\x22\x4e\x32\x91\xe4\
\x0f\x5b\x5e\xa6\x5c\x2e\xff\x9e\x7d\xfb\x7e\xce\x47\x1f\x31\x91\
\xcf\xd3\x1a\x04\x3f\xd5\x08\x14\x31\xc7\xae\xac\xff\xce\x7a\xb5\
\x72\xf5\xca\xf5\xa9\x74\x9d\xe7\xc4\x62\x5c\x75\xcd\x55\xe3\xa1\
\x50\xe8\x81\xfe\xee\xee\x9b\xdb\x80\x57\xc0\xcf\xc1\xba\x69\xc7\
\x4d\x85\x88\xa0\x94\x02\x50\x22\x22\x00\xba\xae\x53\x28\x0c\x7f\
\xfe\xd0\xa1\xc3\x6f\x5e\x28\x95\xac\x54\x2a\x49\xaa\x2e\x85\xe3\
\xd8\xe8\x9a\x36\x61\xe8\xc6\x66\xcb\xb2\x36\x45\xa3\xd1\x1c\x40\
\x10\x08\x20\x84\x42\x21\x3e\x01\x10\x11\x03\x98\xa5\x94\xca\x03\
\xfe\x54\xf1\xae\xee\xdc\x13\xc7\x4f\x64\x1f\x8d\xd7\xd6\xe0\x38\
\x0e\xb6\x1d\x25\x1c\x09\x61\x59\x16\x4e\xd4\x26\x12\x89\xa0\x69\
\x1a\x63\x63\x63\x72\xfe\xfc\xf9\xe7\x32\x99\xcc\xc3\x0a\x41\x69\
\xfa\x27\x00\xea\x52\x13\x93\x5d\xb8\xae\x1b\xdb\xb3\xe7\xc3\x0f\
\x2a\xd5\xea\x92\x54\x2a\x85\x15\x35\xb1\xed\x28\x91\x48\x04\xd3\
\x34\xb1\xa3\x51\x42\x46\x08\x11\xa1\x5a\xad\x32\x30\x38\x48\xae\
\xa7\x87\xf1\xd2\x44\xe1\xb3\xab\x57\xdd\x60\x59\xe6\xe9\x8f\xbb\
\x47\x00\x51\x4a\x61\x18\x06\xc5\xe2\xc8\x1d\xef\xbc\xfb\xfe\x98\
\x6e\x18\x4b\x1a\x1a\x1a\x88\xc7\xe3\xc4\x62\x0e\xa6\x65\x62\xdb\
\x36\x4e\x34\x4a\x48\x37\x10\x11\xc6\xc7\xc7\xe9\xca\x75\xd3\xd5\
\x9d\xc3\xf7\x02\x62\xb1\x58\x5d\x47\x47\xf6\x5b\x00\x06\xc0\xe0\
\xd0\xd0\xc7\x28\x12\x3e\xdd\xd3\xfb\xd6\xe0\xe0\xd9\xdb\x1b\x1a\
\x1a\xb0\xa3\x36\x11\x33\x4c\xd8\x0c\x61\x86\x23\x58\xa6\x45\x38\
\x1c\x46\xd7\x34\x7c\x11\xc6\xc6\xc6\xe8\xec\x3c\x49\xa9\x34\x8e\
\xef\xfb\x44\x22\x61\xe6\xce\x9b\xd3\x3b\x3b\x93\xd9\x32\x0d\x28\
\x9c\x1d\x9e\x34\x92\x61\x34\xe5\x07\x06\xda\x5d\xd7\x0d\xcf\x9f\
\x37\x97\x88\x15\xc1\x34\xc3\x84\xc3\x61\x22\xa6\x89\x15\x89\x10\
\x32\x42\x68\x9a\x86\xe7\xfb\x14\x86\x0b\x74\x74\x74\xe2\xba\x1e\
\x22\x42\x6d\x6d\x2d\x89\x44\xbc\x35\x12\x09\xaf\x1c\x2e\x16\x99\
\x39\x63\xc6\x24\xc0\x34\x4d\x4e\x9c\xc8\x3e\xd2\xd2\xd2\xf2\x94\
\x1f\x04\xcc\x9d\x73\x19\x46\x48\xa7\xa9\xe9\x06\x6a\x62\x35\x48\
\x10\x20\x32\xf9\xf1\x69\x4a\xc3\x75\x5d\xfa\x06\xf2\x74\x76\x74\
\x22\x02\xa6\x69\x91\x48\xc4\x19\x3b\x7f\xee\xe9\xfc\x40\xff\x23\
\xfd\xf9\x3e\x2c\xcb\xba\x04\x78\xfd\xaf\x6f\xbc\xb5\x6d\xdb\xf6\
\x2f\x2a\xa5\x28\x97\xcb\x54\xdd\x2a\x55\xd7\xa5\x54\x2a\x51\xa9\
\x54\x58\xb1\xe2\x26\x36\x6e\x7c\x9c\x6b\x97\x5c\x83\x88\xd0\x79\
\xe2\x24\xa7\x4e\x75\x4d\x17\xb6\xa2\x26\x85\x42\xe1\xbb\x9e\xe7\
\x6e\x0e\x87\xc3\x2c\x5c\xb8\x10\xcb\xb2\x98\xd2\x3a\xdb\xb7\xbf\
\xdd\xf0\xca\xcb\xaf\x7c\xd4\xd7\xd7\x3f\x73\x0a\xe2\xfb\x3e\xbe\
\xef\x13\x04\x01\x82\x50\xae\x56\x29\x4f\x4c\xf0\x8d\x6f\xae\x63\
\xcd\x9a\xbb\x26\x9f\xff\x78\x0d\x20\x14\x8b\x23\xb7\xdf\xfa\xea\
\xab\xb3\xa9\x56\x1f\x62\x64\x64\x16\x83\x83\xbb\xc9\xe5\x1e\xa5\
\x54\xea\x52\x22\xc2\x8e\x9d\xcd\xa0\x14\x2d\xbb\x5a\x7e\xd5\xbc\
\x6b\xf7\x0f\xfd\xc0\x47\xd7\x75\x82\x20\x98\x86\x04\x41\x80\x04\
\xc2\x44\x79\x82\x4c\x7d\x86\x4d\x2f\x3c\x4b\x28\x64\x54\x0b\x85\
\xc2\xad\xb7\xfc\xec\xb1\xcd\xe4\xf3\x9f\xa2\xbb\x1b\x2a\x15\x04\
\x28\x88\x30\x02\xbf\x9d\xfc\x46\xde\x6f\x9e\x54\x50\x20\x8c\x8e\
\x8e\xd2\xd5\xdd\xfd\xe5\x23\x47\x8e\xfc\x72\x70\x60\xf0\x32\xcf\
\xf3\xd0\x75\x1d\xa5\xd4\xb4\x21\xc7\xc7\x27\x58\xfb\xa5\x35\xe7\
\xef\xfc\xc2\x1d\x9f\x09\x82\xe0\xe8\xe2\xeb\xae\xdf\x56\xf6\xfd\
\xbb\xe5\xa2\xd6\x4f\x03\x87\x81\x32\x3c\x38\xd9\xc1\x8e\x5d\x53\
\x2a\xd5\x45\x84\x73\x63\x17\x7c\xdf\xf7\x29\x97\xcb\x14\x8b\x45\
\x2a\x95\x6a\x93\xae\x6b\x77\x2a\x8d\xc6\x78\x6d\x7c\x74\xc1\x82\
\xcb\xff\x18\x4f\xd4\xfe\x57\xd3\x15\x86\x6e\xe0\x2c\xbe\x8a\x3e\
\x58\x7c\x1a\x1e\x38\x0b\x0d\x25\x78\x2f\x0c\x5b\x2c\xf0\xfe\x0f\
\xa4\xa4\xa5\x79\xe8\x4b\xcf\x5e\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x03\x34\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x02\xd4\x49\x44\x41\x54\x38\
\xcb\x55\x92\x4f\x68\x5d\x65\x10\xc5\x7f\xdf\xbb\x37\xaf\x49\x48\
\xd5\x04\x23\x22\x22\x88\x85\x08\x12\x05\x0b\xa2\x0b\xc1\x8d\x90\
\x6c\x14\x04\xa1\xee\x94\x42\x15\x14\x22\xed\x56\x77\x6e\x0d\x82\
\x2b\x05\x97\xba\x15\xdd\x58\x10\x0a\x82\xab\x5a\x50\x2b\x41\x24\
\xb1\x31\x31\x79\x6d\xfe\xbc\x97\xfb\xde\xfd\xf7\xdd\xef\x7e\x33\
\xe3\xe2\x3e\x0a\xce\x76\xe6\x9c\x39\x67\xe6\xb8\x67\x77\xde\xe4\
\xa9\xfe\xe3\xb1\x57\xab\x6f\xbe\x9d\xdc\x48\x92\xb7\x0f\x3e\x5e\
\xa9\xde\x7b\x68\xe8\x66\xb7\x5e\x30\x7f\x98\xe3\x5e\xdb\xdb\x58\
\x3d\x8a\xc3\xdb\xdb\x61\x9f\x5c\x4b\x1c\x37\x57\xe1\xb8\x35\x1a\
\x85\xdf\x2b\xdc\xeb\x7b\x1b\x3c\x98\x9c\xbf\xb5\x1b\x0e\x7f\xda\
\x6a\xfe\xbe\x96\x02\x76\xa3\xbc\x49\xa1\xd5\xc5\x6c\x32\xde\x48\
\x7f\xa9\xb7\x18\xc4\x13\x10\x83\x3f\xab\x24\x1d\x1c\x0c\xa0\x56\
\x18\x04\xd8\x6b\x36\xdd\xfa\x3f\xef\xb3\x9c\x2e\xfe\x20\x26\x6b\
\xa7\x92\x91\x49\x4e\xa9\x35\x99\xe6\x8c\x64\x7c\x2e\x2d\xb5\xde\
\x3e\xf2\xc3\x0b\x07\xf1\x88\xa1\x64\xa8\x19\x00\x56\x44\x18\xc6\
\x26\xbd\xe5\xb7\xae\x57\xea\x3f\x20\x76\x0d\x0c\x28\x04\xee\x34\
\x70\x37\xe0\xf8\xf9\x19\x38\x89\x30\x8c\x39\x85\x2c\x50\x08\x8c\
\x05\xc4\xde\xe5\x91\x99\x2f\xdd\x2b\xbb\x97\x79\x2c\x5d\xe6\x9c\
\xeb\x7f\x5a\x5b\xb3\x91\x49\xfe\xdb\x71\x1c\x5d\x3a\x96\xd1\xce\
\x99\x4c\x48\x1f\x4d\x1f\xee\xcf\xb8\xb4\x19\x6b\xc1\x69\xcc\x28\
\xb4\xba\x38\xd1\x62\xbb\x52\x7f\xbd\xb1\xb0\x9e\x06\x6b\xab\x91\
\x8c\xd9\x6f\xef\x31\x94\x8c\x68\x42\x34\x21\x8f\xc5\x1a\x13\x59\
\x49\x8f\xe2\x30\x39\x8e\x23\x0e\xe2\x11\x5e\x43\xa7\x33\x2a\x8c\
\x22\x64\xf2\x75\x3a\x88\x27\x8c\xa5\xc0\x6b\xc0\xb0\xce\x85\x57\
\x38\x8b\x70\x1c\x9f\x48\xff\x6d\xef\x11\x25\x42\x04\x12\xa0\x35\
\x38\x8d\x70\x18\xa0\xd4\x6f\x12\x7d\x6b\x69\x91\x91\xbc\x48\x26\
\x1d\xed\xdd\x00\xbb\x4d\x67\xbd\xdf\x7b\x29\xa5\xd6\x0f\x19\xb6\
\xbb\x8c\xe5\x33\x4a\x85\x89\x40\x21\x05\xb3\xbd\xf3\x2c\xa7\xb8\
\x97\xef\xbc\xc3\x52\xf2\x00\x0b\xbd\x79\xfa\x6e\xe6\xaa\x73\xee\
\x93\x68\x32\xd7\x58\xc0\x6b\x83\xb7\x40\x63\x81\x5a\x1b\x2a\xf3\
\x75\xa5\xfe\xa3\x4a\xeb\xcd\xda\x1a\x82\xb5\x5d\x20\xe6\x7a\xb3\
\x5f\xf4\x70\x57\x5a\x8b\xd4\xd6\x90\x6b\x49\xa1\x35\x5e\xbb\x21\
\x41\x11\x13\x82\xb5\xf8\x29\x99\xd7\x66\x93\x68\xd7\xd2\xc6\xda\
\xcf\x83\xc4\x2b\x7e\x0a\x3c\x93\x9c\x4c\x26\xd4\xd6\xd0\x5a\xec\
\x2e\x03\x38\x1c\xd1\x04\x6b\x15\x1a\x85\x5a\xaf\x12\x2c\x71\xcf\
\xef\x5c\x6a\x22\xd2\xaf\xd5\x53\x6a\x4d\xae\x15\xb5\x79\xc4\xf4\
\x3e\xf8\x7e\x48\x1a\x85\x4a\xbb\xb0\x14\x0a\x5e\x4f\xd2\x41\x3c\
\xd9\x6f\xad\xbd\xd0\x5a\xa4\x25\x76\x92\x55\xf9\x5f\x99\x75\xe7\
\x2f\xa7\xff\x19\x0b\xe4\x02\xc1\xf6\x1d\xb7\x9f\x5b\x25\xe8\xaf\
\x44\x4b\x88\xd3\x47\x76\x9a\xbb\xd2\xe9\xe6\x52\x21\x8b\x90\x4d\
\xc1\xad\x09\x33\xee\xe9\x94\x42\xfe\xa0\xd2\x79\x6a\xfd\x0e\xaf\
\x6b\xb4\x06\x6a\x20\x40\x34\x08\xd6\x11\x14\xd2\xc9\x0f\x06\xf0\
\x3d\x73\xbd\x37\x98\xef\x49\x8a\x03\x7a\x04\xcc\xd6\x09\x06\xa5\
\x3c\x49\xa5\x97\xa9\xf4\x55\xbc\xae\xd0\x1a\xb4\xf6\x17\x6a\x3f\
\xd2\x73\x5f\x31\xeb\x76\x59\x48\x60\x29\x85\xc5\x94\xff\x00\xe1\
\x78\x1f\x4c\x73\x1c\xbc\x8b\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x07\x6a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x06\x00\x00\x01\x97\x70\x0d\x6e\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x07\x0a\x49\x44\x41\x54\x48\
\xc7\x55\x95\x6b\x70\x5c\x65\x19\xc7\x7f\xef\x39\x67\xf7\x9c\xb3\
\x97\x64\x2f\xd9\x96\x4d\x43\x2f\x50\xc2\x08\xd2\x32\x50\x2d\x96\
\x62\x5b\x75\x10\x61\x04\x06\x54\x04\x01\xc5\xaa\x0c\x1f\x44\x40\
\x40\x1c\x9d\xd1\x51\x04\xf1\x02\x32\x38\xf5\xc2\x54\x07\x66\xb4\
\x53\x2e\x56\x40\x46\xa0\x97\x34\x09\xb1\x05\x94\xd2\x0b\xb4\x49\
\xda\x5c\x20\xc9\xe6\xb2\xd9\x26\xdb\xec\x66\xf7\xbc\xe7\x9c\xc7\
\x0f\x21\x01\x9e\x8f\xef\xcc\xfb\xfc\xde\xe7\x79\xfe\xff\xf7\x51\
\x22\x82\xef\xfb\xd4\xbc\xfa\x6a\x44\x84\xb7\x0e\x1e\x96\x6a\x6d\
\x56\x10\x11\x56\xac\x68\x95\x83\x87\x0f\x09\x5a\x6b\x3a\x3b\xbb\
\x1e\xd2\x5a\xa3\xea\xf5\x3a\x33\xd5\x8a\x1c\x7c\xeb\x30\xc6\x45\
\x6b\x2f\x11\xc7\x76\x58\xd2\xd2\xac\x78\x3f\x5b\xe3\xf8\x44\x71\
\x43\x77\x6f\x8f\x78\x9e\x27\xff\xd9\xbf\x5f\x0c\x80\xf6\x8e\xce\
\x29\x3f\xd0\x7b\xcf\x58\xbe\x82\x5d\xbb\x77\x13\xb1\xac\xf5\x06\
\x40\xdb\xee\x8e\x3f\xdd\x70\xdd\xcd\x5c\xbc\x6e\x23\xa9\x74\x4a\
\x7d\xc2\xdd\xfc\x2a\xbe\xef\xe3\xfb\x3e\x41\x10\x70\xe0\xc0\xc1\
\xa7\x7d\xdf\x47\x6b\x0d\xbe\xef\x2b\xad\x35\x7b\xda\xda\xa5\x30\
\x36\x2a\x5a\x6b\x11\x11\x0c\xa5\x94\x74\x76\x76\x4d\x2c\x5f\xb1\
\x94\xa6\x4c\x96\xce\xae\x2e\x86\x86\x46\x36\xab\x9e\xde\x5e\x12\
\x89\x84\x24\x12\x09\x44\x42\x0e\x1c\x38\x44\x53\x2e\xa3\xd4\xda\
\x4f\xae\x97\xa9\xa9\x29\x3c\xdf\x63\xcb\x96\xdf\x63\x98\x86\x71\
\xfe\xf9\xab\xc4\xb8\xff\x81\x9f\xa9\x6c\x36\x4b\xf3\xe2\x3c\x93\
\xc5\x22\x57\x2c\xf9\x41\xb8\xf8\xa5\x75\xa2\x44\x84\x20\x08\x10\
\x11\xa5\x94\x32\x80\xe0\xed\x77\x8e\x76\x55\x2a\xb3\xeb\xf2\x4b\
\x16\xd1\x7c\x5a\x33\xe5\xe9\xe9\x62\x26\x93\xc9\x85\x61\x80\xc1\
\xfb\xa1\x94\x12\xd3\x34\x83\x7f\xbd\xf8\x6f\x01\xb5\xee\xf4\xa5\
\xcd\xe4\xb2\x39\x22\x96\x05\x8a\xa6\xdd\x6d\x7b\x65\x70\xf0\xbd\
\x5b\x94\x88\x70\xe4\xed\x63\xf8\x81\x7e\xb4\x3c\x5d\xbe\xbd\xb9\
\x39\x4f\xb2\x21\x41\x2a\xd9\x88\x15\xb1\xe8\xee\xe9\x61\x78\xb8\
\x40\xbe\xf9\x34\xce\x6e\x6d\x55\x4a\x44\xb8\x68\xed\xa7\xa5\x30\
\x3a\x42\x10\x84\x3c\xf8\xe0\x2f\xb8\xe9\xa6\xeb\xd1\x5a\xd3\xb5\
\x6f\x1f\x5e\x5d\xd3\xd8\x98\x3c\x51\x3a\x59\x5a\x79\xc1\x05\x17\
\xa0\x44\x84\xc7\x1e\xfb\xc3\xf7\xfe\xfe\xb7\x6d\xbf\xd3\x5a\xa3\
\xb5\x26\x08\x43\x2a\x95\x19\x9e\xf9\xc7\x76\xea\xf5\xda\x2b\x77\
\x2d\xda\x72\x69\x8f\x37\xc8\x74\xad\x1c\x28\x11\x61\xe7\x2b\x6d\
\x78\xda\x53\x43\xc3\xc3\x0d\x6f\xbc\xf1\xc6\x3f\xdf\x7b\x6f\x68\
\x63\xe8\x07\x7c\xff\x9e\x3b\xbe\xd5\xbc\x24\xbf\x75\xf5\xa1\x6b\
\xa7\x99\xd0\x0d\x0c\x7b\x77\x2c\xcc\x6e\x7e\x66\x5a\x6b\xc6\xc7\
\xc7\xd7\xb6\xb5\x75\xec\xed\x1f\x18\xb8\x3b\x94\x70\xfe\x5c\x89\
\xc8\x07\x5d\x02\x2c\xa5\x14\x86\x61\x44\xde\x3c\x70\x68\x7f\x53\
\x2e\xbb\x21\x9e\x4c\xfc\x3a\xf0\x03\x29\x95\x4a\x57\x03\x0a\x58\
\xb8\x10\x07\x7c\xd3\x34\x79\xe9\xe5\x5d\x95\xf9\x4e\xd9\xd1\x28\
\x86\x61\x90\xcb\xe5\x76\x1c\x3f\xd1\x77\xfb\x87\x2f\x54\xe6\xda\
\xfb\xce\xc3\xe9\x74\x2a\xe2\xb8\x51\x62\xb1\x18\xae\xe3\xe2\xfb\
\x3e\xfb\x5f\x7f\x8d\xbe\xbe\x81\x47\x00\x0c\xa5\x14\x4a\x29\x66\
\x6b\xf5\x4b\x06\x07\xdf\xbd\x33\x95\x4e\x11\x8f\xc7\x49\xc6\xe3\
\x98\x86\xc1\x48\x61\x84\x6a\xa5\xc6\xa2\x45\xb9\xb9\x77\xbf\x73\
\xb4\x07\xa5\x14\xbd\xbd\x3d\x1d\x67\xb7\xb6\x12\x8f\xbb\xc4\x63\
\x71\x2c\x2b\x42\xad\x56\xa3\xa7\xf7\x38\x96\x19\xe1\xac\xd6\x33\
\xaf\x04\x30\x82\x50\x83\x04\xcf\xb6\xb6\x9e\x45\x3a\x9b\x22\x16\
\x8b\x61\x3b\x36\x4a\x29\x8e\x76\x1f\x23\x0c\x42\x12\x89\x44\xbd\
\x50\x28\xbc\x00\x60\x74\x1f\xed\xf9\xee\xb5\xd7\x5c\x77\xcd\xa6\
\x8d\x9f\xe3\xc6\xaf\xdd\x42\x32\x9e\xc4\x89\xda\x4c\x96\x26\x19\
\x1e\x2a\x90\xc9\x64\x39\x39\x35\xb9\xac\x58\x2c\x02\x60\x8c\x8d\
\x8e\x37\x64\x32\x19\x72\xb9\x1c\x03\x03\x03\x2c\x69\x59\xc1\xb6\
\x6d\xdb\x39\xd1\xd7\x47\x26\x9b\xc5\xf7\x75\x87\x52\xe6\xd8\x39\
\xe7\x7c\x6c\x4e\xa4\x3b\x77\xb7\xf1\x93\x1f\xff\x54\x82\x20\xc0\
\xf3\x3c\xb4\xf6\xf1\x74\x9d\xdb\x6e\xbb\x95\xcb\x2e\xbf\x94\x1f\
\x3a\x7f\x9c\x7d\xbd\x76\xc4\x2d\x87\x15\x66\x26\xcb\xd7\x19\x12\
\x86\xdc\x73\xef\x3d\xb1\x64\x32\x49\x32\x99\x24\x9d\x4e\x91\x49\
\x67\xf8\xcb\xd6\x27\xa8\xd5\x6a\x7f\x7e\xb5\x76\xc0\x1d\xf1\x27\
\x98\x39\x59\x86\x37\xab\x5f\x36\x08\x15\xc9\x44\xbc\xbe\xf9\xdb\
\x9b\xd5\x86\x4d\x1b\x36\x67\x73\x4d\x7e\x22\x99\xe4\xdc\xf3\xce\
\xad\x46\x22\x91\x5b\x8b\x7d\x63\xb7\xb2\x7f\x06\x5e\x98\xaa\x32\
\xa1\xaf\x5f\x70\xdc\x7c\x88\x08\x4a\x29\x00\x25\x22\x02\x60\x9a\
\x26\xc5\xe2\xe4\x17\x0e\x1d\x3a\xfc\xec\x4c\xa5\xe2\x66\xb3\x19\
\xb2\x4d\x59\x12\x89\x38\xa6\x61\xcc\x5a\xa6\xf5\xb8\xeb\xba\x8f\
\xc4\x62\xb1\x01\x80\x30\x14\x40\x88\x44\x22\x7c\x04\x20\x22\x16\
\x70\x9a\x52\xaa\x00\x04\xf3\xc9\xfb\xfa\x07\xee\x3f\x7a\xac\xe7\
\x47\xa9\xc6\x06\x12\x89\x04\xf1\x78\x8c\xa8\x1d\xc1\x75\x5d\x12\
\xb1\x38\xb6\x6d\x63\x18\x06\xe5\x72\x59\x4e\x9d\x3a\xf5\x70\x3e\
\x9f\xbf\x5b\x21\x28\xc3\xfc\x08\x40\x7d\x50\xc4\x5c\x15\x5a\xeb\
\xe4\xbe\x7d\xaf\xbd\x5a\xf7\xbc\x55\xd9\x6c\x16\x37\xe6\x10\x8f\
\xc7\xb0\x6d\x1b\xc7\x71\x88\xc7\x62\x44\xac\x08\x22\x82\xe7\x79\
\x8c\x8e\x8d\x31\x30\x38\x48\xb5\x32\x5b\xfc\xcc\xa6\x8d\x17\xba\
\xae\xf3\xee\x87\xdd\x23\x80\x28\xa5\xb0\x2c\x8b\x52\xe9\xe4\x65\
\x2f\xbd\xbc\xb3\x6c\x5a\xd6\xaa\x96\x96\x16\x52\xa9\x14\xc9\x64\
\x02\xc7\x75\x88\xc7\xe3\x24\x62\x31\x22\xa6\x85\x88\x50\xad\x56\
\xe9\x1b\xe8\xa7\xaf\x7f\x80\xc0\x0f\x49\x26\x93\x4d\xdd\xdd\x3d\
\xdf\x04\xb0\x00\xc6\xc6\xc7\x3f\x44\x91\xe8\xbb\x83\x43\xcf\x8f\
\x8d\x4d\x7c\xbe\xa5\xa5\x85\x78\x2c\x8e\xed\x44\x89\x3a\x11\x9c\
\xa8\x8d\xeb\xb8\x44\xa3\x51\x4c\xc3\x20\x10\xa1\x5c\x2e\xd3\xdb\
\x7b\x9c\x4a\xa5\x4a\x10\x04\xd8\x76\x94\x65\xcb\x97\x0e\x2d\xc9\
\xe7\xb7\x2e\x00\x8a\x13\x93\x73\x46\xb2\xac\x35\x85\xd1\xd1\x2e\
\xad\x75\x74\xc5\xf2\x65\xd8\xae\x8d\xe3\x44\x89\x46\xa3\xd8\x8e\
\x83\x6b\xdb\x44\xac\x08\x86\x61\xe0\x07\x01\xc5\xc9\x22\xdd\xdd\
\xbd\x68\xed\x23\x22\x34\x36\x36\x92\x4e\xa7\x3a\x6c\x3b\xba\x61\
\xb2\x54\x62\xf1\xa2\x45\x73\x00\xc7\x71\x38\x76\xac\xe7\xbe\xf6\
\xf6\xf6\x07\x83\x30\x64\xd9\xd2\xd3\xb1\x22\x26\x6b\xd6\x5c\x48\
\x43\xb2\x01\x09\x43\x44\xe6\x16\x9f\xa1\x0c\xb4\xd6\x0c\x8f\x16\
\xe8\xed\xee\x45\x04\x1c\xc7\x25\x9d\x4e\x51\x3e\x35\xfd\x50\x61\
\x74\xe4\xbe\x91\xc2\x30\xae\xeb\x7e\x00\x78\xfa\xa9\x67\x9e\xdf\
\xb1\xe3\xb9\x2f\x2a\xa5\xa8\xd5\x6a\x78\xda\xc3\xd3\x9a\x4a\xa5\
\x42\xbd\x5e\x67\xfd\xfa\x8b\x79\xe0\x81\x9f\xb3\x7a\xd5\x79\x88\
\x08\xbd\xc7\x8e\x73\xe2\x44\xdf\x42\x62\x37\xe6\x50\x2c\x16\xbf\
\xe3\xfb\xfa\xf1\x68\x34\xca\xca\x95\x2b\x71\x5d\x97\x79\xad\xf3\
\xdc\x73\x2f\xb6\x3c\xf9\xc4\x93\xff\x1d\x1e\x1e\x59\x3c\x0f\x09\
\x82\x80\x20\x08\x08\xc3\x10\x41\xa8\x79\x1e\xb5\xd9\x59\xbe\xfe\
\x8d\x1b\xb9\xea\xaa\x2b\xe7\xbe\xff\x54\x03\x20\xec\xb0\x3b\x7e\
\x39\x90\x1c\xdf\x3c\x19\x4c\xe7\x4e\xfa\x65\xa6\xc2\xf2\x54\xc1\
\x2f\xde\x3b\x1b\xd4\xb6\x2a\x11\x61\xd7\xee\x36\x50\x8a\xf6\x3d\
\xed\xbf\x6d\xdb\xb3\xf7\xae\x20\x0c\x30\x4d\x93\x30\x0c\x17\x20\
\x61\x18\x22\xa1\x30\x5b\x9b\x25\xdf\x9c\xe7\x91\x47\x7f\x43\x24\
\x62\x79\xc5\x62\xf1\xb3\x77\xe6\xb7\x74\x0e\xea\x11\x26\x83\xe9\
\x39\xb1\xeb\x10\x26\x7c\x98\x0e\xb6\xcf\xad\x91\x9d\x6d\x73\x0a\
\x0a\x85\xa9\xa9\x29\xfa\xfa\xfb\xbf\x7a\xe4\xc8\x91\x5f\x8d\x8d\
\x8e\x9d\xee\xfb\x3e\xa6\x69\xa2\x94\x5a\x30\x64\xb5\x3a\xcb\x35\
\x5f\xba\xea\xd4\xe5\x57\x5c\xf6\xa9\x30\x0c\xdf\x5e\x5d\xbb\xa1\
\x8e\x27\x51\x94\x02\x2d\x30\x54\x87\xfe\x3a\x04\xdc\x3c\x57\xc1\
\xae\x3d\xf3\x2a\x35\x45\x84\xe9\xf2\x4c\x10\x04\x01\xb5\x5a\x8d\
\x52\xa9\x44\xbd\xee\xad\x31\x4d\xe3\x72\x65\xd0\x9a\x6a\x4c\x4d\
\x9d\x79\xe6\x19\x7f\x4d\xa5\x1b\xff\x67\x98\x0a\xcb\xb4\xf8\x78\
\xdf\xb5\x30\xe2\x5d\xcd\x84\xfe\x0a\xe5\xc0\xa4\x2e\xcf\x12\x55\
\x4f\x61\x1b\xfc\x1f\x0b\x03\xc8\x05\x59\x65\x3b\x42\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x00\xc5\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x00\x67\x49\x44\x41\x54\x78\x9c\xed\
\xc1\x31\x01\x00\x00\x00\xc2\xa0\xf5\x4f\xed\x69\x09\xa0\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xe0\x06\
\x44\x9f\x00\x01\xc3\xcd\x96\xea\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
\x00\x00\x02\x7a\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07\x74\x49\x4d\x45\x07\
\xdd\x06\x0d\x08\x1d\x33\x51\xf1\xd4\x9e\x00\x00\x02\x07\x49\x44\
\x41\x54\x38\xcb\x65\x91\x3d\x6b\x14\x41\x18\xc7\x7f\x73\xb7\x2c\
\x6c\x0c\xb9\x20\xa6\x0d\x58\x45\x90\x88\x1f\x40\xb1\x12\x2e\x8d\
\x82\x5f\x41\x08\x58\x05\xb4\xb5\x0c\xd8\x09\xa2\x20\x08\x7e\x88\
\x58\x05\xac\x04\xfb\x08\x56\x42\x40\x48\x9d\xdc\xe5\xf6\x66\xe7\
\x75\x67\xc7\xe2\x19\x72\x07\x0e\xfc\x18\x06\xe6\xff\x32\xcf\xa8\
\x9c\x1f\x00\xb7\x81\x09\x70\x0b\xa8\xf7\x40\x1d\x42\x7a\x02\xe1\
\x21\x78\xc0\xfe\x82\xee\x07\x74\x9f\x41\x9f\x83\x41\xf0\xa8\x9c\
\x1f\x17\x83\x4d\xa0\x7e\x0d\xea\x18\xfa\x46\x84\xae\xe0\x01\x0b\
\x18\x0b\xe6\x2d\x98\xf7\x72\x0e\xa8\x9c\x0f\x80\x49\x0d\xf5\x09\
\xa8\x29\xf4\xe5\x72\x57\x76\x0f\x44\x20\xac\x19\x9a\x53\x70\xcf\
\x21\x84\x11\xd4\x00\x1f\xa1\x9f\x4a\xad\x05\x70\x05\x5c\x96\x7d\
\x06\x5c\x03\xcb\x62\xda\x01\x66\x9a\xb3\x79\x17\x42\x8f\xca\xf9\
\xd9\x3e\x54\x67\x90\xc6\x92\xb8\x28\xe8\x92\x9e\x80\x5c\x48\x80\
\x23\xa5\x88\x31\xa4\x10\xb8\x5f\x41\x38\x84\x38\x96\x6a\x4b\x60\
\x5e\x12\x6d\xa9\x9e\x91\xa5\x80\x9e\x10\x32\xd6\x82\x31\x8c\xbd\
\xe7\x55\x05\x66\x2a\xce\xb6\x18\x2c\xcb\x84\x03\x30\xb0\xbe\x62\
\x14\x71\xd7\x09\xd6\xf2\xa8\x02\xbd\xfb\xff\xe0\x62\x11\xe7\x1b\
\x71\xce\x10\x23\x78\x0f\xc6\xc0\x72\x09\xc6\xb0\x5b\x49\x62\xcf\
\xea\xdb\xe2\xda\xbb\x57\xe2\x94\xa0\xef\xb9\x69\x50\x0c\x18\xc1\
\xf2\x02\xda\x32\x34\x49\xcf\x39\x93\x33\x37\x0c\x83\xa4\x5b\x0b\
\x5a\x43\xdb\x0a\x5d\xc7\xc5\x08\xda\x53\x99\x7a\x4b\x4a\x96\x18\
\x13\x21\x48\x5a\x4a\xab\xda\x5a\xc3\xf5\x35\xcc\x66\x42\xdb\x82\
\xb5\xfc\x54\x29\xb1\xef\x1c\x67\x31\x32\xee\x7b\x49\x04\x50\x4a\
\xf6\x94\xc0\x39\xa9\x7c\x79\x09\x57\x57\xb0\x58\x40\x08\xa4\xba\
\xe6\x5e\x65\x0c\xbf\xad\xe5\x93\x73\x1c\xc5\x28\xc9\xc3\xb0\x12\
\xf7\xbd\xbc\xb5\x6d\x61\x3e\x17\xb1\xf7\x30\x1a\xf1\xa1\x69\x38\
\x57\xb3\x19\x68\x4d\xdd\x75\x9c\x58\xcb\x34\x04\x11\xae\xd7\xb7\
\x56\x0c\xb4\x96\x33\xf0\x6d\x63\x83\x17\x77\xee\x90\xaa\x61\x80\
\x61\x20\xc4\xc8\x81\x73\x1c\x19\xc3\xb1\x73\x6c\x7a\x0f\x21\x48\
\x7d\x6b\x85\x18\xd1\x4a\xf1\xa6\x69\xf8\xb2\xb5\x05\xdb\xdb\xa0\
\xe6\x73\xf9\x96\xb6\x95\x7a\x6d\xcb\x5d\xad\x79\xa9\x35\x4f\xad\
\x65\xcf\x7b\x88\x91\x3f\x29\xf1\x7d\x3c\xe6\x6b\xd3\xf0\x77\x32\
\x81\x9d\x1d\xe1\x1f\x3c\x20\x6c\x94\x65\x65\x77\x27\x00\x00\x00\
\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x06\xc9\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x02\x00\x00\x00\x68\x24\x75\xef\
\x00\x00\x00\x03\x73\x42\x49\x54\x08\x08\x08\xdb\xe1\x4f\xe0\x00\
\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\x01\
\x95\x2b\x0e\x1b\x00\x00\x06\x6c\x49\x44\x41\x54\x78\x9c\xed\x9d\
\x3b\x7a\xd3\x40\x14\x46\xc7\x7c\x2e\x48\x9f\x02\x0a\x16\x91\x2d\
\x50\xb2\x04\x52\x7a\x5b\xa4\xcc\x16\x52\x66\x09\x64\x11\x14\x2e\
\x48\xef\x52\x14\x0a\xf2\x58\x96\x34\x8f\xfb\xfa\xe7\x7a\x4e\x05\
\xc4\x1e\x8b\x39\x3e\x1e\x7b\x90\xc5\xee\xd7\xdf\x2f\x21\x84\xc3\
\xfd\x31\x60\xf0\x78\xf7\x66\x7d\x08\x73\x9e\x4f\x0f\xd6\x87\xf0\
\xc1\xd3\xfb\xd7\xfd\xf4\xab\x80\xa1\x6d\x9c\x1d\x40\x6d\xb6\x8c\
\x82\x42\x08\xbb\x61\x18\xa6\xdf\x8c\x20\x68\x1b\x01\xd1\x66\x5b\
\xd8\xcc\xce\x7e\xed\x16\x08\xda\x6e\xbc\xb6\x99\xaa\x10\xc2\xe1\
\xfe\xb8\x1b\x86\x61\xf1\x67\xd3\x2d\xc4\x8f\x2b\x0f\x43\x6d\xfa\
\x85\x6d\xe8\x58\x28\xec\xfa\x9e\x08\xda\x6e\xa4\xb6\x35\x55\xe1\
\xbf\x85\x8f\xc2\xb6\x6f\x1a\xdf\x01\x01\x65\x6d\x3a\x85\x65\xce\
\x7f\xa2\xb0\xeb\x11\x11\xb4\x39\xab\x2d\xa9\x2a\x44\xd3\x7e\x2e\
\x2c\xf3\x9e\xb3\xfb\x9b\xa3\xa0\x4d\xae\xb0\x8a\x09\x2f\x28\xec\
\xfa\x91\x10\xb4\x35\x5a\x5b\xbe\xaa\x70\x39\xcf\x17\x85\x95\x0e\
\x74\x3d\x9c\x2d\x42\xda\x78\x0b\x23\xce\x70\x65\x61\xd7\x47\x80\
\xa0\x0d\xbc\xb6\x0a\x55\xe1\x6a\x62\x3f\x6d\xff\xb8\xe8\x68\xea\
\x0e\x88\x1d\x9c\xad\xbf\x09\xc6\xc9\x61\x28\x2c\xc6\xbc\xb6\x38\
\xaf\xe7\xd3\x83\x79\x6d\x44\x4f\xd7\x33\xb9\x20\xec\x70\x7f\x24\
\x3e\x8c\x89\xb6\x45\x37\x86\x2f\x92\x42\xaf\x37\xcc\x85\xc5\xa8\
\x69\x4b\xfa\x50\xd6\xc6\xa5\x6a\x71\xea\x96\x85\xd1\x23\x9b\x10\
\xd5\x56\xe4\x40\x41\x9b\xc2\x2a\x2e\x58\x58\x0c\xbb\xb6\xea\x79\
\x17\xd2\xc6\xae\x6a\x6d\xae\x56\x85\x31\x46\x36\xc1\xa2\x8d\x65\
\xae\x19\xb5\x29\xbf\x37\x56\x2a\x2c\xa6\x5a\x1b\x7b\x16\x44\x6d\
\x72\xaa\x36\x26\x67\x4b\x98\x44\x64\x13\x45\xda\x44\x17\x9e\x0a\
\x6d\x86\x9f\x38\x0d\x0a\x8b\x49\x6a\x53\x7b\x6b\x97\xa9\x4d\x41\
\xd5\xf6\x93\x38\x21\x4c\x34\xb2\x89\x45\x6d\x26\x1f\x9e\x36\xb4\
\x81\xec\xe3\x18\x17\x16\x33\x69\x33\xdf\x9e\x98\x69\xd3\x54\x95\
\x5c\x23\xe6\x7b\x89\x15\x43\xf0\x02\xf2\x44\x0e\xff\xb5\x7d\xff\
\xf3\xc3\xfa\x40\x2e\x48\x0b\xd3\x07\x61\xf7\xf6\xf1\xee\x4d\x3f\
\xf4\x9c\x36\xb2\x5e\x12\x75\x56\xb2\x18\xc3\x3d\x40\xf3\x17\xe4\
\x6d\x80\xd6\xb0\x6b\x94\xb5\xd9\xaa\xca\x5c\x7a\x72\x85\xe9\x47\
\x36\xa1\xa0\x0d\xbc\xaa\x18\xe8\xc2\x62\x84\xb4\x81\xa8\xca\x7f\
\x67\x57\x20\xcc\x30\xb2\x09\x46\x6d\x20\xaa\x4a\x69\xa6\xb0\x18\
\xa2\x36\x34\x55\x45\x1f\x9c\xca\x84\x21\x44\x36\x51\xa1\x0d\x4d\
\x55\x05\x4d\x16\x16\x93\xa9\x0d\x56\x55\xe9\xbe\x44\xb1\x30\xa8\
\xc8\x26\x36\xb4\xc1\xaa\xaa\xa3\xf9\xc2\x62\x66\xda\xf0\x55\x55\
\x6c\xfb\xd5\x6c\x4d\x21\x9c\x33\xba\x01\xc2\xce\x96\x1c\xae\x0a\
\x0b\x2d\x54\x35\x51\xf7\xbc\xaf\x14\x06\xb8\x92\x35\xa4\x8a\x82\
\x87\xc2\x5a\x54\x55\xbd\xac\xd4\x0b\x43\x88\xac\x45\x55\x44\x5a\
\x2d\xac\x69\x55\x94\x77\x6d\x24\x61\x26\x91\x35\xad\x8a\x4e\x4b\
\x85\xf9\x50\x45\xfc\x50\x44\x15\xa6\x13\x99\x0f\x55\x2c\xa0\x17\
\xe6\x4c\x15\x7d\xcf\x81\x41\x98\x50\x64\xce\x54\x71\x81\x58\x98\
\x57\x55\x2c\x5b\x7a\x7b\xa6\xd9\x79\x41\x3b\x7f\x0f\x8d\xd7\x6f\
\x2f\x87\x13\xc3\x38\xbb\x9f\x9f\x7f\x33\x0c\xe3\x1a\xfa\x6e\xf2\
\x58\x05\xcb\x38\x6c\x27\x92\x3a\xde\x23\xe7\x7a\x89\x66\x19\x87\
\x47\x98\x63\x5b\x74\x78\x7d\x73\x9e\xaa\xed\x58\x1b\x4e\x64\x0c\
\xc2\x1c\x7b\xa2\xc3\x6e\x9a\xf9\xcb\x10\x8e\xe5\x81\x44\x46\x15\
\xe6\xd8\x10\x1d\x09\xc7\xfc\x5f\x37\x72\xac\x10\x21\x32\x92\x30\
\xc7\x6e\xe8\x08\xd9\x15\xf9\x42\x9f\x63\x91\xe6\x91\xd5\x0b\x73\
\x6c\x85\x8e\x9c\x57\xa9\xaf\xcc\x3a\xd6\x69\x1b\x59\xa5\x30\xc7\
\x3e\xe8\x88\x1a\x15\xfc\x52\xba\x63\xa9\x86\x91\xd5\x08\x73\x6c\
\x82\x8e\xb4\x4b\xd9\xcb\x3e\x38\x56\x6b\x15\x59\xb1\x30\xc7\x0e\
\xe8\x28\x58\x14\xbf\xb0\x8a\x63\xc1\x26\x91\x95\x09\x73\x3c\xfb\
\x74\x74\xfc\x69\x5c\xba\xc8\xb1\x66\xfd\xc8\x0a\x84\x39\x9e\x77\
\x3a\x6a\xe6\x94\x2e\x0e\xe6\x58\xb6\x72\x64\xb9\x67\x4d\x71\x9d\
\x39\xd4\x21\xd2\x4f\x73\x6b\x0c\xc4\x33\x7f\x5b\x44\xed\x15\x28\
\x6b\x0d\xe3\x5a\x81\xfa\x4a\x46\x27\x2d\xac\xdb\x52\x80\xff\x6d\
\x7d\xd7\x96\x44\x27\xb2\x84\xb0\xee\x49\x01\xa9\xad\xa9\x2e\x2f\
\x89\x42\x64\x5b\xc2\xba\x21\x05\x64\xff\x79\xa5\x2b\x4c\x22\x1d\
\xd9\xaa\xb0\xee\x46\x01\x8d\x53\x04\xba\xc8\x24\xa2\x91\x2d\x0b\
\xeb\x56\x14\xd0\x3b\xcd\xad\xeb\x4c\x22\x17\xd9\x82\xb0\xee\x43\
\x01\xed\x53\xb5\xbb\xd4\x24\x42\x91\xcd\x85\x75\x13\x0a\xd8\x7c\
\xdd\xa8\xab\x4d\x22\x11\xd9\x85\xb0\xee\x40\x01\xcb\xaf\xcc\x76\
\xc1\x49\xd8\x23\x3b\x0b\xeb\xb3\xaf\x80\xfd\x65\x1f\xba\xe6\x24\
\xbc\x91\x7d\x08\xeb\xf3\xae\x00\xca\xa5\x8b\xba\xec\x24\x8c\x91\
\x7d\x0a\x7d\xc6\x55\xe0\x72\xb6\xfb\xf9\xf9\x37\xcb\x44\x33\x5e\
\x94\xf4\xf5\xdb\x0b\xd7\x50\x74\x18\xaf\x03\xc9\xf2\xf7\xda\xa3\
\xd9\x82\xe2\xf9\xf4\xf0\xf4\xce\x39\x1a\x7d\x90\x7e\x22\xe9\x32\
\x12\x2f\xef\x4f\xef\x5f\x21\x2e\xd2\xec\x2c\x2f\xf0\x95\xb8\x17\
\x76\x46\x41\x15\x3d\x32\xaa\x30\x1f\x79\x81\x57\x15\x73\xeb\x85\
\xe9\xab\x22\x46\x46\x12\xd6\x74\x5e\x0d\x55\x15\x73\x8b\x85\x99\
\xab\xa2\x44\x56\x2f\xac\xc5\xbc\xcc\x55\xd1\xb9\x95\xc2\xd0\x54\
\x55\x47\x56\x29\xac\xa1\xbc\xd0\x54\x11\x51\xba\x8a\x80\x15\xc8\
\xb6\xea\x9e\xf4\x35\xc2\xf0\xf3\x9a\x3c\x3d\xde\xbd\x39\xbb\x7a\
\x81\xb7\x35\x6c\x31\x29\xae\xff\xab\x86\x9d\x8a\x95\xac\x58\x18\
\x6c\x5e\x49\x1f\xb0\xda\x8a\xf0\x50\x58\x91\x03\x34\x6d\xa5\x91\
\x95\x09\x43\xcb\xab\x7a\xde\xd1\xb4\xe5\xd3\x6a\x61\x2c\x73\x0d\
\xa2\xad\x28\xb2\x02\x61\x20\x79\xb1\xcf\x2f\x88\xb6\x4c\x5a\x2a\
\x4c\x74\x4e\x6d\xb5\xe5\x47\x96\x2b\xcc\x36\x2f\xb5\x79\xc4\xaf\
\x0d\xbd\x30\x93\xb9\x33\xd1\x96\x19\x59\x96\x30\x93\xbc\xcc\x9f\
\xe6\xa3\xb6\xef\x7f\x6c\x8f\x62\x0e\xdc\x5e\xe2\x78\xf2\x9e\xb9\
\x2d\x13\x72\xc2\x48\x17\xa6\x99\xd7\xe1\xfe\xf8\x1a\xde\x02\xcc\
\x5a\xf2\x7c\x7a\x08\xe1\x18\xac\x97\xf0\x18\x94\x35\x6c\xf1\xe5\
\xdb\x50\xdb\xec\x41\xc7\xc3\x53\xd0\x96\x5c\xc9\x12\xc2\x14\x0e\
\x31\xb9\xd2\x2a\x6b\xdb\x78\x20\x35\x6d\x1b\x58\x16\x56\xb4\x87\
\xa6\xa0\x2d\x73\x70\x69\x6d\xdb\x91\x6d\x09\x93\x3b\xa6\xea\x53\
\x50\x84\xb4\x55\x0c\x68\x55\x9b\x76\x61\xf4\x93\xcb\x03\xab\x36\
\xe2\x20\x42\xda\x36\x22\x5b\x15\xc6\x7e\x10\x2c\xaa\x62\x88\xda\
\x18\x33\xd5\xac\x4d\xa3\x30\x76\x55\x31\x15\xda\x84\x16\x42\x5e\
\x6d\x6b\x91\x2d\x0b\xe3\x7a\x54\x51\x55\x31\x99\xda\x14\xde\x6a\
\x4a\xd7\x26\x55\x98\x9a\xaa\x98\x0d\x6d\xca\x1f\xe6\x58\xb4\x2d\
\x46\xb6\x20\x8c\xf8\x30\x26\xaa\x62\x66\xa7\x49\x19\x6e\x97\x48\
\xd4\xc6\xb9\x97\x78\xb8\x3f\x9a\xdb\x9a\x61\xbe\xb9\x15\x68\xd3\
\x72\x2d\x7b\x5e\x58\xdd\xd3\x01\xcd\xd3\x04\xc8\x9e\x64\xe0\xab\
\x6d\x37\x0c\x43\xfc\xfb\xd2\x11\x85\xde\xac\x4b\x80\xa0\x6d\x84\
\x32\xc9\x17\x85\x15\x0d\x04\x5b\xd5\x1a\x3e\x6a\xbb\x28\x2c\x73\
\x08\x85\xcf\x55\xd2\x20\x68\x1b\x29\x9d\xf3\x73\x61\x39\xf7\x6c\
\xae\xaa\x35\xda\xad\xed\x5c\xd8\xf6\x7d\x94\x3f\x02\x6b\x82\xa0\
\x6d\x24\x47\xc1\x3e\x79\x53\x37\x55\xad\xd1\x56\x6d\x1f\x85\x2d\
\xde\xc8\x70\xb7\xc2\x0a\x04\x6d\x23\x6b\x46\xf6\x8b\x3f\x73\x5f\
\xd5\x1a\xf8\xb5\xed\x86\x61\x88\xff\xd4\x5c\x15\xce\xf7\xef\x10\
\xb4\x8d\xc4\x82\x76\xbf\xfe\x7e\x19\x7f\x65\xae\x6a\x04\x47\xd8\
\x08\x9a\xb6\x7f\x3b\xcf\xca\x48\x61\xee\x5b\x97\x00\x00\x00\x00\
\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x03\xef\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x01\x68\xf4\xcf\xf7\
\x00\x00\x00\x06\x62\x4b\x47\x44\x00\xff\x00\xff\x00\xff\xa0\xbd\
\xa7\x93\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0b\x13\x00\x00\
\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x03\x8f\x49\x44\x41\x54\x38\
\xcb\x3d\x91\xdf\x4f\x5b\x75\x00\xc5\xcf\xf7\x7e\x2f\xb4\xbd\xb7\
\xdc\x76\x8c\xfe\x80\x5b\xe4\x87\x73\xcc\x84\x8c\x9f\x0d\x83\x25\
\x2e\x6e\x3e\x8c\x44\xb3\x64\x66\x21\x6a\x62\x8c\xba\x68\xa6\x0f\
\xea\x93\x0f\x26\x8b\x0f\xc6\x7f\xc0\x44\x97\xc5\x2c\x53\x5f\xdc\
\xe2\xa3\xc8\xa0\xb0\x1f\x22\x14\xc1\x6d\x08\x6d\x2d\x90\x51\xda\
\x4b\x27\x3f\xd6\x52\xee\xbd\x50\x7a\xbf\xbd\x5f\x1f\x10\x5e\x4f\
\xce\x39\xf9\xe4\x1c\xca\x18\x43\x4f\x4f\xdf\x23\xc4\xe2\x09\x30\
\xc6\x38\xb1\x2c\x0b\x00\x20\xe6\xb6\xf2\x7c\xf9\xc9\x0a\x08\x63\
\x0c\xdb\xdb\x7a\x6f\xb1\x54\x9c\x30\x0c\x03\x88\x8c\xde\x43\xb8\
\xbb\x8f\xf7\xf6\xbe\xc4\x1f\x8c\xff\x0e\x58\x96\x85\x72\xb9\x8c\
\x8c\xa6\xfd\x64\x59\x16\xc0\x18\xc3\xf4\xcc\x43\x5e\x2e\x97\xf9\
\x48\x64\xcc\xa4\xe1\x70\x8f\xd6\x15\xee\x54\xa2\xd1\x28\x8e\x54\
\x7b\x2b\x71\xee\xec\x79\xb3\xed\x64\x37\x9f\x8b\xcd\x9b\x1b\xcf\
\x36\xf7\x23\x96\x65\x29\x8c\x31\xcc\xfc\xf5\x68\x28\x91\x4c\xf2\
\x7c\x61\x8b\xdb\xb6\xcd\x4d\xd3\xac\x21\xa9\x95\x34\x04\x41\x40\
\x3c\x9e\xe4\x27\x4e\x1c\x43\xd0\x1f\x00\x07\xc7\xfd\x07\xe3\xe8\
\xec\x6c\xbf\x86\xef\xbe\xbd\xfe\x59\x7b\x5b\x98\x07\x82\xf5\xfc\
\xe2\xeb\x03\x5c\xd7\x75\x3e\x7a\xf7\x1e\x1f\x1e\x89\xb4\x65\xff\
\x7d\x0a\x32\x3c\x3c\x06\x70\xe0\xe7\x5b\xb7\x22\xa9\xe5\xd4\x39\
\xc3\x30\x71\xe3\x87\x6b\xde\x50\x5d\xa8\x20\xcb\xf2\x3e\xc3\xff\
\x1c\xd0\xb4\xec\xab\xd3\x33\x0f\xe7\x0d\xd3\xec\x38\xd0\x0e\x0c\
\x4e\x9b\x73\x8c\xdd\xbd\xcf\x57\x32\x69\x5e\x2e\x97\xb9\xb6\xaa\
\x4d\x70\xce\x21\x02\x00\xa5\xb4\xf8\xeb\xe0\x90\xd5\xd2\xf2\x02\
\x8e\x56\x1f\x85\x6d\x73\xc4\x13\xc9\xde\xbd\xa2\x75\x41\xd4\x56\
\xb3\x58\x5b\x5f\xbf\xd1\xdc\xd4\x24\x2a\x1e\x05\x2e\xa7\x13\x8f\
\x67\x67\xe1\x74\x38\xd0\xd4\xd4\x10\x11\x76\x4c\xb3\xc6\xd0\xf5\
\x77\x1a\x9b\x1b\xa0\x54\x29\xd0\x75\x1d\x6b\x6b\x1b\xa8\xac\xac\
\x7c\xf3\xf1\xdf\xb3\x26\xe9\xeb\x3d\xc3\xd7\x37\x36\x90\xcb\xe7\
\x30\x15\x1d\xc7\xc2\xe2\x22\x82\xc1\x20\x37\x4c\x5d\x68\x6d\x6d\
\x85\xf0\xd1\xc7\x57\xfa\x8e\x78\xbd\x68\xa8\x7f\x0e\xfd\xe7\x5f\
\x83\xa2\x28\x30\x77\x8c\xe7\x5d\x2e\x17\x00\x40\xf0\xf9\x7c\x93\
\xef\xbd\xff\xee\x8b\xb5\x75\xb5\x7b\xaa\xaa\x22\x12\x19\x4b\x29\
\x8a\xb2\x1c\xee\xea\x86\x57\xf1\xec\x3f\x0e\x00\x9c\x73\x00\xa0\
\x84\x10\x4e\x08\xb1\x4b\xa5\x52\xf3\x64\xf4\xcf\x39\x87\xc3\x21\
\xf9\x03\x35\xf0\x7a\xbd\x90\x5d\x92\xb5\xbb\xbb\x7b\xdd\xed\x96\
\x3f\xa1\x54\xb4\x38\xb7\x0f\x0b\x08\x00\x0a\x80\x51\x4a\xb1\xb0\
\xb0\xf8\xf9\x93\xe5\xd4\xd7\x75\x75\xb5\x50\x3c\x55\x90\xdd\x32\
\x14\xb7\x82\x4a\xb1\x02\x7b\xa5\x22\xe6\x62\x71\x80\xe3\xc7\x8e\
\xf6\x93\x57\x0e\x09\x00\x80\x31\xe6\x9f\x98\x8c\xfe\x51\x51\xe1\
\x38\xe6\xf3\xd7\x40\x92\x9c\xa8\xaa\xaa\x82\xec\x92\x40\x29\x45\
\x2e\x9f\x43\x2c\xfe\x0f\xac\x92\x05\x8f\xc7\xb3\x13\x0c\x04\xde\
\x12\xe7\x63\x09\x08\x44\x00\x21\xe4\xed\x8c\x96\xb9\x19\x0a\x85\
\x20\x49\x4e\xb8\x24\x17\xdc\x6e\x37\x24\xa7\x0b\x84\x10\xa4\xd3\
\x69\x24\x17\x96\x40\x29\x85\x3f\xe0\x5b\x57\xd5\xba\x16\x4a\xe9\
\x96\x98\xcf\xe5\x4e\xdd\xbe\xfd\xcb\x37\x53\x53\xd3\x5d\x82\x40\
\x50\xab\xd6\xe2\x8d\x81\x01\x5c\xba\x74\x11\x94\x52\x94\x4a\x25\
\xc4\x12\x09\x68\x99\x55\x54\x57\x57\x83\x08\x18\x5a\x5d\xd5\xfa\
\x4d\xd3\x40\x30\x18\x84\x90\xcf\x17\xb6\x0c\xdd\x14\x08\x00\x66\
\x31\x2c\x25\x97\x70\xf5\xea\x97\x50\xd5\x46\x74\x74\xf6\xe0\xb7\
\x3b\x77\xa0\x6f\x1b\x08\xd5\xab\x00\xe1\xdf\xeb\xba\xde\xaf\xaa\
\x21\xb4\x1c\x3f\x0e\x49\x92\x40\x46\x22\x63\x10\x45\x11\xb1\xb9\
\xc4\x85\xc1\xc1\xc1\x9b\xf9\x7c\xde\x23\x08\xc2\xc1\x26\x28\x14\
\x0a\xf8\xe0\xc3\xcb\x38\xfb\xca\x99\x2f\x4c\x73\xe7\x2b\x59\x96\
\xd1\xd4\xd0\x08\x49\x92\x20\x08\x02\xc8\xc8\xf0\x28\x00\x80\x08\
\x84\x1a\xe6\x4e\x59\xd7\x0d\x64\xb3\xd9\xe6\xcd\xcd\x67\x97\x19\
\xb3\x5e\xf6\xfb\xfd\x4f\x4f\x9f\x3e\xf5\xa9\xc7\xab\xa4\x82\x81\
\x00\xfc\x3e\x3f\x6c\xdb\x3e\x1c\xfe\x3f\x11\x5f\xc4\xbb\xcd\x16\
\x27\xa0\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
\x00\x00\x07\x22\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x90\x00\x00\x00\x90\x08\x06\x00\x00\x00\xe7\x46\xe2\xb8\
\x00\x00\x00\x04\x73\x42\x49\x54\x08\x08\x08\x08\x7c\x08\x64\x88\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x0e\xc4\x00\x00\x0e\xc4\
\x01\x95\x2b\x0e\x1b\x00\x00\x06\xc4\x49\x44\x41\x54\x78\x9c\xed\
\x9d\xbb\x71\x1b\x31\x14\x45\xb1\x1e\x05\x6a\x80\x33\x4a\xdc\x04\
\x4b\x70\xe8\x12\x14\xb3\x2c\xc5\x2a\xc1\xa1\x4b\x50\x13\x4e\x38\
\xa3\x06\x94\xd1\x81\x06\x36\x04\xee\x2e\x7e\xef\x73\x1f\xf6\x9d\
\x50\x23\xed\x82\x78\x87\x97\x58\x40\x00\x97\x97\xf7\xa7\x5b\x08\
\x21\x5c\x4e\xd7\x25\x18\xe4\xf9\xf1\xed\xa6\xdd\x86\x51\x5e\x3f\
\xce\x26\xfb\xfe\xe5\xfd\xe9\xb6\x44\x81\x22\x2e\x92\x3c\xd6\x04\
\x4a\x9d\x59\xf2\x1f\x44\x5c\x24\x39\xac\x08\xb4\xe6\xc9\xa6\x40\
\x11\x17\x89\x1f\x74\x81\xb6\xfc\xb8\x9c\xae\xcb\x52\xfa\xa5\xf4\
\x97\xa9\x1b\x26\x81\x05\x91\x50\x05\xaa\x71\xa2\x5a\xa0\xf4\x8f\
\x46\x1b\xa6\x01\xb2\x48\x68\x02\xd5\xb8\x10\x3d\xf8\xd2\xf0\x5a\
\x89\xd2\x0b\x58\x03\x51\x24\x14\x81\x7a\xea\xdf\x2d\x50\x7e\x21\
\x6b\x20\x89\xa4\x2d\x50\x6b\xdd\xd3\x9a\xdf\x35\xbc\x47\xa2\xfc\
\xa2\x96\x40\x10\x49\x4b\x20\x8a\x5a\x93\x09\xb4\x76\x71\x4b\x68\
\x8a\x24\x2d\xd0\x48\x8d\xf3\xfa\xae\x36\x7c\x54\xa2\xb5\x1b\x59\
\x41\x43\x24\x29\x81\x38\xea\xca\x26\xd0\xd6\x0d\xad\x20\x29\x12\
\xb7\x40\x54\xf5\x5c\xab\xe5\xb7\xda\x5f\xec\xe5\xe5\xfd\xe9\x46\
\x29\xa4\x14\xda\x03\x5b\x0a\x24\xfa\xfe\x81\xf3\xe2\x29\x96\x16\
\x6d\xf3\xf4\x79\xfd\x38\x2f\x08\x83\xed\x5a\x38\xa4\xd9\xaa\xdb\
\xa6\x40\x97\xd3\xf5\x6e\xa1\x95\x02\x64\x91\xf6\x24\x89\x89\x84\
\x2c\x92\x46\xd2\x8b\x25\x50\x0e\x92\x48\x2d\x52\x20\x8a\xc4\x2d\
\xce\x5e\x8d\x76\x05\xe2\x4a\xa1\x14\x4d\x91\x46\x24\x40\x10\x09\
\x61\x6c\xa9\x96\x40\x39\x92\x22\x51\x16\x5d\x43\x24\x49\x71\x4a\
\xf5\x28\x0a\x24\x91\x42\x29\x9c\x22\x71\x16\x59\x42\x24\x84\xc4\
\xc9\x81\x49\xa0\x1c\x4a\x91\x34\xe6\x74\x28\xef\xa9\x25\x4e\x4d\
\xdf\x57\x09\x24\x9d\x42\x29\x23\x22\x21\x2c\x4f\x8c\xb4\x01\x31\
\x71\x72\x60\x13\x28\xa7\x45\x24\xa4\x27\xa4\x1e\x91\x10\xc4\xa9\
\x7d\xc3\x56\x0b\xa4\x99\x42\x29\x7b\x22\x21\x89\x93\x53\x23\x12\
\x42\xff\xb6\x62\x26\x81\x72\x52\x91\x90\xc5\xc9\x59\x13\x09\x4d\
\x9c\x96\xe1\xc2\xea\x5a\x18\xc5\x85\xa5\x40\xeb\xfc\x5a\xa2\x48\
\x3f\xfe\xfc\xd4\x6e\xca\x10\x4d\x02\xa1\x62\x6d\xe1\xf3\xf9\xf1\
\xed\x86\x9a\x9a\xad\x21\xd1\xfc\x11\x86\x32\x16\xca\x41\x98\x19\
\x2e\x81\xdc\xb6\x5e\xcc\x8e\x81\xb6\x40\x14\x09\xa9\x2d\x7b\xf4\
\x0c\x51\xba\x04\x42\x4d\xa1\x14\x04\x91\xac\x88\x33\xc2\x74\x09\
\x94\xa3\x21\x92\x45\x71\x7a\x1f\x90\xba\x05\xb2\x90\x42\x29\x12\
\x22\x59\x14\x67\x94\xe9\x13\x28\x87\x43\x24\xeb\xe2\x8c\x4c\xcf\
\x0c\x09\x64\x2d\x85\x52\x28\x44\xb2\x2e\x0e\x05\x87\x4b\xa0\x9c\
\x1e\x91\x66\x12\x67\x74\x72\x78\x58\x20\xcb\x29\x94\x52\x23\xd2\
\x4c\xe2\x50\x71\xf8\x04\xca\x59\x13\x69\x56\x71\x28\x96\xa6\x48\
\x96\x32\x10\xd7\xc8\x46\xb1\xb6\x3c\xa2\x85\x27\xd0\x0a\xb3\x26\
\x4e\x0a\xd5\x9b\x9e\x4c\xa0\x19\xc6\x42\x47\x10\x87\x1a\x4f\xa0\
\x70\x3c\x71\x28\x87\x1c\xa4\x02\x59\x4b\xa1\xa3\x89\xc3\xc1\x21\
\x13\xe8\xc8\xe2\x50\x3f\xf0\x90\x0b\x84\x9c\x42\x47\x16\x87\x8b\
\x43\x24\x90\x8b\xf3\x09\xc7\x74\x0b\x8b\x40\x28\x29\xe4\xe2\xf0\
\x33\x65\x02\xb9\x38\xf7\x70\x4d\xf6\xb2\x09\xa4\x91\x42\x2e\x8e\
\x3c\x53\x24\x90\x8b\xb3\x0f\xe7\x52\xd3\x03\x67\xe7\xff\xfe\x6e\
\x7f\xdf\x93\x75\x7e\x7f\xff\x15\x42\x38\xb3\x5d\xdf\xd4\xae\xce\
\x59\xe1\x5a\xb8\x8d\xb5\xe5\xbc\x3e\xfb\xc6\x42\x5f\xd5\x2e\xc3\
\xfd\x26\xe6\xbc\x3e\xab\x40\x2e\x8f\x1e\x52\x52\x8a\x6c\x6d\x76\
\x91\xca\x58\x4d\x21\x36\x81\x5c\x1a\x3d\x24\x65\x14\x3b\x5c\xc1\
\x85\x2a\x63\x31\x85\x58\x04\x72\x59\xf4\x90\x96\x50\xf4\x78\x17\
\x17\xab\x8c\xb5\x14\x22\x17\xc8\x25\xd1\x43\x43\x3e\xf1\x03\xa6\
\x5c\xb0\x32\x96\x52\x88\x54\x20\x97\x43\x0f\x2d\xe9\x54\x8e\xb8\
\x73\xd1\xca\x58\x49\x21\x32\x81\x5c\x0a\x3d\x34\x65\x53\x3b\x64\
\xd3\x85\x2b\x63\x21\x85\x48\x04\x72\x19\xf4\xd0\x96\x4c\xf5\x98\
\x5f\x17\xaf\x8c\xb6\x20\x25\x86\x05\x72\x09\xf4\x40\x90\x4b\xfd\
\xa0\x71\x17\xb0\x0c\x82\x28\x5b\x0c\x09\xe4\xc5\xd7\x03\x45\x2a\
\xf5\x04\x0a\xc1\x45\xac\x01\x45\x98\x9c\x6e\x81\xbc\xe8\x7a\x20\
\xc9\x04\x91\x40\x21\xb8\x90\x35\x20\x89\x13\xe9\xda\x95\xc1\xbd\
\x8b\xc0\xb1\x83\x6f\xeb\x71\x86\x98\x62\x67\xea\xd1\x40\xfa\x04\
\x68\x1e\x03\x71\x8f\x55\x7c\x2c\x54\x06\xe9\x53\xa3\x49\x20\x97\
\x67\x5e\x44\x1f\xe3\x5d\x24\x7d\x50\x52\xa8\x5a\x20\x97\x66\x5e\
\x54\x96\x32\x5c\x28\x7d\x10\x52\xa8\x4a\x20\x97\x65\x5e\x54\xff\
\x9d\xc3\xc5\xd2\x47\x3b\x85\x8a\x02\xb9\x24\xf3\x02\xf1\x2f\xad\
\x2e\x98\x3e\x9a\x29\xb4\x2b\x90\xcb\x31\x2f\x50\xdb\x7a\x5c\x34\
\x7d\xb4\x52\x68\x53\x20\x97\x62\x5e\x20\xb7\x36\xbb\x70\xfa\x68\
\xa4\xd0\xaa\x40\x2e\xc3\xbc\x40\x1f\xef\xe2\xe2\xe9\x23\x9d\x42\
\x77\x02\xb9\x04\xf3\x62\xe2\x88\x3b\x17\x50\x1f\xc9\x14\xfa\x22\
\x90\x17\x7f\x5e\x4c\x1d\xf3\xeb\x22\xea\x23\x95\x42\xff\x04\xf2\
\xa2\xcf\x8b\xc9\xaf\x3a\x70\x21\xf5\x91\x48\xa1\x6f\x21\x78\xb1\
\x67\x86\x5b\xa2\xe5\xf9\xf1\xed\xc6\x59\x60\x89\x6f\x2d\xfc\xfc\
\x4e\x2c\x9b\x48\x7c\x9f\x1a\x67\xff\x3c\x58\x97\xc7\x32\xaf\x1f\
\xe7\xe5\x47\xe0\xef\x23\xce\x1a\xfb\xc6\x42\x05\xa4\x3f\xd2\x5f\
\xde\x9f\x6e\xe6\xbe\x74\xd7\xd3\xe7\x9e\x19\xc7\x82\x9e\x40\x02\
\x20\x88\xc3\x95\x42\x2c\x02\x79\xfa\x7c\x82\x20\x0e\x37\x9e\x40\
\x0c\xa0\x8a\xc3\x91\x42\xe4\x02\x1d\x39\x7d\x50\xc5\xe1\xc4\x13\
\x88\x00\x4b\xe2\x50\xa7\x10\xa9\x40\x47\x4b\x1f\x4b\xe2\x70\xe1\
\x09\xd4\x81\x75\x71\x28\x53\x88\x4c\xa0\x23\xa4\x8f\x75\x71\x38\
\x80\x39\xa5\x15\x9d\xd9\xe4\xa1\x7a\xc3\x93\x08\x34\x6b\xfa\xa4\
\xd2\x3c\x3f\xbe\xdd\xb4\x0f\x32\x40\xc4\xc7\x40\x2b\xec\xa5\x4d\
\x94\x68\x86\x44\xa2\x18\x0b\x0d\x0b\x34\x53\xfa\xb4\x48\x31\x93\
\x48\x23\x78\x02\x85\x31\x09\xac\x8b\x34\x9a\x42\x43\x02\x59\x4f\
\x1f\xca\xa2\x5b\x17\xa9\x97\x43\x26\x10\x67\x91\x2d\x8a\x34\x92\
\x42\xdd\x02\x59\x4c\x1f\xc9\xa2\x5a\x14\xa9\x87\x43\x24\x90\x66\
\x11\xad\x88\xd4\x9b\x42\x5d\x02\x59\x49\x1f\xa4\xa2\x59\x11\xa9\
\x95\x29\x13\x08\xb9\x48\xc8\x22\xf5\xa4\x50\xb3\x40\xc8\xe9\x83\
\x58\x94\x2d\xfe\xcf\x6a\xf3\x6f\xeb\xe1\xc4\xfc\x5a\x58\xdc\xf3\
\x64\x49\x1e\x64\x5a\x03\xa2\x49\x20\xb4\xf4\x49\xe3\xd6\xe2\x5a\
\xd5\xeb\xc7\x79\xb9\x9c\xae\x0b\xd7\x96\x1b\x09\x4c\x8e\x81\xf6\
\x3a\x1c\x79\x8c\x11\x59\x6b\x5b\x7c\x4d\x08\x6f\xd2\x96\xb1\x50\
\xb5\x40\x08\x2f\xac\xe5\x9d\x8a\x28\x52\x4d\x5b\x90\x44\xaa\xc1\
\x44\x02\x8d\x44\x3c\x82\x48\x3d\xf7\xd6\x16\xa9\x36\x85\xaa\x04\
\xd2\x7a\x11\x94\x63\x03\x0d\x91\x28\xee\xa5\x2d\x52\x09\xc8\x04\
\xe2\x1c\x54\x4a\x88\xc4\x71\x6d\x0d\x91\x6a\x52\xa8\x28\x90\x64\
\x83\x25\x9f\x46\x38\x44\x92\x48\x37\xb4\x44\x82\x48\x20\xcd\xc7\
\x58\x0a\x91\x34\xc6\x57\x52\x22\x95\x52\x68\x57\x20\xee\xc6\x21\
\xcd\x7f\xf4\x88\x84\xf0\x84\xa7\x9d\x48\x2a\x09\x84\x24\x4e\x4e\
\x8d\x48\x08\xe2\xe4\x70\x8a\xb4\x97\x42\x9b\x1d\xc1\xd1\x10\x0e\
\x71\x24\x67\x9f\x11\xc5\xd9\x82\xba\x7e\x5b\xb5\x13\x59\x0b\xb3\
\x3e\x5d\x1f\x82\x2d\x79\x42\xa0\xef\xf3\x2d\x21\x57\x6f\x40\x65\
\xaf\x84\x34\xd2\xeb\x5f\xd6\x44\x8a\x50\xd4\x74\xad\x9e\x2c\x02\
\x69\x3c\x8e\x4b\x73\x54\x91\xf2\xda\xde\x75\xc2\xc8\x0d\x34\x3e\
\xa6\xb4\x57\xe0\x8f\x26\x12\x8b\x40\x08\xf3\x38\xda\x1c\x49\xa4\
\xb4\xde\x5f\x5e\x74\xeb\xc5\x10\x06\xc6\x28\x02\x45\x8e\x20\xd2\
\xb0\x40\x08\xe2\x44\xd0\x04\x8a\xcc\x2e\x52\x74\xe0\xdf\x8b\xac\
\xf9\x43\x24\x71\x22\xa8\x02\x45\x66\x15\xa9\x49\x20\x44\x71\x22\
\xe8\x02\x45\x66\x14\xe9\x72\xba\x2e\xbb\xd3\xdf\xc8\xe2\x44\xac\
\x08\x14\x99\x49\xa4\x4d\x81\x2c\x88\x13\xb1\x26\x50\x64\x16\x91\
\x96\xf4\x07\x96\xc4\x89\x58\x15\x28\x62\x5d\xa4\xbf\xa8\xcc\xde\
\x47\x76\xb8\xb3\xea\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\
\x82\
"
qt_resource_name = "\
\x00\x09\
\x0c\x78\x54\x88\
\x00\x6e\
\x00\x65\x00\x77\x00\x50\x00\x72\x00\x65\x00\x66\x00\x69\x00\x78\
\x00\x06\
\x07\x03\x7d\xc3\
\x00\x69\
\x00\x6d\x00\x61\x00\x67\x00\x65\x00\x73\
\x00\x0e\
\x0a\x51\x2d\xe7\
\x00\x69\
\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x74\x00\x69\x00\x65\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x09\
\x09\x6b\xb7\xc7\
\x00\x69\
\x00\x6e\x00\x62\x00\x6f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0b\
\x0a\xd0\x22\xa7\
\x00\x72\
\x00\x65\x00\x64\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x57\x58\x67\
\x00\x73\
\x00\x65\x00\x6e\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x10\
\x0c\xc3\x45\x27\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x02\xe8\x12\x87\
\x00\x62\
\x00\x6c\x00\x61\x00\x63\x00\x6b\x00\x6c\x00\x69\x00\x73\x00\x74\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x08\
\x0c\x47\x58\x67\
\x00\x73\
\x00\x65\x00\x6e\x00\x64\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0f\
\x05\x46\x9a\xc7\
\x00\x61\
\x00\x64\x00\x64\x00\x72\x00\x65\x00\x73\x00\x73\x00\x62\x00\x6f\x00\x6f\x00\x6b\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x07\x34\x2d\xc7\
\x00\x6e\
\x00\x65\x00\x74\x00\x77\x00\x6f\x00\x72\x00\x6b\x00\x73\x00\x74\x00\x61\x00\x74\x00\x75\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x18\
\x02\x47\xd6\x47\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x79\x00\x65\x00\x6c\
\x00\x6c\x00\x6f\x00\x77\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x02\xa0\x44\xa7\
\x00\x73\
\x00\x75\x00\x62\x00\x73\x00\x63\x00\x72\x00\x69\x00\x70\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x09\x39\xff\x47\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x05\x89\x73\x07\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x15\
\x0c\xfc\x45\x87\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x72\x00\x65\x00\x64\
\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x0d\
\x07\x76\xdf\x07\
\x00\x67\
\x00\x72\x00\x65\x00\x65\x00\x6e\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x17\
\x00\xd3\x62\xc7\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x32\x00\x34\x00\x70\x00\x78\x00\x2d\x00\x67\x00\x72\x00\x65\
\x00\x65\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x11\
\x02\x8c\x5e\x67\
\x00\x6e\
\x00\x6f\x00\x5f\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x73\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x0e\
\x02\x47\x93\x47\
\x00\x79\
\x00\x65\x00\x6c\x00\x6c\x00\x6f\x00\x77\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x12\
\x03\xf4\x2e\xc7\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x74\x00\x77\x00\x6f\x00\x2e\x00\x70\x00\x6e\
\x00\x67\
\x00\x11\
\x03\x89\x73\x27\
\x00\x63\
\x00\x61\x00\x6e\x00\x2d\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x2d\x00\x31\x00\x36\x00\x70\x00\x78\x00\x2e\x00\x70\x00\x6e\x00\x67\
\
\x00\x14\
\x07\x12\xd0\xa7\
\x00\x71\
\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x63\x00\x6f\x00\x6e\x00\x5f\x00\x74\x00\x77\x00\x6f\x00\x5f\x00\x78\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x18\x00\x02\x00\x00\x00\x15\x00\x00\x00\x03\
\x00\x00\x02\x36\x00\x00\x00\x00\x00\x01\x00\x00\x3f\x80\
\x00\x00\x02\x92\x00\x00\x00\x00\x00\x01\x00\x00\x47\xb7\
\x00\x00\x01\x3e\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x49\
\x00\x00\x02\x6a\x00\x00\x00\x00\x00\x01\x00\x00\x46\xee\
\x00\x00\x01\x74\x00\x00\x00\x00\x00\x01\x00\x00\x24\xaf\
\x00\x00\x00\xbc\x00\x00\x00\x00\x00\x01\x00\x00\x11\xfc\
\x00\x00\x02\xde\x00\x00\x00\x00\x00\x01\x00\x00\x51\x02\
\x00\x00\x02\xb4\x00\x00\x00\x00\x00\x01\x00\x00\x4a\x35\
\x00\x00\x00\xf2\x00\x00\x00\x00\x00\x01\x00\x00\x18\x2b\
\x00\x00\x01\xbe\x00\x00\x00\x00\x00\x01\x00\x00\x2d\x9f\
\x00\x00\x03\x06\x00\x00\x00\x00\x00\x01\x00\x00\x54\xf5\
\x00\x00\x01\x16\x00\x00\x00\x00\x00\x01\x00\x00\x1a\xd0\
\x00\x00\x02\x16\x00\x00\x00\x00\x00\x01\x00\x00\x3c\x48\
\x00\x00\x01\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x27\x2a\
\x00\x00\x00\x4c\x00\x00\x00\x00\x00\x01\x00\x00\x03\x6a\
\x00\x00\x00\x2a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x64\x00\x00\x00\x00\x00\x01\x00\x00\x06\x1d\
\x00\x00\x00\xdc\x00\x00\x00\x00\x00\x01\x00\x00\x14\xf0\
\x00\x00\x00\x80\x00\x00\x00\x00\x00\x01\x00\x00\x08\xed\
\x00\x00\x00\x96\x00\x00\x00\x00\x00\x01\x00\x00\x0b\x15\
\x00\x00\x01\xe6\x00\x00\x00\x00\x00\x01\x00\x00\x34\xdf\
"
def qInitResources():
    QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)

qInitResources()
|
mmaction/models/localizers/utils/__init__.py | HypnosXC/mmaction2 | 648 | 12785789 | from .post_processing import post_processing
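# Public names re-exported at the package level: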
__all__ = ['post_processing']
|
SymbolExtractorAndRenamer/lldb/packages/Python/lldbsuite/test/sample_test/TestSampleTest.py | Polidea/SiriusObfuscator | 427 | 12785793 | <reponame>Polidea/SiriusObfuscator
"""
Describe the purpose of the test class here.
"""
from __future__ import print_function
import os
import time
import re
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class RenameThisSampleTestTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
# If your test case doesn't stress debug info, the
# set this to true. That way it won't be run once for
# each debug info format.
NO_DEBUG_INFO_TESTCASE = True
def test_sample_rename_this(self):
"""There can be many tests in a test case - describe this test here."""
self.build()
self.sample_test()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def sample_test(self):
"""You might use the test implementation in several ways, say so here."""
exe = os.path.join(os.getcwd(), "a.out")
# Create a target by the debugger.
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
# Now create a breakpoint in main.c at the source matching
# "Set a breakpoint here"
breakpoint = target.BreakpointCreateBySourceRegex(
"Set a breakpoint here", lldb.SBFileSpec("main.c"))
self.assertTrue(breakpoint and
breakpoint.GetNumLocations() >= 1,
VALID_BREAKPOINT)
error = lldb.SBError()
# This is the launch info. If you want to launch with arguments or
# environment variables, add them using SetArguments or
# SetEnvironmentEntries
launch_info = lldb.SBLaunchInfo(None)
process = target.Launch(launch_info, error)
self.assertTrue(process, PROCESS_IS_VALID)
# Did we hit our breakpoint?
from lldbsuite.test.lldbutil import get_threads_stopped_at_breakpoint
threads = get_threads_stopped_at_breakpoint(process, breakpoint)
self.assertTrue(
len(threads) == 1,
"There should be a thread stopped at our breakpoint")
# The hit count for the breakpoint should be 1.
self.assertTrue(breakpoint.GetHitCount() == 1)
frame = threads[0].GetFrameAtIndex(0)
test_var = frame.FindVariable("test_var")
self.assertTrue(test_var.GetError().Success(), "Failed to fetch test_var")
test_value = test_var.GetValueAsUnsigned()
self.assertEqual(test_value, 10, "Got the right value for test_var")
|
keras/downstream_tasks/config.py | joeranbosma/ModelsGenesis | 574 | 12785813 | import os
import shutil
import csv
import random
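
# Each *_config class below bundles the dataset paths, input patch size,
# training hyper-parameters, and pretrained-weight selection for one
# downstream task: bms (BraTS), ecc (pulmonary-embolism candidates),
# ncc (LUNA16 false-positive reduction), ncs (LIDC nodule segmentation),
# and lcs (LiTS liver segmentation), as the data paths below suggest.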
class bms_config:
    arch = 'Vnet'

    # data
    data = '/mnt/dataset/shared/zongwei/BraTS'
    csv = "data/bms"
    deltr = 30
    input_rows = 64
    input_cols = 64
    input_deps = 32
    crop_rows = 100
    crop_cols = 100
    crop_deps = 50

    # model
    optimizer = 'adam'
    lr = 1e-3
    patience = 30
    verbose = 1
    batch_size = 16
    workers = 1
    max_queue_size = workers * 1
    nb_epoch = 10000

    def __init__(self, args):
        self.exp_name = self.arch + '-' + args.suffix

        if args.data is not None:
            self.data = args.data

        if args.suffix == 'random':
            self.weights = None
        elif args.suffix == 'genesis':
            self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
        elif args.suffix == 'genesis-autoencoder':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
        elif args.suffix == 'genesis-nonlinear':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
        elif args.suffix == 'genesis-localshuffling':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
        elif args.suffix == 'genesis-outpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
        elif args.suffix == 'genesis-inpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
        elif args.suffix == 'denoisy':
            self.weights = 'pretrained_weights/denoisy.h5'
        elif args.suffix == 'patchshuffling':
            self.weights = 'pretrained_weights/patchshuffling.h5'
        elif args.suffix == 'hg':
            self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unrecognized suffix: ' + args.suffix)

        train_ids = self._load_csv(os.path.join(self.csv, "fold_1.csv")) + self._load_csv(os.path.join(self.csv, "fold_2.csv"))
        random.Random(4).shuffle(train_ids)
        self.validation_ids = train_ids[:len(train_ids) // 8]
        self.train_ids = train_ids[len(train_ids) // 8:]
        self.test_ids = self._load_csv(os.path.join(self.csv, "fold_3.csv"))
        self.num_train = len(self.train_ids)
        self.num_validation = len(self.validation_ids)
        self.num_test = len(self.test_ids)

        # logs
        self.model_path = os.path.join("models/bms", "run_" + str(args.run))
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.logs_path = os.path.join(self.model_path, "logs")
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

    def _load_csv(self, foldfile=None):
        assert foldfile is not None
        patient_ids = []
        with open(foldfile, 'r') as f:
            reader = csv.reader(f, lineterminator='\n')
            patient_ids.extend(reader)
        for i, item in enumerate(patient_ids):
            patient_ids[i] = item[0]
        return patient_ids

    def display(self):
        """Display Configuration values."""
        print("\nConfigurations:")
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)) and not '_ids' in a:
                print("{:30} {}".format(a, getattr(self, a)))
        print("\n")
class ecc_config:
    arch = 'Vnet'

    # data
    data = '/mnt/dfs/zongwei/Academic/MICCAI2020/Genesis_PE/dataset/augdata/VOIR'
    csv = "data/ecc"
    clip_min = -1000
    clip_max = 1000
    input_rows = 64
    input_cols = 64
    input_deps = 64

    # model
    optimizer = 'adam'
    lr = 1e-3
    patience = 38
    verbose = 1
    batch_size = 24
    workers = 1
    max_queue_size = workers * 1
    nb_epoch = 10000
    num_classes = 1

    def __init__(self, args=None):
        self.exp_name = self.arch + '-' + args.suffix + '-cv-' + str(args.cv)

        if args.data is not None:
            self.data = args.data

        if args.suffix == 'random':
            self.weights = None
        elif args.suffix == 'genesis':
            self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
        elif args.suffix == 'genesis-autoencoder':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
        elif args.suffix == 'genesis-nonlinear':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
        elif args.suffix == 'genesis-localshuffling':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
        elif args.suffix == 'genesis-outpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
        elif args.suffix == 'genesis-inpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
        elif args.suffix == 'denoisy':
            self.weights = 'pretrained_weights/denoisy.h5'
        elif args.suffix == 'patchshuffling':
            self.weights = 'pretrained_weights/patchshuffling.h5'
        elif args.suffix == 'hg':
            self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unrecognized suffix: ' + args.suffix)

        # logs
        assert args.subsetting is not None
        self.model_path = os.path.join("models/ecc", "run_" + str(args.run), args.subsetting)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.logs_path = os.path.join(self.model_path, "logs")
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

        self.patch_csv_path = 'Patch-20mm-cv-' + str(args.cv) + '-features_output_2_iter-100000.csv'
        self.candidate_csv_path = 'Candidate-20mm-cv-' + str(args.cv) + '-features_output_2_iter-100000.csv'
        self.csv_froc = 'features_output_2_iter-100000.csv'

    def display(self):
        print("Configurations")
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
        #print("\n")
class ncc_config:
    arch = 'Vnet'

    # data
    data = '/mnt/dataset/shared/zongwei/LUNA16/LUNA16_FPR_32x32x32'
    train_fold = [0, 1, 2, 3, 4]
    valid_fold = [5, 6]
    test_fold = [7, 8, 9]
    hu_min = -1000
    hu_max = 1000
    input_rows = 64
    input_cols = 64
    input_deps = 32

    # model
    optimizer = 'adam'
    lr = 1e-3
    patience = 10
    verbose = 1
    batch_size = 24
    workers = 1
    max_queue_size = workers * 1
    nb_epoch = 10000
    num_classes = 1

    def __init__(self, args=None):
        self.exp_name = self.arch + '-' + args.suffix

        if args.data is not None:
            self.data = args.data

        if args.suffix == 'random':
            self.weights = None
        elif args.suffix == 'genesis':
            self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
        elif args.suffix == 'genesis-autoencoder':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
        elif args.suffix == 'genesis-nonlinear':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
        elif args.suffix == 'genesis-localshuffling':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
        elif args.suffix == 'genesis-outpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
        elif args.suffix == 'genesis-inpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
        elif args.suffix == 'denoisy':
            self.weights = 'pretrained_weights/denoisy.h5'
        elif args.suffix == 'patchshuffling':
            self.weights = 'pretrained_weights/patchshuffling.h5'
        elif args.suffix == 'hg':
            self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unrecognized suffix: ' + args.suffix)

        # logs
        self.model_path = os.path.join("models/ncc", "run_" + str(args.run))
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.logs_path = os.path.join(self.model_path, "logs")
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

    def display(self):
        print("Configurations")
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
        #print("\n")
class ncs_config:
    arch = 'Vnet'

    # data
    data = '/mnt/dataset/shared/zongwei/LIDC'
    input_rows = 64
    input_cols = 64
    input_deps = 32

    # model
    optimizer = 'adam'
    lr = 1e-3
    patience = 50
    verbose = 1
    batch_size = 16
    workers = 1
    max_queue_size = workers * 1
    nb_epoch = 10000

    def __init__(self, args):
        self.exp_name = self.arch + '-' + args.suffix

        if args.data is not None:
            self.data = args.data

        if args.suffix == 'random':
            self.weights = None
        elif args.suffix == 'genesis':
            self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
        elif args.suffix == 'genesis-autoencoder':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
        elif args.suffix == 'genesis-nonlinear':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
        elif args.suffix == 'genesis-localshuffling':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
        elif args.suffix == 'genesis-outpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
        elif args.suffix == 'genesis-inpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
        elif args.suffix == 'denoisy':
            self.weights = 'pretrained_weights/denoisy.h5'
        elif args.suffix == 'patchshuffling':
            self.weights = 'pretrained_weights/patchshuffling.h5'
        elif args.suffix == 'hg':
            self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unrecognized suffix: ' + args.suffix)

        # logs
        self.model_path = os.path.join("models/ncs", "run_" + str(args.run))
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.logs_path = os.path.join(self.model_path, "logs")
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

    def display(self):
        """Display Configuration values."""
        print("\nConfigurations:")
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)):
                print("{:30} {}".format(a, getattr(self, a)))
        print("\n")
class lcs_config:
    arch = 'Vnet'

    # data
    data = '/mnt/dfs/zongwei/Academic/MICCAI2019/Data/LiTS/3D_LiTS_NPY_256x256xZ'
    nii = '/mnt/dataset/shared/zongwei/LiTS/Tr'
    obj = 'liver'
    train_idx = [n for n in range(0, 100)]
    valid_idx = [n for n in range(100, 115)]
    test_idx = [n for n in range(115, 130)]
    num_train = len(train_idx)
    num_valid = len(valid_idx)
    num_test = len(test_idx)
    hu_max = 1000
    hu_min = -1000
    input_rows = 64
    input_cols = 64
    input_deps = 32

    # model
    optimizer = 'adam'
    lr = 1e-2
    patience = 20
    verbose = 1
    batch_size = 16
    workers = 1
    max_queue_size = workers * 1
    nb_epoch = 10000

    def __init__(self, args):
        self.exp_name = self.arch + '-' + args.suffix

        if args.data is not None:
            self.data = args.data

        if args.suffix == 'random':
            self.weights = None
        elif args.suffix == 'genesis':
            self.weights = 'pretrained_weights/Genesis_Chest_CT.h5'
        elif args.suffix == 'genesis-autoencoder':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-autoencoder.h5'
        elif args.suffix == 'genesis-nonlinear':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-nonlinear.h5'
        elif args.suffix == 'genesis-localshuffling':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-localshuffling.h5'
        elif args.suffix == 'genesis-outpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-outpainting.h5'
        elif args.suffix == 'genesis-inpainting':
            self.weights = 'pretrained_weights/Genesis_Chest_CT-inpainting.h5'
        elif args.suffix == 'denoisy':
            self.weights = 'pretrained_weights/denoisy.h5'
        elif args.suffix == 'patchshuffling':
            self.weights = 'pretrained_weights/patchshuffling.h5'
        elif args.suffix == 'hg':
            self.weights = 'pretrained_weights/hg.h5'
        else:
            raise ValueError('unrecognized suffix: ' + args.suffix)

        # logs
        self.model_path = os.path.join("models/lcs", "run_" + str(args.run))
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)
        self.logs_path = os.path.join(self.model_path, "logs")
        if not os.path.exists(self.logs_path):
            os.makedirs(self.logs_path)

    def display(self):
        """Display Configuration values."""
        print("\nConfigurations:")
        for a in dir(self):
            if not a.startswith("__") and not callable(getattr(self, a)) and not '_idx' in a:
                print("{:30} {}".format(a, getattr(self, a)))
        print("\n")
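
# Usage sketch (hypothetical argparse namespace; the training scripts in this
# repo pass a similar `args` object in):
#
#     import argparse
#     args = argparse.Namespace(suffix='genesis', data=None, run=1)
#     conf = bms_config(args)
#     conf.display()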
|
genomepy/annotation/__init__.py | vanheeringen-lab/genomepy | 146 | 12785816 | <reponame>vanheeringen-lab/genomepy<gh_stars>100-1000
"""Annotation class, modules & related functions"""
import os
import re
from pathlib import Path
from typing import Iterable, Optional, Union
import numpy as np
import pandas as pd
from loguru import logger
from genomepy.annotation.mygene import map_genes as _map_genes
from genomepy.annotation.mygene import query_mygene
from genomepy.annotation.sanitize import sanitize as _sanitize
from genomepy.annotation.utils import _check_property, _parse_annot, read_annot
from genomepy.providers import map_locations
from genomepy.utils import get_genomes_dir
__all__ = ["Annotation", "query_mygene", "filter_regex"]
class Annotation:
    """
    Manipulate genes and whole gene annotations with pandas dataframes.

    Parameters
    ----------
    genome : str
        Genome name.
    name : str, optional
        Name of annotation file.
        If name is not specified the default annotation for the genome is used.
    genomes_dir : str, optional
        Genomes installation directory.

    Returns
    -------
    object
        attributes & methods to manipulate gene annotations
    """

    # import methods
    map_genes = _map_genes
    sanitize = _sanitize

    # lazy attributes (loaded when called)
    # listed here for code autocompletion
    bed: pd.DataFrame = None
    "Dataframe with BED format annotation"
    gtf: pd.DataFrame = None
    "Dataframe with GTF format annotation"
    named_gtf: pd.DataFrame = None
    "Dataframe with GTF format annotation, with gene_name as index"
    genome_contigs: list = None
    "Contigs found in the genome fasta"
    annotation_contigs: list = None
    "Contigs found in the gene annotation BED"

    def __init__(self, genome: str, name: str = None, genomes_dir: str = None):
        self.genome = genome
        self.genome_dir = os.path.join(get_genomes_dir(genomes_dir), genome)
        if not os.path.exists(self.genome_dir):
            raise ValueError(f"Genome {self.genome} not found!")

        # annotation file provided
        if name:
            suffixes = Path(name).suffixes[-2:]
            if ".bed" in suffixes or ".BED" in suffixes:
                self.annotation_bed_file = name
            elif ".gtf" in suffixes or ".GTF" in suffixes:
                self.annotation_gtf_file = name
            else:
                raise NotImplementedError(
                    "Only (gzipped) bed and gtf files are supported at the moment!"
                )
        else:
            # annotation files
            self.annotation_gtf_file = _get_file(
                self.genome_dir, f"{self.genome}.annotation.gtf"
            )
            self.annotation_bed_file = _get_file(
                self.genome_dir, f"{self.genome}.annotation.bed"
            )

        # genome files
        self.readme_file = _get_file(self.genome_dir, "README.txt", False)
        self.genome_file = _get_file(self.genome_dir, f"{self.genome}.fa", False)
        self.index_file = _get_file(self.genome_dir, f"{self.genome}.fa.fai", False)
        self.sizes_file = _get_file(self.genome_dir, f"{self.genome}.fa.sizes", False)
    # lazy attributes
    def __getattribute__(self, name):
        val = super(Annotation, self).__getattribute__(name)
        if val is not None:
            return val

        # if the attribute is None/empty, check if it is a lazy attribute
        if name == "bed":
            _check_property(self.annotation_bed_file, f"{self.genome}.annotation.bed")
            val = read_annot(self.annotation_bed_file)
            setattr(self, name, val)

        elif name == "gtf":
            _check_property(self.annotation_gtf_file, f"{self.genome}.annotation.gtf")
            val = read_annot(self.annotation_gtf_file)
            setattr(self, name, val)

        elif name == "named_gtf":
            df = self.gtf[self.gtf.attribute.str.contains("gene_name")]
            names = []
            for row in df.attribute:
                name = str(row).split("gene_name")[1].split(";")[0]
                names.append(name.replace('"', "").replace(" ", ""))
            df = df.assign(gene_name=names)
            val = df.set_index("gene_name")
            setattr(self, name, val)

        elif name == "genome_contigs":
            _check_property(self.sizes_file, f"{self.genome}.fa.sizes")
            val = list(
                set(pd.read_csv(self.sizes_file, sep="\t", header=None, dtype=str)[0])
            )
            setattr(self, name, val)

        elif name == "annotation_contigs":
            val = list(set(self.bed.chrom))
            setattr(self, name, val)

        return val

    # lazily update attributes if upstream attribute is updated
    def __setattr__(self, name, value):
        if name == "bed":
            self.annotation_contigs = None  # noqa
        elif name == "gtf":
            self.named_gtf = None  # noqa
        elif name == "sizes_file":
            self.genome_contigs = None  # noqa
        super(Annotation, self).__setattr__(name, value)
    def genes(self, annot: str = "bed") -> list:
        """
        Retrieve gene names from an annotation.

        For BED files, names are taken from the 'name' columns.

        For GTF files, names are taken from the 'gene_name' field
        in the attribute column, if available.

        Parameters
        ----------
        annot : str, optional
            Annotation file type: 'bed' or 'gtf' (default: "bed")

        Returns
        -------
        list
            gene names
        """
        if annot.lower() == "bed":
            return list(set(self.bed.name))
        return list(set(self.named_gtf.index))

    def gene_coords(self, genes: Iterable[str], annot: str = "bed") -> pd.DataFrame:
        """
        Retrieve gene locations.

        Parameters
        ----------
        genes : Iterable
            List of gene names as found in the given annotation file type
        annot : str, optional
            Annotation file type: 'bed' or 'gtf' (default: "bed")

        Returns
        -------
        pandas.DataFrame
            gene annotation
        """
        gene_list = list(genes)
        if annot.lower() == "bed":
            df = self.bed.set_index("name")
            gene_info = df[["chrom", "start", "end", "strand"]]
        else:
            df = self.named_gtf
            # 1 row per gene
            df = (
                df.groupby(["gene_name", "seqname", "strand"])
                .agg({"start": np.min, "end": np.max})
                .reset_index(level=["seqname", "strand"])
            )
            gene_info = df[["seqname", "start", "end", "strand"]]

        gene_info = gene_info.reindex(gene_list).dropna()
        pct = int(100 * len(set(gene_info.index)) / len(gene_list))
        if pct < 90:
            logger.warning(
                (f"Only {pct}% of genes was found. " if pct else "No genes found. ")
                + "A list of all gene names can be found with `Annotation.genes()`"
            )

        if annot.lower() == "bed":
            return gene_info.reset_index()[["chrom", "start", "end", "name", "strand"]]
        else:
            return gene_info.reset_index()[
                ["seqname", "start", "end", "gene_name", "strand"]
            ]
    def map_locations(
        self, annot: Union[str, pd.DataFrame], to: str, drop=True
    ) -> Union[None, pd.DataFrame]:
        """
        Map chromosome mapping from one assembly to another.

        Uses the NCBI assembly reports to find contigs.
        Drops missing contigs.

        Parameters
        ----------
        annot : str or pd.Dataframe
            annotation to map: "bed", "gtf" or a pandas dataframe.
        to: str
            target provider (UCSC, Ensembl or NCBI)
        drop: bool, optional
            if True, replace the chromosome column.
            If False, add a 2nd chromosome column.

        Returns
        -------
        pandas.DataFrame
            chromosome mapping.
        """
        genomes_dir = os.path.dirname(self.genome_dir)
        mapping = map_locations(self.genome, to, genomes_dir)
        if mapping is None:
            return

        df = _parse_annot(self, annot)
        index_name = df.index.name
        if not set([index_name] + df.columns.to_list()) & {"chrom", "seqname"}:
            raise ValueError(
                "Location mapping requires a column named 'chrom' or 'seqname'."
            )

        # join mapping on chromosome column and return with original index
        is_indexed = df.index.to_list() != list(range(df.shape[0]))
        if is_indexed:
            df = df.reset_index(level=index_name)
        index_col = "chrom" if "chrom" in df.columns else "seqname"
        df = df.set_index(index_col)
        df = mapping.join(df, how="inner")
        df = df.reset_index(drop=drop)
        df.columns = [index_col] + df.columns.to_list()[1:]
        if is_indexed:
            df = df.set_index(index_name if index_name else "index")
        return df

    def filter_regex(
        self,
        annot: Union[str, pd.DataFrame],
        regex: Optional[str] = ".*",
        invert_match: Optional[bool] = False,
        column: Union[str, int] = 0,
    ) -> pd.DataFrame:
        """
        Filter a dataframe by any column using regex.

        Parameters
        ----------
        annot : str or pd.Dataframe
            annotation to filter: "bed", "gtf" or a pandas dataframe
        regex : str
            regex string to match
        invert_match : bool, optional
            keep contigs NOT matching the regex string
        column: str or int, optional
            column name or number to filter (default: 1st, contig name)

        Returns
        -------
        pd.DataFrame
            filtered dataframe
        """
        df = _parse_annot(self, annot)
        return filter_regex(df, regex, invert_match, column)
def _get_file(genome_dir: str, fname: str, warn_missing: Optional[bool] = True):
    """
    Returns the filepath to a single (gzipped) file in the genome_dir with matching ext.
    """
    fpath = os.path.join(genome_dir, fname)
    if os.path.exists(fpath):
        return fpath
    if os.path.exists(f"{fpath}.gz"):
        return f"{fpath}.gz"
    if warn_missing:
        logger.warning(
            f"Could not find '{fname}(.gz)' in directory {genome_dir}. "
            "Methods using this file won't work!"
        )
    return
def filter_regex(
    df: pd.DataFrame,
    regex: str,
    invert_match: Optional[bool] = False,
    column: Union[str, int] = 0,
) -> pd.DataFrame:
    """
    Filter a pandas dataframe by a column (default: 1st, contig name).

    Parameters
    ----------
    df: pd.Dataframe
        annotation to filter (a pandas dataframe)
    regex : str
        regex string to match
    invert_match : bool, optional
        keep contigs NOT matching the regex string
    column: str or int, optional
        column name or number to filter (default: 1st, contig name)

    Returns
    -------
    pd.DataFrame
        filtered dataframe
    """
    if column not in df.columns:
        if isinstance(column, int):
            column = df.columns[column]
        else:
            raise ValueError(
                f"Column '{column}' not found in annotation columns {list(df.columns)}"
            )

    pattern = re.compile(regex)
    filter_func = df[column].map(lambda x: bool(pattern.match(x)) is not invert_match)
    return df[filter_func]
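
# Minimal usage sketch (assumes a genome, e.g. "hg38", was already installed
# with genomepy so its annotation files exist on disk):
#
#     ann = Annotation("hg38")
#     tp53 = ann.gene_coords(["TP53"], annot="bed")
#     main_chroms = ann.filter_regex("bed", regex=r"chr[\dXY]+$")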
|
mango-python/bdgenomics/mango/test/notebook_test.py | heuermh/mango | 120 | 12785846 | <reponame>heuermh/mango
#
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdgenomics.mango.test import SparkTestCase
from bdgenomics.adam.adamContext import ADAMContext
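
# Each test below exec()s a Python script that `make test` converts from the
# corresponding example notebook; the exec'd script picks up `spark`,
# `testMode`, and the input file paths from this local scope.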
class NotebookTest(SparkTestCase):

    def test_example(self):
        # these variables are read into mango-python.py
        spark = self.ss
        testMode = True
        alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")
        variantFile = self.exampleFile("snv.chr17.7502100-7502500.vcf")
        genotypeFile = self.exampleFile("genodata.v3.vcf")
        featureFile = self.exampleFile("chr17.582500-594500.bed")

        # this file is converted from ipynb in make test
        testFile = self.exampleFile("notebooks/mango-pileup.py")
        exec(open(testFile).read())

    def test_coverage_example(self):
        # these variables are read into mango-python.py
        spark = self.ss
        testMode = True
        alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")

        # this file is converted from mango-python.coverage.ipynb in the Makefile
        testCoverageFile = self.exampleFile("notebooks/mango-python-coverage.py")
        exec(open(testCoverageFile).read())

    def test_alignment_example(self):
        # these variables are read into mango-python.py
        spark = self.ss
        testMode = True
        alignmentFile = self.exampleFile("chr17.7500000-7515000.sam")

        # this file is converted from mango-python-alignment.ipynb in the Makefile
        testAlignmentFile = self.exampleFile("notebooks/mango-python-alignment.py")
        exec(open(testAlignmentFile).read())

    def test_variants_example(self):
        # these variables are read into mango-python.py
        spark = self.ss
        testMode = True
        vcfFile = self.exampleFile("genodata.v3.vcf")

        # this file is converted from mango-python-variants.ipynb in the Makefile
        testVariantFile = self.exampleFile("notebooks/mango-python-variants.py")
        exec(open(testVariantFile).read())
|
exercises/ja/exc_03_16_02.py | Jette16/spacy-course | 2,085 | 12785876 | <filename>exercises/ja/exc_03_16_02.py
import spacy

nlp = spacy.load("ja_core_news_sm")
text = (
    "チックフィレイはジョージア州カレッジパークに本社を置く、"
    "チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)

# Disable the parser
with ____.____(____):
    # Process the text
    doc = ____
    # Print the named entities in the doc
    print(____)
|
lexos/managers/file_manager.py | WheatonCS/Lexos | 107 | 12785878 | <gh_stars>100-1000
import io
import os
import shutil
import zipfile
from os import makedirs
from os.path import join as pathjoin
from typing import List, Tuple, Dict
import numpy as np
import pandas as pd
from flask import request, send_file
import lexos.helpers.constants as constants
import lexos.helpers.general_functions as general_functions
import lexos.managers.session_manager as session_manager
from lexos.managers.lexos_file import LexosFile
class FileManager:
    def __init__(self):
        """Class for object to hold info about user's files & choices in Lexos.

        Each user will have their own unique instance of the
        FileManager. A major data attribute of this class is a dictionary
        holding the LexosFile objects, each representing an uploaded file to be
        used in Lexos. The key for the dictionary is the unique ID of the file,
        with the value being the corresponding LexosFile object.
        """
        self._files = {}
        self.next_id = 0

        makedirs(pathjoin(session_manager.session_folder(),
                          constants.FILE_CONTENTS_FOLDER))

    @property
    def files(self) -> Dict[int, LexosFile]:
        """A property for private attribute: _files.

        :return: a dict map file id to lexos_files.
        """
        return self._files
    def add_file(self, original_filename: str, file_name: str,
                 file_string: str) -> int:
        """Adds a file to the FileManager.

        The new file identifies with the next ID to be used.

        :param original_filename: the original file name of the uploaded file.
        :param file_name: the file name we store.
        :param file_string: the string contents of the text.
        :return: the id of the newly added file.
        """
        # ensure the stored file name is unique by prefixing "copy of"
        # as many times as needed
        exist_clone_file = True
        while exist_clone_file:
            exist_clone_file = False
            for file in list(self.files.values()):
                if file.name == file_name:
                    file_name = 'copy of ' + file_name
                    original_filename = 'copy of ' + original_filename
                    exist_clone_file = True
                    break

        new_file = LexosFile(
            original_filename,
            file_name,
            file_string,
            self.next_id)
        self.files[new_file.id] = new_file
        self.next_id += 1
        self.files[new_file.id].set_name(file_name)  # Set the document label

        return new_file.id

    def delete_files(self, file_ids: List[int]):
        """Deletes all the files that have id in IDs.

        :param file_ids: an array containing all the id of the files that need
            to be deleted.
        """
        for file_id in file_ids:
            file_id = int(file_id)  # in case that the id is not int
            self.files[file_id].clean_and_delete()
            del self.files[file_id]  # Delete the entry

    def get_active_files(self) -> List[LexosFile]:
        """Creates a list of all the active files in FileManager.

        :return: a list of LexosFile objects.
        """
        active_files = []
        for l_file in list(self.files.values()):
            if l_file.active:
                active_files.append(l_file)

        return active_files

    def delete_active_files(self) -> List[int]:
        """Deletes every active file.

        These active files are deleted by calling the delete method on the
        LexosFile object before removing it from the dictionary.

        :return: list of deleted file_ids.
        """
        file_ids = []
        for file_id, l_file in list(self.files.items()):
            if l_file.active:
                file_ids.append(file_id)
                l_file.clean_and_delete()
                del self.files[file_id]  # Delete the entry

        return file_ids
    def disable_all(self):
        """Disables every file in the file manager."""
        for l_file in list(self.files.values()):
            l_file.disable()

    def enable_all(self):
        """Enables every file in the file manager."""
        for l_file in list(self.files.values()):
            l_file.enable()

    def get_previews_of_active(self) -> List[Tuple[int, str, str, str]]:
        """Creates a formatted list of previews from every active file.

        Each preview on this formatted list of previews is made from every
        individual active file located in the file manager.

        :return: a formatted list with an entry (tuple) for every active file,
            containing the preview information (the file id, name, label
            and preview).
        """
        previews = []
        for l_file in self.files.values():
            if l_file.active:
                previews.append(
                    (l_file.id, l_file.name, l_file.label,
                     l_file.get_preview())
                )
        # TODO: figure out this should be l_file.label or l_file.class_label

        return previews

    def get_previews_of_inactive(self) -> List[Tuple[int, str, str, str]]:
        """Creates a formatted list of previews from every inactive file.

        Each preview on this formatted list of previews is made from every
        individual inactive file located in the file manager.

        :return: a formatted list with an entry (tuple) for every inactive
            file, containing the preview information (the file id, name,
            label and preview).
        """
        previews = []
        for l_file in list(self.files.values()):
            if not l_file.active:
                previews.append(
                    (l_file.id, l_file.name, l_file.class_label,
                     l_file.get_preview())
                )

        return previews

    def get_content_of_active_with_id(self) -> Dict[int, str]:
        """Helper method to get_matrix.

        :return: get all the file content from the file_manager
        """
        return {file.id: file.load_contents()
                for file in self.get_active_files()}
    def toggle_file(self, file_id: int):
        """Toggles the active status of the given file.

        :param file_id: the id of the file to be toggled.
        """
        l_file = self.files[file_id]
        if l_file.active:
            l_file.disable()
        else:
            l_file.enable()

    def enable_files(self, file_ids: List[int]):
        """Enables a list of Lexos files.

        :param file_ids: list of fileIDs selected in the UI.
        """
        for file_id in file_ids:
            file_id = int(file_id)
            l_file = self.files[file_id]
            l_file.enable()

    def disable_files(self, file_ids: List[int]):
        """Disables a list of Lexos files.

        :param file_ids: list of fileIDs selected in the UI.
        """
        for file_id in file_ids:
            file_id = int(file_id)
            l_file = self.files[file_id]
            l_file.disable()

    def classify_active_files(self):
        """Applies a class label (from request.data) to every active file."""
        # TODO: probably should not get request form here
        class_label = request.data
        for l_file in list(self.files.values()):
            if l_file.active:
                l_file.set_class_label(class_label)

    def add_upload_file(self, raw_file_string: bytes, file_name: str):
        """Detects (and applies) the encoding type of the file's contents.

        Since chardet runs slowly, it initially detects (only)
        MIN_ENCODING_DETECT characters; if that fails, chardet is run on
        the entire file for a fuller test.

        :param raw_file_string: the file you want to detect the encoding
        :param file_name: name of the file
        """
        decoded_file_string = general_functions.decode_bytes(
            raw_bytes=raw_file_string)

        # Line encodings:
        # \n    Unix, OS X
        # \r    Mac OS 9
        # \r\n  Win. CR+LF
        # The following block converts everything to '\n'

        # "\r\n" -> '\n'
        if "\r\n" in decoded_file_string[:constants.MIN_NEWLINE_DETECT]:
            decoded_file_string = decoded_file_string.replace('\r', '')

        # '\r' -> '\n'
        if '\r' in decoded_file_string[:constants.MIN_NEWLINE_DETECT]:
            decoded_file_string = decoded_file_string.replace('\r', '\n')

        # Add the file to the FileManager
        self.add_file(file_name, file_name, decoded_file_string)
    def handle_upload_workspace(self):
        """Handles the session when you upload a workspace (.lexos) file."""
        # save .lexos file
        save_path = os.path.join(constants.UPLOAD_FOLDER,
                                 constants.WORKSPACE_DIR)
        save_file = os.path.join(save_path, str(self.next_id) + '.zip')
        try:
            os.makedirs(save_path)
        except FileExistsError:
            pass
        f = open(save_file, 'wb')
        f.write(request.data)
        f.close()

        # clean the session folder
        shutil.rmtree(session_manager.session_folder())

        # extract the zip
        upload_session_path = os.path.join(
            constants.UPLOAD_FOLDER, str(
                self.next_id) + '_upload_work_space_folder')
        with zipfile.ZipFile(save_file) as zf:
            zf.extractall(upload_session_path)
        general_functions.copy_dir(upload_session_path,
                                   session_manager.session_folder())

        # remove temp
        shutil.rmtree(save_path)
        shutil.rmtree(upload_session_path)

        try:
            # if there is no file content folder make one.
            # this dir will be lost during download(zip) if your original file
            # content folder does not contain anything.
            os.makedirs(os.path.join(session_manager.session_folder(),
                                     constants.FILE_CONTENTS_FOLDER))
        except FileExistsError:
            pass

    def update_workspace(self):
        """Updates the whole work space."""
        # update the savepath of each file
        for l_file in list(self.files.values()):
            l_file.save_path = pathjoin(
                session_manager.session_folder(),
                constants.FILE_CONTENTS_FOLDER,
                str(l_file.id) + '.txt')
        # update the session
        session_manager.load()
    def scrub_files(self, saving_changes: bool) -> \
            List[Tuple[int, str, str, str]]:
        """Scrubs active files & creates a formatted preview list w/ results.

        :param saving_changes: a boolean saying whether or not to save the
            changes made.
        :return: a formatted list with an entry (tuple) for every active file,
            containing the preview information (the file id, label, class
            label, and scrubbed contents preview).
        """
        previews = []
        for l_file in list(self.files.values()):
            if l_file.active:
                previews.append(
                    (l_file.id,
                     l_file.label,
                     l_file.class_label,
                     l_file.scrub_contents(saving_changes)))

        return previews

    def cut_files(self, saving_changes: bool) -> \
            List[Tuple[int, str, str, str]]:
        """Cuts active files & creates a formatted preview list w/ the results.

        :param saving_changes: a boolean saying whether or not to save the
            changes made.
        :return: a formatted list with an entry (tuple) for every active file,
            containing the preview information (the file id, label, class
            label, and cut contents preview).
        """
        active_files = []
        for l_file in list(self.files.values()):
            if l_file.active:
                active_files.append(l_file)

        previews = []
        for l_file in active_files:
            l_file.active = False
            children_file_contents = l_file.cut_contents()
            num_cut_files = len(children_file_contents)
            l_file.save_cut_options(parent_id=None)

            if saving_changes:
                for i, file_string in enumerate(children_file_contents):
                    original_filename = l_file.name
                    zeros = len(str(num_cut_files)) - len(str(i + 1))
                    doc_label = l_file.label + '_' + ('0' * zeros) + str(i + 1)
                    file_id = self.add_file(
                        original_filename, doc_label + '.txt', file_string)
                    self.files[file_id].set_scrub_options_from(parent=l_file)
                    self.files[file_id].save_cut_options(parent_id=l_file.id)
                    self.files[file_id].set_name(doc_label)
                    self.files[file_id].set_class_label(
                        class_label=l_file.class_label)
            else:
                for i, file_string in enumerate(children_file_contents):
                    previews.append(
                        (l_file.id,
                         l_file.name,
                         l_file.label + '_' + str(i + 1),
                         general_functions.make_preview_from(file_string)))

        if saving_changes:
            previews = self.get_previews_of_active()

        return previews
    def zip_active_files(self, zip_file_name: str):
        """Sends a zip file of files containing contents of the active files.

        :param zip_file_name: Name to assign to the zipped file.
        :return: zipped archive to send to the user, created with Flask's
            send_file.
        """
        # TODO: make send file happen in interface
        zip_stream = io.BytesIO()
        zip_file = zipfile.ZipFile(file=zip_stream, mode='w')
        for l_file in list(self.files.values()):
            if l_file.active:
                # Make sure the filename has an extension
                l_file_name = l_file.name
                if not l_file_name.endswith('.txt'):
                    l_file_name = l_file_name + '.txt'
                zip_file.write(
                    l_file.save_path,
                    arcname=l_file_name,
                    compress_type=zipfile.ZIP_STORED)
        zip_file.close()
        zip_stream.seek(0)

        return send_file(
            zip_stream,
            attachment_filename=zip_file_name,
            as_attachment=True)

    def zip_workspace(self) -> str:
        """Sends a zip file containing a pickle file of session & its folder.

        :return: the path of the zipped workspace
        """
        # TODO: move this to matrix model
        # initialize the save path
        save_path = os.path.join(
            constants.UPLOAD_FOLDER,
            constants.WORKSPACE_DIR)
        rounded_next_id = str(self.next_id % 10000)  # take the last 4 digits
        workspace_file_path = os.path.join(
            constants.UPLOAD_FOLDER,
            rounded_next_id + '_' + constants.WORKSPACE_FILENAME)

        # remove unnecessary content in the workspace
        try:
            # attempt to remove the results folder (CSV matrices, etc.)
            shutil.rmtree(
                os.path.join(
                    session_manager.session_folder(),
                    constants.RESULTS_FOLDER))
        except FileNotFoundError:
            pass

        # move session folder to work space folder
        try:
            # try to remove previous workspace in order to resolve conflict
            os.remove(workspace_file_path)
        except FileNotFoundError:
            pass
        try:
            # empty the save path in order to resolve conflict
            shutil.rmtree(save_path)
        except FileNotFoundError:
            pass
        general_functions.copy_dir(session_manager.session_folder(), save_path)

        # save session in the work space folder
        session_manager.save(save_path)

        # zip the dir
        zip_file = zipfile.ZipFile(workspace_file_path, 'w')
        general_functions.zip_dir(save_path, zip_file)
        zip_file.close()

        # remove the original dir
        shutil.rmtree(save_path)

        return workspace_file_path
    def check_actives_tags(self) -> Tuple[bool, bool, bool]:
        """Checks the tags of the active files for DOE/XML/HTML/SGML tags.

        :return: three booleans, the first signifying the presence of any type
            of tags, the second signifying the presence of DOE tags, the third
            signifying the presence of gutenberg tags/boilerplate.
        """
        found_tags = False
        found_doe = False
        found_gutenberg = False

        for l_file in list(self.files.values()):
            if not l_file.active:
                # skip the rest of the current loop for inactive files
                continue

            if l_file.doc_type == 'doe':
                found_doe = True
                found_tags = True
            if l_file.has_tags:
                found_tags = True
            if l_file.is_gutenberg:
                found_gutenberg = True

            if found_doe and found_tags:
                break

        return found_tags, found_doe, found_gutenberg

    def update_label(self, file_id: int, file_label: str):
        """Sets the file label of the file denoted to the supplied file label.

        Files are denoted by the given id.

        :param file_id: the id of the file for which to change the label.
        :param file_label: the label to set the file to.
        """
        self.files[file_id].set_name(file_label)  # set the document label
def get_active_labels_with_id(self) -> Dict[int, str]:
"""Gets labels of all active files in dictionary{file_id: file_label}.
:return: a dictionary of the currently active files' labels.
"""
return {l_file.id: l_file.label
for l_file in self.files.values() if l_file.active}
def get_class_division_map(self) -> pd.DataFrame:
"""Gets the class division map to help with topword analysis.
:return: a pandas data frame where:
- the data is the division map with boolean values that indicate
which class each file belongs to.
- the index is the class labels.
- the column is the file id.
"""
# active files labels and classes.
active_files = self.get_active_files()
file_ids = [file.id for file in active_files]
class_labels = {file.class_label for file in active_files}
# initialize values and get class division map.
label_length = len(file_ids)
class_length = len(class_labels)
class_division_map = pd.DataFrame(
data=np.zeros((class_length, label_length), dtype=bool),
index=class_labels,
columns=file_ids)
# set correct boolean value for each file.
for file in active_files:
class_division_map[file.id][file.class_label] = True
# Set file with no class to Untitled.
class_division_map.index = \
["Untitled" if class_label == "" else class_label
for class_label in class_division_map.index]
return class_division_map
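    # A minimal sketch of the returned frame, assuming two hypothetical
    # active files (ids 1 and 2) labelled "novels" and "poems"; row order
    # follows the class-label set and is therefore arbitrary:
    #
    #               1      2
    #   novels   True  False
    #   poems   False   True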
def get_previews_of_all(self) -> List[dict]:
"""Creates a formatted list of previews from every file.
Each preview on this formatted list of previews is made from every
individual file located in the file manager. For use in the Select
screen.
:return: a list of dictionaries with preview information for every
file.
"""
previews = []
for l_file in list(self.files.values()):
values = {
"id": l_file.id,
"filename": l_file.name,
"label": l_file.label,
"class": l_file.class_label,
"source": l_file.original_source_filename,
"preview": l_file.get_preview(),
"state": l_file.active}
previews.append(values)
return previews
def delete_all_file(self):
"""Deletes every active file.
This is done by calling the delete method on the LexosFile object
before removing it from the dictionary.
"""
for file_id, l_file in list(self.files.items()):
l_file.clean_and_delete()
del self.files[file_id] # Delete the entry
|
Python3/547.py | rakhi2001/ecom7 | 854 | 12785886 | __________________________________________________________________________________________________
sample 192 ms submission
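# Approach sketch: recursive DFS over the friendship adjacency matrix;
# every row that has not been visited yet seeds a new friend circle.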
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
seen = set()
def visit_all_friends(i: int):
for friend_idx,is_friend in enumerate(M[i]):
if is_friend and friend_idx not in seen:
seen.add(friend_idx)
visit_all_friends(friend_idx)
count = 0
for ridx in range(len(M)):
if ridx not in seen:
visit_all_friends(ridx)
count += 1
return count
__________________________________________________________________________________________________
sample 13172 kb submission
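# Approach sketch: iterative flood fill; dfs1 gathers one friend set, then
# stamps its matrix cells with a circle id (>= 2) so the main scan skips them.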
class Solution:
def findCircleNum(self, M: List[List[int]]) -> int:
def dfs1(r, c, circle):
frds = [r, c]
f_s = {r, c}
i = 0
while i < len(frds):
j = frds[i]
for k in range(len(M)):
if M[j][k] == 1 and k not in f_s:
f_s.add(k)
frds.append(k)
i = i + 1
for i in f_s:
for j in f_s:
M[i][j] = circle
circle = 1
for i in range(len(M)):
for j in range(len(M[0])):
if M[i][j] == 1:
circle = circle + 1
dfs1(i, j, circle)
break
return circle - 1
__________________________________________________________________________________________________
|
lambeq/text2diagram/spiders_reader.py | CQCL/lambeq | 131 | 12785935 | # Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['SpidersReader', 'bag_of_words_reader', 'spiders_reader']
from discopy import Word
from discopy.rigid import Diagram, Spider
from lambeq.core.types import AtomicType
from lambeq.core.utils import SentenceType, tokenised_sentence_type_check
from lambeq.text2diagram.base import Reader
S = AtomicType.SENTENCE
class SpidersReader(Reader):
"""A reader that combines words using a spider."""
def sentence2diagram(self,
sentence: SentenceType,
tokenised: bool = False) -> Diagram:
if tokenised:
if not tokenised_sentence_type_check(sentence):
raise ValueError('`tokenised` set to `True`, but variable '
'`sentence` does not have type `list[str]`.')
else:
if not isinstance(sentence, str):
raise ValueError('`tokenised` set to `False`, but variable '
'`sentence` does not have type `str`.')
sentence = sentence.split()
words = [Word(word, S) for word in sentence]
diagram = Diagram.tensor(*words) >> Spider(len(words), 1, S)
return diagram
spiders_reader = SpidersReader()
bag_of_words_reader = spiders_reader
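# A minimal usage sketch, assuming lambeq and its discopy dependency are
# installed; each word becomes a Word box of type S, merged by one spider:
#
#   diagram = spiders_reader.sentence2diagram(['Alice', 'runs'], tokenised=True)
#   print(diagram)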
|
RecoJets/JetAnalyzers/test/DijetRatioPlotExample_cfg.py | ckamtsikis/cmssw | 852 | 12785992 |
# PYTHON configuration file.
# Description: Example of dijet ratio plot
# with corrected and uncorrected jets
# Author: <NAME>
# Date: 22 - November - 2009
import FWCore.ParameterSet.Config as cms
process = cms.Process("Ana")
process.load("FWCore.MessageService.MessageLogger_cfi")
############# Set the number of events #############
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
############# Define the source file ###############
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/mc/Summer09/QCDFlat_Pt15to3000/GEN-SIM-RECO/MC_31X_V9_7TeV-v1/0000/FABD2A94-C0D3-DE11-B6FD-00237DA13C2E.root')
)
process.source.inputCommands = cms.untracked.vstring("keep *","drop *_MEtoEDMConverter_*_*")
############# Include the jet corrections ##########
process.load("JetMETCorrections.Configuration.L2L3Corrections_Summer09_7TeV_ReReco332_cff")
# set the record's IOV. Must be defined once. Choose ANY correction service. #
process.prefer("L2L3JetCorrectorAK5Calo")
############# User analyzer (calo jets) ##
process.DijetRatioCaloJets = cms.EDAnalyzer("DijetRatioCaloJets",
# Uncorrected CaloJets
UnCorrectedJets = cms.string('ak5CaloJets'),
# Corrected CaloJets
CorrectedJets = cms.string('L2L3CorJetAK5Calo'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioCaloJets.root')
)
############# User analyzer (PF jets) ##
process.DijetRatioPFJets = cms.EDAnalyzer("DijetRatioPFJets",
# Uncorrected PFJets
UnCorrectedJets = cms.string('ak5PFJets'),
# Corrected PFJets
CorrectedJets = cms.string('L2L3CorJetAK5PF'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioPFJets.root')
)
############# User analyzer (gen jets) ##
# ak5GenJets are NOT there: First load the needed modules
process.load("RecoJets.Configuration.GenJetParticles_cff")
process.load("RecoJets.JetProducers.ak5GenJets_cfi")
process.DijetRatioGenJets = cms.EDAnalyzer("DijetRatioGenJets",
# Uncorrected GenJets
UnCorrectedJets = cms.string('ak5GenJets'),
# Corrected GenJets == Uncorrected GenJets
CorrectedJets = cms.string('ak5GenJets'),
# Name of the output ROOT file containing the histograms
HistoFileName = cms.untracked.string('DijetRatioGenJets.root')
)
############# Path ###########################
process.p = cms.Path(process.L2L3CorJetAK5Calo * process.DijetRatioCaloJets)
process.p2 = cms.Path(process.L2L3CorJetAK5PF * process.DijetRatioPFJets)
process.p3 = cms.Path(process.genParticlesForJets *
process.ak5GenJets * process.DijetRatioGenJets)
############# Format MessageLogger #################
process.MessageLogger.cerr.FwkReport.reportEvery = 10
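############# Running ###############################
# In a configured CMSSW environment, a cfg file like this would typically
# be executed with: cmsRun DijetRatioPlotExample_cfg.py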
|
test/swift_project_test.py | Dan2552/SourceKittenSubl | 163 | 12786018 | from src import swift_project
from helpers import path_helper
import unittest
class TestSourceKitten(unittest.TestCase):
# Test with a simple project directory
# (i.e. without xcodeproj)
def test_source_files_simple_project(self):
project_directory = path_helper.monkey_example_directory()
output = swift_project.source_files(project_directory)
expectation = [
project_directory + "/Banana.swift",
project_directory + "/Monkey.swift"
]
self.assertEqual(sorted(list(output)), sorted(expectation))
|
lib/exaproxy/icap/parser.py | oriolarcas/exaproxy | 124 | 12786041 |
#!/usr/bin/env python
# encoding: utf-8
from .request import ICAPRequestFactory
from .response import ICAPResponseFactory
from .header import ICAPResponseHeaderFactory
class ICAPParser (object):
ICAPResponseHeaderFactory = ICAPResponseHeaderFactory
ICAPRequestFactory = ICAPRequestFactory
ICAPResponseFactory = ICAPResponseFactory
VERSIONS = ('ICAP/1.0',)
METHODS = ('REQMOD', 'OPTIONS')
HEADERS = ('cache-control', 'connection', 'date', 'trailer', 'upgrade', 'via',
'authorization','allow','from','host','referer','user-agent', 'preview',
'encapsulated','proxy-authenticate','proxy-authorization', 'istag')
def __init__ (self, configuration):
self.configuration = configuration
self.header_factory = self.ICAPResponseHeaderFactory(configuration)
self.request_factory = self.ICAPRequestFactory(configuration)
self.response_factory = self.ICAPResponseFactory(configuration)
def parseRequestLine (self, request_line):
request_parts = request_line.split() if request_line else []
if len(request_parts) == 3:
method, url, version = request_parts
method = method.upper()
version = version.upper()
else:
method, url, version = None, None, None
return method, url, version
def parseResponseLine (self, response_line):
response_parts = response_line.split(' ', 2) if response_line else []
if len(response_parts) == 3:
version, code, status = response_parts
if code.isdigit():
code = int(code)
else:
version, code, status = None, None, None
else:
version, code, status = None, None, None
return version, code, status
def readHeaders (self, request_lines):
headers = {}
for line in request_lines:
if not line:
break
if ':' not in line:
headers = None
break
key, value = line.split(':', 1)
key = key.lower().strip()
value = value.strip()
if key in self.HEADERS or key.startswith('x-'):
headers[key] = value
if key == 'pragma' and ':' in value:
pkey, pvalue = value.split(':', 1)
pkey = pkey.lower().strip()
pvalue = pvalue.strip()
headers.setdefault(key, {})[pkey] = pvalue
return headers
def parseRequest (self, icap_string, http_string):
request_lines = (p for ss in icap_string.split('\r\n') for p in ss.split('\n'))
try:
request_line = request_lines.next()
except StopIteration:
request_line = None
method, url, version = self.parseRequestLine(request_line)
if method in self.METHODS and version in self.VERSIONS:
headers = self.readHeaders(request_lines)
site_name = url.rsplit(',',1)[-1] if ',' in url else 'default'
headers['x-customer-name'] = site_name
else:
headers = None
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
if set(('res-hdr', 'res-body')).intersection(dict(offsets)):
headers = None
return self.request_factory.create(method, url, version, headers, icap_string, http_string, offsets, length, complete) if headers else None
def getOffsets (self, headers):
encapsulated_line = headers.get('encapsulated', '')
parts = (p.strip() for p in encapsulated_line.split(',') if '=' in p)
pairs = (p.split('=',1) for p in parts)
offsets = ((k,int(v)) for (k,v) in pairs if v.isdigit())
return sorted(offsets, lambda (_,a), (__,b): 1 if a >= b else -1)
def getBodyLength (self, offsets):
final, offset = offsets[-1] if offsets else ('null-body', 0)
return offset, final == 'null-body'
def splitResponseParts (self, offsets, body_string):
final, offset = offsets[-1] if offsets else (None, None)
if final != 'null-body':
offsets = offsets + [('null-body', len(body_string))]
names = [name for name,offset in offsets]
positions = [offset for name,offset in offsets]
blocks = ((positions[i], positions[i+1]) for i in xrange(len(positions)-1))
strings = (body_string[start:end] for start,end in blocks)
return dict(zip(names, strings))
def parseResponseHeader (self, header_string):
response_lines = (p for ss in header_string.split('\r\n') for p in ss.split('\n'))
try:
response_line = response_lines.next()
except StopIteration:
response_line = None
version, code, status = self.parseResponseLine(response_line)
if version in self.VERSIONS:
headers = self.readHeaders(response_lines)
headers['server'] = 'EXA Proxy 1.0'
else:
headers = {}
offsets = self.getOffsets(headers) if headers is not None else []
length, complete = self.getBodyLength(offsets)
return self.header_factory.create(version, code, status, headers, header_string, offsets, length, complete)
def continueResponse (self, response_header, body_string):
version, code, status = response_header.info
headers = response_header.headers
header_string = response_header.header_string
# split the body string into components
parts = self.splitResponseParts(response_header.offsets, body_string)
response_string = parts.get('res-hdr', '')
request_string = parts.get('req-hdr', '')
if request_string.startswith('CONNECT'):
intercept_string, new_request_string = self.splitResponse(request_string)
if headers.get('x-intercept', '') != 'active' and not new_request_string:
intercept_string = None
else:
request_string = new_request_string
else:
intercept_string = None
body_string = parts.get('res-body', None) if response_string else parts.get('req-body', None)
return self.response_factory.create(version, code, status, headers, header_string, request_string, response_string, body_string, intercept_string)
def splitResponse (self, response_string):
for delimiter in ('\n\n', '\r\n\r\n'):
if delimiter in response_string:
header_string, subheader_string = response_string.split(delimiter, 1)
break
else:
header_string, subheader_string = response_string, ''
return header_string, subheader_string
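# A minimal usage sketch (the `configuration` object is hypothetical; the
# real one comes from exaproxy's configuration machinery):
#
#   parser = ICAPParser(configuration)
#   method, url, version = parser.parseRequestLine('REQMOD icap://server/reqmod ICAP/1.0')
#   # -> ('REQMOD', 'icap://server/reqmod', 'ICAP/1.0')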
|
applications/camera_calibration/scripts/derive_jacobians.py | lingbo-yu/camera_calibration | 474 | 12786043 | import math
import sys
import time
from sympy import *
from sympy.solvers.solveset import nonlinsolve
from optimizer_builder import *
# ### Math functions ###
# Simple model for the fractional-part function used for bilinear interpolation
# which leaves the function un-evaluated. Ignores the discontinuities when
# computing the derivative. They do not matter.
class frac(Function):
# Returns the first derivative of the function.
# A simple model for the function within the range between two discontinuities is:
# f(x) = x - c, with a constant c. So f'(x) = 1.
def fdiff(self, argindex=1):
if argindex == 1:
return S.One
else:
raise ArgumentIndexError(self, argindex)
def UnitQuaternionRotatePoint(q, pt):
t2 = q[0] * q[1]
t3 = q[0] * q[2]
t4 = q[0] * q[3]
t5 = -q[1] * q[1]
t6 = q[1] * q[2]
t7 = q[1] * q[3]
t8 = -q[2] * q[2]
t9 = q[2] * q[3]
t1 = -q[3] * q[3]
return Matrix([[2 * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]],
[2 * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]],
[2 * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]]])
# Transformation is a 7-vector [quaternion, translation].
def TransformPoint(transformation, point):
point_out = UnitQuaternionRotatePoint(transformation, point)
point_out[0] += transformation[4];
point_out[1] += transformation[5];
point_out[2] += transformation[6];
return point_out
# Both transformations are 7-vectors [quaternion, translation].
def RigTransformPoint(camera_tr_rig, rig_tr_global, global_point):
point_rig = UnitQuaternionRotatePoint(rig_tr_global, global_point)
point_rig[0] += rig_tr_global[4];
point_rig[1] += rig_tr_global[5];
point_rig[2] += rig_tr_global[6];
point_out = UnitQuaternionRotatePoint(camera_tr_rig, point_rig)
point_out[0] += camera_tr_rig[4];
point_out[1] += camera_tr_rig[5];
point_out[2] += camera_tr_rig[6];
return point_out
# 3-Vector dot product:
def DotProduct3(vector1, vector2):
return vector1[0] * vector2[0] + vector1[1] * vector2[1] + vector1[2] * vector2[2]
def CubicHermiteSpline(p0, p1, p2, p3, x):
a = (0.5) * (-p0 + (3.0) * p1 - (3.0) * p2 + p3)
b = (0.5) * ((2.0) * p0 - (5.0) * p1 + (4.0) * p2 - p3)
c = (0.5) * (-p0 + p2)
d = p1
return d + x * (c + x * (b + x * a))
def EvalUniformCubicBSpline(a, b, c, d, x):
# x must be in [3, 4[.
# i == 3
x_for_d = x - 3
d_factor = 1./6. * x_for_d * x_for_d * x_for_d
# i == 2
c_factor = -1./2.*x*x*x + 5*x*x - 16*x + 50./3.
# i == 1
b_factor = 1./2.*x*x*x - 11./2.*x*x + (39./2.)*x - 131./6.
# i == 0
a_factor = -1./6. * (x - 4) * (x - 4) * (x - 4)
return a_factor * a + b_factor * b + c_factor * c + d_factor * d
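# Sanity note: the four factors above form a partition of unity on [3, 4[;
# e.g. at x = 3.5 they evaluate to 1/48, 23/48, 23/48 and 1/48, so
# EvalUniformCubicBSpline(1, 1, 1, 1, 3.5) == 1.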
def NoncentralGenericBicubicModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33, #camera_intrinsics
frac_x, frac_y):
f0 = CubicHermiteSpline(l00, l01, l02, l03, frac_x)
f1 = CubicHermiteSpline(l10, l11, l12, l13, frac_x)
f2 = CubicHermiteSpline(l20, l21, l22, l23, frac_x)
f3 = CubicHermiteSpline(l30, l31, l32, l33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
direction = Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
direction = direction.normalized()
return Matrix([[direction[0]],
[direction[1]],
[direction[2]],
[unprojection[3]],
[unprojection[4]],
[unprojection[5]]])
def NoncentralGenericBSplineModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33, #camera_intrinsics
frac_x, frac_y):
f0 = EvalUniformCubicBSpline(l00, l01, l02, l03, frac_x)
f1 = EvalUniformCubicBSpline(l10, l11, l12, l13, frac_x)
f2 = EvalUniformCubicBSpline(l20, l21, l22, l23, frac_x)
f3 = EvalUniformCubicBSpline(l30, l31, l32, l33, frac_x)
unprojection = EvalUniformCubicBSpline(f0, f1, f2, f3, frac_y);
direction = Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
direction = direction.normalized()
return Matrix([[direction[0]],
[direction[1]],
[direction[2]],
[unprojection[3]],
[unprojection[4]],
[unprojection[5]]])
def CentralGenericBicubicModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y):
f0 = CubicHermiteSpline(p00, p01, p02, p03, frac_x)
f1 = CubicHermiteSpline(p10, p11, p12, p13, frac_x)
f2 = CubicHermiteSpline(p20, p21, p22, p23, frac_x)
f3 = CubicHermiteSpline(p30, p31, p32, p33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBicubicModelFittingProblemError(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
# Interpolation data points:
# col
# p00 p01 p02 p03
# row p10 p11 p12 p13
# p20 p21 p22 p23
# p30 p31 p32 p33
f0 = CubicHermiteSpline(p00, p01, p02, p03, frac_x)
f1 = CubicHermiteSpline(p10, p11, p12, p13, frac_x)
f2 = CubicHermiteSpline(p20, p21, p22, p23, frac_x)
f3 = CubicHermiteSpline(p30, p31, p32, p33, frac_x)
unprojection = CubicHermiteSpline(f0, f1, f2, f3, frac_y);
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def CentralGenericBSplineModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y):
a = EvalUniformCubicBSpline(p00, p01, p02, p03, frac_x)
b = EvalUniformCubicBSpline(p10, p11, p12, p13, frac_x)
c = EvalUniformCubicBSpline(p20, p21, p22, p23, frac_x)
d = EvalUniformCubicBSpline(p30, p31, p32, p33, frac_x)
unprojection = EvalUniformCubicBSpline(a, b, c, d, frac_y)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBSplineModelFittingProblemError(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
a = EvalUniformCubicBSpline(p00, p01, p02, p03, frac_x)
b = EvalUniformCubicBSpline(p10, p11, p12, p13, frac_x)
c = EvalUniformCubicBSpline(p20, p21, p22, p23, frac_x)
d = EvalUniformCubicBSpline(p30, p31, p32, p33, frac_x)
unprojection = EvalUniformCubicBSpline(a, b, c, d, frac_y)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def CentralGenericBilinearModelUnprojection(
p00, p01, p10, p11, #camera_intrinsics
frac_x, frac_y):
unprojection = ((1 - frac_x) * (1 - frac_y) * p00 +
( frac_x) * (1 - frac_y) * p01 +
(1 - frac_x) * ( frac_y) * p10 +
( frac_x) * ( frac_y) * p11)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0]],
[unprojection[1]],
[unprojection[2]]])
def CentralGenericBilinearModelFittingProblemError(
p00, p01, p10, p11, #camera_intrinsics
frac_x, frac_y, measurement_x, measurement_y, measurement_z):
unprojection = ((1 - frac_x) * (1 - frac_y) * p00 +
( frac_x) * (1 - frac_y) * p01 +
(1 - frac_x) * ( frac_y) * p10 +
( frac_x) * ( frac_y) * p11)
unprojection = unprojection.normalized()
return Matrix([[unprojection[0] - measurement_x],
[unprojection[1] - measurement_y],
[unprojection[2] - measurement_z]])
def ConvertDirectionToLocalUpdate(base_direction, target_direction, tangent1, tangent2):
factor = 1 / DotProduct3(base_direction, target_direction)
offset = (factor * target_direction) - base_direction
return Matrix([[DotProduct3(tangent1, offset)],
[DotProduct3(tangent2, offset)]])
# For quaternion layout: (w, x, y, z).
def QuaternionMultiplication(z, w):
return Matrix([[z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3]],
[z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2]],
[z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1]],
[z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0]]])
# For quaternion layout: (w, x, y, z).
def QuaternionLocalUpdate(delta, q):
norm_delta = sqrt(delta[0] * delta[0] +
delta[1] * delta[1] +
delta[2] * delta[2])
sin_delta_by_delta = sin(norm_delta) / norm_delta
delta_q = Matrix([[cos(norm_delta)],
[sin_delta_by_delta * delta[0]],
[sin_delta_by_delta * delta[1]],
[sin_delta_by_delta * delta[2]]])
return QuaternionMultiplication(delta_q, q)
def ComputeTangentsForLine_ForSmallAbsX(direction):
other_vector = Matrix([[1], [0], [0]])
t1 = direction.cross(other_vector).normalized()
t2 = direction.cross(t1)
return t1.col_join(t2)
def ComputeTangentsForLine_ForLargeAbsX(direction):
other_vector = Matrix([[0], [1], [0]])
t1 = direction.cross(other_vector).normalized()
t2 = direction.cross(t1)
return t1.col_join(t2)
def DirectionBorderRegularization(outer, inner1, inner2):
proj = inner1.dot(inner2) * inner1;
mirror = proj + (proj - inner2);
return mirror - outer
def CentralThinPrismFisheyeProjection(
px, py, pz,
fx, fy, cx, cy,
k1, k2, k3, k4,
p1, p2, sx1, sy1,
fisheye_case):
nx = px / pz
ny = py / pz
r = sqrt(nx * nx + ny * ny)
if fisheye_case:
theta_by_r = atan(r) / r
fisheye_x = theta_by_r * nx
fisheye_y = theta_by_r * ny
else:
fisheye_x = nx
fisheye_y = ny
x2 = fisheye_x * fisheye_x
xy = fisheye_x * fisheye_y
y2 = fisheye_y * fisheye_y
r2 = x2 + y2
r4 = r2 * r2
r6 = r4 * r2
r8 = r6 * r2
radial = k1 * r2 + k2 * r4 + k3 * r6 + k4 * r8
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2) + sx1 * r2
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2) + sy1 * r2
distorted_x = fisheye_x + radial * fisheye_x + dx
distorted_y = fisheye_y + radial * fisheye_y + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
def CentralOpenCVProjection(
px, py, pz,
fx, fy, cx, cy,
k1, k2, k3, k4,
k5, k6, p1, p2):
nx = px / pz
ny = py / pz
x2 = nx * nx
xy = nx * ny
y2 = ny * ny
r2 = x2 + y2
r4 = r2 * r2
r6 = r4 * r2
radial = (1 + k1 * r2 + k2 * r4 + k3 * r6) / (1 + k4 * r2 + k5 * r4 + k6 * r6)
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2)
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2)
distorted_x = nx * radial + dx
distorted_y = ny * radial + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
def CentralRadialProjection(
spline_resolution, spline_param0, spline_param1, spline_param2, spline_param3,
fx, fy, cx, cy, p1, p2, sx1, sy1,
lx, ly, lz):
local_point = Matrix([[lx],
[ly],
[lz]])
# Radial part
original_angle = acos(local_point.normalized()[2]);
pos_in_spline = 1. + (spline_resolution - 3.) / (math.pi / 2) * original_angle;
# chunk = std::max(1, std::min(spline_resolution() - 3, static_cast<int>(pos_in_spline)));
fraction = frac(pos_in_spline) # - chunk;
radial_factor = EvalUniformCubicBSpline(
spline_param0,
spline_param1,
spline_param2,
spline_param3,
fraction + 3.);
# Parametric part
nx = lx / lz
ny = ly / lz
x2 = nx * nx
xy = nx * ny
y2 = ny * ny
r2 = x2 + y2
dx = 2 * p1 * xy + p2 * (r2 + 2 * x2) + sx1 * r2
dy = 2 * p2 * xy + p1 * (r2 + 2 * y2) + sy1 * r2
distorted_x = nx + radial_factor * nx + dx
distorted_y = ny + radial_factor * ny + dy
return Matrix([[fx * distorted_x + cx],
[fy * distorted_y + cy]])
if __name__ == '__main__':
p00 = Matrix(3, 1, lambda i,j:Symbol('p00_%d' % (i), real=True))
p01 = Matrix(3, 1, lambda i,j:Symbol('p01_%d' % (i), real=True))
p02 = Matrix(3, 1, lambda i,j:Symbol('p02_%d' % (i), real=True))
p03 = Matrix(3, 1, lambda i,j:Symbol('p03_%d' % (i), real=True))
p10 = Matrix(3, 1, lambda i,j:Symbol('p10_%d' % (i), real=True))
p11 = Matrix(3, 1, lambda i,j:Symbol('p11_%d' % (i), real=True))
p12 = Matrix(3, 1, lambda i,j:Symbol('p12_%d' % (i), real=True))
p13 = Matrix(3, 1, lambda i,j:Symbol('p13_%d' % (i), real=True))
p20 = Matrix(3, 1, lambda i,j:Symbol('p20_%d' % (i), real=True))
p21 = Matrix(3, 1, lambda i,j:Symbol('p21_%d' % (i), real=True))
p22 = Matrix(3, 1, lambda i,j:Symbol('p22_%d' % (i), real=True))
p23 = Matrix(3, 1, lambda i,j:Symbol('p23_%d' % (i), real=True))
p30 = Matrix(3, 1, lambda i,j:Symbol('p30_%d' % (i), real=True))
p31 = Matrix(3, 1, lambda i,j:Symbol('p31_%d' % (i), real=True))
p32 = Matrix(3, 1, lambda i,j:Symbol('p32_%d' % (i), real=True))
p33 = Matrix(3, 1, lambda i,j:Symbol('p33_%d' % (i), real=True))
l00 = Matrix(6, 1, lambda i,j:Symbol('l00_%d' % (i), real=True))
l01 = Matrix(6, 1, lambda i,j:Symbol('l01_%d' % (i), real=True))
l02 = Matrix(6, 1, lambda i,j:Symbol('l02_%d' % (i), real=True))
l03 = Matrix(6, 1, lambda i,j:Symbol('l03_%d' % (i), real=True))
l10 = Matrix(6, 1, lambda i,j:Symbol('l10_%d' % (i), real=True))
l11 = Matrix(6, 1, lambda i,j:Symbol('l11_%d' % (i), real=True))
l12 = Matrix(6, 1, lambda i,j:Symbol('l12_%d' % (i), real=True))
l13 = Matrix(6, 1, lambda i,j:Symbol('l13_%d' % (i), real=True))
l20 = Matrix(6, 1, lambda i,j:Symbol('l20_%d' % (i), real=True))
l21 = Matrix(6, 1, lambda i,j:Symbol('l21_%d' % (i), real=True))
l22 = Matrix(6, 1, lambda i,j:Symbol('l22_%d' % (i), real=True))
l23 = Matrix(6, 1, lambda i,j:Symbol('l23_%d' % (i), real=True))
l30 = Matrix(6, 1, lambda i,j:Symbol('l30_%d' % (i), real=True))
l31 = Matrix(6, 1, lambda i,j:Symbol('l31_%d' % (i), real=True))
l32 = Matrix(6, 1, lambda i,j:Symbol('l32_%d' % (i), real=True))
l33 = Matrix(6, 1, lambda i,j:Symbol('l33_%d' % (i), real=True))
frac_x = Symbol("frac_x", real=True)
frac_y = Symbol("frac_y", real=True)
measurement_x = Symbol("measurement_x", real=True)
measurement_y = Symbol("measurement_y", real=True)
measurement_z = Symbol("measurement_z", real=True)
# For pose and geometry optimization:
# Local point Jacobian wrt. image_tr_global, pattern_point
image_tr_global = Matrix(7, 1, lambda i,j:Symbol('itg_%d' % (i), real=True))
pattern_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = image_tr_global.col_join(pattern_point)
functions = [lambda variables : TransformPoint(variables.extract([0, 1, 2, 3, 4, 5, 6], [0]), variables.extract([7, 8, 9], [0]))]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=True, simplify_residual=False)
# For rig pose and geometry optimization:
# Local point Jacobian wrt. camera_tr_rig, rig_tr_global, pattern_point
camera_tr_rig = Matrix(7, 1, lambda i,j:Symbol('ctr_%d' % (i), real=True))
rig_tr_global = Matrix(7, 1, lambda i,j:Symbol('rtg_%d' % (i), real=True))
pattern_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = rig_tr_global.col_join(camera_tr_rig).col_join(pattern_point)
functions = [lambda variables : RigTransformPoint(
variables.extract([7, 8, 9, 10, 11, 12, 13], [0]),
variables.extract([0, 1, 2, 3, 4, 5, 6], [0]),
variables.extract([14, 15, 16], [0]))]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=True, simplify_residual=False)
# Tangents Jacobian wrt. direction:
direction = Matrix(3, 1, lambda i,j:Symbol('dir_%d' % (i), real=True))
OptimizerBuilder([lambda variables : ComputeTangentsForLine_ForSmallAbsX(variables)],
direction,
direction,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
OptimizerBuilder([lambda variables : ComputeTangentsForLine_ForLargeAbsX(variables)],
direction,
direction,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Jacobian for CentralGenericBilinear unprojection wrt. pixel x, y
# (CentralGenericBilinear_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBilinearModelUnprojection(
p00, p01, p10, p11,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBilinearDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBilinearVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p10.col_join(
p11)))
functions = [lambda variables : CentralGenericBilinearModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBSplineDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBSplineVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p02.col_join(
p03.col_join(
p10.col_join(
p11.col_join(
p12.col_join(
p13.col_join(
p20.col_join(
p21.col_join(
p22.col_join(
p23.col_join(
p30.col_join(
p31.col_join(
p32.col_join(
p33)))))))))))))))
functions = [lambda variables : CentralGenericBSplineModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
variables.extract([12, 13, 14], [0]),
variables.extract([15, 16, 17], [0]),
variables.extract([18, 19, 20], [0]),
variables.extract([21, 22, 23], [0]),
variables.extract([24, 25, 26], [0]),
variables.extract([27, 28, 29], [0]),
variables.extract([30, 31, 32], [0]),
variables.extract([33, 34, 35], [0]),
variables.extract([36, 37, 38], [0]),
variables.extract([39, 40, 41], [0]),
variables.extract([42, 43, 44], [0]),
variables.extract([45, 46, 47], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralGenericBSpline unprojection wrt. pixel x, y
# (CentralGenericBSpline_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBSplineModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for direction grid border regularization:
outer = Matrix(3, 1, lambda i,j:Symbol('o_%d' % (i), real=True))
inner1 = Matrix(3, 1, lambda i,j:Symbol('i1_%d' % (i), real=True))
inner2 = Matrix(3, 1, lambda i,j:Symbol('i2_%d' % (i), real=True))
parameters = outer.col_join(inner1.col_join(inner2))
OptimizerBuilder([lambda variables : DirectionBorderRegularization(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]))],
parameters,
parameters,
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Derive Jacobian of local update to quaternions (as in ceres)
# TODO: This only works if replacing subs() by limit() in optimizer_builder's
# ComputeValueAndJacobian(). However, it seems that this gave wrong results in other cases ...
q = Matrix(4, 1, lambda i,j:Symbol('q_%d' % (i), real=True))
delta_q = Matrix(3, 1, lambda i,j:Symbol('dq_%d' % (i), real=True))
OptimizerBuilder([lambda variables : QuaternionLocalUpdate(variables, q)],
delta_q,
Matrix([[0], [0], [0]]),
simplify_function_jacobian=[True],
simplify_jacobian=True, simplify_residual=True)
# Derivation of LocalUpdateJacobianWrtDirection():
target_direction = Matrix(3, 1, lambda i,j:Symbol('t_%d' % (i), real=True))
base_direction = Matrix(3, 1, lambda i,j:Symbol('d_%d' % (i), real=True))
tangent1 = Matrix(3, 1, lambda i,j:Symbol('t1_%d' % (i), real=True))
tangent2 = Matrix(3, 1, lambda i,j:Symbol('t2_%d' % (i), real=True))
parameters = target_direction
parameter_values = base_direction # Taking Jacobian at base_direction
functions = [lambda target_dir : ConvertDirectionToLocalUpdate(base_direction, target_dir, tangent1, tangent2)]
OptimizerBuilder(functions,
parameters, parameter_values,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for NoncentralGenericBicubic unprojection wrt. pixel x, y
# (NoncentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : NoncentralGenericBicubicModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralGenericBicubic unprojection wrt. pixel x, y
# (CentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : CentralGenericBicubicModelUnprojection(
p00, p01, p02, p03, p10, p11, p12, p13, p20, p21, p22, p23, p30, p31, p32, p33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# CentralGenericBicubicDirectionCostFunction_ComputeResidualAndJacobian():
# Residual: grid.InterpolateBicubicVector(model->PixelCornerConvToGridPoint(x + 0.5f, y + 0.5f)) - measurement
# Variables are p00 .. p33
parameters = p00.col_join(
p01.col_join(
p02.col_join(
p03.col_join(
p10.col_join(
p11.col_join(
p12.col_join(
p13.col_join(
p20.col_join(
p21.col_join(
p22.col_join(
p23.col_join(
p30.col_join(
p31.col_join(
p32.col_join(
p33)))))))))))))))
functions = [lambda variables : CentralGenericBicubicModelFittingProblemError(
variables.extract([0, 1, 2], [0]),
variables.extract([3, 4, 5], [0]),
variables.extract([6, 7, 8], [0]),
variables.extract([9, 10, 11], [0]),
variables.extract([12, 13, 14], [0]),
variables.extract([15, 16, 17], [0]),
variables.extract([18, 19, 20], [0]),
variables.extract([21, 22, 23], [0]),
variables.extract([24, 25, 26], [0]),
variables.extract([27, 28, 29], [0]),
variables.extract([30, 31, 32], [0]),
variables.extract([33, 34, 35], [0]),
variables.extract([36, 37, 38], [0]),
variables.extract([39, 40, 41], [0]),
variables.extract([42, 43, 44], [0]),
variables.extract([45, 46, 47], [0]),
frac_x, frac_y, measurement_x, measurement_y, measurement_z)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for NoncentralGenericBSpline unprojection wrt. pixel x, y
# (NoncentralGenericBicubic_UnprojectFromPixelCornerConv_ComputeResidualAndJacobian()):
parameters = Matrix([[frac_x],
[frac_y]])
functions = [lambda variables : NoncentralGenericBSplineModelUnprojection(
l00, l01, l02, l03, l10, l11, l12, l13, l20, l21, l22, l23, l30, l31, l32, l33,
variables[0], variables[1])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralThinPrismFisheyeModel::ProjectPointToPixelCornerConv() wrt. the 12 camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
k1 = Symbol("k1", real=True)
k2 = Symbol("k2", real=True)
k3 = Symbol("k3", real=True)
k4 = Symbol("k4", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[k1],
[k2],
[k3],
[k4],
[p1],
[p2],
[sx1],
[sy1]])
print('Fisheye case:')
functions = [lambda variables : CentralThinPrismFisheyeProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11], True)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
print('Non-fisheye case:')
functions = [lambda variables : CentralThinPrismFisheyeProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11], False)]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian for CentralOpenCVModel::ProjectPointToPixelCornerConv() wrt. the 12 camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
k1 = Symbol("k1", real=True)
k2 = Symbol("k2", real=True)
k3 = Symbol("k3", real=True)
k4 = Symbol("k4", real=True)
k5 = Symbol("k5", real=True)
k6 = Symbol("k6", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[k1],
[k2],
[k3],
[k4],
[k5],
[k6],
[p1],
[p2]])
functions = [lambda variables : CentralOpenCVProjection(
local_point[0], local_point[1], local_point[2],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
variables[8], variables[9], variables[10], variables[11])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian of CentralRadialModel::Project() wrt. the local point.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
spline_resolution = Symbol("spline_resolution", real=True)
spline_param0 = Symbol("spline_param0", real=True)
spline_param1 = Symbol("spline_param1", real=True)
spline_param2 = Symbol("spline_param2", real=True)
spline_param3 = Symbol("spline_param3", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[local_point[0]],
[local_point[1]],
[local_point[2]]])
functions = [lambda variables : CentralRadialProjection(
spline_resolution, spline_param0, spline_param1, spline_param2, spline_param3,
fx, fy, cx, cy, p1, p2, sx1, sy1,
variables[0], variables[1], variables[2])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
# Jacobian of CentralRadialModel::Project() wrt. the camera model parameters.
fx = Symbol("fx", real=True)
fy = Symbol("fy", real=True)
cx = Symbol("cx", real=True)
cy = Symbol("cy", real=True)
p1 = Symbol("p1", real=True)
p2 = Symbol("p2", real=True)
sx1 = Symbol("sx1", real=True)
sy1 = Symbol("sy1", real=True)
spline_resolution = Symbol("spline_resolution", real=True)
spline_param0 = Symbol("spline_param0", real=True)
spline_param1 = Symbol("spline_param1", real=True)
spline_param2 = Symbol("spline_param2", real=True)
spline_param3 = Symbol("spline_param3", real=True)
local_point = Matrix(3, 1, lambda i,j:Symbol('p_%d' % (i), real=True))
parameters = Matrix([[fx],
[fy],
[cx],
[cy],
[p1],
[p2],
[sx1],
[sy1],
[spline_param0],
[spline_param1],
[spline_param2],
[spline_param3]])
functions = [lambda variables : CentralRadialProjection(
spline_resolution, variables[8], variables[9], variables[10], variables[11],
variables[0], variables[1], variables[2], variables[3],
variables[4], variables[5], variables[6], variables[7],
local_point[0], local_point[1], local_point[2])]
OptimizerBuilder(functions,
parameters, parameters,
simplify_function_jacobian=[False],
simplify_jacobian=False, simplify_residual=False)
|
benchmarks/pydy_pendulum.py | Midnighter/symengine.py | 133 | 12786053 |
import os
import time
import sys
sys.path = ["../sympy", "../pydy", "../symengine.py"] + sys.path
import sympy
import symengine
import pydy
from sympy.physics.mechanics.models import n_link_pendulum_on_cart
print(sympy.__file__)
print(symengine.__file__)
print(pydy.__file__)
if (len(sys.argv) > 1):
n = int(sys.argv[1])
else:
n = 4
start = time.time()
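# note: the next line rebinds `sys`, shadowing the sys module imported
# above (harmless here because sys.argv was already read)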
sys = n_link_pendulum_on_cart(n, cart_force=False)
end = time.time()
print("%s s" % (end-start))
#print(sys.eom_method.mass_matrix)
|
fssim_rqt_plugins/rqt_fssim_track_editor/src/rqt_fssim_track_editor/cone_editor.py | AhmedOsamaAgha/fssim | 200 | 12786063 | # AMZ-Driverless
# Copyright (c) 2018 Authors:
# - <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import scipy.io as sio
from qt_gui.plugin import Plugin
from PyQt5.QtWidgets import QWidget, QGraphicsScene, QGraphicsView, QGraphicsLineItem
from PyQt5 import QtGui, QtCore
from PyQt5.QtGui import QColor, QPen, QBrush
from PyQt5.QtCore import *
from track import *
from snapshot_handler import *
class TrackViewScene(QGraphicsScene):
cur_scale = 1.0
_px_per_m = 10
enable_editing = False
def __init__(self, context, cone_view):
super(TrackViewScene, self).__init__()
self._context = context
self.cone_view = cone_view # type: QGraphicsView
self._map_height = cone_view.frameGeometry().height()
self._map_width = cone_view.frameGeometry().width()
self._landmarks = []
self._car_diameter = 5.0
self._cone_diameter = self.m_to_px(0.5)
self.cone_view.setDragMode(1)
self._mode = Mode.DRAW
self._grid_alpha = 255
self._grid_m = 5
self._draw_grid(self._grid_alpha, self._grid_m)
self.mousePressEvent = self.on_mouse_down
# self.mouseMoveEvent = self.on_mouse_move
self.track = Track()
self.snapshots = SnapshotHandler()
self.model_path = ""
self.tracks_path = ""
self.draw_rect([0, 0], 0.5, 0.5)
def set_cone_diameter(self, size):
self._cone_diameter = self.m_to_px(size)
def interpolate(self, circular = True):
self.track.interpolate(circular)
self.update_all()
def generate_skipdpad(self, widget):
self.track.generate_skidpad(widget)
self.update_all()
def generate_acceleration(self, widget):
self.track.generate_acceleration(widget)
self.update_all()
def draw_entire_track(self):
self.draw_track(self.track.middle)
self.draw_cones(self.track.cones_right, self.track.get_color(Type.RIGHT))
self.draw_cones(self.track.cones_orange, self.track.get_color(Type.ORANGE))
self.draw_cones(self.track.cones_left, self.track.get_color(Type.LEFT))
self.draw_big_cones(self.track.cones_orange_big, self.track.get_color(Type.ORANGE))
self.draw_tk_device(self.track.tk_device)
self.draw_cones(self.track.cones, self.track.get_color(Type.UNKNOWN))
self.draw_lines(self.track.control_points)
self.draw_axes(self.track.starting_pose_front_wing)
def draw_snapshot(self):
self.clear()
self.update_grid(self._grid_alpha, self._grid_m)
self.draw_cones(self.snapshots.cones)
def draw_snapshot_i(self, i):
self.snapshots.load_snap_from_list(i)
self.draw_snapshot()
def change_enabled(self, enabled):
if enabled:
self.enable_editing = True
self.cone_view.setDragMode(0)
else:
self.enable_editing = False
self.cone_view.setDragMode(1)
def update_grid(self, alpha=20, grid_size=5, draw_track=True):
self._map_height = self.cone_view.frameGeometry().height()
self._map_width = self.cone_view.frameGeometry().width()
self.clear()
self._draw_grid(alpha, grid_size)
if draw_track:
self.draw_entire_track()
def update_all(self):
self.clear()
self.update_grid(self._grid_alpha, self._grid_m)
self.draw_entire_track()
def show_event(self, event):
self.cone_view.fitInView(self.sceneRect(), Qt.KeepAspectRatio)
def change_view(self, i):
if i == 2:
self.draw_snapshot()
elif i == 0:
self.update_all()
def add_land_mark(self, x, y):
pen = QPen(QColor(100, 200, 0), 0.5, Qt.SolidLine, Qt.RoundCap)
def clearTrack(self):
self.track.clear()
self.update_grid(self._grid_alpha, self._grid_m, False)
#####################################
## GETTERS & SETTERS
#####################################
def set_px_per_m(self, val):
self._px_per_m = val
def set_mode(self, mode):
self._mode = mode
def set_cone_add_side(self, side):
self._side = side
#####################################
## EVENT HANDLERS
#####################################
def wheelEvent(self, event):
if event.delta() > 0:
factor = 1.2
if self.cur_scale < 100:
self.cur_scale = self.cur_scale * factor
else:
factor = 0.8
if self.cur_scale > 0.1:
self.cur_scale = self.cur_scale * factor
if self.cur_scale > 0.1 and self.cur_scale < 10:
self.cone_view.scale(factor, factor)
self.update_grid(self._grid_alpha, self._grid_m)
def on_mousewheel(self, event):
pass
def handle_btn_export(self, name, yaml, mat):
path = self.tracks_path + "/" + name
if yaml:
self.track.export_to_yaml(self.tracks_path, name)
if mat:
self.track.export_to_mat(self.tracks_path, name)
self.export_model(path, name)
def export_model(self, path, name):
root = etree.Element("model")
etree.SubElement(root, "name").text = "track"
etree.SubElement(root, "version").text = "1.0"
etree.SubElement(root, "sdf", version="1.4").text = name + ".sdf"
etree.SubElement(root, "description").text = "random track"
tree = etree.ElementTree(root)
tree.write(self.model_path + "/track/model.config", pretty_print=True, xml_declaration=True, encoding='UTF-8')
root = etree.Element("sdf", version="1.4")
model = etree.SubElement(root, "model", name="some track")
for i in range(0, self.track.get_size(Type.RIGHT)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_blue"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.RIGHT, i)
etree.SubElement(include, "name").text = "cone_right"
for i in range(0, self.track.get_size(Type.LEFT)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_yellow"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.LEFT, i)
etree.SubElement(include, "name").text = "cone_left"
for i in range(0, self.track.get_size(Type.ORANGE)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_orange"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.ORANGE, i)
etree.SubElement(include, "name").text = "cone_orange"
for i in range(0, self.track.get_size(Type.ORANGE_BIG)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/cone_orange_big"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.ORANGE_BIG, i)
etree.SubElement(include, "name").text = "cone_orange_big"
for i in range(0, self.track.get_size(Type.TK_DEVICE)):
include = etree.SubElement(model, "include")
etree.SubElement(include, "uri").text = "model://fssim_gazebo/models/time_keeping"
etree.SubElement(include, "pose").text = self.track.get_cone_pos(Type.TK_DEVICE, i)
etree.SubElement(include, "name").text = "tk_device_" + str(i)
tree = etree.ElementTree(root)
gazebo_models = self.model_path + "/track/" + name
tree.write(gazebo_models + ".sdf", pretty_print=True, xml_declaration=True, encoding='UTF-8')
self.track.export_to_yaml(self.model_path + "/track/tracks_yaml", name,create_dir=False)
print "[INFO] Saving track to: ",gazebo_models + ".sdf"
def handle_btn_import(self, path,outside,inside,center):
if path.endswith('.bag'):
self.track.load_track_from_bag(path,outside,inside,center)
self.update_all()
else:
print "[ERROR] Wrong file extension. Only ROSBAG supported"
def on_mouse_up(self, event):
pass
def on_mouse_move(self, event):
print event
def on_mouse_down(self, event):
if not self.enable_editing:
return
scene_point = event.scenePos()
point = np.array([(scene_point.x()), (scene_point.y())])
point = self.get_m_from_px(point)
if self._mode == Mode.DRAW:
if self.track.add_point_on_middle_line(point):
point_from = (self.track.get_control_point(-2))
point_to = (self.track.get_control_point(-1))
self.draw_line(point_from, point_to)
self.draw_rect(point_to, 0.5, 0.5)
elif self._mode == Mode.EDIT and self.track.computed_cones:
if self._side == Type.RIGHT:
self.track.cones_right = np.vstack([self.track.cones_right, point])
elif self._side == Type.LEFT:
self.track.cones_left = np.vstack([self.track.cones_left, point])
self.update_all()
elif self._mode == Mode.ERASE:
counter = 0
dist_min = 100.0
index = 0
for p in self.track.cones_left:
dist = np.linalg.norm(p - point)
if dist < dist_min:
dist_min = dist
index = counter
counter = counter + 1
if dist_min < 0.5:
self.track.cones_left = np.delete(self.track.cones_left, index, 0)
counter = 0
dist_min = 100.0
index = 0
for p in self.track.cones_right:
dist = np.linalg.norm(p - point)
if dist < dist_min:
dist_min = dist
index = counter
counter = counter + 1
if dist_min < 0.5:
self.track.cones_right = np.delete(self.track.cones_right, index, 0)
self.update_all()
#####################################
## DRAWING FUNCTIONS
#####################################
def _draw_axes(self):
pos_from = self.get_px_pos_from_m([0, 0])
pos_to = self.get_px_pos_from_m([5, 0])
grid_lines = QPen(QColor(255, 0, 0))
grid_lines.setWidth(5)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_to = self.get_px_pos_from_m([0, 5])
grid_lines = QPen(QColor(0, 0, 255))
grid_lines.setWidth(5)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
def draw_axes(self, pos):
heading = pos[2]
x = pos[0]
y = pos[1]
length = 2.5
xy = [x,y]
xy_to = [x + length * np.cos(heading), y + length * np.sin(heading)]
pos_from = self.get_px_pos_from_m(xy)
pos_to = self.get_px_pos_from_m(xy_to)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], QPen(QColor(255, 0, 0)))
heading = heading + np.pi / 2.0
xy_to = [x + length * np.cos(heading), y + length * np.sin(heading)]
pos_from = self.get_px_pos_from_m(xy)
pos_to = self.get_px_pos_from_m(xy_to)
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], QPen(QColor(0, 0, 255)))
def _draw_grid(self, alpha, grid_size):
self._grid_alpha = alpha
self._grid_m = grid_size
self._draw_axes()
max_x = 200
max_y = 200
grid_lines = QPen(QColor(105, 105, 105, alpha))
for x in range(0, max_x, grid_size):
pos_from = self.get_px_pos_from_m([x, -max_y])
pos_to = self.get_px_pos_from_m([x, max_y])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([max_x, x])
pos_to = self.get_px_pos_from_m([-max_x, x])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([-x, -max_y])
pos_to = self.get_px_pos_from_m([-x, max_y])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
pos_from = self.get_px_pos_from_m([max_x, -x])
pos_to = self.get_px_pos_from_m([-max_x, -x])
self.addLine(pos_from[0], pos_from[1], pos_to[0], pos_to[1], grid_lines)
def _draw_cone(self, x, y, diameter=10, color=QColor(100, 200, 0)):
point = self.get_px_pos_from_m([x, y])
cone_pen = QPen(color, 2, Qt.SolidLine, Qt.RoundCap)
cone_ellipse = self.addEllipse(point[0] - diameter / 2,
point[1] - diameter / 2,
diameter,
diameter,
cone_pen)
def draw_line(self, start, end, color=QColor(0, 0, 100)):
cone_pen = QPen(color, 2, Qt.DashLine, Qt.RoundCap)
start = self.get_px_pos_from_m(start)
end = self.get_px_pos_from_m(end)
self.addLine(start[0], start[1], end[0], end[1], cone_pen)
def draw_rect(self, pos, width, height, color=QColor(0, 0, 100)):
cone_pen = QPen(color, 2, Qt.SolidLine, Qt.RoundCap)
width = self.m_to_px(width)
height = self.m_to_px(height)
start = self.get_px_pos_from_m(pos)
start[0] = start[0] - width / 2.0
start[1] = start[1] - height / 2.0
self.addRect(start[0], start[1], width, height, cone_pen)
def draw_track(self, track, color=QColor(100, 200, 0)):
for i, row in enumerate(track):
if i != 0:
self.draw_line(track[i - 1], track[i], color=color)
def draw_cones(self, track, color=QColor(100, 200, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter, color=color)
def draw_big_cones(self, track, color=QColor(100, 200, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter * 2.0, color=color)
def draw_tk_device(self, track, color=QColor(255, 0, 0)):
for x, y in track:
self._draw_cone(x, y, diameter=self._cone_diameter * 2.0, color=color)
def draw_lines(self, lines, color=QColor(0, 0, 100)):
size = len(lines)
if size < 3:
return
for i in range(1, size):
last = lines[i - 1, :]
pos = lines[i, :]
self.draw_line(last, pos, color)
self.draw_rect(pos, 0.5, 0.5)
#####################################
## CONVERTERS
#####################################
def m_to_px(self, x):
return x * self._px_per_m
    def px_pair_to_m(self, px, py):
        return [self.px_to_m(px), self.px_to_m(py)]
    def px_to_m(self, px):
        return px / self._px_per_m
def get_px_pos_from_m(self, p):
p_augmented = np.array([p[0], -p[1], 1])
p_res = np.dot(self.get_transform_px_to_m(), p_augmented)
return np.array([p_res[0, 0], p_res[0, 1]])
def get_m_from_px(self, p):
p_augmented = np.array([p[0], p[1], 1])
p_res = np.dot(np.linalg.inv(self.get_transform_px_to_m()), p_augmented)
return np.array([p_res[0, 0], -p_res[0, 1]])
def get_transform_px_to_m(self):
# Inv = np.matrix([[1, 0], [0, -1]])
angle = 3.0 / 2.0 * np.pi
c = np.cos(angle)
s = np.sin(angle)
Rot = np.matrix([[c, -s], [s, c]])
Multip = np.matrix([[self._px_per_m, 0], [0, self._px_per_m]])
InvRot = Multip * Rot
trans = [self._map_width / 2.0, self._map_height / 2.0]
T = np.matrix([[InvRot[0, 0], InvRot[0, 1], trans[0]],
[InvRot[1, 0], InvRot[1, 1], trans[1]],
[0, 0, 1]])
return T
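# A minimal round-trip sketch (hypothetical values; a live Qt scene is
# required for the rest of the class, but these converters are pure math):
#
#   p_px = scene.get_px_pos_from_m([1.0, 2.0])
#   p_m = scene.get_m_from_px(p_px)   # ~= [1.0, 2.0] again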
|
concepts/numberRangeDouble.py | sixtysecondrevit/dynamoPython | 114 | 12786070 |
"""
PYTHON RANGE: DOUBLE APPROACH
"""
__author__ = '<NAME> - <EMAIL>'
__twitter__ = '@solamour'
__version__ = '1.0.0'
# DEFINITION:
# Custom definition to build a function similar to our DesignScript
# float range
def floatRange( start, end, step ):
for number in xrange( end ):
yield start
start += step
# SYNTAX: floatRange( [start], stop[, step ] )
# Start = Starting number of the sequence [Open]
# Stop = Generate numbers up to, but not including this number [Closed]
# Step = Difference between each number in the sequence. In order to pair
# with our DesignScript variant, we need to run this as: ( 1.0 / end)
# NOTES:
# If we wish to use floating values (Doubles) we have to specify, hence
# in our step value we use 1.0 instead of 1. IF we used 1 (An integer)
# we would simply return a list of zeroes
# The input ports
start = IN[0] # A number such as 0 (int)
stop = IN[1] # A number such as 10 (int)
# A divisor calculation that changes our ints to floats
step = ( 1.0 / stop )
# The output port - In this case a list comprehension
OUT = [ value for value in floatRange( start, stop + 1, step ) ]
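# Worked example (assuming the sample inputs noted above): with start = 0 and
# stop = 10, step becomes 0.1 and OUT is [0.0, 0.1, ..., 1.0] - eleven evenly
# spaced doubles, roughly the DesignScript range 0..1..#11.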
|
vilya/models/elastic/searcher.py | mubashshirjamal/code | 1,582 | 12786115 | <filename>vilya/models/elastic/searcher.py
# -*- coding: utf-8 -*-
from vilya.libs.search import code_client
class SearchEngine(object):
c = code_client
if not c.head():
c.put('')
@classmethod
def check_result(cls, result):
if result and not result.get('error'):
return True
return False
@classmethod
def decode(cls, json_raw, parse_names):
dic = json_raw
if not cls.check_result(dic):
return []
decoded = []
for e in dic['hits']['hits']:
d = e['_source']
values = []
for parse_name in parse_names:
values.append(d.get(parse_name))
decoded.append(values)
return decoded
@classmethod
def get_count(cls, result):
if cls.check_result(result):
return result['hits']['total']
return 0
@classmethod
def query_all(cls, index_type, from_=0, size=0):
data = {
'from': from_,
'size': size,
'query': {
'match_all': {}
}
}
result = cls.c.get('%s/_search' % index_type, data=data)
return result
@classmethod
def query_by_field(cls, index_type, field_dict, from_=0, size=0):
data = {
'from': from_,
'size': size,
'query': {
"term": field_dict,
},
}
result = cls.c.get('%s/_search' % index_type, data=data)
return result
@classmethod
def search_a_phrase(cls, index_type, phrase, from_=0, size=20,
filter_data=None, sort_data=None, highlight_data=None,
facets_data=None):
data = {
'from': from_,
'size': size,
"query": {
"query_string": {
"query": phrase
}
},
}
if highlight_data:
data['highlight'] = highlight_data
if filter_data:
filtered_query_data = {
"filtered": {
"query": data['query'],
"filter": filter_data,
}
}
data['query'] = filtered_query_data
if sort_data:
data['sort'] = sort_data
if facets_data:
data['facets'] = facets_data
result = cls.c.get('%s/_search' % index_type, data=data)
return result
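    # Usage sketch (index type, phrase and filter are illustrative values, not
    # taken from this repo's call sites):
    #   SearchEngine.search_a_phrase('repo', 'hello world', size=10,
    #                                filter_data={'term': {'owner': 'alice'}})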
|
supersqlite/third_party/_apsw/tools/docmissing.py | plasticity-admin/supersqlite | 1,520 | 12786148 | <reponame>plasticity-admin/supersqlite<filename>supersqlite/third_party/_apsw/tools/docmissing.py
# python
#
# See the accompanying LICENSE file.
#
# Find things that haven't been documented and should be or have been
# but don't exist.
import glob, sys
import apsw
retval=0
classes={}
for filename in glob.glob("doc/*.rst"):
for line in open(filename, "rtU"):
line=line.strip().split()
if len(line)>=2:
if line[0]==".." and line[1] in ("method::", "automethod::", "attribute::"):
funcname=line[2].split("(")[0].strip()
if "." in funcname:
klass, funcname=funcname.split(".",1)
else:
klass="apsw"
if klass not in classes:
classes[klass]=[]
classes[klass].append(funcname)
# ok, so we know what was documented. Now lets see what exists
con=apsw.Connection(":memory:")
cur=con.cursor()
cur.execute("create table x(y); insert into x values(x'<PASSWORD>');select * from x")
blob=con.blobopen("main", "x", "y", con.last_insert_rowid(), 0)
vfs=apsw.VFS("aname", "")
vfsfile=apsw.VFSFile("", ":memory:", [apsw.SQLITE_OPEN_MAIN_DB|apsw.SQLITE_OPEN_CREATE|apsw.SQLITE_OPEN_READWRITE, 0])
# virtual tables aren't real - just check their size hasn't changed
assert len(classes['VTModule'])==2
del classes['VTModule']
assert len(classes['VTTable'])==13
del classes['VTTable']
assert len(classes['VTCursor'])==6
del classes['VTCursor']
for name, obj in ( ('Connection', con),
('Cursor', cur),
('blob', blob),
('VFS', vfs),
('VFSFile', vfsfile),
('apsw', apsw),
):
if name not in classes:
retval=1
print "class", name,"not found"
continue
for c in classes[name]:
if not hasattr(obj, c):
# it is legit for these to be missing from code (currently because code is broken)
if (name+"."+c) in ("apsw.async_control", "apsw.async_initialize", "apsw.async_run", "apsw.async_shutdown"):
continue
retval=1
print "%s.%s in documentation but not object" % (name, c)
for c in dir(obj):
if c.startswith("__"): continue
if name=="apsw":
# ignore constants and modules
if type(getattr(apsw, c)) in (type(3), type(sys)):
continue
# ignore debugging thingies
if c.startswith("test_") or c in ("faultdict", "_fini"):
continue
# ignore the exceptions
if isinstance(getattr(apsw, c), type) and issubclass(getattr(apsw,c), Exception):
continue
# ignore classes !!!
if c in ("Connection", "VFS", "VFSFile", "zeroblob", "Shell", "URIFilename"):
continue
# ignore mappings !!!
if c.startswith("mapping_"):
continue
if c not in classes[name]:
if "%s.%s" % (name, c) not in ("Cursor.next",):
retval=1
print "%s.%s on object but not in documentation" % (name, c)
sys.exit(retval)
|
exercises/es/test_03_14_03.py | Jette16/spacy-course | 2,085 | 12786194 | <reponame>Jette16/spacy-course
def test():
assert (
"patterns = list(nlp.pipe(people))" in __solution__
), "¿Estás usando nlp.pipe envuelto en una lista?"
__msg__.good(
"¡Buen trabajo! Ahora continuemos con un ejemplo práctico que usa nlp.pipe "
"para procesar documentos con metadatos adicionales."
)
|
py/jpy/ci/appveyor/dump-dlls.py | devinrsmith/deephaven-core | 210 | 12786219 | import psutil, os
p = psutil.Process(os.getpid())
for dll in p.memory_maps():
print(dll.path)
|
challenges/4.C.Absolute_Value/lesson_tests.py | pradeepsaiu/python-coding-challenges | 141 | 12786231 | import unittest
from main import *
class AbsoluteValueTests(unittest.TestCase):
def test_main(self):
self.assertIsInstance(absolute_value, int)
self.assertEqual(absolute_value, 42)
|
randopt/samplers.py | seba-1511/randopt | 115 | 12786260 | <gh_stars>100-1000
#!/usr/bin/env python
import random
import math
from . import RANDOPT_RNG
"""
Here we implement the sampling strategies.
"""
class Sampler(object):
"""
Base class for all samplers.
    Note: This class should not be instantiated directly.
"""
def __init__(self, *args, **kwargs):
self.rng = random.Random()
RANDOPT_RNG.random() # Change initial random state
self.rng.setstate(RANDOPT_RNG.getstate())
def sample(self):
raise NotImplementedError('sample() has not been implemented.')
def seed(self, seed_val):
self.rng.seed(seed_val)
def get_state(self):
return self.rng.getstate()
def set_state(self, state):
self.rng.setstate(state)
class Constant(Sampler):
def __init__(self, value):
super(Constant, self).__init__()
self.value = value
def sample(self):
return self.value
class Choice(Sampler):
"""
Samples a value from a given list according to the provided sampler.
Parameters:
    * items - (list) items to be sampled.
* sampler - (Sampler) Sampler used to select an item based on its index.
Return type: n/a
Example:
    randopt.Choice([0.1, 0.01, 0.001])
"""
def __init__(self, items, sampler=None):
"""sampler is any of the available samplers,
used to sample element's index from the list."""
if sampler is None:
sampler = Uniform()
self.sampler = sampler
self.items = items
self.rng = self.sampler.rng
def sample(self):
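        # Map the underlying sampler's [0, 1) draw to a list index; note that a
        # draw of exactly 1.0 would index one past the end of the list.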
i = self.sampler.sample() * len(self.items)
i = int(math.floor(i))
return self.items[i]
class Truncated(Sampler):
"""
Given a sampler, clips the distribution between low and high.
If None, not truncated.
Parameters:
* sampler - (Sampler) Sampler to be truncated.
* low - (float) minimum value to be sampled. Default: None
* high - (float) maximum value to be sampled. Default: None
Return type: n/a
Example:
sampler = Gaussian(0.0, 0.1)
truncated = Truncated(sampler, -0.1, 0.1)
"""
def __init__(self, sampler=None, low=None, high=None):
if sampler is None:
sampler = Uniform()
self.sampler = sampler
self.min = low
self.max = high
self.rng = self.sampler.rng
def sample(self):
val = self.sampler.sample()
if self.min is not None and val < self.min:
val = self.min
if self.max is not None and val > self.max:
val = self.max
return val
class Uniform(Sampler):
'''
Generates a randomly sampled value from low to high with equal probability.
Parameters:
* low - (float) minimum value.
* high - (float) maximum value.
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.Uniform(low=-1.0, high=1.0, dtype='float')
'''
def __init__(self, low=0.0, high=1.0, dtype='float'):
super(Uniform, self).__init__()
self.low = low
self.high = high
self.dtype = dtype
def sample(self):
res = self.rng.uniform(self.low, self.high)
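        # dtype strings containing 'fl' (e.g. 'float') return the raw draw;
        # any other dtype is truncated toward zero via int().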
if 'fl' in self.dtype:
return res
return int(res)
class Gaussian(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Gaussian distribution.
Parameters:
* mean - (float) mean of Gaussian. Default: 0.0
* std - (float) standard deviation of Gaussian. Default: 1.0
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.Gaussian(mean=0.0, std=1.0, dtype='float')
'''
def __init__(self, mean=0.0, std=1.0, dtype='float'):
super(Gaussian, self).__init__()
self.mean = mean
self.std = std
self.dtype = dtype
def sample(self):
res = self.rng.gauss(self.mean, self.std)
if 'fl' in self.dtype:
return res
return int(res)
class Normal(Gaussian):
pass
class LognormVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Log normal distribution.
Parameters:
* mean - (float) mean of Lognormal. Default: 0.0
* std - (float) standard deviation of Lognormal. Default: 1.0
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.LognormVariate(mean=0.0, std=1.0, dtype='float')
'''
def __init__(self, mean=0.0, std=1.0, dtype='float'):
super(LognormVariate, self).__init__()
self.mean = mean
self.std = std
self.dtype = dtype
def sample(self):
res = self.rng.lognormvariate(self.mean, self.std)
if 'fl' in self.dtype:
return res
return int(res)
class BetaVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Beta distribution.
Parameters:
* alpha - (float) alpha of beta distribution.
* beta - (float) beta of beta distribution.
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.BetaVariate(alpha=1,beta=1,dtype='float')
'''
def __init__(self, alpha, beta, dtype='float'):
super(BetaVariate, self).__init__()
self.alpha = alpha
self.beta = beta
self.dtype = dtype
def sample(self):
res = self.rng.betavariate(self.alpha, self.beta)
if 'fl' in self.dtype:
return res
return int(res)
class ExpoVariate(Sampler):
'''
Generates a randomly sampled value with lambda based on an exponential distribution.
Parameters:
* lam - (float) lambda of exponential distribution (one divided by desired mean).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.ExpoVariate(lam=1, dtype='float')
'''
def __init__(self, lam, dtype='float'):
super(ExpoVariate, self).__init__()
self.lam = lam
self.dtype = dtype
def sample(self):
res = self.rng.expovariate(self.lam)
if 'fl' in self.dtype:
return res
return int(res)
class WeibullVariate(Sampler):
'''
Generates a randomly sampled value with specified mean and std based on a Weibull distribution.
Parameters:
* alpha - (float) alpha of Weibull distribution (scale parameter).
* beta - (float) beta of Weibull distribution (shape parameter).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.WeibullVariate(alpha=1,beta=1,dtype='float')
'''
def __init__(self, alpha, beta, dtype='float'):
super(WeibullVariate, self).__init__()
self.alpha = alpha
self.beta = beta
self.dtype = dtype
def sample(self):
res = self.rng.weibullvariate(self.alpha, self.beta)
if 'fl' in self.dtype:
return res
return int(res)
class ParetoVariate(Sampler):
'''
Generates a randomly sampled value with alpha based on the Pareto distribution.
Parameters:
* alpha - (float) alpha of Pareto distribution (shape parameter).
* dtype - (string) data type. Default: float
Return type: n/a
Example:
randopt.ParetoVariate(alpha=1,dtype='float')
'''
def __init__(self, alpha, dtype='float'):
super(ParetoVariate, self).__init__()
self.alpha = alpha
self.dtype = dtype
def sample(self):
res = self.rng.paretovariate(self.alpha)
if 'fl' in self.dtype:
return res
return int(res)
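# Minimal usage sketch (illustrative only; this module is normally imported as
# part of the randopt package, so it is not runnable standalone):
#   sampler = Truncated(Gaussian(mean=0.0, std=0.1), low=-0.1, high=0.1)
#   sampler.seed(42)
#   values = [sampler.sample() for _ in range(3)]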
|
sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/models/_models.py | rsdoherty/azure-sdk-for-python | 2,728 | 12786272 | <filename>sdk/anomalydetector/azure-ai-anomalydetector/azure/ai/anomalydetector/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class AlignPolicy(msrest.serialization.Model):
"""AlignPolicy.
    :param align_mode: An optional field that indicates how to align different variables to the
     same time range, as required by the model. {Inner, Outer}. Possible values include: "Inner",
"Outer".
:type align_mode: str or ~azure.ai.anomalydetector.models.AlignMode
    :param fill_na_method: An optional field that indicates how missing values will be filled.
     Cannot be set to NotFill when alignMode is Outer. {Previous, Subsequent, Linear, Zero, Fix,
NotFill}. Possible values include: "Previous", "Subsequent", "Linear", "Zero", "Pad",
"NotFill".
:type fill_na_method: str or ~azure.ai.anomalydetector.models.FillNAMethod
    :param padding_value: An optional field; only useful if FillNAMethod is set to Pad.
:type padding_value: int
"""
_attribute_map = {
'align_mode': {'key': 'alignMode', 'type': 'str'},
'fill_na_method': {'key': 'fillNAMethod', 'type': 'str'},
'padding_value': {'key': 'paddingValue', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(AlignPolicy, self).__init__(**kwargs)
self.align_mode = kwargs.get('align_mode', None)
self.fill_na_method = kwargs.get('fill_na_method', None)
self.padding_value = kwargs.get('padding_value', None)
class AnomalyContributor(msrest.serialization.Model):
"""AnomalyContributor.
    :param contribution_score: The higher the contribution score is, the more likely the variable
     is to be the root cause of an anomaly.
:type contribution_score: float
:param variable: Variable name of a contributor.
:type variable: str
"""
_validation = {
'contribution_score': {'maximum': 2, 'minimum': 0},
}
_attribute_map = {
'contribution_score': {'key': 'contributionScore', 'type': 'float'},
'variable': {'key': 'variable', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyContributor, self).__init__(**kwargs)
self.contribution_score = kwargs.get('contribution_score', None)
self.variable = kwargs.get('variable', None)
class AnomalyDetectorError(msrest.serialization.Model):
"""Error information returned by the API.
:param code: The error code. Possible values include: "InvalidCustomInterval", "BadArgument",
"InvalidGranularity", "InvalidPeriod", "InvalidModelArgument", "InvalidSeries",
"InvalidJsonFormat", "RequiredGranularity", "RequiredSeries".
:type code: str or ~azure.ai.anomalydetector.models.AnomalyDetectorErrorCodes
:param message: A message explaining the error reported by the service.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AnomalyDetectorError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class AnomalyState(msrest.serialization.Model):
"""AnomalyState.
All required parameters must be populated in order to send to Azure.
:param timestamp: Required. timestamp.
:type timestamp: ~datetime.datetime
:param value:
:type value: ~azure.ai.anomalydetector.models.AnomalyValue
    :param errors: Error messages from running inference for this timestamp.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
"""
_validation = {
'timestamp': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'AnomalyValue'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
}
def __init__(
self,
**kwargs
):
super(AnomalyState, self).__init__(**kwargs)
self.timestamp = kwargs['timestamp']
self.value = kwargs.get('value', None)
self.errors = kwargs.get('errors', None)
class AnomalyValue(msrest.serialization.Model):
"""AnomalyValue.
All required parameters must be populated in order to send to Azure.
    :param contributors: If the current timestamp is an anomaly, contributors show its potential
     root causes and help explain why the current timestamp was detected as an anomaly.
:type contributors: list[~azure.ai.anomalydetector.models.AnomalyContributor]
:param is_anomaly: Required. To indicate whether current timestamp is anomaly or not.
:type is_anomaly: bool
:param severity: Required. anomaly score of the current timestamp, the more significant an
anomaly is, the higher the score will be.
:type severity: float
:param score: anomaly score of the current timestamp, the more significant an anomaly is, the
higher the score will be, score measures global significance.
:type score: float
"""
_validation = {
'is_anomaly': {'required': True},
'severity': {'required': True, 'maximum': 1, 'minimum': 0},
'score': {'maximum': 2, 'minimum': 0},
}
_attribute_map = {
'contributors': {'key': 'contributors', 'type': '[AnomalyContributor]'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'severity': {'key': 'severity', 'type': 'float'},
'score': {'key': 'score', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(AnomalyValue, self).__init__(**kwargs)
self.contributors = kwargs.get('contributors', None)
self.is_anomaly = kwargs['is_anomaly']
self.severity = kwargs['severity']
self.score = kwargs.get('score', None)
class ChangePointDetectRequest(msrest.serialization.Model):
"""ChangePointDetectRequest.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted by timestamp in
ascending order to match the change point detection result.
:type series: list[~azure.ai.anomalydetector.models.TimeSeriesPoint]
:param granularity: Required. Can only be one of yearly, monthly, weekly, daily, hourly,
minutely or secondly. Granularity is used for verify whether input series is valid. Possible
values include: "yearly", "monthly", "weekly", "daily", "hourly", "minutely", "secondly",
"microsecond", "none".
:type granularity: str or ~azure.ai.anomalydetector.models.TimeGranularity
:param custom_interval: Custom Interval is used to set non-standard time interval, for example,
if the series is 5 minutes, request can be set as {"granularity":"minutely",
"customInterval":5}.
:type custom_interval: int
    :param period: Optional argument, periodic value of a time series. If the value is null or
     not present, the API will determine the period automatically.
:type period: int
:param stable_trend_window: Optional argument, advanced model parameter, a default
stableTrendWindow will be used in detection.
:type stable_trend_window: int
    :param threshold: Optional argument, advanced model parameter, between 0.0-1.0. The lower the
     value is, the larger the trend error will be, which means fewer change points will be
     accepted.
:type threshold: float
"""
_validation = {
'series': {'required': True},
'granularity': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[TimeSeriesPoint]'},
'granularity': {'key': 'granularity', 'type': 'str'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'stable_trend_window': {'key': 'stableTrendWindow', 'type': 'int'},
'threshold': {'key': 'threshold', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(ChangePointDetectRequest, self).__init__(**kwargs)
self.series = kwargs['series']
self.granularity = kwargs['granularity']
self.custom_interval = kwargs.get('custom_interval', None)
self.period = kwargs.get('period', None)
self.stable_trend_window = kwargs.get('stable_trend_window', None)
self.threshold = kwargs.get('threshold', None)
class ChangePointDetectResponse(msrest.serialization.Model):
"""ChangePointDetectResponse.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar period: Frequency extracted from the series, zero means no recurrent pattern has been
found.
:vartype period: int
:param is_change_point: isChangePoint contains change point properties for each input point.
True means an anomaly either negative or positive has been detected. The index of the array is
consistent with the input series.
:type is_change_point: list[bool]
:param confidence_scores: the change point confidence of each point.
:type confidence_scores: list[float]
"""
_validation = {
'period': {'readonly': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'is_change_point': {'key': 'isChangePoint', 'type': '[bool]'},
'confidence_scores': {'key': 'confidenceScores', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(ChangePointDetectResponse, self).__init__(**kwargs)
self.period = None
self.is_change_point = kwargs.get('is_change_point', None)
self.confidence_scores = kwargs.get('confidence_scores', None)
class DetectionRequest(msrest.serialization.Model):
"""Request to submit a detection.
All required parameters must be populated in order to send to Azure.
    :param source: Required. Source file link of the input variables. Each variable is a csv
     with two columns: the first column is the timestamp, the second is the value. Besides
     these variable csv files, an extra meta.json can be included in the zip file if you would
     like to rename a variable. By default, the file name of the variable is used as the
     variable name.
The variables used in detection should be consistent with variables in the model used for
detection.
:type source: str
    :param start_time: Required. Start time of the data used for detection; should be a
     date-time.
:type start_time: ~datetime.datetime
    :param end_time: Required. End time of the data used for detection; should be a date-time.
:type end_time: ~datetime.datetime
"""
_validation = {
'source': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(DetectionRequest, self).__init__(**kwargs)
self.source = kwargs['source']
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
class DetectionResult(msrest.serialization.Model):
"""Anomaly Response of one detection corresponds to a resultId.
All required parameters must be populated in order to send to Azure.
:param result_id: Required.
:type result_id: str
:param summary: Required. Multivariate anomaly detection status.
:type summary: ~azure.ai.anomalydetector.models.DetectionResultSummary
:param results: Required. anomaly status of each timestamp.
:type results: list[~azure.ai.anomalydetector.models.AnomalyState]
"""
_validation = {
'result_id': {'required': True},
'summary': {'required': True},
'results': {'required': True},
}
_attribute_map = {
'result_id': {'key': 'resultId', 'type': 'str'},
'summary': {'key': 'summary', 'type': 'DetectionResultSummary'},
'results': {'key': 'results', 'type': '[AnomalyState]'},
}
def __init__(
self,
**kwargs
):
super(DetectionResult, self).__init__(**kwargs)
self.result_id = kwargs['result_id']
self.summary = kwargs['summary']
self.results = kwargs['results']
class DetectionResultSummary(msrest.serialization.Model):
"""DetectionResultSummary.
All required parameters must be populated in order to send to Azure.
:param status: Required. Multivariate anomaly detection status. Possible values include:
"CREATED", "RUNNING", "READY", "FAILED".
:type status: str or ~azure.ai.anomalydetector.models.DetectionStatus
    :param errors: Error messages when model creation or training fails.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
:param variable_states:
:type variable_states: list[~azure.ai.anomalydetector.models.VariableState]
:param setup_info: Required. Request when creating the model.
:type setup_info: ~azure.ai.anomalydetector.models.DetectionRequest
"""
_validation = {
'status': {'required': True},
'setup_info': {'required': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
'variable_states': {'key': 'variableStates', 'type': '[VariableState]'},
'setup_info': {'key': 'setupInfo', 'type': 'DetectionRequest'},
}
def __init__(
self,
**kwargs
):
super(DetectionResultSummary, self).__init__(**kwargs)
self.status = kwargs['status']
self.errors = kwargs.get('errors', None)
self.variable_states = kwargs.get('variable_states', None)
self.setup_info = kwargs['setup_info']
class DetectRequest(msrest.serialization.Model):
"""DetectRequest.
All required parameters must be populated in order to send to Azure.
:param series: Required. Time series data points. Points should be sorted by timestamp in
ascending order to match the anomaly detection result. If the data is not sorted correctly or
there is duplicated timestamp, the API will not work. In such case, an error message will be
returned.
:type series: list[~azure.ai.anomalydetector.models.TimeSeriesPoint]
:param granularity: Optional argument, can be one of yearly, monthly, weekly, daily, hourly,
minutely, secondly, microsecond or none. If granularity is not present, it will be none by
default. If granularity is none, the timestamp property in time series point can be absent.
Possible values include: "yearly", "monthly", "weekly", "daily", "hourly", "minutely",
"secondly", "microsecond", "none".
:type granularity: str or ~azure.ai.anomalydetector.models.TimeGranularity
:param custom_interval: Custom Interval is used to set non-standard time interval, for example,
if the series is 5 minutes, request can be set as {"granularity":"minutely",
"customInterval":5}.
:type custom_interval: int
    :param period: Optional argument, periodic value of a time series. If the value is null or
     not present, the API will determine the period automatically.
:type period: int
:param max_anomaly_ratio: Optional argument, advanced model parameter, max anomaly ratio in a
time series.
:type max_anomaly_ratio: float
    :param sensitivity: Optional argument, advanced model parameter, between 0-99. The lower the
     value is, the larger the margin value will be, which means fewer anomalies will be accepted.
:type sensitivity: int
"""
_validation = {
'series': {'required': True},
}
_attribute_map = {
'series': {'key': 'series', 'type': '[TimeSeriesPoint]'},
'granularity': {'key': 'granularity', 'type': 'str'},
'custom_interval': {'key': 'customInterval', 'type': 'int'},
'period': {'key': 'period', 'type': 'int'},
'max_anomaly_ratio': {'key': 'maxAnomalyRatio', 'type': 'float'},
'sensitivity': {'key': 'sensitivity', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(DetectRequest, self).__init__(**kwargs)
self.series = kwargs['series']
self.granularity = kwargs.get('granularity', None)
self.custom_interval = kwargs.get('custom_interval', None)
self.period = kwargs.get('period', None)
self.max_anomaly_ratio = kwargs.get('max_anomaly_ratio', None)
self.sensitivity = kwargs.get('sensitivity', None)
class DiagnosticsInfo(msrest.serialization.Model):
"""DiagnosticsInfo.
:param model_state:
:type model_state: ~azure.ai.anomalydetector.models.ModelState
:param variable_states:
:type variable_states: list[~azure.ai.anomalydetector.models.VariableState]
"""
_attribute_map = {
'model_state': {'key': 'modelState', 'type': 'ModelState'},
'variable_states': {'key': 'variableStates', 'type': '[VariableState]'},
}
def __init__(
self,
**kwargs
):
super(DiagnosticsInfo, self).__init__(**kwargs)
self.model_state = kwargs.get('model_state', None)
self.variable_states = kwargs.get('variable_states', None)
class EntireDetectResponse(msrest.serialization.Model):
"""EntireDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means no recurrent pattern
has been found.
:type period: int
:param expected_values: Required. ExpectedValues contain expected value for each input point.
The index of the array is consistent with the input series.
:type expected_values: list[float]
:param upper_margins: Required. UpperMargins contain upper margin of each input point.
UpperMargin is used to calculate upperBoundary, which equals to expectedValue + (100 -
marginScale)*upperMargin. Anomalies in response can be filtered by upperBoundary and
lowerBoundary. By adjusting marginScale value, less significant anomalies can be filtered in
client side. The index of the array is consistent with the input series.
:type upper_margins: list[float]
:param lower_margins: Required. LowerMargins contain lower margin of each input point.
LowerMargin is used to calculate lowerBoundary, which equals to expectedValue - (100 -
marginScale)*lowerMargin. Points between the boundary can be marked as normal ones in client
side. The index of the array is consistent with the input series.
:type lower_margins: list[float]
:param is_anomaly: Required. IsAnomaly contains anomaly properties for each input point. True
means an anomaly either negative or positive has been detected. The index of the array is
consistent with the input series.
:type is_anomaly: list[bool]
:param is_negative_anomaly: Required. IsNegativeAnomaly contains anomaly status in negative
direction for each input point. True means a negative anomaly has been detected. A negative
anomaly means the point is detected as an anomaly and its real value is smaller than the
expected one. The index of the array is consistent with the input series.
:type is_negative_anomaly: list[bool]
:param is_positive_anomaly: Required. IsPositiveAnomaly contain anomaly status in positive
direction for each input point. True means a positive anomaly has been detected. A positive
anomaly means the point is detected as an anomaly and its real value is larger than the
expected one. The index of the array is consistent with the input series.
:type is_positive_anomaly: list[bool]
"""
_validation = {
'period': {'required': True},
'expected_values': {'required': True},
'upper_margins': {'required': True},
'lower_margins': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'expected_values': {'key': 'expectedValues', 'type': '[float]'},
'upper_margins': {'key': 'upperMargins', 'type': '[float]'},
'lower_margins': {'key': 'lowerMargins', 'type': '[float]'},
'is_anomaly': {'key': 'isAnomaly', 'type': '[bool]'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': '[bool]'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': '[bool]'},
}
def __init__(
self,
**kwargs
):
super(EntireDetectResponse, self).__init__(**kwargs)
self.period = kwargs['period']
self.expected_values = kwargs['expected_values']
self.upper_margins = kwargs['upper_margins']
self.lower_margins = kwargs['lower_margins']
self.is_anomaly = kwargs['is_anomaly']
self.is_negative_anomaly = kwargs['is_negative_anomaly']
self.is_positive_anomaly = kwargs['is_positive_anomaly']
class ErrorResponse(msrest.serialization.Model):
"""ErrorResponse.
All required parameters must be populated in order to send to Azure.
:param code: Required. The error Code.
:type code: str
:param message: Required. A message explaining the error reported by the service.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = kwargs['code']
self.message = kwargs['message']
class LastDetectResponse(msrest.serialization.Model):
"""LastDetectResponse.
All required parameters must be populated in order to send to Azure.
:param period: Required. Frequency extracted from the series, zero means no recurrent pattern
has been found.
:type period: int
:param suggested_window: Required. Suggested input series points needed for detecting the
latest point.
:type suggested_window: int
:param expected_value: Required. Expected value of the latest point.
:type expected_value: float
:param upper_margin: Required. Upper margin of the latest point. UpperMargin is used to
calculate upperBoundary, which equals to expectedValue + (100 - marginScale)*upperMargin. If
the value of latest point is between upperBoundary and lowerBoundary, it should be treated as
normal value. By adjusting marginScale value, anomaly status of latest point can be changed.
:type upper_margin: float
:param lower_margin: Required. Lower margin of the latest point. LowerMargin is used to
calculate lowerBoundary, which equals to expectedValue - (100 - marginScale)*lowerMargin.
:type lower_margin: float
:param is_anomaly: Required. Anomaly status of the latest point, true means the latest point is
an anomaly either in negative direction or positive direction.
:type is_anomaly: bool
:param is_negative_anomaly: Required. Anomaly status in negative direction of the latest point.
True means the latest point is an anomaly and its real value is smaller than the expected one.
:type is_negative_anomaly: bool
:param is_positive_anomaly: Required. Anomaly status in positive direction of the latest point.
True means the latest point is an anomaly and its real value is larger than the expected one.
:type is_positive_anomaly: bool
"""
_validation = {
'period': {'required': True},
'suggested_window': {'required': True},
'expected_value': {'required': True},
'upper_margin': {'required': True},
'lower_margin': {'required': True},
'is_anomaly': {'required': True},
'is_negative_anomaly': {'required': True},
'is_positive_anomaly': {'required': True},
}
_attribute_map = {
'period': {'key': 'period', 'type': 'int'},
'suggested_window': {'key': 'suggestedWindow', 'type': 'int'},
'expected_value': {'key': 'expectedValue', 'type': 'float'},
'upper_margin': {'key': 'upperMargin', 'type': 'float'},
'lower_margin': {'key': 'lowerMargin', 'type': 'float'},
'is_anomaly': {'key': 'isAnomaly', 'type': 'bool'},
'is_negative_anomaly': {'key': 'isNegativeAnomaly', 'type': 'bool'},
'is_positive_anomaly': {'key': 'isPositiveAnomaly', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(LastDetectResponse, self).__init__(**kwargs)
self.period = kwargs['period']
self.suggested_window = kwargs['suggested_window']
self.expected_value = kwargs['expected_value']
self.upper_margin = kwargs['upper_margin']
self.lower_margin = kwargs['lower_margin']
self.is_anomaly = kwargs['is_anomaly']
self.is_negative_anomaly = kwargs['is_negative_anomaly']
self.is_positive_anomaly = kwargs['is_positive_anomaly']
class Model(msrest.serialization.Model):
"""Response of get model.
All required parameters must be populated in order to send to Azure.
:param model_id: Required. Model identifier.
:type model_id: str
:param created_time: Required. Date and time (UTC) when the model was created.
:type created_time: ~datetime.datetime
:param last_updated_time: Required. Date and time (UTC) when the model was last updated.
:type last_updated_time: ~datetime.datetime
:param model_info: Training Status of the model.
:type model_info: ~azure.ai.anomalydetector.models.ModelInfo
"""
_validation = {
'model_id': {'required': True},
'created_time': {'required': True},
'last_updated_time': {'required': True},
}
_attribute_map = {
'model_id': {'key': 'modelId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'last_updated_time': {'key': 'lastUpdatedTime', 'type': 'iso-8601'},
'model_info': {'key': 'modelInfo', 'type': 'ModelInfo'},
}
def __init__(
self,
**kwargs
):
super(Model, self).__init__(**kwargs)
self.model_id = kwargs['model_id']
self.created_time = kwargs['created_time']
self.last_updated_time = kwargs['last_updated_time']
self.model_info = kwargs.get('model_info', None)
class ModelInfo(msrest.serialization.Model):
"""Train result of a model including status, errors and diagnose info for model and variables.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param sliding_window: An optional field, indicates how many history points will be used to
determine the anomaly score of one subsequent point.
:type sliding_window: int
    :param align_policy: An optional field, since the multivariate series need to be aligned on
     the same timestamps before starting the detection.
:type align_policy: ~azure.ai.anomalydetector.models.AlignPolicy
    :param source: Required. Source file link of the input variables. Each variable is a csv
     with two columns: the first column is the timestamp, the second is the value. Besides
     these variable csv files, an extra meta.json can be included in the zip file if you would
     like to rename a variable. By default, the file name of the variable is used as the
     variable name.
:type source: str
    :param start_time: Required. Start time of the data used for generating the multivariate
     anomaly detection model; should be a date-time.
:type start_time: ~datetime.datetime
    :param end_time: Required. End time of the data used for generating the multivariate
     anomaly detection model; should be a date-time.
:type end_time: ~datetime.datetime
    :param display_name: An optional field; the name of the model.
:type display_name: str
:ivar status: Model training status. Possible values include: "CREATED", "RUNNING", "READY",
"FAILED".
:vartype status: str or ~azure.ai.anomalydetector.models.ModelStatus
    :ivar errors: Error messages when model creation fails.
:vartype errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
    :ivar diagnostics_info: Diagnostics used for deep analysis of the model and variables.
:vartype diagnostics_info: ~azure.ai.anomalydetector.models.DiagnosticsInfo
"""
_validation = {
'source': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
'display_name': {'max_length': 24, 'min_length': 0},
'status': {'readonly': True},
'errors': {'readonly': True},
'diagnostics_info': {'readonly': True},
}
_attribute_map = {
'sliding_window': {'key': 'slidingWindow', 'type': 'int'},
'align_policy': {'key': 'alignPolicy', 'type': 'AlignPolicy'},
'source': {'key': 'source', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'display_name': {'key': 'displayName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
'diagnostics_info': {'key': 'diagnosticsInfo', 'type': 'DiagnosticsInfo'},
}
def __init__(
self,
**kwargs
):
super(ModelInfo, self).__init__(**kwargs)
self.sliding_window = kwargs.get('sliding_window', None)
self.align_policy = kwargs.get('align_policy', None)
self.source = kwargs['source']
self.start_time = kwargs['start_time']
self.end_time = kwargs['end_time']
self.display_name = kwargs.get('display_name', None)
self.status = None
self.errors = None
self.diagnostics_info = None
class ModelList(msrest.serialization.Model):
"""Response to the list models operation.
All required parameters must be populated in order to send to Azure.
:param models: Required. List of models.
:type models: list[~azure.ai.anomalydetector.models.ModelSnapshot]
:param current_count: Required. Current count of trained multivariate models.
:type current_count: int
:param max_count: Required. Max number of models that can be trained for this subscription.
:type max_count: int
:param next_link: next link to fetch more models.
:type next_link: str
"""
_validation = {
'models': {'required': True},
'current_count': {'required': True},
'max_count': {'required': True},
}
_attribute_map = {
'models': {'key': 'models', 'type': '[ModelSnapshot]'},
'current_count': {'key': 'currentCount', 'type': 'int'},
'max_count': {'key': 'maxCount', 'type': 'int'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ModelList, self).__init__(**kwargs)
self.models = kwargs['models']
self.current_count = kwargs['current_count']
self.max_count = kwargs['max_count']
self.next_link = kwargs.get('next_link', None)
class ModelSnapshot(msrest.serialization.Model):
"""ModelSnapshot.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param model_id: Required. Model identifier.
:type model_id: str
:param created_time: Required. Date and time (UTC) when the model was created.
:type created_time: ~datetime.datetime
:param last_updated_time: Required. Date and time (UTC) when the model was last updated.
:type last_updated_time: ~datetime.datetime
:ivar status: Required. Model training status. Possible values include: "CREATED", "RUNNING",
"READY", "FAILED".
:vartype status: str or ~azure.ai.anomalydetector.models.ModelStatus
:param display_name:
:type display_name: str
:param variables_count: Required. Count of variables.
:type variables_count: int
"""
_validation = {
'model_id': {'required': True},
'created_time': {'required': True},
'last_updated_time': {'required': True},
'status': {'required': True, 'readonly': True},
'variables_count': {'required': True},
}
_attribute_map = {
'model_id': {'key': 'modelId', 'type': 'str'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'last_updated_time': {'key': 'lastUpdatedTime', 'type': 'iso-8601'},
'status': {'key': 'status', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'variables_count': {'key': 'variablesCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ModelSnapshot, self).__init__(**kwargs)
self.model_id = kwargs['model_id']
self.created_time = kwargs['created_time']
self.last_updated_time = kwargs['last_updated_time']
self.status = None
self.display_name = kwargs.get('display_name', None)
self.variables_count = kwargs['variables_count']
class ModelState(msrest.serialization.Model):
"""ModelState.
:param epoch_ids: Epoch id.
:type epoch_ids: list[int]
:param train_losses:
:type train_losses: list[float]
:param validation_losses:
:type validation_losses: list[float]
:param latencies_in_seconds:
:type latencies_in_seconds: list[float]
"""
_attribute_map = {
'epoch_ids': {'key': 'epochIds', 'type': '[int]'},
'train_losses': {'key': 'trainLosses', 'type': '[float]'},
'validation_losses': {'key': 'validationLosses', 'type': '[float]'},
'latencies_in_seconds': {'key': 'latenciesInSeconds', 'type': '[float]'},
}
def __init__(
self,
**kwargs
):
super(ModelState, self).__init__(**kwargs)
self.epoch_ids = kwargs.get('epoch_ids', None)
self.train_losses = kwargs.get('train_losses', None)
self.validation_losses = kwargs.get('validation_losses', None)
self.latencies_in_seconds = kwargs.get('latencies_in_seconds', None)
class TimeSeriesPoint(msrest.serialization.Model):
"""TimeSeriesPoint.
All required parameters must be populated in order to send to Azure.
:param timestamp: Optional argument, timestamp of a data point (ISO8601 format).
:type timestamp: ~datetime.datetime
:param value: Required. The measurement of that point, should be float.
:type value: float
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'value': {'key': 'value', 'type': 'float'},
}
def __init__(
self,
**kwargs
):
super(TimeSeriesPoint, self).__init__(**kwargs)
self.timestamp = kwargs.get('timestamp', None)
self.value = kwargs['value']
class VariableState(msrest.serialization.Model):
"""VariableState.
:param variable: Variable name.
:type variable: str
:param filled_na_ratio: Merged NA ratio of a variable.
:type filled_na_ratio: float
:param effective_count: Effective time-series points count.
:type effective_count: int
:param start_time: Start time of a variable.
:type start_time: ~datetime.datetime
:param end_time: End time of a variable.
:type end_time: ~datetime.datetime
    :param errors: Error messages when parsing the variable.
:type errors: list[~azure.ai.anomalydetector.models.ErrorResponse]
"""
_validation = {
'filled_na_ratio': {'maximum': 1, 'minimum': 0},
}
_attribute_map = {
'variable': {'key': 'variable', 'type': 'str'},
'filled_na_ratio': {'key': 'filledNARatio', 'type': 'float'},
'effective_count': {'key': 'effectiveCount', 'type': 'int'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'errors': {'key': 'errors', 'type': '[ErrorResponse]'},
}
def __init__(
self,
**kwargs
):
super(VariableState, self).__init__(**kwargs)
self.variable = kwargs.get('variable', None)
self.filled_na_ratio = kwargs.get('filled_na_ratio', None)
self.effective_count = kwargs.get('effective_count', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.errors = kwargs.get('errors', None)
|
qaforum/utils.py | UREDDY616/IIITVforum | 117 | 12786308 | <gh_stars>100-1000
import pytz
from datetime import datetime
from django.utils import timezone
from math import log
# uses a version of reddit score algorithm
# https://medium.com/hacking-and-gonzo/how-reddit-ranking-algorithms-work-ef111e33d0d9#.aef67efq1
def question_score(question):
creation_date = question.pub_date
score = question.total_points
answers_positive_points = list(
question.answer_set.all().values_list(
'answervote__value', flat=True)).count(True)
answers_negative_points = list(
question.answer_set.all().values_list(
'answervote__value', flat=True)).count(False)
score = score * 2 + answers_positive_points - answers_negative_points
reference_date = pytz.timezone(
timezone.get_default_timezone_name()).localize(datetime(1970, 1, 1))
difference = creation_date - reference_date
difference_seconds = difference.days * 86400 + difference.seconds +\
(float(difference.microseconds) / 1000000)
order = log(max(abs(score), 1), 10)
sign = 1 if score > 0 else -1 if score < 0 else 0
seconds = difference_seconds - 1134028003
return round(sign * order + seconds / 45000, 7)
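# Worked example (hypothetical numbers): a question with total_points = 10,
# 3 positive and 1 negative answer votes scores 10 * 2 + 3 - 1 = 22, so
# order = log10(22) ~ 1.34; among equal scores, newer questions rank higher
# because the seconds term grows with the publication date.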
|
examples/graph_prediction/general_gnn.py | JonaBecher/spektral | 2,145 | 12786322 | """
This example implements the model from the paper
> [Design Space for Graph Neural Networks](https://arxiv.org/abs/2011.08843)<br>
> <NAME>, <NAME>, <NAME>
using the PROTEINS dataset.
The configuration at the top of the file is the best one identified in the
paper, and should work well for many different datasets without changes.
Note: the results reported in the paper are averaged over 3 random repetitions
with an 80/20 split.
"""
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.optimizers import Adam
from spektral.data import DisjointLoader
from spektral.datasets import TUDataset
from spektral.models import GeneralGNN
physical_devices = tf.config.list_physical_devices("GPU")
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
################################################################################
# Config
################################################################################
batch_size = 32
learning_rate = 0.01
epochs = 400
################################################################################
# Load data
################################################################################
data = TUDataset("PROTEINS")
# Train/test split
np.random.shuffle(data)
split = int(0.8 * len(data))
data_tr, data_te = data[:split], data[split:]
# Data loaders
loader_tr = DisjointLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_te = DisjointLoader(data_te, batch_size=batch_size)
################################################################################
# Build model
################################################################################
model = GeneralGNN(data.n_labels, activation="softmax")
optimizer = Adam(learning_rate)
loss_fn = CategoricalCrossentropy()
################################################################################
# Fit model
################################################################################
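# tf.function with the loader's tf_signature compiles the training step, and
# experimental_relax_shapes limits retracing when batch shapes vary between
# disjoint batches.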
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions) + sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
acc = tf.reduce_mean(categorical_accuracy(target, predictions))
return loss, acc
def evaluate(loader):
output = []
step = 0
while step < loader.steps_per_epoch:
step += 1
inputs, target = loader.__next__()
pred = model(inputs, training=False)
outs = (
loss_fn(target, pred),
tf.reduce_mean(categorical_accuracy(target, pred)),
len(target), # Keep track of batch size
)
output.append(outs)
if step == loader.steps_per_epoch:
output = np.array(output)
return np.average(output[:, :-1], 0, weights=output[:, -1])
epoch = step = 0
results = []
for batch in loader_tr:
step += 1
loss, acc = train_step(*batch)
results.append((loss, acc))
if step == loader_tr.steps_per_epoch:
step = 0
epoch += 1
results_te = evaluate(loader_te)
print(
"Ep. {} - Loss: {:.3f} - Acc: {:.3f} - Test loss: {:.3f} - Test acc: {:.3f}".format(
epoch, *np.mean(results, 0), *results_te
)
)
results = []
################################################################################
# Evaluate model
################################################################################
results_te = evaluate(loader_te)
print("Final results - Loss: {:.3f} - Acc: {:.3f}".format(*results_te))
|
Chapter01/19_iterator_example.py | add54/ADMIN_SYS_PYTHON | 116 | 12786329 | <filename>Chapter01/19_iterator_example.py<gh_stars>100-1000
numbers = [10, 20, 30, 40]
numbers_iter = iter(numbers)
print(next(numbers_iter))
print(next(numbers_iter))
print(numbers_iter.__next__())
print(numbers_iter.__next__())
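# All four items have been consumed, so this final call raises StopIteration.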
next(numbers_iter)
|
src/app/conf/static.py | denkasyanov/education-backend | 151 | 12786362 | <filename>src/app/conf/static.py
import os.path
from app.conf.boilerplate import BASE_DIR
from app.conf.environ import env
STATIC_URL = env('STATIC_URL', default='/static/')
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
|
tests/environment/test_custom_environment_provider.py | TheCodingLand/pyctuator | 118 | 12786367 | <gh_stars>100-1000
from typing import Dict
from pyctuator.environment.custom_environment_provider import CustomEnvironmentProvider
from pyctuator.environment.environment_provider import PropertyValue
def test_custom_environment_provider() -> None:
def produce_env() -> Dict:
return {
"a": "s1",
"b": {
"secret": "ha ha",
"c": 625,
},
"d": {
"e": True,
"f": "hello",
"g": {
"h": 123,
"i": "abcde"
}
}
}
provider = CustomEnvironmentProvider("custom", produce_env)
properties_source = provider.get_properties_source()
assert properties_source.name == "custom"
assert properties_source.properties == {
"a": PropertyValue(value="s1"),
"b.secret": PropertyValue(value="******"),
"b.c": PropertyValue(value=625),
"d.e": PropertyValue(value=True),
"d.f": PropertyValue(value="hello"),
"d.g.h": PropertyValue(value=123),
"d.g.i": PropertyValue(value="abcde"),
}
|
akshare/economic/macro_china_hk.py | J-Z-Z/akshare | 721 | 12786375 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/6 15:21
Desc: China - Hong Kong - macro indicators
https://data.eastmoney.com/cjsj/foreign_8_0.html
"""
import pandas as pd
import requests
from akshare.utils import demjson
def macro_china_hk_cpi() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - consumer price index
    https://data.eastmoney.com/cjsj/foreign_8_0.html
    :return: consumer price index
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_cpi_ratio() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - consumer price index annual rate
    https://data.eastmoney.com/cjsj/foreign_8_1.html
    :return: consumer price index annual rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_rate_of_unemployment() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - unemployment rate
    https://data.eastmoney.com/cjsj/foreign_8_2.html
    :return: unemployment rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "2",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_gbp() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - Hong Kong GDP
    https://data.eastmoney.com/cjsj/foreign_8_3.html
    :return: Hong Kong GDP
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "3",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值']) / 100
temp_df['现值'] = pd.to_numeric(temp_df['现值']) / 100
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_gbp_ratio() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - Hong Kong GDP year-on-year
    https://data.eastmoney.com/cjsj/foreign_8_4.html
    :return: Hong Kong GDP year-on-year
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "4",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_building_volume() -> pd.DataFrame:
"""
    Eastmoney - economic data overview - China (Hong Kong) - number of Hong Kong building sale and purchase agreements
    https://data.eastmoney.com/cjsj/foreign_8_5.html
    :return: number of Hong Kong building sale and purchase agreements
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "5",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_building_amount() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - Hong Kong (China) - transaction value of Hong Kong building sale and purchase agreements
    https://data.eastmoney.com/cjsj/foreign_8_6.html
    :return: transaction value of Hong Kong building sale and purchase agreements
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "6",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值']) / 100
temp_df['现值'] = pd.to_numeric(temp_df['现值']) / 100
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_trade_diff_ratio() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - Hong Kong (China) - Hong Kong merchandise trade balance, annual rate
    https://data.eastmoney.com/cjsj/foreign_8_7.html
    :return: Hong Kong merchandise trade balance, annual rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "7",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
def macro_china_hk_ppi() -> pd.DataFrame:
"""
    Eastmoney - Economic Data Overview - Hong Kong (China) - Hong Kong manufacturing PPI, annual rate
    https://data.eastmoney.com/cjsj/foreign_8_8.html
    :return: Hong Kong manufacturing PPI, annual rate
:rtype: pandas.DataFrame
"""
url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "8",
"stat": "8",
"pageNo": "1",
"pageNum": "1",
"_": "1621332091873",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
temp_df['时间'] = pd.to_datetime(temp_df['时间']).dt.date
temp_df['发布日期'] = pd.to_datetime(temp_df['发布日期']).dt.date
return temp_df
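# All nine Hong Kong fetchers above share one request/post-processing pattern
# and differ only in the Eastmoney "stat" parameter (plus a /100 scale for a
# few series). The sketch below makes that pattern explicit; the helper name
# `_fetch_hk_indicator` is hypothetical and not part of the public akshare API.
def _fetch_hk_indicator(stat: str, scale: float = 1.0) -> pd.DataFrame:
    url = "https://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
    params = {
        "type": "GJZB",
        "sty": "HKZB",
        "js": "({data:[(x)],pages:(pc)})",
        "p": "1",
        "ps": "2000",
        "mkt": "8",
        "stat": stat,
        "pageNo": "1",
        "pageNum": "1",
        "_": "1621332091873",
    }
    r = requests.get(url, params=params)
    data_json = demjson.decode(r.text[1:-1])
    temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
    temp_df.columns = ["时间", "前值", "现值", "发布日期"]
    temp_df["前值"] = pd.to_numeric(temp_df["前值"]) * scale
    temp_df["现值"] = pd.to_numeric(temp_df["现值"]) * scale
    temp_df["时间"] = pd.to_datetime(temp_df["时间"]).dt.date
    temp_df["发布日期"] = pd.to_datetime(temp_df["发布日期"]).dt.date
    return temp_df
# e.g. macro_china_hk_ppi() is equivalent to _fetch_hk_indicator("8").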
if __name__ == "__main__":
macro_china_hk_cpi_df = macro_china_hk_cpi()
print(macro_china_hk_cpi_df)
macro_china_hk_cpi_ratio_df = macro_china_hk_cpi_ratio()
print(macro_china_hk_cpi_ratio_df)
macro_china_hk_rate_of_unemployment_df = macro_china_hk_rate_of_unemployment()
print(macro_china_hk_rate_of_unemployment_df)
macro_china_hk_gbp_df = macro_china_hk_gbp()
print(macro_china_hk_gbp_df)
macro_china_hk_gbp_ratio_df = macro_china_hk_gbp_ratio()
print(macro_china_hk_gbp_ratio_df)
    macro_china_hk_building_volume_df = macro_china_hk_building_volume()
    print(macro_china_hk_building_volume_df)
macro_china_hk_building_amount_df = macro_china_hk_building_amount()
print(macro_china_hk_building_amount_df)
macro_china_hk_trade_diff_ratio_df = macro_china_hk_trade_diff_ratio()
print(macro_china_hk_trade_diff_ratio_df)
macro_china_hk_ppi_df = macro_china_hk_ppi()
print(macro_china_hk_ppi_df)
|
chainercv/functions/ps_roi_max_align_2d.py | beam2d/chainercv | 1,600 | 12786392 | # Modified work:
# -----------------------------------------------------------------------------
# Copyright (c) 2019 Preferred Infrastructure, Inc.
# Copyright (c) 2019 Preferred Networks, Inc.
# -----------------------------------------------------------------------------
# Original work:
# -----------------------------------------------------------------------------
# Copyright (c) 2015 by Contributors
# \file roi_pooling.cu
# \brief roi pooling operator
# \author <NAME>, <NAME>, <NAME>
# \changed to roi_align by <NAME>
# \file roi_align.cu
# \roi align operator described in Mask RCNN
# -----------------------------------------------------------------------------
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_align_2d \
import _GET_BILINEAR_INTERP_KERNEL
from chainercv.functions.ps_roi_average_align_2d \
import _get_bilinear_interp_params
from chainercv.functions.ps_roi_average_align_2d import _get_bounds
from chainercv.functions.ps_roi_average_align_2d import _pair
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
class PSROIMaxAlign2D(function.Function):
def __init__(
self, outsize, spatial_scale,
group_size, sampling_ratio=None
):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral)
and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
sampling_ratio = _pair(sampling_ratio)
if not all((isinstance(s, numbers.Integral) and s >= 1) or s is None
for s in sampling_ratio):
raise TypeError(
'sampling_ratio must be integer >= 1 or a pair of it: {}'
.format(sampling_ratio))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
self.sampling_ratio = sampling_ratio
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
group_size = self.group_size
        pooled_width, pooled_height = self.out_w, self.out_h
spatial_scale = self.spatial_scale
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
maxval = - np.inf
maxidx = -1
for iy in six.moves.range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
for ix in six.moves.range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear interpolation {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
tmpval = 0.0
isvalid = False
bottom_index = iy * roi_bin_grid_w + ix
if w1 > 0 and y_low >= 0 and x_low >= 0:
v1 = bottom_data[roi_batch_ind, c, y_low, x_low]
tmpval += w1 * v1
isvalid = True
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
v2 = bottom_data[roi_batch_ind, c, y_low, x_high]
tmpval += w2 * v2
isvalid = True
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
v3 = bottom_data[roi_batch_ind, c, y_high, x_low]
tmpval += w3 * v3
isvalid = True
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
v4 = bottom_data[roi_batch_ind, c, y_high, x_high]
tmpval += w4 * v4
isvalid = True
if isvalid and tmpval > maxval:
maxval = tmpval
maxidx = bottom_index
# }}
top_data[n, ctop, ph, pw] = maxval
self.argmax_data[n, ctop, ph, pw] = maxidx
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
'input channel must be divided by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channel,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_data_offset =
(roi_batch_ind * channel + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
T maxval = - (T) (1.0 / 0.0);
int maxidx = -1;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g. iy = 0, 1
{
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
T tmpval = 0.;
bool isvalid = false;
int bottom_index = iy * roi_bin_grid_w + ix;
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T v1 = bottom_data[
bottom_data_offset + y_low * width + x_low];
tmpval += w1 * v1;
isvalid = true;
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T v2 = bottom_data[
bottom_data_offset + y_low * width + x_high];
tmpval += w2 * v2;
isvalid = true;
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T v3 = bottom_data[
bottom_data_offset + y_high * width + x_low];
tmpval += w3 * v3;
isvalid = true;
}
if (w4 > 0 && y_high <= height - 1 &&
x_high <= width - 1) {
T v4 = bottom_data[
bottom_data_offset + y_high * width + x_high];
tmpval += w4 * v4;
isvalid = true;
}
// }}
if (isvalid && tmpval > maxval) {
maxval = tmpval;
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''',
'ps_roi_max_align_2d_fwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w,
self.group_size, sampling_ratio_h, sampling_ratio_w,
top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
top_diff = gy[0]
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
gh = int(np.floor(float(ph) * group_size / pooled_height))
gw = int(np.floor(float(pw) * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
top_diff_this_bin = top_diff[n, ctop, ph, pw]
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
if self.sampling_ratio[0] is None:
roi_bin_grid_h = int(np.ceil(roi_height / pooled_height))
else:
roi_bin_grid_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
roi_bin_grid_w = int(np.ceil(roi_width / pooled_width))
else:
roi_bin_grid_w = self.sampling_ratio[1]
iy = int(maxidx / roi_bin_grid_w)
ix = maxidx % roi_bin_grid_w
y = roi_start_h + ph * bin_size_h + \
(iy + .5) * bin_size_h / roi_bin_grid_h
x = roi_start_w + pw * bin_size_w + \
(ix + .5) * bin_size_w / roi_bin_grid_w
y, y_low, y_high = _get_bounds(y, height)
if y is None or y_low is None or y_high is None:
continue
x, x_low, x_high = _get_bounds(x, width)
if x is None or x_low is None or x_high is None:
continue
# bilinear_interpolation_gradient {{
w1, w2, w3, w4 = _get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high)
if w1 > 0 and y_low >= 0 and x_low >= 0:
g1 = top_diff_this_bin * w1
bottom_diff[roi_batch_ind, c, y_low, x_low] += g1
if w2 > 0 and y_low >= 0 and x_high <= width - 1:
g2 = top_diff_this_bin * w2
bottom_diff[roi_batch_ind, c, y_low, x_high] += g2
if w3 > 0 and y_high <= height - 1 and x_low >= 0:
g3 = top_diff_this_bin * w3
bottom_diff[roi_batch_ind, c, y_high, x_low] += g3
if w4 > 0 and y_high <= height - 1 and x_high <= width - 1:
g4 = top_diff_this_bin * w4
bottom_diff[roi_batch_ind, c, y_high, x_high] += g4
# }}
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channel, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
if self.sampling_ratio[0] is None:
sampling_ratio_h = 0
else:
sampling_ratio_h = self.sampling_ratio[0]
if self.sampling_ratio[1] is None:
sampling_ratio_w = 0
else:
sampling_ratio_w = self.sampling_ratio[1]
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channel, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size, int32 sampling_ratio_h, int32 sampling_ratio_w
''',
'raw T bottom_diff',
'''
// (n, c, h, w) coords in bottom data
int pw = i % pooled_width;
int ph = (i / pooled_width) % pooled_height;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
            // Do not use rounding; this implementation detail is critical
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset =
(roi_batch_ind * channel + c) * height * width;
int top_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
T top_diff_this_bin =
top_diff[top_offset + ph * pooled_width + pw];
int maxidx = argmax_data[top_offset + ph * pooled_width + pw];
if (maxidx != -1) {
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio_h > 0)
? sampling_ratio_h
: ceil(roi_height / pooled_height); // e.g. = 2
int roi_bin_grid_w = (sampling_ratio_w > 0)
? sampling_ratio_w
: ceil(roi_width / pooled_width);
int iy = maxidx / roi_bin_grid_w;
int ix = maxidx % roi_bin_grid_w;
T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g. 0.5, 1.5
T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
int y_low, y_high;
bool y_ret = get_bounds(y, height, y_low, y_high);
if (!y_ret) continue;
int x_low, x_high;
bool x_ret = get_bounds(x, width, x_low, x_high);
if (!x_ret) continue;
// bilinear_interpolation_gradient {{
T w1, w2, w3, w4;
get_bilinear_interp_params(
y, x, y_low, x_low, y_high, x_high, w1, w2, w3, w4);
if (w1 > 0 && y_low >= 0 && x_low >= 0) {
T g1 = top_diff_this_bin * w1;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_low], g1);
}
if (w2 > 0 && y_low >= 0 && x_high <= width - 1) {
T g2 = top_diff_this_bin * w2;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_low * width + x_high], g2);
}
if (w3 > 0 && y_high <= height - 1 && x_low >= 0) {
T g3 = top_diff_this_bin * w3;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_low], g3);
}
if (w4 > 0 && y_high <= height - 1 && x_high <= width - 1) {
T g4 = top_diff_this_bin * w4;
atomicAdd(&bottom_diff[
bottom_diff_offset + y_high * width + x_high], g4);
}
// }}
}
''',
'ps_roi_max_align_2d_bwd',
preamble=_GET_BILINEAR_INTERP_KERNEL,
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, out_h, out_w, self.group_size,
sampling_ratio_h, sampling_ratio_w, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
    This function computes the position sensitive max value of an input
    spatial patch for the given regions of interest. Each ROI is split into
    :math:`(group\_size, group\_size)` regions, and a position sensitive value
    is computed in each region.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width) or (height, width)
            or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
            The channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the roi is resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
            It must be an integer of at least :math:`1` or :obj:`None`, and
            the value is decided automatically when :obj:`None` is passed.
            Different ratios along the height and width axes are also
            supported by passing a tuple of ints as
            ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices)
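# A minimal usage sketch (shapes and values are illustrative assumptions, not
# taken from the original source); note the input channel count must equal
# outsize[0] * group_size**2.
if __name__ == '__main__':
    x = np.random.uniform(size=(1, 3 * 7 * 7, 24, 24)).astype(np.float32)
    rois = np.array([[2., 2., 20., 20.]], dtype=np.float32)  # (y_min, x_min, y_max, x_max)
    roi_indices = np.array([0], dtype=np.int32)
    y = ps_roi_max_align_2d(x, rois, roi_indices, outsize=(3, 7, 7),
                            spatial_scale=1.0, group_size=7)
    print(y.shape)  # (1, 3, 7, 7)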
|
draw_bar.py | IndexFziQ/nn4nlp-concepts | 440 | 12786471 | # import libraries
import matplotlib
matplotlib.use('Agg')
import pandas as pd
import matplotlib.pyplot as plt
import argparse
from collections import defaultdict
#%matplotlib inline
# set font
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Helvetica'
# set the style of the axes and the text color
plt.rcParams['axes.edgecolor']='#333F4B'
plt.rcParams['axes.linewidth']=0.8
plt.rcParams['xtick.color']='#333F4B'
plt.rcParams['ytick.color']='#333F4B'
plt.rcParams['text.color']='#333F4B'
parser = argparse.ArgumentParser(description='Draw Bar')
parser.add_argument('--tsv', default='input.tsv', help='input file separated by \'\\t\' ')
parser.add_argument('--fig', default='out.png', help='the output figure')
parser.add_argument('--title', default='Concept Count in All Papers', help='the title of the graph')
parser.add_argument('--colored_concepts', default=None, nargs='+',
help='An interleaved list of filenames containing concept tags (e.g. first.txt red second.txt purple)')
args = parser.parse_args()
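# Example invocation (file names are hypothetical):
#   python draw_bar.py --tsv concept_counts.tsv --fig concepts.png \
#       --colored_concepts low_coverage.txt red high_coverage.txt green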
concept_colors = defaultdict(lambda: '#007ACC')
if args.colored_concepts:
for i in range(0, len(args.colored_concepts), 2):
print(f"opening {args.colored_concepts[i]} as {args.colored_concepts[i+1]}")
with open(args.colored_concepts[i], 'r') as f:
for line in f:
line = line.strip()
concept_colors[line] = args.colored_concepts[i+1]
print(f'concept_colors[{line}] = {args.colored_concepts[i+1]}')
tsv_file = args.tsv
fig_file = args.fig
fin = open(tsv_file,"r")
cpt_list = []
val_list = []
for line in fin:
line = line.strip()
cpt, val = line.split("\t")
val_list.append(int(val))
cpt_list.append(cpt)
fin.close()
percentages = pd.Series(val_list,
index=cpt_list)
df = pd.DataFrame({'percentage' : percentages})
df = df.sort_values(by='percentage')
color_list = [concept_colors[x] for x in df.index]
# we first need a numeric placeholder for the y axis
my_range=list(range(1,len(df.index)+1))
fig, ax = plt.subplots(figsize=(10,25))
# create lines and dots for each bar
plt.hlines(y=my_range, xmin=0, xmax=df['percentage'], colors=color_list, alpha=0.5, linewidth=5)
# plt.plot(df['percentage'], my_range, "o", markersize=5, colors=color_list, alpha=0.6)
# set labels
ax.set_xlabel(args.title, fontsize=15, fontweight='black', color = '#333F4B')
ax.xaxis.set_label_position('top')
ax.xaxis.tick_top()
#ax.set_ylabel('')
# set axis
ax.tick_params(axis='both', which='major', labelsize=12)
plt.yticks(my_range, df.index)
# add an horizonal label for the y axis
#fig.text(-0.23, 0.86, 'Concept Coverage (Fulltext)', fontsize=15, fontweight='black', color = '#333F4B')
# change the style of the axis spines
ax.spines['bottom'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['left'].set_smart_bounds(True)
ax.spines['top'].set_smart_bounds(True)
'''
# set the spines position
ax.spines['bottom'].set_position(('axes', -0.04))
ax.spines['left'].set_position(('axes', 0.015))
'''
plt.savefig(fig_file, dpi=300, bbox_inches='tight')
|
test/nose_test.py | fakeNetflix/uber-repo-doubles | 150 | 12786510 | <gh_stars>100-1000
import unittest
from nose.plugins import PluginTester
from doubles.nose import NoseIntegration
from doubles.instance_double import InstanceDouble
from doubles.targets.expectation_target import expect
def test_nose_plugin():
class TestNosePlugin(PluginTester, unittest.TestCase):
activate = '--with-doubles'
plugins = [NoseIntegration()]
def test_expect(self):
assert 'MockExpectationError' in self.output
assert 'FAILED (failures=1)' in self.output
assert 'Ran 2 tests' in self.output
def makeSuite(self):
class TestCase(unittest.TestCase):
def runTest(self):
subject = InstanceDouble('doubles.testing.User')
expect(subject).instance_method
def test2(self):
pass
return [TestCase('runTest'), TestCase('test2')]
result = unittest.TestResult()
TestNosePlugin('test_expect')(result)
assert result.wasSuccessful()
|
python/ql/test/experimental/query-tests/Security/CWE-117/LogInjectionGood.py | madhurimamandal/codeql | 4,036 | 12786544 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Desc :Log Injection
"""
from flask import Flask
from flask import request
import logging
logging.basicConfig(level=logging.DEBUG)
app = Flask(__name__)
@app.route('/good1')
def good1():
name = request.args.get('name')
    name = name.replace('\r\n', '').replace('\n', '').replace('\r', '')
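    # e.g. a request like /good1?name=alice%0aINFO:forged-entry (illustrative
    # payload) now logs a single line 'User name: aliceINFO:forged-entry'
    # instead of injecting a second log record.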
logging.info('User name: ' + name) # Good
return 'good1'
if __name__ == '__main__':
app.debug = True
handler = logging.FileHandler('log')
app.logger.addHandler(handler)
app.run()
|
ckanext/example_theme/v14_more_custom_css/plugin.py | okfde/ckankrzn | 2,805 | 12786621 | <gh_stars>1000+
../v13_custom_css/plugin.py |
pybinding/greens.py | lise1020/pybinding | 159 | 12786624 | """Green's function computation and related methods
Deprecated: use the chebyshev module instead
"""
import warnings
from . import chebyshev
from .support.deprecated import LoudDeprecationWarning
__all__ = ['Greens', 'kpm', 'kpm_cuda']
Greens = chebyshev.KPM
def kpm(*args, **kwargs):
warnings.warn("Use pb.kpm() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm(*args, **kwargs)
def kpm_cuda(*args, **kwargs):
warnings.warn("Use pb.kpm_cuda() instead", LoudDeprecationWarning, stacklevel=2)
return chebyshev.kpm_cuda(*args, **kwargs)
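# Migration sketch (illustrative): existing code such as
#   greens = pb.greens.kpm(model)
# keeps working but emits a LoudDeprecationWarning; new code should call
#   kpm = pb.kpm(model)
# directly, as the warning messages above suggest.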
|
src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case_source_mapping_and_reference.py | r3m0t/debugpy | 695 | 12786625 | def full_function():
# Note that this function is not called, it's there just to make the mapping explicit.
a = 1 # map to cEll1, line 2
b = 2 # map to cEll1, line 3
c = 3 # map to cEll2, line 2
d = 4 # map to cEll2, line 3
def create_code():
cell1_code = compile(''' # line 1
a = 1 # line 2
b = 2 # line 3
''', '<cEll1>', 'exec')
cell2_code = compile('''# line 1
c = 3 # line 2
d = 4 # line 3
''', '<cEll2>', 'exec')
# Set up the source in linecache. Python doesn't have a public API for
# this, so we have to hack around it, similar to what IPython does.
import linecache
import time
code = ''' # line 1
a = 1 # line 2
b = 2 # line 3
'''
linecache.cache['<cEll1>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll1>',
)
code = '''# line 1
c = 3 # line 2
d = 4 # line 3
'''
linecache.cache['<cEll2>'] = (
len(code),
time.time(),
[line + '\n' for line in code.splitlines()],
'<cEll2>',
)
return {'cEll1': cell1_code, 'cEll2': cell2_code}
if __name__ == '__main__':
code = create_code()
exec(code['cEll1'])
exec(code['cEll1'])
exec(code['cEll2'])
exec(code['cEll2'])
print('TEST SUCEEDED')
|
wrappers/tensorflow/example5 - denoise.py | NobuoTsukamoto/librealsense | 6,457 | 12786662 | <reponame>NobuoTsukamoto/librealsense
import pyrealsense2 as rs
import numpy as np
import cv2
from tensorflow import keras
import time, sys
# Configure depth and infrared streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.infrared, 1, 848, 480, rs.format.y8, 30) # 1 for left frame
# Start streaming
pipeline.start(config)
channels = 2
cropped_w, cropped_h = 480, 480
test_model_name = ""
if (len(sys.argv) > 1):
test_model_name = str(sys.argv[1])
t1 = time.perf_counter()
model = keras.models.load_model(test_model_name)
t2 = time.perf_counter()
print('model loading : ', t2 - t1, 'seconds')
def predict(noisy_image, ir_image):
t1 = time.perf_counter()
ir_image = np.array(ir_image).astype("uint16")
cropped_ir , cropped_noisy = [], []
width, height = 848, 480
w, h = cropped_w, cropped_h
for col_i in range(0, width, w):
for row_i in range(0, height, h):
cropped_ir.append(ir_image[row_i:row_i+h, col_i:col_i+w])
cropped_noisy.append(noisy_image[row_i:row_i+h, col_i:col_i+w])
# fill with zero to get size 480x480 for both images
fill = np.zeros((h, w - cropped_ir[-1].shape[1]), dtype="uint16")
cropped_ir[-1] = np.hstack((cropped_ir[-1], fill))
cropped_noisy[-1] = np.hstack((cropped_noisy[-1], fill))
t2 = time.perf_counter()
print('image cropping : ', t2 - t1, 'seconds')
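    # (row, col) origins of the two 480x480 tiles that cover an 848x480 frame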
cropped_image_offsets = [(0,0), (0,480)]
whole_image = np.zeros((height, width, channels), dtype="float32")
for i in range(len(cropped_ir)):
t1 = time.perf_counter()
noisy_images_plt = cropped_noisy[i].reshape(1, cropped_w, cropped_h, 1)
ir_images_plt = cropped_ir[i].reshape(1, cropped_w, cropped_h, 1)
im_and_ir = np.stack((noisy_images_plt, ir_images_plt), axis=3)
im_and_ir = im_and_ir.reshape(1, cropped_w, cropped_h, channels)
img = np.array(im_and_ir)
# Parse numbers as floats
img = img.astype('float32')
# Normalize data : remove average then devide by standard deviation
img = img / 65535
sample = img
row, col = cropped_image_offsets[i]
t2 = time.perf_counter()
print('image channeling : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
denoised_image = model.predict(sample)
t2 = time.perf_counter()
print('prediction only : ', t2 - t1, 'seconds')
row_end = row + cropped_h
col_end = col + cropped_w
denoised_row = cropped_h
denoised_col = cropped_w
if row + cropped_h >= height:
row_end = height - 1
denoised_row = abs(row - row_end)
if col + cropped_w >= width:
col_end = width - 1
denoised_col = abs(col - col_end)
# combine tested images
whole_image[row:row_end, col:col_end] = denoised_image[:, 0:denoised_row, 0:denoised_col, :]
return whole_image[:, :, 0]
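# Offline usage sketch for predict() (file names and dtypes are illustrative
# assumptions):
#   depth = np.load('depth_frame.npy')   # uint16, shape (480, 848)
#   ir = np.load('ir_frame.npy')         # uint8,  shape (480, 848)
#   denoised = predict(depth, ir)        # float32, shape (480, 848)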
#=============================================================================================================
def convert_image(i):
m = np.min(i)
M = np.max(i)
    i = np.divide(i, np.array([M - m], dtype=np.float64)).astype(np.float64)
    i = (i - m).astype(np.float64)
i8 = (i * 255.0).astype(np.uint8)
if i8.ndim == 3:
i8 = cv2.cvtColor(i8, cv2.COLOR_BGRA2GRAY)
i8 = cv2.equalizeHist(i8)
colorized = cv2.applyColorMap(i8, cv2.COLORMAP_JET)
colorized[i8 == int(m)] = 0
font = cv2.FONT_HERSHEY_SIMPLEX
m = float("{:.2f}".format(m))
M = float("{:.2f}".format(M))
colorized = cv2.putText(colorized, str(m) + " .. " + str(M) + "[m]", (20, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
return colorized
try:
c = rs.colorizer()
while True:
print("==============================================================")
t0 = time.perf_counter()
# Wait for a coherent pair of frames: depth and ir
t1 = time.perf_counter()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
ir_frame = frames.get_infrared_frame()
t2 = time.perf_counter()
print('getting depth + ir frames : ', t2 - t1, 'seconds')
if not depth_frame or not ir_frame:
continue
# Convert images to numpy arrays
t1 = time.perf_counter()
depth_image = np.asanyarray(depth_frame.get_data())
ir_image = np.asanyarray(ir_frame.get_data())
t2 = time.perf_counter()
print('convert frames to numpy arrays : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
predicted_image = predict(depth_image, ir_image)
t2 = time.perf_counter()
print('processing + prediction : ', t2 - t1, 'seconds')
# Stack both images horizontally
# depth_image = convert_image(depth_image)
t1 = time.perf_counter()
depth_image = np.asanyarray(c.process(depth_frame).get_data())
predicted_image = convert_image(predicted_image)
red = depth_image[:, :, 2].copy()
blue = depth_image[:, :, 0].copy()
depth_image[:, :, 0] = red
depth_image[:, :, 2] = blue
images = np.hstack((depth_image, predicted_image))
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
cv2.waitKey(1)
t2 = time.perf_counter()
print('show image : ', t2 - t1, 'seconds')
print('TOTAL TIME : ', t2 - t0, 'seconds')
finally:
# Stop streaming
pipeline.stop()
|
test/test_jump.py | mind-owner/Cyberbrain | 2,440 | 12786673 | from cyberbrain import Binding, InitialValue, Symbol
def test_jump(tracer, check_golden_file):
a = []
b = "b"
c = "c"
tracer.start()
if a: # POP_JUMP_IF_FALSE
pass # JUMP_FORWARD
else:
x = 1
if not a: # POP_JUMP_IF_TRUE
x = 2
x = a != b != c # JUMP_IF_FALSE_OR_POP
x = a == b or c # JUMP_IF_TRUE_OR_POP
    # TODO: Test JUMP_ABSOLUTE. This requires loop instructions to be implemented.
tracer.stop()
|
experiments/ukf_baseball.py | VladPodilnyk/Kalman-and-Bayesian-Filters-in-Python | 12,315 | 12786680 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 8 09:55:24 2015
@author: rlabbe
"""
from math import radians, sin, cos, sqrt, exp, atan2, radians
from numpy import array, asarray
from numpy.random import randn
import numpy as np
import math
import matplotlib.pyplot as plt
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.common import runge_kutta4
class BaseballPath(object):
def __init__(self, x0, y0, launch_angle_deg, velocity_ms,
noise=(1.0,1.0)):
""" Create 2D baseball path object
(x = distance from start point in ground plane,
y=height above ground)
x0,y0 initial position
           launch_angle_deg angle ball is travelling relative to
                            ground plane
           velocity_ms speed of ball in meters/second
noise amount of noise to add to each position
in (x,y)
"""
omega = radians(launch_angle_deg)
self.v_x = velocity_ms * cos(omega)
self.v_y = velocity_ms * sin(omega)
self.x = x0
self.y = y0
self.noise = noise
def drag_force(self, velocity):
""" Returns the force on a baseball due to air drag at
the specified velocity. Units are SI
"""
B_m = 0.0039 + 0.0058 / (1. + exp((velocity-35.)/5.))
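        # Quick sanity check (illustrative): at velocity = 35 m/s the
        # logistic term equals 1/2, so B_m = 0.0039 + 0.0029 = 0.0068 and
        # the returned drag is about 0.0068 * 35 ≈ 0.24.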
return B_m * velocity
def update(self, dt, vel_wind=0.):
""" compute the ball position based on the specified time
step and wind velocity. Returns (x,y) position tuple
"""
# Euler equations for x and y
self.x += self.v_x*dt
self.y += self.v_y*dt
# force due to air drag
v_x_wind = self.v_x - vel_wind
v = sqrt(v_x_wind**2 + self.v_y**2)
F = self.drag_force(v)
# Euler's equations for velocity
self.v_x = self.v_x - F*v_x_wind*dt
self.v_y = self.v_y - 9.81*dt - F*self.v_y*dt
return (self.x, self.y)
radar_pos = (100,0)
omega = 45.
def radar_sense(baseball, noise_rng, noise_brg):
x, y = baseball.x, baseball.y
rx, ry = radar_pos[0], radar_pos[1]
rng = ((x-rx)**2 + (y-ry)**2) ** .5
bearing = atan2(y-ry, x-rx)
rng += randn() * noise_rng
bearing += radians(randn() * noise_brg)
return (rng, bearing)
ball = BaseballPath(x0=0, y0=1, launch_angle_deg=45,
velocity_ms=60, noise=[0,0])
'''
xs = []
ys = []
dt = 0.05
y = 1
while y > 0:
x,y = ball.update(dt)
xs.append(x)
ys.append(y)
plt.plot(xs, ys)
plt.axis('equal')
plt.show()
'''
dt = 1/30.
def hx(x):
global radar_pos
dx = radar_pos[0] - x[0]
dy = radar_pos[1] - x[2]
rng = (dx*dx + dy*dy)**.5
bearing = atan2(-dy, -dx)
#print(x)
#print('hx:', rng, np.degrees(bearing))
return array([rng, bearing])
def fx(x, dt):
fx.ball.x = x[0]
fx.ball.y = x[2]
    fx.ball.v_x = x[1]
    fx.ball.v_y = x[3]
N = 10
ball_dt = dt/float(N)
for i in range(N):
fx.ball.update(ball_dt)
#print('fx', fx.ball.x, fx.ball.v_x, fx.ball.y, fx.ball.v_y)
return array([fx.ball.x, fx.ball.v_x, fx.ball.y, fx.ball.v_y])
fx.ball = BaseballPath(x0=0, y0=1, launch_angle_deg=45,
velocity_ms=60, noise=[0,0])
y = 1.
x = 0.
theta = 35. # launch angle
v0 = 50.
ball = BaseballPath(x0=x, y0=y, launch_angle_deg=theta,
velocity_ms=v0, noise=[.3,.3])
kf = UKF(dim_x=4, dim_z=2, dt=dt, hx=hx, fx=fx, kappa=0)
#kf.R *= r
kf.R[0,0] = 0.1
kf.R[1,1] = radians(0.2)
omega = radians(omega)
vx = cos(omega) * v0
vy = sin(omega) * v0
kf.x = array([x, vx, y, vy])
kf.R*= 0.01
#kf.R[1,1] = 0.01
kf.P *= 10
f1 = kf
t = 0
xs = []
ys = []
while y > 0:
t += dt
x,y = ball.update(dt)
z = radar_sense(ball, 0, 0)
#print('z', z)
#print('ball', ball.x, ball.v_x, ball.y, ball.v_y)
f1.predict()
f1.update(z)
xs.append(f1.x[0])
ys.append(f1.x[2])
f1.predict()
p1 = plt.scatter(x, y, color='r', marker='o', s=75, alpha=0.5)
p2, = plt.plot (xs, ys, lw=2, marker='o')
#p3, = plt.plot (xs2, ys2, lw=4)
#plt.legend([p1,p2, p3],
# ['Measurements', 'Kalman filter(R=0.5)', 'Kalman filter(R=10)'],
# loc='best', scatterpoints=1)
plt.show()
|
scaffold/generators/common.py | CaravelKit/saas-base | 189 | 12786683 | <reponame>CaravelKit/saas-base<filename>scaffold/generators/common.py
# Functions used all the generators
import os
# Check if file and path exist, if not, create them. Then rewrite file or add content at the
# beginning, commenting the existing part.
def create_write_file(file_path, new_content, rewrite = False, comment_start = '<!--', comment_end = '-->',
ignore_existing_files = False):
    file_param = 'r+'
    if os.path.exists(file_path):
        if ignore_existing_files:
            # Ignore existing file and return
            print('Ignore: ', file_path)
            return
    else:
        # 'r+' fails on a missing file, so use 'w+' to create it
        file_param = 'w+'
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise Exception('Path cannot be created, please try again.')
with open(file_path, file_param) as output_file:
if not rewrite:
output_file.seek(0)
content = output_file.read()
content = content.replace(comment_start, '').replace(comment_end, '')
content = comment_start + content
content += comment_end
content = new_content + content
else:
content = new_content
output_file.seek(0)
output_file.truncate()
output_file.write(content)
output_file.close()
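# Usage sketch (path and markup are hypothetical): prepends the new content
# while keeping the previous version commented out between '<!--' and '-->'.
#   create_write_file('app/templates/index.html', '<h1>v2</h1>\n',
#                     rewrite=False)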
|
toollib/__init__.py | atpuxiner/toollib | 113 | 12786744 | <filename>toollib/__init__.py
"""
@author axiner
@version v1.0.0
@created 2021/12/12 13:14
@abstract This is a tool library.
@description
@history
"""
from pathlib import Path
here = Path(__file__).absolute().parent
__version__ = '2022.05.11'
|
tests/test_regression.py | weninc/bitshuffle-1 | 162 | 12786746 | """
Test that data encoded with earlier versions can still be decoded correctly.
"""
from __future__ import absolute_import, division, print_function
import pathlib
import unittest
import numpy as np
import h5py
TEST_DATA_DIR = pathlib.Path(__file__).parent / "data"
OUT_FILE_TEMPLATE = "regression_%s.h5"
VERSIONS = [
"0.1.3",
]
class TestAll(unittest.TestCase):
def test_regression(self):
for version in VERSIONS:
file_name = TEST_DATA_DIR / (OUT_FILE_TEMPLATE % version)
f = h5py.File(file_name, "r")
g_orig = f["origional"]
g_comp = f["compressed"]
for dset_name in g_comp.keys():
self.assertTrue(np.all(g_comp[dset_name][:] == g_orig[dset_name][:]))
if __name__ == "__main__":
unittest.main()
|
tests/test_config.py | isidentical/unimport | 147 | 12786750 | <reponame>isidentical/unimport
import re
from pathlib import Path
from unittest import TestCase
from unimport import constants as C
from unimport import utils
from unimport.config import Config, DefaultConfig
TEST_DIR = Path(__file__).parent / "configs"
pyproject = TEST_DIR / "pyproject.toml"
setup_cfg = TEST_DIR / "setup.cfg"
no_unimport_pyproject = TEST_DIR / "no_unimport" / "pyproject.toml"
no_unimport_setup_cfg = TEST_DIR / "no_unimport" / "setup.cfg"
class ConfigTestCase(TestCase):
include = "test|test2|tests.py"
exclude = "__init__.py|tests/"
sources = [Path("path1"), Path("path2")]
def test_toml_parse(self):
config = Config(config_file=pyproject).parse()
self.assertEqual(self.include, config.include)
self.assertEqual(self.exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertFalse(config.remove)
self.assertTrue(config.diff)
self.assertTrue(config.ignore_init)
def test_cfg_parse(self):
config = Config(config_file=setup_cfg).parse()
self.assertEqual(self.include, config.include)
self.assertEqual(self.exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertFalse(config.remove)
self.assertTrue(config.diff)
self.assertTrue(config.ignore_init)
def test_cfg_merge(self):
config = Config(config_file=setup_cfg).parse()
console_configuration = {
"include": "tests|env",
"remove": True,
"diff": False,
"include_star_import": True,
}
gitignore_exclude = utils.get_exclude_list_from_gitignore()
exclude = "|".join(
[config.exclude] + gitignore_exclude + [C.INIT_FILE_IGNORE_REGEX]
)
config = config.merge(**console_configuration)
self.assertEqual("tests|env", config.include)
self.assertEqual(exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertTrue(config.remove)
self.assertFalse(config.diff)
self.assertTrue(config.ignore_init)
class DefaultCommandTestCase(TestCase):
def setUp(self):
self.config = DefaultConfig()
def test_there_is_no_command(self):
self.assertEqual(
self.config.merge(there_is_no_command=True), self.config.merge()
)
def test_same_with_default_config(self):
self.assertEqual(
self.config.merge(exclude=self.config.exclude).exclude,
self.config.merge().exclude,
)
def test_check(self):
self.assertTrue(self.config.merge().check)
self.assertTrue(self.config.merge(check=True).check)
self.assertTrue(self.config.merge(gitignore=True).check)
self.assertFalse(self.config.merge(diff=True).check)
self.assertFalse(self.config.merge(remove=True).check)
self.assertFalse(self.config.merge(permission=True).check)
def test_diff(self):
self.assertFalse(self.config.merge().diff)
self.assertFalse(self.config.merge(remove=True).diff)
self.assertTrue(self.config.merge(diff=True).diff)
self.assertTrue(self.config.merge(permission=True).diff)
class TomlCommandTestCase(TestCase):
def setUp(self):
self.config = Config(pyproject).parse()
self.exclude = "__init__.py|tests/"
def test_same_with_toml_config(self):
self.assertEqual(
self.config.merge(exclude=self.exclude).exclude,
self.config.merge().exclude,
)
def test_check(self):
self.assertTrue(self.config.merge(check=True).check)
self.assertTrue(self.config.merge(diff=False).check)
self.assertTrue(self.config.merge(diff=False, permission=False).check)
self.assertFalse(self.config.merge().check)
self.assertFalse(self.config.merge(gitignore=True).check)
self.assertFalse(self.config.merge(diff=True).check)
self.assertFalse(self.config.merge(remove=True).check)
self.assertFalse(self.config.merge(permission=True).check)
class NoUnimportSectionTestCase(TestCase):
def setUp(self):
self.default_config = DefaultConfig()
def test_toml_parse(self):
config = Config(config_file=no_unimport_pyproject).parse()
self.assertEqual(self.default_config.include, config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.remove)
self.assertFalse(config.diff)
self.assertFalse(config.ignore_init)
def test_cfg_parse(self):
config = Config(config_file=no_unimport_setup_cfg).parse()
self.assertEqual(self.default_config.include, config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.remove)
self.assertFalse(config.diff)
self.assertFalse(config.ignore_init)
def test_cfg_merge(self):
config = Config(config_file=no_unimport_setup_cfg).parse()
console_configuration = {
"include": "tests|env",
"remove": True,
"diff": False,
"include_star_import": True,
}
config = config.merge(**console_configuration)
self.assertEqual("tests|env", config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertTrue(config.remove)
self.assertTrue(config.include_star_import)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.diff)
class InitFileIgnoreRegexTestCase(TestCase):
exclude_regex = re.compile(C.INIT_FILE_IGNORE_REGEX)
def test_match(self):
self.assertIsNotNone(self.exclude_regex.search("path/to/__init__.py"))
self.assertIsNotNone(self.exclude_regex.search("to/__init__.py"))
self.assertIsNotNone(self.exclude_regex.search("__init__.py"))
def test_not_match(self):
self.assertIsNone(self.exclude_regex.search("path/to/_init_.py"))
self.assertIsNone(
self.exclude_regex.search("path/to/__init__/test.py")
)
self.assertIsNone(self.exclude_regex.search("__init__"))
self.assertIsNone(self.exclude_regex.search("__init__py"))
self.assertIsNone(self.exclude_regex.search("__init__bpy"))
|
ghostwriter/rolodex/migrations/0013_projectsubtask_marked_complete.py | bbhunter/Ghostwriter | 601 | 12786759 | # Generated by Django 3.0.10 on 2021-02-11 21:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rolodex', '0012_auto_20210211_1853'),
]
operations = [
migrations.AddField(
model_name='projectsubtask',
name='marked_complete',
field=models.DateField(blank=True, help_text='Date the task was marked complete', null=True, verbose_name='Marked Complete'),
),
]
|
public-engines/iris-h2o-automl/marvin_iris_h2o_automl/training/metrics_evaluator.py | guialba/incubator-marvin | 101 | 12786771 | #!/usr/bin/env python
# coding=utf-8
"""MetricsEvaluator engine action.
Use this module to add the project's main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseTraining
from ..model_serializer import ModelSerializer
__all__ = ['MetricsEvaluator']
logger = get_logger('metrics_evaluator')
class MetricsEvaluator(ModelSerializer, EngineBaseTraining):
def __init__(self, **kwargs):
super(MetricsEvaluator, self).__init__(**kwargs)
def execute(self, params, **kwargs):
import h2o
from sklearn import metrics
# h2o.init()
y_test = self.marvin_dataset['test_X']['Species']
self.marvin_dataset['test_X'].drop(columns='Species', inplace=True)
teste = h2o.H2OFrame.from_python(self.marvin_dataset['test_X'])
preds = self.marvin_model.predict(teste).as_data_frame()['predict'].values
self.marvin_metrics = metrics.accuracy_score(y_test, preds)
|
pyscf/x2c/test/test_x2c_grad.py | robert-anderson/pyscf | 501 | 12786788 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import reduce
import unittest
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import gto
from pyscf.x2c import sfx2c1e
from pyscf.x2c import sfx2c1e_grad
def _sqrt0(a):
w, v = scipy.linalg.eigh(a)
return numpy.dot(v*numpy.sqrt(w), v.conj().T)
def _invsqrt0(a):
w, v = scipy.linalg.eigh(a)
return numpy.dot(v/numpy.sqrt(w), v.conj().T)
def _sqrt1(a0, a1):
    '''Solve x^2 = a to first order'''
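    # To first order, (x0 + x1)^2 = a0 + a1 reduces to the Sylvester equation
    # x0*x1 + x1*x0 = a1; in the eigenbasis of a0 (eigenvalues w), where
    # x0 = diag(sqrt(w)), the solution is x1_ij = a1_ij / (sqrt(w_i) + sqrt(w_j)).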
w, v = scipy.linalg.eigh(a0)
w = numpy.sqrt(w)
a1 = reduce(numpy.dot, (v.conj().T, a1, v))
x1 = a1 / (w[:,None] + w)
x1 = reduce(numpy.dot, (v, x1, v.conj().T))
return x1
def _invsqrt1(a0, a1):
    '''Solve x^2 = a^{-1} to first order'''
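    # From (a0 + a1)^{-1} = a0^{-1} - a0^{-1} a1 a0^{-1} + O(a1^2), the first
    # order equation is x0*x1 + x1*x0 = -a0^{-1} a1 a0^{-1}; in the eigenbasis
    # of a0 (eigenvalues w) the solution is
    # x1_ij = -a1_ij / (sqrt(w_i * w_j) * (sqrt(w_i) + sqrt(w_j))).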
w, v = scipy.linalg.eigh(a0)
w = 1./numpy.sqrt(w)
a1 = -reduce(numpy.dot, (v.conj().T, a1, v))
x1 = numpy.einsum('i,ij,j->ij', w**2, a1, w**2) / (w[:,None] + w)
x1 = reduce(numpy.dot, (v, x1, v.conj().T))
return x1
def get_R(mol):
s0 = mol.intor('int1e_ovlp')
t0 = mol.intor('int1e_kin')
s0sqrt = _sqrt0(s0)
s0invsqrt = _invsqrt0(s0)
x0 = get_x0(mol)
c = lib.param.LIGHT_SPEED
stild = s0 + reduce(numpy.dot, (x0.T, t0*(.5/c**2), x0))
R = _invsqrt0(reduce(numpy.dot, (s0invsqrt, stild, s0invsqrt)))
R = reduce(numpy.dot, (s0invsqrt, R, s0sqrt))
return R
def get_r1(mol, atm_id, pos):
# See JCP 135 084114, Eq (34)
c = lib.param.LIGHT_SPEED
aoslices = mol.aoslice_by_atom()
ish0, ish1, p0, p1 = aoslices[atm_id]
s0 = mol.intor('int1e_ovlp')
t0 = mol.intor('int1e_kin')
s1all = mol.intor('int1e_ipovlp', comp=3)
t1all = mol.intor('int1e_ipkin', comp=3)
s1 = numpy.zeros_like(s0)
t1 = numpy.zeros_like(t0)
s1[p0:p1,:] =-s1all[pos][p0:p1]
s1[:,p0:p1] -= s1all[pos][p0:p1].T
t1[p0:p1,:] =-t1all[pos][p0:p1]
t1[:,p0:p1] -= t1all[pos][p0:p1].T
x0 = get_x0(mol)
x1 = get_x1(mol, atm_id)[pos]
sa0 = s0 + reduce(numpy.dot, (x0.T, t0*(.5/c**2), x0))
sa1 = s1 + reduce(numpy.dot, (x0.T, t1*(.5/c**2), x0))
sa1+= reduce(numpy.dot, (x1.T, t0*(.5/c**2), x0))
sa1+= reduce(numpy.dot, (x0.T, t0*(.5/c**2), x1))
s0_sqrt = _sqrt0(s0)
s0_invsqrt = _invsqrt0(s0)
s1_sqrt = _sqrt1(s0, s1)
s1_invsqrt = _invsqrt1(s0, s1)
R0_part = reduce(numpy.dot, (s0_invsqrt, sa0, s0_invsqrt))
R1_part = (reduce(numpy.dot, (s0_invsqrt, sa1, s0_invsqrt)) +
reduce(numpy.dot, (s1_invsqrt, sa0, s0_invsqrt)) +
reduce(numpy.dot, (s0_invsqrt, sa0, s1_invsqrt)))
R1 = reduce(numpy.dot, (s0_invsqrt, _invsqrt1(R0_part, R1_part), s0_sqrt))
R1 += reduce(numpy.dot, (s1_invsqrt, _invsqrt0(R0_part), s0_sqrt))
R1 += reduce(numpy.dot, (s0_invsqrt, _invsqrt0(R0_part), s1_sqrt))
return R1
def get_h0_s0(mol):
s = mol.intor_symmetric('int1e_ovlp')
t = mol.intor_symmetric('int1e_kin')
v = mol.intor_symmetric('int1e_nuc')
w = mol.intor_symmetric('int1e_pnucp')
nao = s.shape[0]
n2 = nao * 2
h = numpy.zeros((n2,n2), dtype=v.dtype)
m = numpy.zeros((n2,n2), dtype=v.dtype)
c = lib.param.LIGHT_SPEED
h[:nao,:nao] = v
h[:nao,nao:] = t
h[nao:,:nao] = t
h[nao:,nao:] = w * (.25/c**2) - t
m[:nao,:nao] = s
m[nao:,nao:] = t * (.5/c**2)
return h, m
def get_h1_s1(mol, ia):
aoslices = mol.aoslice_by_atom()
    ish0, ish1, p0, p1 = aoslices[ia]
nao = mol.nao_nr()
s1 = mol.intor('int1e_ipovlp', comp=3)
t1 = mol.intor('int1e_ipkin', comp=3)
v1 = mol.intor('int1e_ipnuc', comp=3)
w1 = mol.intor('int1e_ipspnucsp', comp=12).reshape(3,4,nao,nao)[:,3]
with mol.with_rinv_origin(mol.atom_coord(ia)):
rinv1 = -8*mol.intor('int1e_iprinv', comp=3)
prinvp1 = -8*mol.intor('int1e_ipsprinvsp', comp=12).reshape(3,4,nao,nao)[:,3]
n2 = nao * 2
h = numpy.zeros((3,n2,n2), dtype=v1.dtype)
m = numpy.zeros((3,n2,n2), dtype=v1.dtype)
rinv1[:,p0:p1,:] -= v1[:,p0:p1]
rinv1 = rinv1 + rinv1.transpose(0,2,1).conj()
prinvp1[:,p0:p1,:] -= w1[:,p0:p1]
prinvp1 = prinvp1 + prinvp1.transpose(0,2,1).conj()
s1ao = numpy.zeros_like(s1)
t1ao = numpy.zeros_like(t1)
s1ao[:,p0:p1,:] = -s1[:,p0:p1]
s1ao[:,:,p0:p1]+= -s1[:,p0:p1].transpose(0,2,1)
t1ao[:,p0:p1,:] = -t1[:,p0:p1]
t1ao[:,:,p0:p1]+= -t1[:,p0:p1].transpose(0,2,1)
c = lib.param.LIGHT_SPEED
h[:,:nao,:nao] = rinv1
h[:,:nao,nao:] = t1ao
h[:,nao:,:nao] = t1ao
h[:,nao:,nao:] = prinvp1 * (.25/c**2) - t1ao
m[:,:nao,:nao] = s1ao
m[:,nao:,nao:] = t1ao * (.5/c**2)
return h, m
def get_x0(mol):
c = lib.param.LIGHT_SPEED
h0, s0 = get_h0_s0(mol)
e, c = scipy.linalg.eigh(h0, s0)
nao = mol.nao_nr()
cl = c[:nao,nao:]
cs = c[nao:,nao:]
x0 = scipy.linalg.solve(cl.T, cs.T).T
return x0
def get_x1(mol, ia):
h0, s0 = get_h0_s0(mol)
h1, s1 = get_h1_s1(mol, ia)
e0, c0 = scipy.linalg.eigh(h0, s0)
nao = mol.nao_nr()
cl0 = c0[:nao,nao:]
cs0 = c0[nao:,nao:]
x0 = scipy.linalg.solve(cl0.T, cs0.T).T
h1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), h1, c0[:,nao:])
s1 = numpy.einsum('pi,xpq,qj->xij', c0.conj(), s1, c0[:,nao:])
epi = e0[:,None] - e0[nao:]
degen_mask = abs(epi) < 1e-7
epi[degen_mask] = 1e200
c1 = (h1 - s1 * e0[nao:]) / -epi
c1[:,degen_mask] = -.5 * s1[:,degen_mask]
c1 = numpy.einsum('pq,xqi->xpi', c0, c1)
cl1 = c1[:,:nao]
cs1 = c1[:,nao:]
x1 = [scipy.linalg.solve(cl0.T, (cs1[i] - x0.dot(cl1[i])).T).T
for i in range(3)]
return numpy.asarray(x1)
mol1 = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
mol2 = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. ,-0.0001)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
mol = gto.M(
verbose = 0,
atom = [["O" , (0. , 0. , 0. )],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]],
basis = '3-21g',
)
class KnownValues(unittest.TestCase):
def test_x1(self):
with lib.light_speed(10) as c:
x_1 = get_x0(mol1)
x_2 = get_x0(mol2)
x1_ref = (x_1 - x_2) / 0.0002 * lib.param.BOHR
x1t = get_x1(mol, 0)
self.assertAlmostEqual(abs(x1t[2]-x1_ref).max(), 0, 7)
x0 = get_x0(mol)
h0, s0 = get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0)
x1 = get_h1_etc(0)[4]
self.assertAlmostEqual(abs(x1-x1t).max(), 0, 9)
def test_R1(self):
with lib.light_speed(10) as c:
R_1 = get_R(mol1)
R_2 = get_R(mol2)
R1_ref = (R_1 - R_2) / 0.0002 * lib.param.BOHR
R1t = get_r1(mol, 0, 2)
self.assertAlmostEqual(abs(R1t-R1_ref).max(), 0, 7)
x0 = get_x0(mol)
h0, s0 = get_h0_s0(mol)
e0, c0 = scipy.linalg.eigh(h0, s0)
get_h1_etc = sfx2c1e_grad._gen_first_order_quantities(mol, e0, c0, x0)
R1 = get_h1_etc(0)[6][2]
self.assertAlmostEqual(abs(R1-R1t).max(), 0, 9)
def test_hfw(self):
with lib.light_speed(10) as c:
x2c_1 = sfx2c1e.SpinFreeX2C(mol1)
x2c_2 = sfx2c1e.SpinFreeX2C(mol2)
x2cobj = sfx2c1e.SpinFreeX2C(mol)
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
x2c_1.xuncontract = 0
x2c_2.xuncontract = 0
x2cobj.xuncontract =0
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
x2c_1.xuncontract = 1
x2c_2.xuncontract = 1
            x2cobj.xuncontract = 1
x2c_1.approx = 'ATOM1E'
x2c_2.approx = 'ATOM1E'
x2cobj.approx = 'ATOM1E'
fh_ref = (x2c_1.get_hcore() - x2c_2.get_hcore()) / 0.0002 * lib.param.BOHR
fh = x2cobj.hcore_deriv_generator(deriv=1)
self.assertAlmostEqual(abs(fh(0)[2] - fh_ref).max(), 0, 7)
if __name__ == "__main__":
print("Full Tests for sfx2c1e gradients")
unittest.main()
|
vdb/extensions/arm.py | wisdark/vivisect | 716 | 12786801 | <reponame>wisdark/vivisect<gh_stars>100-1000
import envi
import envi.cli as e_cli
import envi.common as e_common
import envi.archs.arm.disasm as e_arm
import envi.archs.arm.regs as e_arm_regs
import envi.archs.thumb16.disasm as e_thumb
def armdis(db, line):
'''
    Disassemble ARM instructions from the given address.
Usage: armdis <addr_exp>
'''
disasmobj = e_arm.ArmDisasm()
    armthumbdis(db, line, disasmobj)
def thumbdis(db, line):
'''
    Disassemble Thumb instructions from the given address.
Usage: thumbdis <addr_exp>
'''
disasmobj = e_thumb.ThumbDisasm()
    armthumbdis(db, line, disasmobj)
def armthumbdis(db, line, disasmobj):
'''
    Core disassembly routine, shared for code reuse. The only difference is
    the object actually doing the disassembly.
'''
t = db.getTrace()
argv = e_cli.splitargs(line)
size = 20
argc = len(argv)
if argc == 0:
addr = t.getProgramCounter()
else:
addr = t.parseExpression(argv[0])
if argc > 1:
size = t.parseExpression(argv[1])
bytez = t.readMemory(addr, size)
offset = 0
    db.vprint("Disassembly:")
while offset < size:
va = addr + offset
op = disasmobj.disasm(bytez, offset, va)
obytez = bytez[offset:offset+len(op)]
db.canvas.addVaText('0x%.8x' % va, va=va)
db.canvas.addText(": %s " % e_common.hexify(obytez).ljust(17))
op.render(db.canvas)
db.canvas.addText("\n")
offset += len(op)
def togglethumb(db, line):
'''
Toggle Thumb Mode
'''
t = db.getTrace()
cur_t = t.getRegister(e_arm_regs.REG_T)
new_t = not cur_t
arch = (envi.ARCH_ARMV7, envi.ARCH_THUMB)[new_t]
t.setRegister(e_arm_regs.REG_T, new_t)
db.canvas.addText("Toggled Thumb Mode: %r\n" % new_t)
def vdbExtension(vdb, trace):
vdb.addCmdAlias('db', 'mem -F bytes')
vdb.addCmdAlias('dw', 'mem -F u_int_16')
vdb.addCmdAlias('dd', 'mem -F u_int_32')
vdb.addCmdAlias('dq', 'mem -F u_int_64')
vdb.addCmdAlias('dr', 'mem -F "Deref View"')
vdb.addCmdAlias('ds', 'mem -F "Symbols View"')
vdb.registerCmdExtension(armdis)
vdb.registerCmdExtension(thumbdis)
vdb.registerCmdExtension(togglethumb)
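# Example session at the vdb prompt (illustrative; address and size are
# hypothetical):
#   vdb > armdis 0x00008000 64
#   vdb > thumbdis 0x00008000 64
#   vdb > togglethumb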
|
xmodaler/engine/rl_trainer.py | YehLi/xmodaler | 830 | 12786804 | # Copyright 2021 JD.com, Inc., JD AI
"""
@author: <NAME>
@contact: <EMAIL>
"""
import time
import copy
import torch
from .defaults import DefaultTrainer
from xmodaler.scorer import build_scorer
from xmodaler.config import kfg
from xmodaler.losses import build_rl_losses
import xmodaler.utils.comm as comm
from .build import ENGINE_REGISTRY
__all__ = ['RLTrainer']
@ENGINE_REGISTRY.register()
class RLTrainer(DefaultTrainer):
def __init__(self, cfg):
super(RLTrainer, self).__init__(cfg)
self.scorer = self.build_scorer(cfg)
self.losses = build_rl_losses(cfg)
@classmethod
def build_scorer(cls, cfg):
return build_scorer(cfg)
def run_step(self):
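        # Self-critical sequence training (SCST): score a greedy baseline
        # decode first, then reward the sampled decode by its advantage
        # (sample reward minus baseline reward).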
start = time.perf_counter()
try:
data = next(self._train_data_loader_iter)
except StopIteration:
self._train_data_loader_iter = iter(self.train_data_loader)
data = next(self._train_data_loader_iter)
data_time = time.perf_counter() - start
data = comm.unwrap_model(self.model).preprocess_batch(data)
self.model.eval()
with torch.no_grad():
bs_data = copy.copy(data)
bs_outputs_dict = self.model(bs_data, use_beam_search=False, output_sents=False)
bs_rewards = self.scorer(bs_outputs_dict)
self.model.train()
data[kfg.DECODE_BY_SAMPLE] = True
outputs_dict = self.model(data, use_beam_search=False, output_sents=False)
rewards = self.scorer(outputs_dict)
rewards = torch.from_numpy(rewards[kfg.REWARDS] - bs_rewards[kfg.REWARDS]).float().cuda()
outputs_dict.update({ kfg.REWARDS: rewards })
losses_dict = {}
for loss in self.losses:
loss_dict = loss(outputs_dict)
losses_dict.update(loss_dict)
losses = sum(losses_dict.values())
self.optimizer.zero_grad()
losses.backward()
bs_rewards.pop(kfg.REWARDS)
losses_dict.update(bs_rewards)
self._write_metrics(losses_dict, data_time)
self.optimizer.step() |
manifold_flow/flows/flow.py | selflein/manifold-flow | 199 | 12786819 | <reponame>selflein/manifold-flow
import logging
from manifold_flow.utils.various import product
from manifold_flow import distributions
from manifold_flow.flows import BaseFlow
logger = logging.getLogger(__name__)
class Flow(BaseFlow):
""" Ambient normalizing flow (AF) """
def __init__(self, data_dim, transform):
super(Flow, self).__init__()
self.data_dim = data_dim
self.latent_dim = data_dim
self.total_data_dim = product(data_dim)
self.total_latent_dim = product(self.latent_dim)
self.latent_distribution = distributions.StandardNormal((self.total_latent_dim,))
self.transform = transform
self._report_model_parameters()
def forward(self, x, context=None):
""" Transforms data point to latent space, evaluates log likelihood """
# Encode
u, log_det = self._encode(x, context=context)
# Decode
x = self.decode(u, context=context)
# Log prob
log_prob = self.latent_distribution._log_prob(u, context=None)
log_prob = log_prob + log_det
return x, log_prob, u
def encode(self, x, context=None):
""" Encodes data point to latent space """
u, _ = self._encode(x, context=context)
return u
def decode(self, u, context=None):
""" Encodes data point to latent space """
x, _ = self.transform.inverse(u, context=context)
return x
def log_prob(self, x, context=None):
""" Evaluates log likelihood """
# Encode
u, log_det = self._encode(x, context)
# Log prob
log_prob = self.latent_distribution._log_prob(u, context=None)
log_prob = log_prob + log_det
return log_prob
def sample(self, u=None, n=1, context=None):
""" Generates samples from model """
if u is None:
u = self.latent_distribution.sample(n, context=None)
x = self.decode(u, context=context)
return x
def _encode(self, x, context=None):
u, log_det = self.transform(x, context=context)
return u, log_det
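# Minimal usage sketch (illustrative; assumes `my_transform` follows the
# nflows-style Transform API, returning (output, logabsdet) from both
# __call__ and inverse):
#
#   flow = Flow(data_dim=8, transform=my_transform)
#   x, log_prob, u = flow(torch.randn(16, 8))
#   samples = flow.sample(n=4)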
|
glue/algorithms/square.py | glensc/glue | 514 | 12786832 | import copy
class SquareAlgorithmNode(object):
def __init__(self, x=0, y=0, width=0, height=0, used=False,
down=None, right=None):
"""Node constructor.
:param x: X coordinate.
:param y: Y coordinate.
:param width: Image width.
:param height: Image height.
:param used: Flag to determine if the node is used.
:param down: Down :class:`~Node`.
        :param right: Right :class:`~Node`.
"""
self.x = x
self.y = y
self.width = width
self.height = height
self.used = used
self.right = right
self.down = down
def find(self, node, width, height):
"""Find a node to allocate this image size (width, height).
:param node: Node to search in.
        :param width: Width of the image to allocate.
        :param height: Height of the image to allocate.
"""
if node.used:
return self.find(node.right, width, height) or self.find(node.down, width, height)
elif node.width >= width and node.height >= height:
return node
return None
def grow(self, width, height):
""" Grow the canvas to the most appropriate direction.
        :param width: Width of the image to fit after growing.
        :param height: Height of the image to fit after growing.
"""
can_grow_d = width <= self.width
can_grow_r = height <= self.height
should_grow_r = can_grow_r and self.height >= (self.width + width)
should_grow_d = can_grow_d and self.width >= (self.height + height)
if should_grow_r:
return self.grow_right(width, height)
elif should_grow_d:
return self.grow_down(width, height)
elif can_grow_r:
return self.grow_right(width, height)
elif can_grow_d:
return self.grow_down(width, height)
return None
def grow_right(self, width, height):
"""Grow the canvas to the right.
        :param width: Pixels to grow right (width).
        :param height: Height of the image to fit.
"""
old_self = copy.copy(self)
self.used = True
self.x = self.y = 0
self.width += width
self.down = old_self
self.right = SquareAlgorithmNode(x=old_self.width,
y=0,
width=width,
height=self.height)
node = self.find(self, width, height)
if node:
return self.split(node, width, height)
return None
def grow_down(self, width, height):
"""Grow the canvas down.
        :param width: Width of the image to fit.
        :param height: Pixels to grow down (height).
"""
old_self = copy.copy(self)
self.used = True
self.x = self.y = 0
self.height += height
self.right = old_self
self.down = SquareAlgorithmNode(x=0,
y=old_self.height,
width=self.width,
height=height)
node = self.find(self, width, height)
if node:
return self.split(node, width, height)
return None
def split(self, node, width, height):
"""Split the node to allocate a new one of this size.
:param node: Node to be splitted.
:param width: New node width.
:param height: New node height.
"""
node.used = True
node.down = SquareAlgorithmNode(x=node.x,
y=node.y + height,
width=node.width,
height=node.height - height)
node.right = SquareAlgorithmNode(x=node.x + width,
y=node.y,
width=node.width - width,
height=height)
return node
class SquareAlgorithm(object):
def process(self, sprite):
root = SquareAlgorithmNode(width=sprite.images[0].absolute_width,
height=sprite.images[0].absolute_height)
        # Loop over all the images, building a binary tree
for image in sprite.images:
node = root.find(root, image.absolute_width, image.absolute_height)
if node: # Use this node
node = root.split(node, image.absolute_width, image.absolute_height)
else: # Grow the canvas
node = root.grow(image.absolute_width, image.absolute_height)
image.x = node.x
image.y = node.y
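# Usage sketch (illustrative; `sprite` is assumed to expose an `images` list
# whose items provide absolute_width/absolute_height and writable x/y):
#
#   SquareAlgorithm().process(sprite)
#   positions = [(img.x, img.y) for img in sprite.images]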
|
Greedy/045. Jump Game II.py | beckswu/Leetcode | 138 | 12786861 |
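# Greedy / implicit BFS: indices reachable within k jumps form one "layer";
# each time the scan passes the current layer's right edge, one more jump is
# needed. All of the variants below run in O(n) time and O(1) extra space.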
class Solution:
def jump(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
n = len(nums)
if n<2: return 0
step, reach, next = 0, 0, 0
for i, v in enumerate(nums):
if i == reach:
reach = max(next, i+v)
step += 1
if reach >= n-1: break
next = nums[reach] + reach
else:
next = max(next, i+v)
return step
class Solution:
def jump(self, nums):
n = len(nums)
step, end, next = 0, 0, 0
for i, v in enumerate(nums[:-1]):
next = max(next, i + v)
if i == end:
step += 1
end = next
return step
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, start, end, step = len(nums), 0, 0, 0
while end < n - 1:
step += 1
maxend = end + 1
for i in range(start, end + 1):
if i + nums[i] >= n - 1:
return step
maxend = max(maxend, i + nums[i])
start, end = end + 1, maxend
return step
class Solution:
# @param {integer[]} nums
# @return {integer}
def jump(self, nums):
n, cur_max, next_max, steps = len(nums), 0, 0, 0
for i in range(n):
            if i > cur_max:
                steps += 1
                cur_max = next_max
                if cur_max >= n:
                    break
            next_max = max(next_max, nums[i] + i)
        return steps
class Solution:
def jump(self, nums: List[int]) -> int:
if len(nums) <= 1: return 0
l, r = 0, nums[0]
times = 1
while r < len(nums) - 1:
times += 1
nxt = max(i + nums[i] for i in range(l, r + 1))
l, r = r, nxt
return times |
examples/eventTester.py | tgolsson/appJar | 666 | 12786863 | <filename>examples/eventTester.py
import sys
sys.path.append("../")
from appJar import gui
def press(btn):
print("default:", btn)
if btn == "writing":
app.setTextArea("t1", "some writing")
elif btn == "writing2":
app.setTextArea("t2", "some writing")
elif btn == "get":
print(app.getTextArea("t1"))
elif btn == "get2":
print(app.getTextArea("t2"))
elif btn == "log":
app.logTextArea("t1")
elif btn == "log2":
app.logTextArea("t2")
elif btn == "check":
print(app.textAreaChanged("t1"))
elif btn == "check2":
print(app.textAreaChanged("t2"))
def sub(btn): print("submit ", btn)
def chng(btn):
print("change ", btn)
if btn in ["t1", "t2"]: print(app.getTextArea(btn))
app = gui("Event Tester")
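# The labels, entries and text areas below register change/submit callbacks;
# the buttons at the bottom exercise the TextArea get/set/log/changed
# helpers through press().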
app.addLabel("l1", "click me", 0, 0)
app.setLabelChangeFunction("l1", press)
app.addLabel("l2", "click me", 0, 1)
app.setLabelSubmitFunction("l2", press)
app.addEntry("e1", 1, 0, 2)
app.setEntrySubmitFunction("e1", sub)
app.setEntryChangeFunction("e1", chng)
app.addTextArea("t1", 2, 0)
app.setTextAreaSubmitFunction("t1", sub)
app.setTextAreaChangeFunction("t1", chng)
app.addScrolledTextArea("t2", 2, 1)
app.setTextAreaSubmitFunction("t2", sub)
app.setTextAreaChangeFunction("t2", chng)
app.addButton("writing", press, 3, 0)
app.addButton("writing2", press, 3, 1)
app.addButton("get", press, 4, 0)
app.addButton("get2", press, 4, 1)
app.addButton("log", press, 5, 0)
app.addButton("log2", press, 5, 1)
app.addButton("check", press, 6, 0)
app.addButton("check2", press, 6, 1)
app.go()
|
pymagnitude/third_party/allennlp/tests/data/token_indexers/dep_label_indexer_test.py | tpeng/magnitude | 1,520 | 12786878 | <gh_stars>1000+
# pylint: disable=no-self-use,invalid-name
from __future__ import absolute_import
from collections import defaultdict
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import DepLabelIndexer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
class TestDepLabelIndexer(AllenNlpTestCase):
def setUp(self):
super(TestDepLabelIndexer, self).setUp()
self.tokenizer = SpacyWordSplitter(parse=True)
    def test_count_vocab_items_uses_dep_labels(self):
tokens = self.tokenizer.split_words(u"This is a sentence.")
tokens = [Token(u"<S>")] + [t for t in tokens] + [Token(u"</S>")]
indexer = DepLabelIndexer()
counter = defaultdict(lambda: defaultdict(int))
for token in tokens:
indexer.count_vocab_items(token, counter)
assert counter[u"dep_labels"] == {u"ROOT": 1, u"nsubj": 1,
u"det": 1, u"NONE": 2, u"attr": 1, u"punct": 1}
    def test_tokens_to_indices_uses_dep_labels(self):
tokens = self.tokenizer.split_words(u"This is a sentence.")
tokens = [t for t in tokens] + [Token(u"</S>")]
vocab = Vocabulary()
root_index = vocab.add_token_to_namespace(u'ROOT', namespace=u'dep_labels')
none_index = vocab.add_token_to_namespace(u'NONE', namespace=u'dep_labels')
indexer = DepLabelIndexer()
assert indexer.tokens_to_indices([tokens[1]], vocab, u"tokens1") == {u"tokens1": [root_index]}
assert indexer.tokens_to_indices([tokens[-1]], vocab, u"tokens-1") == {u"tokens-1": [none_index]}
def test_padding_functions(self):
indexer = DepLabelIndexer()
assert indexer.get_padding_token() == 0
assert indexer.get_padding_lengths(0) == {}
def test_as_array_produces_token_sequence(self):
indexer = DepLabelIndexer()
padded_tokens = indexer.pad_token_sequence({u'key': [1, 2, 3, 4, 5]}, {u'key': 10}, {})
assert padded_tokens == {u'key': [1, 2, 3, 4, 5, 0, 0, 0, 0, 0]}
|
examples/layout_form.py | pzahemszky/guizero | 320 | 12786913 | from guizero import App, Text, TextBox, Combo, PushButton, Box
app = App()
Text(app, text="My form")
form = Box(app, width="fill", layout="grid")
form.border = True
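# With layout="grid", each child widget is positioned by grid=[column, row].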
Text(form, text="Title", grid=[0,0], align="right")
TextBox(form, grid=[1,0])
Text(form, text="Name", grid=[0,1], align="right")
TextBox(form, grid=[1,1])
Text(form, text="Age", grid=[0,2], align="right")
TextBox(form, grid=[1,2])
buttons = Box(app, width="fill", align="bottom")
PushButton(buttons, text="Ok", align="left")
PushButton(buttons, text="Cancel", align="left")
app.display() |
api/tests/unit/telemetry/test_unit_telemetry_serializers.py | mevinbabuc/flagsmith | 1,259 | 12786937 | <reponame>mevinbabuc/flagsmith<filename>api/tests/unit/telemetry/test_unit_telemetry_serializers.py
from unittest import mock
from django.test import override_settings
from telemetry.serializers import TelemetrySerializer
from tests.unit.telemetry.helpers import get_example_telemetry_data
@override_settings(INFLUXDB_TOKEN="<PASSWORD>")
@mock.patch("telemetry.serializers.get_ip_address_from_request")
@mock.patch("telemetry.serializers.InfluxDBWrapper")
def test_telemetry_serializer_save(MockInfluxDBWrapper, mock_get_ip_address):
# Given
data = get_example_telemetry_data()
serializer = TelemetrySerializer(data=data, context={"request": mock.MagicMock()})
mock_wrapper = mock.MagicMock()
MockInfluxDBWrapper.return_value = mock_wrapper
ip_address = "127.0.0.1"
mock_get_ip_address.return_value = ip_address
# When
serializer.is_valid() # must be called to access validated data
serializer.save()
# Then
mock_wrapper.add_data_point.assert_called_once_with(
"heartbeat", 1, tags={**data, "ip_address": ip_address}
)
mock_wrapper.write.assert_called_once()
|
fetch_cord/computer/gpu/Gpu_interface.py | TabulateJarl8/FetchCord | 286 | 12786938 | # from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import List, TypeVar, Dict
from ..Peripheral_interface import Peripherical_interface
class Gpu_interface(Peripherical_interface, metaclass=ABCMeta):
_vendor: str
_model: str
@property
def vendor(self) -> str:
return self._vendor
@vendor.setter
def vendor(self, value: str):
self._vendor = value
@property
def model(self) -> str:
return self._model
@model.setter
@abstractmethod
def model(self, value: str):
raise NotImplementedError
@property
def temp(self) -> float:
        # get_temp() is abstract; a NotImplementedError from a subclass that
        # does not provide it simply propagates to the caller.
        self._temp = self.get_temp()
        return self._temp
@temp.setter
def temp(self, value: float):
self._temp = value
def __init__(self, os, vendor, model):
super().__init__(os)
self.vendor = vendor
self.model = model
@abstractmethod
def get_temp(self) -> float:
raise NotImplementedError
GpuType = TypeVar("GpuType", bound="Gpu_interface")
def get_gpuid(gpu_ids: Dict[str, str], gpus: List[GpuType]):
vendors = []
    for gpu in gpus:
        if gpu.vendor not in vendors:
            vendors.append(gpu.vendor)
gpuvendor = "".join(vendors).lower()
if gpuvendor in gpu_ids:
return gpu_ids[gpuvendor]
else:
print("Unknown GPU, contact us on github to resolve this.")
return "unknown"
|
WebMirror/management/rss_parser_funcs/feed_parse_extractCgtranslationsMe.py | fake-name/ReadableWebProxy | 193 | 12786940 | <filename>WebMirror/management/rss_parser_funcs/feed_parse_extractCgtranslationsMe.py
def extractCgtranslationsMe(item):
'''
Parser for 'cgtranslations.me'
'''
if 'Manga' in item['tags']:
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if ('Gifting (Fanfic)' in item['tags'] and 'LN Chapters' in item['tags']) or \
item['tags'] == ['Gifting (Fanfic)']:
return buildReleaseMessageWithType(item, 'Gifting this World with Wonderful Blessings!', vol, chp, frag=frag, postfix=postfix)
if 'Gifting (Fanfic)' in item['tags'] and 'explosion' in item['tags']:
return buildReleaseMessageWithType(item, 'Kono Subarashii Sekai ni Bakuen wo!', vol, chp, frag=frag, postfix=postfix)
if ('KonoSuba' in item['tags'] and 'LN Chapters' in item['tags']):
return buildReleaseMessageWithType(item, 'KonoSuba', vol, chp, frag=frag, postfix=postfix)
return False |
probe/modules/antivirus/eset/eset_file_security.py | krisshol/bach-kmno | 248 | 12786944 | <filename>probe/modules/antivirus/eset/eset_file_security.py
#
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import logging
import re
from pathlib import Path
from datetime import datetime
from modules.antivirus.base import AntivirusUnix
log = logging.getLogger(__name__)
class EsetFileSecurity(AntivirusUnix):
name = "ESET File Security (Linux)"
# ==================================
# Constructor and destructor stuff
# ==================================
def __init__(self, *args, **kwargs):
# class super class constructor
super().__init__(*args, **kwargs)
        # Modify return codes (see --help for details)
self._scan_retcodes[self.ScanResult.INFECTED] = lambda x: x in [1, 50]
self._scan_retcodes[self.ScanResult.ERROR] = lambda x: x in [100]
# scan tool variables
self.scan_args = (
"--clean-mode=NONE", # do not remove infected files
"--no-log-all" # do not log clean files
)
self.scan_patterns = [
re.compile('name="(?P<file>.*)", threat="(?P<name>.*)", '
'action=.*', re.IGNORECASE),
]
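        # Example scanner output line matched by scan_patterns (illustrative):
        #   name="/tmp/eicar.com", threat="Eicar test file", action="retained"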
self.scan_path = Path("/opt/eset/esets/sbin/esets_scan")
# ==========================================
    # Antivirus methods (need to be overridden)
# ==========================================
def get_version(self):
"""return the version of the antivirus"""
return self._run_and_parse(
'--version',
            regexp=r'(?P<version>\d+(\.\d+)+)',
group='version')
def get_database(self):
"""return list of files in the database"""
search_paths = [
Path('/var/opt/eset/esets/lib/'),
]
database_patterns = [
'*.dat', # determined using strace on linux
]
return self.locate(database_patterns, search_paths, syspath=False)
def get_virus_database_version(self):
"""Return the Virus Database version"""
fdata = Path("/var/opt/eset/esets/lib/data/data.txt")
data = fdata.read_text()
        matches = re.search(r'VerFileETAG_update\.eset\.com=(?P<version>.*)',
data, re.IGNORECASE)
if not matches:
raise RuntimeError(
"Cannot read dbversion in {}".format(fdata.absolute()))
version = matches.group('version').strip()
        matches = re.search(r'LastUpdate=(?P<date>\d*)', data, re.IGNORECASE)
if not matches:
raise RuntimeError(
"Cannot read db date in {}".format(fdata.absolute()))
date = matches.group('date').strip()
date = datetime.fromtimestamp(int(date)).strftime('%Y-%m-%d')
return version + ' (' + date + ')'
|
examples/Plot_FibonacciLines.py | Physicworld/pyjuque | 343 | 12787019 | import os
import sys
curr_path = os.path.abspath(__file__)
root_path = os.path.abspath(
os.path.join(curr_path, os.path.pardir, os.path.pardir))
sys.path.append(root_path)
from pyjuque.Exchanges.CcxtExchange import CcxtExchange
from pyjuque.Plotting import PlotData
import plotly.graph_objs as go
def horizontal_line(start_time, end_time, value, color=None):
return go.layout.Shape(
type="line",
x0=start_time,
y0=value,
x1=end_time,
y1=value,
line=dict(color=color)
)
def Main():
exchange = CcxtExchange('binance')
symbol = "BTC/USDT"
interval = "4h"
df = exchange.getOHLCVHistory(symbol, interval, 8000)
start_time = df['time'][0]
end_time = df['time'][len(df)-1]
price_min = df['close'].min()
price_max = df['close'].max()
diff = price_max - price_min
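    # Fibonacci retracement levels, measured down from the swing high:
    # level = high - ratio * (high - low), with ratios 0.236, 0.382, 0.618.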
level1 = price_max - 0.236 * diff
level2 = price_max - 0.382 * diff
level3 = price_max - 0.618 * diff
lines = []
lines.append(horizontal_line(
start_time, end_time, price_max,
color="rgba(255, 0, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level1,
color="rgba(255, 255, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level2,
color="rgba(0, 255, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level3,
color="rgba(0, 255, 255, 255)"))
lines.append(horizontal_line(
start_time, end_time, price_min,
color="rgba(0, 0, 255, 255)"))
PlotData(df,
add_candles=False,
plot_shapes=lines,
plot_title="fib_levels_"+symbol.replace('/', '').lower() + "_" + interval,
show_plot=True)
if __name__ == '__main__':
Main() |