the-stack_0_26517
|
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import Firefox, Chrome, PhantomJS
from selenium import webdriver
from argparse import ArgumentParser
from urllib.parse import quote
import time
import copy
import sys
import os
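# Automates the PKU campus exit/entry application workflow: log in to the
# IAAA portal, open the application form, and submit both the exit and the
# entry record in a headless PhantomJS browser.
# TIMEOUT is the maximum number of seconds WebDriverWait polls for an element;
# TIMESLP is the fixed pause (in seconds) inserted between UI interactions.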
TIMEOUT = 1000
TIMESLP = 3
def login(driver, username, password, failed=0):
if failed == 3:
raise Exception('门户登录失败')
iaaaUrl = 'https://iaaa.pku.edu.cn/iaaa/oauth.jsp'
appName = quote('北京大学校内信息门户新版')
redirectUrl = 'https://portal.pku.edu.cn/portal2017/ssoLogin.do'
driver.get('https://portal.pku.edu.cn/portal2017/')
driver.get(
f'{iaaaUrl}?appID=portal2017&appName={appName}&redirectUrl={redirectUrl}'
)
print('门户登陆中...')
driver.find_element_by_id('user_name').send_keys(username)
time.sleep(TIMESLP)
driver.find_element_by_id('password').send_keys(password)
time.sleep(TIMESLP)
driver.find_element_by_id('logon_button').click()
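    # The "我知道了" (got it) notice is only shown sometimes; a timeout below
    # simply means it never appeared and can be ignored.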
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.LINK_TEXT, '我知道了')))
    except Exception:
pass
else:
driver.find_element_by_link_text('我知道了').click()
try:
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
    except Exception:
login(driver, username, password, failed + 1)
else:
print('门户登录成功!')
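# Open the campus exit/entry application from the portal's service list; the
# form opens in a new browser window, so switch to the latest window handle.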
def go_to_application_out(driver):
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-input__inner')))
def go_to_application_in(driver):
driver.get('https://portal.pku.edu.cn/portal2017/#/bizCenter')
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'all')))
driver.find_element_by_id('all').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.ID, 'tag_s_stuCampusExEnReq')))
driver.find_element_by_id('tag_s_stuCampusExEnReq').click()
time.sleep(TIMESLP)
driver.switch_to.window(driver.window_handles[-1])
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-card__body')))
time.sleep(TIMESLP)
driver.find_element_by_class_name('el-card__body').click()
time.sleep(TIMESLP)
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located((By.CLASS_NAME, 'el-select')))
def select_in_out(driver, way):
driver.find_element_by_class_name('el-select').click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{way}"]').click()
def select_campus(driver, campus):
driver.find_elements_by_class_name('el-select')[1].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{campus}"]').click()
def select_destination(driver, destination):
driver.find_elements_by_class_name('el-select')[2].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{destination}"]').click()
def select_district(driver, district):
driver.find_elements_by_class_name('el-select')[3].click()
time.sleep(TIMESLP)
driver.find_element_by_xpath(f'//li/span[text()="{district}"]').click()
def write_reason(driver, reason):
driver.find_element_by_class_name('el-textarea__inner').send_keys(
f'{reason}')
time.sleep(TIMESLP)
def write_track(driver, track):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{track}')
time.sleep(TIMESLP)
def write_street(driver, street):
driver.find_elements_by_class_name('el-textarea__inner')[1].send_keys(
f'{street}')
time.sleep(TIMESLP)
def click_check(driver):
driver.find_element_by_class_name('el-checkbox__label').click()
time.sleep(TIMESLP)
def click_inPeking(driver):
driver.find_element_by_class_name('el-radio__inner').click()
time.sleep(TIMESLP)
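# Click "保存" (save) first, then wait for the third "提交" (submit) button to
# become visible and click it to confirm the submission.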
def submit(driver):
driver.find_element_by_xpath(
'//button/span[contains(text(),"保存")]').click()
WebDriverWait(driver, TIMEOUT).until(
EC.visibility_of_element_located(
(By.XPATH, '(//button/span[contains(text(),"提交")])[3]')))
driver.find_element_by_xpath(
'(//button/span[contains(text(),"提交")])[3]').click()
time.sleep(TIMESLP)
def fill_out(driver, campus, reason, destination, track):
print('开始填报出校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '出校')
print('Done')
print('选择校区 ', end='')
select_campus(driver, campus)
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
print('选择出校目的地 ', end='')
select_destination(driver, destination)
print('Done')
print('填写出校行动轨迹 ', end='')
write_track(driver, track)
print('Done')
click_check(driver)
submit(driver)
print('出校备案填报完毕!')
def fill_in(driver, campus, reason, habitation, district, street):
print('开始填报入校备案')
print('选择出校/入校 ', end='')
select_in_out(driver, '入校')
print('Done')
print('填写出入校事由 ', end='')
write_reason(driver, reason)
print('Done')
if habitation != '北京':
raise Exception('暂不支持京外入校备案,请手动填写')
print('选择居住地所在区 ', end='')
select_district(driver, district)
print('Done')
print('填写居住地所在街道 ', end='')
write_street(driver, street)
print('Done')
click_inPeking(driver)
click_check(driver)
submit(driver)
print('入校备案填报完毕!')
def run(driver, username, password, campus, reason, destination, track,
habitation, district, street):
login(driver, username, password)
print('=================================')
go_to_application_out(driver)
fill_out(driver, campus, reason, destination, track)
print('=================================')
go_to_application_in(driver)
fill_in(driver, campus, reason, habitation, district, street)
print('=================================')
print('可以愉快的玩耍啦!')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--username', '-u', type=str, help='用户名')
parser.add_argument('--password', '-p', type=str, help='密码')
parser.add_argument('--campus', type=str, help='所在校区, 燕园、万柳、畅春园、圆明园、中关新园', default='燕园')
parser.add_argument('--reason', type=str, help='出校原因, eg. 吃饭', default='吃饭')
parser.add_argument('--destination', type=str, help='出校目的地, eg. 北京', default='北京')
parser.add_argument('--track', type=str, help='出校轨迹, eg. 畅春园食堂', default='畅春园')
parser.add_argument('--habitation', type=str, help='入校前居住地, eg. 北京', default='北京')
parser.add_argument('--district', type=str, help='入校前居住所在区, eg. 海淀区', default='海淀区')
parser.add_argument('--street', type=str, help='入校前居住所在街道, eg. 燕园街道', default='燕园街道')
args = parser.parse_args()
args_public = copy.deepcopy(args)
args_public.password = 'xxxxxxxx'
print('Arguments: {}'.format(args_public))
print('Driver Launching...')
# driver = Firefox()
# driver = Chrome()
if sys.platform == 'darwin': # macOS
phantomjs_path = os.path.join('phantomjs', 'phantomjs-darwin')
elif sys.platform == 'linux': # linux
phantomjs_path = os.path.join('phantomjs', 'phantomjs-linux-x86_64')
else: # windows
phantomjs_path = os.path.join('phantomjs', 'phantomjs-windows.exe')
driver = PhantomJS(executable_path=phantomjs_path)
run(driver, args.username, args.password, args.campus, args.reason,
args.destination, args.track, args.habitation, args.district,
args.street)
    driver.quit()
|
the-stack_0_26518
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from scipy.ndimage import zoom as zoom_scipy
from monai.transforms import Zoom
from tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose
VALID_CASES = [(1.5, "nearest"), (1.5, "nearest"), (0.8, "bilinear"), (0.8, "area")]
INVALID_CASES = [((None, None), "bilinear", TypeError), ((0.9, 0.9), "s", ValueError)]
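# Each valid case is a (zoom factor, interpolation mode) pair; invalid cases
# additionally name the exception that Zoom is expected to raise.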
class TestZoom(NumpyImageTestCase2D):
@parameterized.expand(VALID_CASES)
def test_correct_results(self, zoom, mode):
for p in TEST_NDARRAYS:
zoom_fn = Zoom(zoom=zoom, mode=mode, keep_size=False)
zoomed = zoom_fn(p(self.imt[0]))
_order = 0
if mode.endswith("linear"):
_order = 1
expected = []
for channel in self.imt[0]:
expected.append(zoom_scipy(channel, zoom=zoom, mode="nearest", order=_order, prefilter=False))
expected = np.stack(expected).astype(np.float32)
assert_allclose(zoomed, p(expected), atol=1.0)
def test_keep_size(self):
for p in TEST_NDARRAYS:
zoom_fn = Zoom(zoom=[0.6, 0.6], keep_size=True, align_corners=True)
zoomed = zoom_fn(p(self.imt[0]), mode="bilinear")
assert_allclose(zoomed.shape, self.imt.shape[1:])
zoom_fn = Zoom(zoom=[1.3, 1.3], keep_size=True)
zoomed = zoom_fn(p(self.imt[0]))
assert_allclose(zoomed.shape, self.imt.shape[1:])
@parameterized.expand(INVALID_CASES)
def test_invalid_inputs(self, zoom, mode, raises):
for p in TEST_NDARRAYS:
with self.assertRaises(raises):
zoom_fn = Zoom(zoom=zoom, mode=mode)
zoom_fn(p(self.imt[0]))
def test_padding_mode(self):
for p in TEST_NDARRAYS:
zoom_fn = Zoom(zoom=0.5, mode="nearest", padding_mode="constant", keep_size=True)
test_data = p([[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]])
zoomed = zoom_fn(test_data)
expected = p([[[0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]]])
torch.testing.assert_allclose(zoomed, expected)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_26520
|
######################################################################
#
# File: test/unit/v1/test_policy.py
#
# Copyright 2019, Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from unittest.mock import MagicMock
from ..test_base import TestBase
from .deps import FileVersionInfo
from .deps import LocalSyncPath, B2SyncPath
from .deps import B2Folder
from .deps import make_b2_keep_days_actions
class TestMakeB2KeepDaysActions(TestBase):
def setUp(self):
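        # Synthetic clock: `today` is a fixed reference point and version ages
        # in these tests are expressed in whole days relative to it.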
self.keep_days = 7
self.today = 100 * 86400
self.one_day_millis = 86400 * 1000
def test_no_versions(self):
self.check_one_answer(True, [], [])
def test_new_version_no_action(self):
self.check_one_answer(True, [(1, -5, 'upload')], [])
def test_no_source_one_old_version_hides(self):
# An upload that is old gets deleted if there is no source file.
self.check_one_answer(False, [(1, -10, 'upload')], ['b2_hide(folder/a)'])
def test_old_hide_causes_delete(self):
# A hide marker that is old gets deleted, as do the things after it.
self.check_one_answer(
True, [(1, -5, 'upload'), (2, -10, 'hide'), (3, -20, 'upload')],
['b2_delete(folder/a, 2, (hide marker))', 'b2_delete(folder/a, 3, (old version))']
)
def test_old_upload_causes_delete(self):
# An upload that is old stays if there is a source file, but things
# behind it go away.
self.check_one_answer(
True, [(1, -5, 'upload'), (2, -10, 'upload'), (3, -20, 'upload')],
['b2_delete(folder/a, 3, (old version))']
)
def test_out_of_order_dates(self):
# The one at date -3 will get deleted because the one before it is old.
self.check_one_answer(
True, [(1, -5, 'upload'), (2, -10, 'upload'), (3, -3, 'upload')],
['b2_delete(folder/a, 3, (old version))']
)
def check_one_answer(self, has_source, id_relative_date_action_list, expected_actions):
source_file = LocalSyncPath('a', 'a', 100, 10) if has_source else None
dest_file_versions = [
FileVersionInfo(
id_=id_,
file_name='folder/' + 'a',
upload_timestamp=self.today + relative_date * self.one_day_millis,
action=action,
size=100,
file_info={},
content_type='text/plain',
content_sha1='content_sha1',
) for (id_, relative_date, action) in id_relative_date_action_list
]
dest_file = B2SyncPath(
'a', selected_version=dest_file_versions[0], all_versions=dest_file_versions
) if dest_file_versions else None
bucket = MagicMock()
api = MagicMock()
api.get_bucket_by_name.return_value = bucket
dest_folder = B2Folder('bucket-1', 'folder', api)
actual_actions = list(
make_b2_keep_days_actions(
source_file, dest_file, dest_folder, dest_folder, self.keep_days, self.today
)
)
actual_action_strs = [str(a) for a in actual_actions]
self.assertEqual(expected_actions, actual_action_strs)
|
the-stack_0_26521
|
import json
import datetime
import logging
import re
from typing import Optional
import pytz
from django.conf import settings
from django.contrib.auth.backends import BaseBackend
from django.contrib.auth.models import User, Group
from web3 import Web3, HTTPProvider
from ens import ENS
from siwe.siwe import (
SiweMessage,
ValidationError,
ExpiredMessage,
MalformedSession,
InvalidSignature,
)
from .custom_groups.group_manager import GroupManager
from .models import Wallet, Nonce
def _nonce_is_valid(nonce: str) -> bool:
"""
Check if given nonce exists and has not yet expired.
:param nonce: The nonce string to validate.
:return: True if valid else False.
"""
    n = Nonce.objects.filter(value=nonce).first()
is_valid = False
if n is not None and n.expiration > datetime.datetime.now(tz=pytz.UTC):
is_valid = True
n.delete()
return is_valid
class SiweBackend(BaseBackend):
"""
    Authenticate an Ethereum address as per Sign-In with Ethereum (EIP-4361).
"""
def authenticate(self, request, signature: str = None, siwe_message: SiweMessage = None):
body = json.loads(request.body)
if siwe_message is None:
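            # Rebuild the SiweMessage from the request body, converting the
            # camelCase JSON keys into the snake_case names SiweMessage expects.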
siwe_message = SiweMessage(
message={
re.sub(r"(?<!^)(?=[A-Z])", "_", k).lower(): v
for k, v in body["message"].items()
}
)
signature = body["signature"]
# Validate signature
w3 = Web3(HTTPProvider(settings.PROVIDER))
try:
siwe_message.validate(signature=signature, provider=w3)
except ValidationError:
logging.info("Authentication attempt rejected due to invalid message.")
return None
except ExpiredMessage:
logging.info("Authentication attempt rejected due to expired message.")
return None
except MalformedSession as e:
logging.info(
f"Authentication attempt rejected due to missing fields: {', '.join(e.missing_fields)}"
)
return None
except InvalidSignature:
logging.info("Authentication attempt rejected due to invalid signature.")
return None
# Validate nonce
if not _nonce_is_valid(siwe_message.nonce):
return None
# Pull ENS data
ens_profile = ENSProfile(ethereum_address=siwe_message.address, w3=w3)
# Message and nonce has been validated. Authentication complete. Continue with authorization/other.
now = datetime.datetime.now(tz=pytz.UTC)
try:
wallet = Wallet.objects.get(ethereum_address=siwe_message.address)
wallet.last_login = now
wallet.ens_name = ens_profile.name
wallet.save()
logging.debug(f"Found wallet for address {siwe_message.address}")
except Wallet.DoesNotExist:
wallet = Wallet(
ethereum_address=Web3.toChecksumAddress(siwe_message.address),
ens_name=ens_profile.name,
ens_avatar=ens_profile.avatar,
created=now,
last_login=now,
password=None,
)
wallet.set_unusable_password()
wallet.save()
logging.debug(
f"Could not find wallet for address {siwe_message.address}. Creating new wallet object."
)
# Group settings
if (
hasattr(settings, "CREATE_GROUPS_ON_AUTHN")
and settings.CREATE_GROUPS_ON_AUTHN
):
for custom_group in settings.CUSTOM_GROUPS:
group, created = Group.objects.get_or_create(name=custom_group[0])
if created:
logging.info(f"Created group '{custom_group[0]}'.")
group_manager: GroupManager = custom_group[1]
if group_manager.is_member(
wallet=wallet,
provider=HTTPProvider(settings.PROVIDER),
):
logging.info(
f"Adding wallet '{wallet.ethereum_address}' to group '{custom_group[0]}'."
)
wallet.groups.add(group)
return wallet
def get_user(self, ethereum_address: str) -> Optional[Wallet]:
"""
Get Wallet by ethereum address if exists.
:param ethereum_address: Ethereum address of user.
:return: Wallet object if exists or None
"""
try:
return Wallet.objects.get(pk=ethereum_address)
        except Wallet.DoesNotExist:
return None
class ENSProfile:
"""
Container for ENS profile information including but not limited to primary name and avatar.
"""
name: str
avatar: str
def __init__(self, ethereum_address: str, w3: Web3):
# Temporary until https://github.com/ethereum/web3.py/pull/2286 is merged
self.name = ENS.fromWeb3(w3).name(address=ethereum_address)
resolver = ENS.fromWeb3(w3).resolver(normal_name=self.name)
# if resolver:
# self.avatar = resolver.caller.text(normal_name_to_hash(self.name), 'avatar')
# else:
self.avatar = ""
|
the-stack_0_26523
|
# -*- coding: utf-8 -*-
'''
Configuration of network device
.. versionadded:: Carbon
:codeauthor: Krzysztof Pawlowski <[email protected]>
:maturity: new
:depends: python-ethtool
:platform: linux
.. code-block:: yaml
eth0:
ethtool.coalesce:
- name: eth0
- rx_usecs: 24
- tx_usecs: 48
eth0:
ethtool.ring:
- name: eth0
- rx: 1024
- tx: 1024
eth0:
ethtool.offload:
- name: eth0
- tcp_segmentation_offload: on
'''
from __future__ import absolute_import
# Set up logging
import logging
log = logging.getLogger(__name__)
def __virtual__():
'''
Provide ethtool state
'''
return 'ethtool' if 'ethtool.show_driver' in __salt__ else False
def coalesce(name, **kwargs):
'''
Manage coalescing settings of network device
name
Interface name to apply coalescing settings
.. code-block:: yaml
eth0:
ethtool.coalesce:
- name: eth0
- adaptive_rx: on
- adaptive_tx: on
- rx_usecs: 24
- rx_frame: 0
- rx_usecs_irq: 0
- rx_frames_irq: 0
- tx_usecs: 48
- tx_frames: 0
- tx_usecs_irq: 0
- tx_frames_irq: 0
- stats_block_usecs: 0
- pkt_rate_low: 0
- rx_usecs_low: 0
- rx_frames_low: 0
- tx_usecs_low: 0
- tx_frames_low: 0
- pkt_rate_high: 0
- rx_usecs_high: 0
- rx_frames_high: 0
- tx_usecs_high: 0
- tx_frames_high: 0
- sample_interval: 0
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Network device {0} coalescing settings are up to date.'.format(name),
}
apply_coalescing = False
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
# Build coalescing settings
try:
old = __salt__['ethtool.show_coalesce'](name)
if not isinstance(old, dict):
ret['result'] = False
ret['comment'] = 'Device {0} coalescing settings are not supported'.format(name)
return ret
new = {}
diff = []
        # Retrieve the changes to be made
for key, value in kwargs.items():
if key in ['adaptive_rx', 'adaptive_tx']:
value = value and "on" or "off"
if key in old and value != old[key]:
new.update({key: value})
diff.append('{0}: {1}'.format(key, value))
# Dry run
if kwargs['test']:
if not new:
return ret
if new:
ret['result'] = None
ret['comment'] = 'Device {0} coalescing settings are set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
return ret
# Prepare return output
if new:
apply_coalescing = True
ret['comment'] = 'Device {0} coalescing settings updated.'.format(name)
ret['changes']['ethtool_coalesce'] = '\n'.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Apply coalescing settings
if apply_coalescing:
try:
__salt__['ethtool.set_coalesce'](name, **new)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
return ret
def ring(name, **kwargs):
'''
Manage rx/tx ring parameters of network device
Use 'max' word to set with factory maximum
name
Interface name to apply ring parameters
.. code-block:: yaml
eth0:
ethtool.ring:
- name: eth0
- rx: 1024
- rx_mini: 0
- rx_jumbo: 0
- tx: max
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Network device {0} ring parameters are up to date.'.format(name),
}
apply_ring = False
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
# Build ring parameters
try:
old = __salt__['ethtool.show_ring'](name)
if not isinstance(old, dict):
ret['result'] = False
ret['comment'] = 'Device {0} ring parameters are not supported'.format(name)
return ret
new = {}
diff = []
        # Retrieve the changes to be made
for key, value in kwargs.items():
if key in old:
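                # 'max' expands to the driver-reported maximum for that ring
                # parameter (the corresponding '<key>_max' entry).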
if value == 'max':
value = old['{0}_max'.format(key)]
if value != old[key]:
new.update({key: value})
diff.append('{0}: {1}'.format(key, value))
# Dry run
if kwargs['test']:
if not new:
return ret
if new:
ret['result'] = None
ret['comment'] = 'Device {0} ring parameters are set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
return ret
# Prepare return output
if new:
apply_ring = True
ret['comment'] = 'Device {0} ring parameters updated.'.format(name)
ret['changes']['ethtool_ring'] = '\n'.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Apply ring parameters
if apply_ring:
try:
__salt__['ethtool.set_ring'](name, **new)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
return ret
def offload(name, **kwargs):
'''
Manage protocol offload and other features of network device
name
Interface name to apply coalescing settings
.. code-block:: yaml
eth0:
ethtool.offload:
- name: eth0
- tcp_segmentation_offload: on
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': 'Network device {0} offload settings are up to date.'.format(name),
}
apply_offload = False
if 'test' not in kwargs:
kwargs['test'] = __opts__.get('test', False)
# Build offload settings
try:
old = __salt__['ethtool.show_offload'](name)
if not isinstance(old, dict):
ret['result'] = False
ret['comment'] = 'Device {0} offload settings are not supported'.format(name)
return ret
new = {}
diff = []
        # Retrieve the changes to be made
for key, value in kwargs.items():
value = value and "on" or "off"
if key in old and value != old[key]:
new.update({key: value})
diff.append('{0}: {1}'.format(key, value))
# Dry run
if kwargs['test']:
if not new:
return ret
if new:
ret['result'] = None
ret['comment'] = 'Device {0} offload settings are set to be ' \
'updated:\n{1}'.format(name, '\n'.join(diff))
return ret
# Prepare return output
if new:
apply_offload = True
ret['comment'] = 'Device {0} offload settings updated.'.format(name)
ret['changes']['ethtool_offload'] = '\n'.join(diff)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
# Apply offload settings
if apply_offload:
try:
__salt__['ethtool.set_offload'](name, **new)
except AttributeError as error:
ret['result'] = False
ret['comment'] = str(error)
return ret
return ret
|
the-stack_0_26526
|
'''
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Terrain/alos_chili.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/alos_chili.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Terrain/alos_chili.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Terrain/alos_chili.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
'''
# %%
'''
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
'''
# %%
# %%capture
# !pip install earthengine-api
# !pip install geehydro
# %%
'''
Import libraries
'''
# %%
import ee
import folium
import geehydro
# %%
'''
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
'''
# %%
# ee.Authenticate()
ee.Initialize()
# %%
'''
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
'''
# %%
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
# %%
'''
## Add Earth Engine Python script
'''
# %%
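# Load the global ALOS-derived CHILI (Continuous Heat-Insolation Load Index)
# image and visualize its single 'constant' band.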
dataset = ee.Image('CSP/ERGo/1_0/Global/ALOS_CHILI')
alosChili = dataset.select('constant')
alosChiliVis = {
'min': 0.0,
'max': 255.0,
}
Map.setCenter(-105.8636, 40.3439, 11)
Map.addLayer(alosChili, alosChiliVis, 'ALOS CHILI')
# %%
'''
## Display Earth Engine data layers
'''
# %%
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
|
the-stack_0_26529
|
import copy
import numpy as np
from neupy.utils import asfloat
from neupy import layers, storage
from neupy.storage import (
validate_data_structure, InvalidFormat,
ParameterLoaderError, load_layer_parameter,
)
from base import BaseTestCase
class DictStorageTestCase(BaseTestCase):
maxDiff = 10000
def test_storage_invalid_input_type(self):
network = [
layers.Input(10),
layers.Relu(5),
layers.Relu(2),
]
message = (
"Invalid input type. Input should be "
"network or optimizer with network"
)
with self.assertRaisesRegexp(TypeError, message):
storage.save_dict(network)
def test_storage_save_dict(self):
network = layers.join(
layers.parallel([
layers.Input(2, name='input-1'),
layers.PRelu(1, name='prelu')
], [
layers.Input(1, name='input-2'),
layers.Sigmoid(4, name='sigmoid'),
layers.BatchNorm(name='batch-norm'),
]),
layers.Concatenate(name='concatenate'),
layers.Softmax(3, name='softmax'),
)
dict_network = storage.save_dict(network)
expected_keys = ('metadata', 'layers', 'graph')
self.assertItemsEqual(expected_keys, dict_network.keys())
expected_metadata_keys = ('created', 'language', 'library', 'version')
actual_metadata_keys = dict_network['metadata'].keys()
self.assertItemsEqual(expected_metadata_keys, actual_metadata_keys)
self.assertEqual(len(dict_network['layers']), 7)
expected_layers = [{
'class_name': 'Input',
'configs': {'name': 'input-1', 'shape': (2,)},
'name': 'input-1',
}, {
'class_name': 'PRelu',
'configs': {'alpha_axes': (-1,), 'name': 'prelu', 'n_units': 1},
'name': 'prelu',
}, {
'class_name': 'Input',
'configs': {'name': 'input-2', 'shape': (1,)},
'name': 'input-2',
}, {
'class_name': 'Sigmoid',
'configs': {'name': 'sigmoid', 'n_units': 4},
'name': 'sigmoid',
}, {
'class_name': 'BatchNorm',
'configs': {
'alpha': 0.1,
'axes': (0,),
'epsilon': 1e-05,
'name': 'batch-norm'
},
'name': 'batch-norm',
}, {
'class_name': 'Concatenate',
'configs': {'axis': -1, 'name': 'concatenate'},
'name': 'concatenate',
}, {
'class_name': 'Softmax',
'configs': {'name': 'softmax', 'n_units': 3},
'name': 'softmax',
}]
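        # Parameter values are numpy arrays, so drop them before comparing the
        # layer configurations against the expected structure.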
actual_layers = []
for i, layer in enumerate(dict_network['layers']):
self.assertIn('parameters', layer, msg="Layer #" + str(i))
layer = copy.deepcopy(layer)
del layer['parameters']
actual_layers.append(layer)
self.assertEqual(actual_layers, expected_layers)
def test_storage_load_dict_using_names(self):
relu = layers.Relu(2, name='relu')
network = layers.join(layers.Input(10), relu)
weight = np.ones((10, 2))
bias = np.ones((2,))
storage.load_dict(network, {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'relu',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': weight},
'bias': {'trainable': True, 'value': bias},
}
}]
})
np.testing.assert_array_almost_equal(weight, self.eval(relu.weight))
np.testing.assert_array_almost_equal(bias, self.eval(relu.bias))
def test_storage_load_dict_using_wrong_names(self):
network = layers.join(
layers.Input(3),
layers.Relu(4, name='relu'),
layers.Linear(5, name='linear') >> layers.Relu(),
layers.Softmax(6, name='softmax'),
)
storage.load_dict(network, {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'name-1',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((3, 4))},
'bias': {'trainable': True, 'value': np.ones((4,))},
}
}, {
'name': 'name-2',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((4, 5))},
'bias': {'trainable': True, 'value': np.ones((5,))},
}
}, {
'name': 'name-3',
'class_name': 'Softmax',
'configs': {},
'parameters': {
'weight': {'trainable': True, 'value': np.ones((5, 6))},
'bias': {'trainable': True, 'value': np.ones((6,))},
}
}]
}, load_by='order', skip_validation=False)
relu = network.layer('relu')
self.assertEqual(12, np.sum(self.eval(relu.weight)))
self.assertEqual(4, np.sum(self.eval(relu.bias)))
linear = network.layer('linear')
self.assertEqual(20, np.sum(self.eval(linear.weight)))
self.assertEqual(5, np.sum(self.eval(linear.bias)))
softmax = network.layer('softmax')
self.assertEqual(30, np.sum(self.eval(softmax.weight)))
self.assertEqual(6, np.sum(self.eval(softmax.bias)))
def test_storage_load_dict_invalid_number_of_paramters(self):
network = layers.join(
layers.Input(3),
layers.Relu(4, name='relu'),
layers.Linear(5, name='linear') > layers.Relu(),
layers.Softmax(6, name='softmax'),
)
data = {
'metadata': {}, # avoided for simplicity
'graph': {}, # avoided for simplicity
# Input layer was avoided on purpose
'layers': [{
'name': 'name-1',
'class_name': 'Relu',
'configs': {},
'parameters': {
'weight': {
'trainable': True,
'value': np.ones((3, 4))
},
'bias': {'trainable': True, 'value': np.ones((4,))},
}
}]
}
with self.assertRaises(ParameterLoaderError):
storage.load_dict(network, data, ignore_missing=False)
def test_failed_loading_mode_for_storage(self):
network = layers.Input(2) >> layers.Sigmoid(1)
with self.assertRaisesRegexp(ValueError, "Invalid value"):
storage.load_dict(network, {}, load_by='unknown')
def test_failed_load_parameter_invalid_type(self):
sigmoid = layers.Sigmoid(1, bias=None)
network = layers.join(layers.Input(2), sigmoid)
network.create_variables()
with self.assertRaisesRegexp(ParameterLoaderError, "equal to None"):
load_layer_parameter(sigmoid, {
'parameters': {
'bias': {
'value': np.array([[0]]),
'trainable': True,
},
},
})
class StoredDataValidationTestCase(BaseTestCase):
def test_stored_data_dict_format_basics(self):
with self.assertRaises(InvalidFormat):
validate_data_structure([])
with self.assertRaises(InvalidFormat):
validate_data_structure({})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': {}})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': []})
def test_stored_data_layers_format(self):
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [[]]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'parameters': {},
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({
'layers': [{
'parameters': [], # wrong type
'name': 'name',
}]
})
result = validate_data_structure({
'layers': [{
'parameters': {},
'name': 'name',
}]
})
self.assertIsNone(result)
def test_stored_data_parameters_format(self):
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': np.ones((2, 3)),
}
}]})
with self.assertRaises(InvalidFormat):
validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': {
'data': np.ones((2, 3)),
},
}
}]})
result = validate_data_structure({'layers': [{
'name': 'name',
'parameters': {
'weight': {
'value': np.ones((2, 3)),
'trainable': True,
},
}
}]})
self.assertIsNone(result)
def test_basic_skip_validation(self):
network = layers.Input(10) >> layers.Relu(1)
with self.assertRaises(InvalidFormat):
storage.load_dict(network, {}, skip_validation=False)
class TransferLearningTestCase(BaseTestCase):
def test_transfer_learning_using_position(self):
network_pretrained = layers.join(
layers.Input(10),
layers.Elu(5),
layers.Elu(2, name='elu'),
layers.Sigmoid(1),
)
network_new = layers.join(
layers.Input(10),
layers.Elu(5),
layers.Elu(2),
)
pretrained_layers_stored = storage.save_dict(network_pretrained)
with self.assertRaises(ParameterLoaderError):
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names_or_order',
ignore_missing=False)
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names_or_order',
ignore_missing=True)
random_input = asfloat(np.random.random((12, 10)))
new_network_output = self.eval(network_new.output(random_input))
pretrained_output = self.eval(
network_pretrained.end('elu').output(random_input))
np.testing.assert_array_almost_equal(
pretrained_output, new_network_output)
def test_transfer_learning_using_names(self):
network_pretrained = layers.join(
layers.Input(10),
layers.Elu(5, name='elu-a'),
layers.Elu(2, name='elu-b'),
layers.Sigmoid(1),
)
network_new = layers.join(
layers.Input(10),
layers.Elu(5, name='elu-a'),
layers.Elu(2, name='elu-b'),
layers.Elu(8, name='elu-c'), # new layer
)
pretrained_layers_stored = storage.save_dict(network_pretrained)
storage.load_dict(
network_new,
pretrained_layers_stored,
load_by='names',
skip_validation=False,
ignore_missing=True)
random_input = asfloat(np.random.random((12, 10)))
pretrained_output = self.eval(
network_pretrained.end('elu-b').output(random_input))
new_network_output = self.eval(
network_new.end('elu-b').output(random_input))
np.testing.assert_array_almost_equal(
pretrained_output, new_network_output)
pred = self.eval(network_new.output(random_input))
self.assertEqual(pred.shape, (12, 8))
|
the-stack_0_26530
|
import pytz
from datetime import datetime
from typing import Any, List, Optional
from vnpy.api.tora.vntora import (CTORATstpMarketDataField, CTORATstpMdApi, CTORATstpMdSpi,
CTORATstpReqUserLoginField,
CTORATstpRspInfoField, CTORATstpRspUserLoginField,
CTORATstpUserLogoutField, TORA_TSTP_LACT_AccountID)
from vnpy.gateway.tora.error_codes import get_error_msg
from vnpy.trader.constant import Exchange
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
)
from .constant import EXCHANGE_TORA2VT, EXCHANGE_VT2TORA
CHINA_TZ = pytz.timezone("Asia/Shanghai")
def parse_datetime(date: str, time: str):
# sampled :
# date: '20190611'
# time: '16:28:24'
dt = datetime.strptime(f'{date}-{time}', "%Y%m%d-%H:%M:%S")
dt = dt.replace(tzinfo=CHINA_TZ)
return dt
class ToraMdSpi(CTORATstpMdSpi):
""""""
def __init__(self, api: "ToraMdApi", gateway: "BaseGateway"):
""""""
super().__init__()
self.gateway = gateway
self._api = api
def OnFrontConnected(self) -> Any:
""""""
self.gateway.write_log("行情服务器连接成功")
self._api.login()
def OnFrontDisconnected(self, error_code: int) -> Any:
""""""
self.gateway.write_log(
f"行情服务器连接断开({error_code}):{get_error_msg(error_code)}")
def OnRspError(
self, error_info: CTORATstpRspInfoField, request_id: int, is_last: bool
) -> Any:
""""""
error_id = error_info.ErrorID
error_msg = error_info.ErrorMsg
self.gateway.write_log(f"行情服务收到错误消息({error_id}):{error_msg}")
def OnRspUserLogin(
self,
info: CTORATstpRspUserLoginField,
error_info: CTORATstpRspInfoField,
request_id: int,
is_last: bool,
) -> Any:
""""""
error_id = error_info.ErrorID
if error_id != 0:
error_msg = error_info.ErrorMsg
self.gateway.write_log(f"行情服务登录失败({error_id}):{error_msg}")
return
self.gateway.write_log("行情服务器登录成功")
def OnRspUserLogout(
self,
info: CTORATstpUserLogoutField,
error_info: CTORATstpRspInfoField,
request_id: int,
is_last: bool,
) -> Any:
""""""
error_id = error_info.ErrorID
if error_id != 0:
error_msg = error_info.ErrorMsg
self.gateway.write_log(f"行情服务登出失败({error_id}):{error_msg}")
return
self.gateway.write_log("行情服务器登出成功")
def OnRtnDepthMarketData(self, data: CTORATstpMarketDataField) -> Any:
""""""
if data.ExchangeID not in EXCHANGE_TORA2VT:
return
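        # Map the TORA market data struct onto vn.py's TickData and push it
        # through the gateway's on_tick callback.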
tick_data = TickData(
gateway_name=self.gateway.gateway_name,
symbol=data.SecurityID,
exchange=EXCHANGE_TORA2VT[data.ExchangeID],
datetime=parse_datetime(data.TradingDay, data.UpdateTime),
name=data.SecurityName,
volume=0,
last_price=data.LastPrice,
last_volume=data.Volume, # to verify: is this correct?
limit_up=data.UpperLimitPrice,
limit_down=data.LowerLimitPrice,
open_price=data.OpenPrice,
high_price=data.HighestPrice,
low_price=data.LowestPrice,
pre_close=data.PreClosePrice,
bid_price_1=data.BidPrice1,
bid_price_2=data.BidPrice2,
bid_price_3=data.BidPrice3,
bid_price_4=data.BidPrice4,
bid_price_5=data.BidPrice5,
ask_price_1=data.AskPrice1,
ask_price_2=data.AskPrice2,
ask_price_3=data.AskPrice3,
ask_price_4=data.AskPrice4,
ask_price_5=data.AskPrice5,
bid_volume_1=data.BidVolume1,
bid_volume_2=data.BidVolume2,
bid_volume_3=data.BidVolume3,
bid_volume_4=data.BidVolume4,
bid_volume_5=data.BidVolume5,
ask_volume_1=data.AskVolume1,
ask_volume_2=data.AskVolume2,
ask_volume_3=data.AskVolume3,
ask_volume_4=data.AskVolume4,
ask_volume_5=data.AskVolume5,
)
self.gateway.on_tick(tick_data)
class ToraMdApi:
""""""
def __init__(self, gateway: BaseGateway):
""""""
self.gateway = gateway
self.username = ""
self.password = ""
self.md_address = ""
self._native_api: Optional[CTORATstpMdApi] = None
        self._spi: Optional["ToraMdSpi"] = None
self._last_req_id = 0
def _get_new_req_id(self):
""""""
req_id = self._last_req_id
self._last_req_id += 1
return req_id
def stop(self):
"""
:note not thread-safe
"""
if self._native_api:
self._native_api.RegisterSpi(None)
self._spi = None
self._native_api.Release()
self._native_api = None
def join(self):
"""
:note not thread-safe
"""
if self._native_api:
self._native_api.Join()
def login(self):
"""
send login request using self.username, self.password
:return:
"""
info = CTORATstpReqUserLoginField()
info.LogInAccount = self.username
info.LogInAccountType = TORA_TSTP_LACT_AccountID
info.Password = self.password
self._native_api.ReqUserLogin(info, self._get_new_req_id())
def connect(self):
"""
:note not thread-safe
"""
self._native_api = CTORATstpMdApi.CreateTstpMdApi()
self._spi = ToraMdSpi(self, self.gateway)
self._native_api.RegisterSpi(self._spi)
self._native_api.RegisterFront(self.md_address)
self._native_api.Init()
return True
def subscribe(self, symbols: List[str], exchange: Exchange):
""""""
err = self._native_api.SubscribeMarketData(
symbols, EXCHANGE_VT2TORA[exchange])
self._if_error_write_log(err, "subscribe")
def _if_error_write_log(self, error_code: int, function_name: str):
""""""
if error_code != 0:
error_msg = get_error_msg(error_code)
msg = f'在执行 {function_name} 时发生错误({error_code}): {error_msg}'
self.gateway.write_log(msg)
return True
|
the-stack_0_26532
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import shutil
import matplotlib.pyplot as plt
import pytest
import torch
import torch.nn as nn
from _pytest.fixtures import SubRequest
from _pytest.monkeypatch import MonkeyPatch
from torchgeo.datasets import InriaAerialImageLabeling
class TestInriaAerialImageLabeling:
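    # The fixture is parametrized so every test runs against both the train
    # and the test split of the dataset.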
@pytest.fixture(params=["train", "test"])
def dataset(
self, request: SubRequest, monkeypatch: MonkeyPatch
) -> InriaAerialImageLabeling:
root = os.path.join("tests", "data", "inria")
test_md5 = "478688944e4797c097d9387fd0b3f038"
monkeypatch.setattr(InriaAerialImageLabeling, "md5", test_md5)
transforms = nn.Identity() # type: ignore[no-untyped-call]
return InriaAerialImageLabeling(
root, split=request.param, transforms=transforms, checksum=True
)
def test_getitem(self, dataset: InriaAerialImageLabeling) -> None:
x = dataset[0]
assert isinstance(x, dict)
assert isinstance(x["image"], torch.Tensor)
if dataset.split == "train":
assert isinstance(x["mask"], torch.Tensor)
assert x["mask"].ndim == 2
assert x["image"].shape[0] == 3
assert x["image"].ndim == 3
def test_len(self, dataset: InriaAerialImageLabeling) -> None:
assert len(dataset) == 5
def test_already_downloaded(self, dataset: InriaAerialImageLabeling) -> None:
InriaAerialImageLabeling(root=dataset.root)
def test_not_downloaded(self, tmp_path: str) -> None:
with pytest.raises(RuntimeError, match="Dataset not found"):
InriaAerialImageLabeling(str(tmp_path))
def test_dataset_checksum(self, dataset: InriaAerialImageLabeling) -> None:
InriaAerialImageLabeling.md5 = "randommd5hash123"
shutil.rmtree(os.path.join(dataset.root, dataset.directory))
with pytest.raises(RuntimeError, match="Dataset corrupted"):
InriaAerialImageLabeling(root=dataset.root, checksum=True)
def test_plot(self, dataset: InriaAerialImageLabeling) -> None:
x = dataset[0].copy()
if dataset.split == "train":
x["prediction"] = x["mask"]
dataset.plot(x, suptitle="Test")
plt.close()
dataset.plot(x, show_titles=False)
plt.close()
|
the-stack_0_26533
|
"""Base table module."""
import abc
import typing as tp
from enum import Enum
from pathlib import Path
from pylatex import Document, Package, NoEscape, UnsafeCommand
from varats.paper.case_study import CaseStudy
from varats.table.tables import TableRegistry
class TableFormat(Enum):
"""List of supported TableFormats."""
value: str # pylint: disable=invalid-name
PLAIN = "plain"
SIMPLE = "simple"
GITHUB = "github"
GRID = "grid"
FANCY_GRID = "fancy_grid"
PIPE = "pipe"
ORGTBL = "orgtbl"
JIRA = "jira"
PRESTO = "presto"
PRETTY = "pretty"
PSQL = "psql"
RST = "rst"
MEDIAWIKI = "mediawiki"
MOINMOIN = "moinmoin"
YOUTRACK = "youtrack"
HTML = "html"
UNSAFEHTML = "unsafehtml"
LATEX = "latex"
LATEX_RAW = "latex_raw"
LATEX_BOOKTABS = "latex_booktabs"
TEXTILE = "textile"
class Table(metaclass=TableRegistry):
"""An abstract base class for all tables generated by VaRA-TS."""
format_filetypes = {
TableFormat.GITHUB: "md",
TableFormat.HTML: "html",
TableFormat.UNSAFEHTML: "html",
TableFormat.LATEX: "tex",
TableFormat.LATEX_RAW: "tex",
TableFormat.LATEX_BOOKTABS: "tex",
TableFormat.RST: "rst",
}
def __init__(self, name: str, **kwargs: tp.Any) -> None:
self.__name = name
self.__format = TableFormat.LATEX_BOOKTABS
self.__saved_extra_args = kwargs
@property
def name(self) -> str:
"""
Name of the current table.
Test:
>>> Table('test').name
'test'
"""
return self.__name
@property
def format(self) -> TableFormat:
"""
Current table format as used by python-tabulate.
Test:
>>> Table('test').format
<TableFormat.LATEX_BOOKTABS: 'latex_booktabs'>
"""
return self.__format
@format.setter
def format(self, new_format: TableFormat) -> None:
"""
Set current format of the table.
Args:
new_format: a table format as used by python-tabulate
"""
self.__format = new_format
@property
def table_kwargs(self) -> tp.Any:
"""
Access the kwargs passed to the initial table.
Test:
>>> tab = Table('test', foo='bar', baz='bazzer')
>>> tab.table_kwargs['foo']
'bar'
>>> tab.table_kwargs['baz']
'bazzer'
"""
return self.__saved_extra_args
@staticmethod
def supports_stage_separation() -> bool:
"""True, if the table supports stage separation, i.e., the table can be
drawn separating the different stages in a case study."""
return False
@abc.abstractmethod
def tabulate(self) -> str:
"""Build the table using tabulate."""
def table_file_name(self, include_filetype: bool = True) -> str:
"""
Get the file name this table; will be stored to when calling save.
Args:
include_filetype: flags whether to include the file extension at the
end of the filename.
Returns:
the file name the table will be stored to
Test:
>>> p = Table('test', project='bar')
>>> p.table_file_name()
'bar_test.tex'
>>> p = Table('foo', project='bar', table_case_study=CaseStudy('baz',\
42))
>>> p.format = TableFormat.FANCY_GRID
>>> p.table_file_name()
'baz_42_foo.txt'
"""
filetype = self.format_filetypes.get(self.__format, "txt")
table_ident = ''
if self.table_kwargs.get('table_case_study', None):
case_study: CaseStudy = self.table_kwargs['table_case_study']
table_ident = f"{case_study.project_name}_{case_study.version}_"
elif 'project' in self.table_kwargs:
table_ident = f"{self.table_kwargs['project']}_"
sep_stages = ''
if self.supports_stage_separation(
) and self.table_kwargs.get('sep_stages', None):
sep_stages = 'S'
table_file_name = f"{table_ident}{self.name}{sep_stages}"
if include_filetype:
table_file_name += f".{filetype}"
return table_file_name
@abc.abstractmethod
def wrap_table(self, table: str) -> str:
"""
Used to wrap tables inside a complete latex document by passing desired
parameters to wrap_table_in_document.
Returns:
The resulting table string.
"""
def save(
self,
path: tp.Optional[Path] = None,
wrap_document: bool = False
) -> None:
"""
Save the current table to a file.
Args:
path: The path where the file is stored (excluding the file name).
wrap_document: flags whether to wrap the (latex) table code into a
complete document.
"""
table = self.tabulate()
if wrap_document:
table = self.wrap_table(table)
if path is None:
table_dir = Path(self.table_kwargs["table_dir"])
else:
table_dir = path
with open(table_dir / self.table_file_name(), "w") as outfile:
outfile.write(table)
def wrap_table_in_document(
table: str, landscape: bool = False, margin: float = 1.5
) -> str:
"""
Wraps given table inside a proper latex document. Uses longtable instead of
tabular to fit data on multiple pages.
Args:
table: table string to wrap the document around.
landscape: orientation of the table document. True for landscape mode,
i.e. horizontal orientation.
margin: margin of the wrapped table inside the resulting document.
Returns:
string representation of the resulting latex document.
"""
doc = Document(
documentclass="scrbook",
document_options="paper=a4",
geometry_options={
"margin": f"{margin}cm",
"landscape": "true" if landscape else "false"
}
)
# set monospace font
monospace_comm = UnsafeCommand(
'renewcommand', r'\familydefault', extra_arguments=r'\ttdefault'
)
doc.preamble.append(monospace_comm)
# package in case longtables are used
doc.packages.append(Package('longtable'))
# package for booktabs automatically generated by pandas.to_latex()
doc.packages.append(Package('booktabs'))
doc.change_document_style("empty")
# embed latex table inside document
doc.append(NoEscape(table))
# dump function returns string representation of document
return tp.cast(str, doc.dumps())
|
the-stack_0_26534
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the rules_pkg distribution is usable."""
import os
import subprocess
import unittest
from bazel_tools.tools.python.runfiles import runfiles
from releasing import release_tools
from distro import release_version
_VERBOSE = True
class PackagingTest(unittest.TestCase):
"""Test the distribution packaging."""
def setUp(self):
self.data_files = runfiles.Create()
self.repo = 'rules_pkg'
self.version = release_version.RELEASE_VERSION
def testBuild(self):
# Set up a fresh Bazel workspace using the currently build repo.
tempdir = os.path.join(os.environ['TEST_TMPDIR'], 'build')
if not os.path.exists(tempdir):
os.makedirs(tempdir)
with open(os.path.join(tempdir, 'WORKSPACE'), 'w') as workspace:
file_name = release_tools.package_basename(self.repo, self.version)
local_path = runfiles.Create().Rlocation(
os.path.join('rules_pkg', 'distro', file_name))
sha256 = release_tools.get_package_sha256(local_path)
workspace_content = '\n'.join((
'workspace(name = "test_rules_pkg_packaging")',
release_tools.workspace_content(
'file://%s' % local_path, self.repo, sha256)
))
workspace.write(workspace_content)
if _VERBOSE:
print('=== WORKSPACE ===')
print(workspace_content)
# We do a little dance of renaming *.tmpl to *, mostly so that we do not
# have a BUILD file in testdata, which would create a package boundary.
def CopyTestFile(source_name, dest_name):
source_path = self.data_files.Rlocation(
os.path.join('rules_pkg', 'distro', 'testdata', source_name))
with open(source_path) as inp:
with open(os.path.join(tempdir, dest_name), 'w') as out:
content = inp.read()
out.write(content)
CopyTestFile('BUILD.tmpl', 'BUILD')
os.chdir(tempdir)
build_result = subprocess.check_output(['bazel', 'build', ':dummy_tar'])
if _VERBOSE:
print('=== Build Result ===')
print(build_result)
content = subprocess.check_output(
['/bin/tar', 'tzf', 'bazel-bin/dummy_tar.tar.gz'])
self.assertEqual(b'./\n./BUILD\n', content)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_26542
|
#!/usr/bin/env python
# check that some internal data structures are consistent
import sys
import glob
import subprocess
from pyang import util
from pyang import error
from pyang import grammar
from pyang import syntax
def oscmd(cmd):
p = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
res = p.stdout.read()
err = p.stderr.read()
retcode = p.returncode
if len(res) > 0 and res[-1] == '\n':
res = res[:-1]
if len(err) > 0 and err[-1] == '\n':
err = err[:-1]
return (retcode, res, err)
found_error = False
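# Ensure every error code defined in pyang.error is referenced somewhere in
# the code base (outside error.py itself).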
def chk_error_codes():
global found_error
files = glob.glob("../pyang/*.py") + glob.glob("../pyang/*/*.py")
del files[files.index('../pyang/error.py')]
filesstr = ' '.join(files)
for x in error.error_codes:
(retcode, res, err) = oscmd('grep %s %s' % (x, filesstr))
if retcode != 0:
sys.stderr.write("Error code: %s not used" % x)
found_error = True
def chk_stmts():
stmtmaps = [(grammar.stmt_map, "grammar.stmt_map"),
(syntax.yin_map, "syntax.yin_map")]
for (map, name) in stmtmaps:
for stmt in map:
targets = util.listsdelete((map,name), stmtmaps)
for (tmap, tname) in targets:
if stmt not in tmap:
sys.stderr.write("Stmt %s in %s not found in %s" % \
(stmt, name, tname))
chk_error_codes()
chk_stmts()
if found_error:
sys.exit(1)
|
the-stack_0_26543
|
# this code is a copy/paste of
# https://github.com/scikit-learn/scikit-learn/blob/
# b0b8a39d8bb80611398e4c57895420d5cb1dfe09/doc/sphinxext/github_link.py
from operator import attrgetter
import inspect
import subprocess
import os
import sys
from functools import partial
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
print("Failed to execute git to get revision")
return None
return revision.decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
# Unwrap the object to get the correct source
# file in case that is wrapped by a decorator
obj = inspect.unwrap(obj)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
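    # Express the source file relative to the package root so it can be
    # substituted into the URL template's {path} placeholder.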
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
"""Returns a linkcode_resolve function for the given URL format
revision is a git commit reference (hash or name)
package is the name of the root module of the package
url_fmt is along the lines of ('https://github.com/USER/PROJECT/'
'blob/{revision}/{package}/'
'{path}#L{lineno}')
"""
revision = _get_git_revision()
return partial(
_linkcode_resolve, revision=revision, package=package, url_fmt=url_fmt
)
|
the-stack_0_26544
|
# -*- coding: utf-8 -*-
"""
Created on Nov 24, 2014
@author: moloch
Copyright 2014 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Handlers related to controlling and configuring the overall game.
"""
# pylint: disable=unused-wildcard-import,no-member
import os
import subprocess
import logging
import defusedxml.minidom
import xml.etree.cElementTree as ET
import time
from tempfile import NamedTemporaryFile
from builtins import str
from models.Flag import Flag
from models.Box import Box
from models.Swat import Swat
from models.GameLevel import GameLevel
from models.User import ADMIN_PERMISSION
from models.Team import Team
from models.Penalty import Penalty
from models.Snapshot import Snapshot
from models.SnapshotTeam import SnapshotTeam
from models.SourceCode import SourceCode
from models.Corporation import Corporation
from models.Category import Category
from models.Notification import Notification
from models.RegistrationToken import RegistrationToken
from libs.EventManager import EventManager
from libs.SecurityDecorators import *
from libs.StringCoding import encode, decode
from libs.ValidationError import ValidationError
from libs.ConfigHelpers import save_config
from libs.GameHistory import GameHistory
from libs.ConsoleColors import *
from handlers.BaseHandlers import BaseHandler
from string import printable
from setup.xmlsetup import import_xml
from tornado.options import options
from past.builtins import basestring
from datetime import datetime
class AdminGameHandler(BaseHandler):
""" Start or stop the game """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
start_game = self.get_argument("start_game", None)
suspend_reg = self.get_argument("suspend_registration", "false")
freeze_score = self.get_argument("freeze_scoreboard", "false")
if start_game:
if self.get_argument("start_game", "") == "true":
self.start_game()
else:
self.stop_game()
if suspend_reg == "true":
self.application.settings["suspend_registration"] = True
elif suspend_reg == "false":
self.application.settings["suspend_registration"] = False
if freeze_score == "false":
self.application.settings["freeze_scoreboard"] = False
if self.application.settings["temp_global_notifications"] is not None:
options.global_notification = self.application.settings[
"temp_global_notifications"
]
self.application.settings["temp_global_notifications"] = None
self.event_manager.push_scoreboard()
elif freeze_score:
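            # A positive value freezes the scoreboard for that many minutes and
            # stashes the global notification setting while the freeze is active.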
diff = 60 * int(freeze_score)
self.application.settings["freeze_scoreboard"] = time.time() + diff
self.application.settings[
"temp_global_notifications"
] = options.global_notification
options.global_notification = False
self.event_manager.push_scoreboard()
self.redirect("/user")
def start_game(self):
""" Start the game and any related callbacks """
if not self.application.settings["game_started"]:
logging.info("The game is about to begin, good hunting!")
self.application.settings["game_started"] = True
self.application.settings["history_callback"].start()
if self.config.use_bots:
self.application.settings["score_bots_callback"].start()
self.set_all_users_lock(False)
def stop_game(self):
""" Stop the game and all callbacks """
if self.application.settings["game_started"]:
logging.info("The game is stopping ...")
self.application.settings["game_started"] = False
self.application.settings["suspend_registration"] = False
self.application.settings["freeze_scoreboard"] = False
if self.application.settings["history_callback"]._running:
self.application.settings["history_callback"].stop()
if self.application.settings["score_bots_callback"]._running:
self.application.settings["score_bots_callback"].stop()
self.set_all_users_lock(True)
def set_all_users_lock(self, lock):
""" Set the lock attribute on all accounts """
for user in User.all_users():
user.locked = lock
self.dbsession.add(user)
self.dbsession.commit()
class AdminMessageHandler(BaseHandler):
event_manager = EventManager.instance()
""" Send a global notification message """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
message = self.get_argument("message", "")
if len(message) > 0:
self.event_manager.admin_message(message)
if self.chatsession:
self.chatsession.post_message(message)
self.redirect("/user")
class AdminRegTokenHandler(BaseHandler):
""" Manages registration tokens """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Call method based on URI """
uri = {"create": self.create, "view": self.view}
if len(args) and args[0] in uri:
uri[args[0]]()
else:
self.render("public/404.html")
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
""" Used to delete regtokens """
token_value = self.get_argument("token_value", "")
reg_token = RegistrationToken.by_value(token_value)
if reg_token is not None:
self.dbsession.delete(reg_token)
self.dbsession.commit()
self.redirect("/admin/regtoken/view")
else:
self.render("admin/view/token.html", errors=["Token does not exist"])
def create(self):
""" Adds a registration token to the db and displays the value """
token = RegistrationToken()
self.dbsession.add(token)
self.dbsession.commit()
self.render("admin/create/token.html", token=token)
def view(self):
""" View all reg tokens """
self.render("admin/view/token.html", errors=None)
class AdminSourceCodeMarketHandler(BaseHandler):
""" Add source code files to the source code market """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
self.render("admin/upgrades/source_code_market.html", errors=None)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
uri = {"/add": self.add_source_code, "/delete": self.delete_source_code}
if len(args) and args[0] in uri:
try:
uri[args[0]]()
except ValidationError as error:
self.render(
"admin/upgrades/source_code_market.html", errors=[str(error)]
)
else:
self.render("public/404.html")
def add_source_code(self):
box = Box.by_uuid(self.get_argument("box_uuid", ""))
if box is not None:
            if "source_archive" not in self.request.files or len(self.request.files["source_archive"]) == 0:
                raise ValidationError("No file data")
            else:
                price = self.get_argument("price", "")
                self.create_source_code(box, price)
self.render("admin/upgrades/source_code_market.html", errors=None)
else:
raise ValidationError("The selected box does not exist")
def create_source_code(self, box, price):
""" Save file data and create object in database """
description = self.get_argument("description", "")
file_name = self.request.files["source_archive"][0]["filename"]
source_code = SourceCode(
file_name=file_name, box_id=box.id, price=price, description=description
)
self.dbsession.add(source_code)
self.dbsession.flush()
source_code.data = self.request.files["source_archive"][0]["body"]
self.dbsession.add(source_code)
self.dbsession.commit()
def delete_source_code(self):
""" Delete source code file """
uuid = self.get_argument("box_uuid", "")
box = Box.by_uuid(uuid)
if box is not None and box.source_code is not None:
box.source_code.delete_data()
self.dbsession.delete(box.source_code)
self.dbsession.commit()
else:
raise ValidationError("Box/source code does not exist")
self.render("admin/upgrades/source_code_market.html", errors=None)
class AdminSwatHandler(BaseHandler):
""" Manage SWAT requests """
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
self.render_page()
def render_page(self, errors=None):
""" Render page with extra arguments """
if errors is not None and not isinstance(errors, list):
errors = [str(errors)]
self.render(
"admin/upgrades/swat.html",
pending_bribes=Swat.all_pending(),
in_progress_bribes=Swat.all_in_progress(),
completed_bribes=Swat.all_completed(),
errors=errors,
)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
""" Accept/Complete bribes """
uri = {"/accept": self.accept_bribe, "/complete": self.complete_bribe}
if len(args) and args[0] in uri:
uri[args[0]]()
else:
self.render("public/404.html")
def accept_bribe(self):
""" Accept bribe, and lock user's account """
swat = Swat.by_uuid(self.get_argument("uuid", ""))
if swat is not None and not swat.completed:
logging.info("Accepted SWAT with uuid: %s", swat.uuid)
swat.accepted = True
swat.target.locked = True
self.dbsession.add(swat)
self.dbsession.add(swat.target)
self.dbsession.commit()
self.render_page()
else:
logging.warn(
"Invalid request to accept bribe with uuid: %r"
% (self.get_argument("uuid", ""),)
)
self.render_page("Requested SWAT object does not exist")
def complete_bribe(self):
""" Complete bribe and unlock user's account """
swat = Swat.by_uuid(self.get_argument("uuid", ""))
if swat is not None and not swat.completed:
logging.info("Completed SWAT with uuid: %s", swat.uuid)
swat.completed = True
swat.target.locked = False
self.dbsession.add(swat)
self.dbsession.add(swat.target)
self.dbsession.commit()
self.render_page()
else:
logging.warn(
"Invalid request to complete bribe with uuid: %r"
% (self.get_argument("uuid", ""),)
)
self.render_page("Requested SWAT object does not exist")
class AdminConfigurationHandler(BaseHandler):
""" Allows the admin to change some of the configuraiton options """
def get_int(self, name, default=0):
try:
return abs(int(self.get_argument(name, default)))
except:
return default
def get_bool(self, name, default=""):
if not isinstance(default, basestring):
default = str(default).lower()
return self.get_argument(name, default) == "true"
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
self.render("admin/configuration.html", errors=[], config=self.config)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
"""
Update configuration
        Disabled fields will not be sent in the POST, so check for blank values
"""
self.config.game_name = self.get_argument("game_name", "Root the Box")
self.config.restrict_registration = self.get_bool(
"restrict_registration", False
)
self.config.require_email = self.get_bool("require_email", True)
self.config.global_notification = self.get_bool("global_notification", True)
self.config.hints_taken = self.get_bool("hints_taken", False)
self.config.story_mode = self.get_bool("story_mode", False)
try:
self.config.rank_by = str(self.get_argument("rank_by", "money"))
except:
self.config.rank_by = bytes(self.get_argument("rank_by", "money"))
self.config.scoreboard_visibility = str(
self.get_argument("scoreboard_visibility", "public")
)
self.config.teams = self.get_bool("teams", True)
self.config.public_teams = self.get_bool("public_teams")
self.config.show_mvp = self.get_bool("show_mvp")
self.config.mvp_max = self.get_int("mvp_max", 10)
self.config.team_sharing = self.get_bool("team_sharing")
self.config.dynamic_flag_value = self.get_bool("dynamic_flag_value", False)
self.config.max_flag_attempts = self.get_int("max_flag_attempts", 100)
self.config.flag_value_decrease = self.get_int("flag_value_decrease")
self.config.penalize_flag_value = self.get_bool("penalize_flag_value", False)
self.config.flag_penalty_cost = self.get_int("flag_penalty_cost")
self.config.flag_stop_penalty = self.get_int("flag_stop_penalty")
self.config.flag_start_penalty = self.get_int("flag_start_penalty")
self.config.max_team_size = self.get_int("max_team_size")
self.config.min_user_password_length = self.get_int(
"min_user_password_length", 12
)
self.config.banking = self.get_bool("banking", True)
self.config.max_password_length = self.get_int("max_password_length", 7)
self.config.starting_team_money = self.get_int("starting_team_money", 500)
self.config_bots()
self.config.bot_reward = self.get_int("bot_reward", 50)
self.config.use_black_market = self.get_bool("use_black_market", True)
self.config.password_upgrade_cost = self.get_int("password_upgrade_cost", 1000)
self.config.bribe_cost = self.get_int("bribe_cost", 2500)
self.config.max_pastebin_size = self.get_int("max_pastebin_size", 4096)
self.render("admin/configuration.html", errors=None, config=self.config)
def config_bots(self):
""" Updates bot config, and starts/stops the botnet callback """
self.config.use_bots = self.get_bool("use_bots", True)
if (
self.config.use_bots
and not self.application.settings["score_bots_callback"]._running
):
logging.info("Starting botnet callback function")
self.application.settings["score_bots_callback"].start()
elif self.application.settings["score_bots_callback"]._running:
logging.info("Stopping botnet callback function")
self.application.settings["score_bots_callback"].stop()
def on_finish(self):
save_config()
class AdminGarbageCfgHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Download a Box's garbage file """
box = Box.by_uuid(self.get_argument("uuid", ""))
if box is not None:
data = box.get_garbage_cfg()
self.set_header("Content-Type", "text/plain")
self.set_header(
"Content-disposition",
"attachment; filename=%s.garbage"
                % ("".join(filter(lambda char: char in printable[:-38], box.name)),),
)
self.set_header("Content-Length", len(data))
self.write(data)
class AdminGitStatusHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Get the status of Git """
sp = subprocess.Popen(
["git", "fetch"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = sp.communicate()
if err:
git = "RTB Updates: Git unable to connect to repository"
else:
sp = subprocess.Popen(
["git", "status", "-uno"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
out, err = sp.communicate()
            out = out.decode("utf-8", errors="replace")
if "Your branch is behind" in out and "modified:" in out:
git = "RTB Updates: Modified files (merge conflicts)"
elif "Your branch is" in out:
branch = out.split("\n")
for line in branch:
if "Your branch is" in line:
git = "RTB Updates: " + line
break
else:
git = out
if git is not None:
self.set_header("Content-Type", "text/plain;charset=utf-8")
self.set_header("Content-Length", len(git))
self.write(git)
self.finish()
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
""" Update RTB to the latest repository code. """
os.system("git pull")
"""
Shutdown the actual process and restart the service.
"""
pid = os.getpid()
print(INFO + "%s : Restarting the service (%i)..." % (self.current_time(), pid))
self.finish()
os.execl("./setup/restart.sh", "--restart")
def current_time(self):
""" Nicely formatted current time as a string """
return str(datetime.now()).split(" ")[1].split(".")[0]
class AdminExportHandler(BaseHandler):
API_VERSION = "1"
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Export to document formats """
self.render("admin/export.html", errors=None)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
""" Include the requests exports in the xml dom """
root = ET.Element("rootthebox")
root.set("api", self.API_VERSION)
if self.get_argument("game_objects", "") == "true":
self.export_game_objects(root)
xml_dom = defusedxml.minidom.parseString(ET.tostring(root))
self.write_xml(xml_dom.toprettyxml())
def write_xml(self, xml_doc):
""" Write XML document to page """
self.set_header("Content-Type", "text/xml")
self.set_header(
"Content-disposition",
"attachment; filename=%s.xml"
% (self.config.game_name.replace("\n", "").replace("\r", ""),),
)
self.set_header("Content-Length", len(encode(xml_doc, "utf-8")))
self.write(encode(xml_doc, "utf-8"))
self.finish()
def export_game_objects(self, root):
"""
Exports the game objects to an XML doc.
For the record, I hate XML with a passion.
"""
levels_elem = ET.SubElement(root, "gamelevels")
levels_elem.set("count", str(GameLevel.count()))
for level in GameLevel.all()[1:]:
level.to_xml(levels_elem)
category_elem = ET.SubElement(root, "categories")
category_elem.set("count", str(Category.count()))
for category in Category.all():
category.to_xml(category_elem)
corps_elem = ET.SubElement(root, "corporations")
corps_elem.set("count", str(Corporation.count()))
for corp in Corporation.all():
corp.to_xml(corps_elem)
class AdminImportXmlHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Import setup files """
self.render("admin/import.html", success=None, errors=None)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
"""
Import XML file uploaded by the admin.
"""
if "xml_file" in self.request.files:
fxml = self._get_tmp_file()
errors = []
success = None
if import_xml(fxml):
success = "Successfully imported XML objects"
else:
errors.append("Failed to parse file correctly.")
os.unlink(fxml)
self.render("admin/import.html", success=success, errors=errors)
else:
self.render("admin/import.html", success=None, errors=["No file data."])
def _get_tmp_file(self):
""" Creates a tmp file with the file data """
data = self.request.files["xml_file"][0]["body"]
tmp_file = NamedTemporaryFile(delete=False)
tmp_file.write(data)
tmp_file.close()
return tmp_file.name
class AdminResetHandler(BaseHandler):
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def get(self, *args, **kwargs):
""" Reset Game Information """
self.render("admin/reset.html", success=None, errors=None)
@restrict_ip_address
@authenticated
@authorized(ADMIN_PERMISSION)
def post(self, *args, **kwargs):
"""
Reset the Game
"""
errors = []
success = None
try:
users = User.all()
for user in users:
user.money = 0
teams = Team.all()
for team in teams:
if options.banking:
team.money = options.starting_team_money
else:
team.money = 0
team.flags = []
team.hints = []
team.boxes = []
team.items = []
team.purchased_source_code = []
level_0 = GameLevel.by_number(0)
if not level_0:
level_0 = GameLevel.all()[0]
team.game_levels = [level_0]
self.dbsession.add(team)
self.dbsession.commit()
self.dbsession.flush()
for team in teams:
for paste in team.pastes:
self.dbsession.delete(paste)
for shared_file in team.files:
shared_file.delete_data()
self.dbsession.delete(shared_file)
self.dbsession.commit()
self.dbsession.flush()
Penalty.clear()
Notification.clear()
snapshot = Snapshot.all()
for snap in snapshot:
self.dbsession.delete(snap)
self.dbsession.commit()
snapshot_team = SnapshotTeam.all()
for snap in snapshot_team:
self.dbsession.delete(snap)
self.dbsession.commit()
game_history = GameHistory.instance()
game_history.take_snapshot() # Take starting snapshot
flags = Flag.all()
for flag in flags:
flag.value = (
flag._original_value if flag._original_value else flag.value
)
self.dbsession.add(flag)
self.dbsession.commit()
self.dbsession.flush()
self.flush_memcached()
success = "Successfully Reset Game"
self.render("admin/reset.html", success=success, errors=errors)
except BaseException as e:
errors.append("Failed to Reset Game")
logging.error(str(e))
self.render("admin/reset.html", success=None, errors=errors)
|
the-stack_0_26548
|
from __future__ import print_function
import os, sys
if sys.version_info[0] < 3:
print('Your Python version is too old! Please use Python 3.0 or higher.')
sys.exit(1)
import hashlib
import fnmatch
import configparser
import argparse
import platform
os.chdir(os.path.dirname(os.path.abspath(__file__)))
warnings = []
BULLET = '* o '
DEFAULT_FILTERMODE = 'NONE'
DEFAULT_FILTERFILE = ''
DEFAULT_SUBDIRS = 'TRUE'
DEFAULT_MAXFILESIZE = 0
DEFAULT_INCLUDEEMPTYFILES = 'FALSE'
DEFAULT_BLOCKSIZE = 65536
DEFAULT_HASHALGORITHM = 1
DEFAULT_CSV_FOLDER = os.getcwd()
DEFAULT_CSV = ''
MIN_BLOCKSIZE = 65536
try:
    maxcol = os.get_terminal_size().columns - 2
except OSError:
    # Output was piped to a file or another process, so there is no terminal size to query
    maxcol = sys.maxsize - 2
def findDup(parentFolder, filters, scanOptions):
    # First pass: group files by size alone, which avoids reading every file's contents.
    # The much slower hashing pass then only runs on the groups that contain more than one file,
    # so only likely duplicate candidates are ever read from disk.
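    # Illustrative sketch of the intermediate structure (hypothetical paths, not real data):
    #   sizeDups = {4096: ['/a/report.pdf', '/b/report_copy.pdf'], 17: ['/a/notes.txt']}
    # Only the 4096-byte group has more than one member, so only those two files are hashed later.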
sizeDups = {}
hashDups = {}
filterMode = scanOptions['FilterMode']
numFiles = 0
for dirName, subdirs, fileList in os.walk(parentFolder):
newDirName = True
for fileName in fileList:
numFiles = numFiles + 1
if ((scanOptions['SubDirs'].upper()=='FALSE') and (dirName == parentFolder)) or (scanOptions['SubDirs'].upper()!='FALSE'):
# Get the path to the file
filterFound = False
# Calculate size
path = os.path.join(dirName, fileName)
for filter_fn in filters:
if fnmatch.fnmatch(path, filter_fn):
filterFound=True
if (not filterFound and filterMode.upper() == 'EXCLUDE') or (filterFound and filterMode.upper() == 'INCLUDE') or (filterMode.upper()=='NONE'):
if newDirName:
print(' ' * maxcol, end='\r')
print('Scanning ' + shortenName(dirName, maxcol - 9), end='\r')
newDirName = False
try:
fileSize = int(os.path.getsize(path))
except:
fileSize = -1
else:
fileSize = -1
# Add or append the file path
if (fileSize != -1):
if ((fileSize == 0 and scanOptions['MaxFileSize'] == 0 and scanOptions['IncludeEmptyFiles'].upper() == 'TRUE')
or (fileSize == 0 and scanOptions['MaxFileSize'] > 0 and scanOptions['IncludeEmptyFiles'].upper() == 'TRUE')
or (fileSize > 0 and scanOptions['MaxFileSize'] == 0)
or (fileSize > 0 and scanOptions['MaxFileSize'] > 0 and scanOptions['MaxFileSize'] >= fileSize)):
if fileSize in sizeDups:
sizeDups[fileSize].append(path)
else:
sizeDups[fileSize] = [path]
print(' ' * maxcol, end='\r')
print (str(numFiles) + ' file(s) in',parentFolder, 'scanned.')
print ('Now checking potential duplicates...')
hashDups = findDupsInDict(sizeDups, scanOptions['HashAlgorithm'], scanOptions['Blocksize'])
return hashDups
def findDupsInDict(fileDict, hashAlgorithmVal, blocksize):
dups = {}
hashAlgorithms = {}
hashAlgorithms = getHashAlgorithms(hashAlgorithmVal)
results = list(filter(lambda x: len(x) > 1, fileDict.values()))
if len(results) > 0:
currResult = 0
percentComplete = 0
numResults = len(results)
for result in results:
currResult = currResult + 1
for subresult in result:
fileHash = hashfile(subresult, blocksize, hashAlgorithms)
if fileHash in dups and fileHash != 0:
dups[fileHash].append(subresult)
elif not(fileHash in dups) and fileHash != 0:
dups[fileHash] = [subresult]
print(' ' * maxcol, end='\r')
print ('Checking potential duplicate set', currResult, 'of', numResults, end='\r')
percentComplete = int(round(currResult / numResults,0))
print('')
return dups
# Joins two dictionaries
def joinDicts(dict1, dict2):
for key in dict2.keys():
if key in dict1:
dict1[key] = dict1[key] + dict2[key]
else:
dict1[key] = dict2[key]
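# Illustrative behaviour of joinDicts with hypothetical values (not data from a real scan):
#   d1 = {'hashA': ['a.txt']}; joinDicts(d1, {'hashA': ['b.txt'], 'hashB': ['c.txt']})
# leaves d1 == {'hashA': ['a.txt', 'b.txt'], 'hashB': ['c.txt']}; the first dict is mutated in place
# and nothing is returned.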
def getHashAlgorithms(algorithm_val):
hashAlgorithms = {}
valSHA512 = 32
valSHA384 = 16
valSHA256 = 8
valSHA224 = 4
valSHA1 = 2
valMD5 = 1
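    # The algorithm value is treated as a bit field built from the constants above. For example, a
    # (hypothetical) value of 9 decodes as 8 (SHA256) + 1 (MD5), so those two hashers are enabled.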
if not str(algorithm_val).isnumeric():
algorithm_val = valMD5
hashAlgorithms['useSHA512'] = False
hashAlgorithms['useSHA384'] = False
hashAlgorithms['useSHA256'] = False
hashAlgorithms['useSHA224'] = False
hashAlgorithms['useSHA1'] = False
hashAlgorithms['useMD5'] = False
if (algorithm_val <= 0) or (algorithm_val >= 64):
algorithm_val = 1
if algorithm_val >= valSHA512:
hashAlgorithms['useSHA512'] = True
algorithm_val = algorithm_val - valSHA512
if algorithm_val >= valSHA384:
hashAlgorithms['useSHA384'] = True
algorithm_val = algorithm_val - valSHA384
if algorithm_val >= valSHA256:
hashAlgorithms['useSHA256'] = True
algorithm_val = algorithm_val - valSHA256
if algorithm_val >= valSHA224:
hashAlgorithms['useSHA224'] = True
algorithm_val = algorithm_val - valSHA224
if algorithm_val >= valSHA1:
hashAlgorithms['useSHA1'] = True
algorithm_val = algorithm_val - valSHA1
if algorithm_val >= valMD5:
hashAlgorithms['useMD5'] = True
return hashAlgorithms
def hashfile(path, blocksize, hashAlgorithms):
compositeHash = ''
if int(blocksize) <= MIN_BLOCKSIZE:
blocksize = DEFAULT_BLOCKSIZE
try:
afile = open(path, 'rb')
if hashAlgorithms['useMD5']: hasherMD5 = hashlib.md5()
if hashAlgorithms['useSHA1']: hasherSHA1 = hashlib.sha1()
if hashAlgorithms['useSHA224']: hasherSHA224 = hashlib.sha224()
if hashAlgorithms['useSHA256']: hasherSHA256 = hashlib.sha256()
if hashAlgorithms['useSHA384']: hasherSHA384 = hashlib.sha384()
if hashAlgorithms['useSHA512']: hasherSHA512 = hashlib.sha512()
buf = afile.read(blocksize)
while len(buf) > 0:
if hashAlgorithms['useMD5']: hasherMD5.update(buf)
if hashAlgorithms['useSHA1']: hasherSHA1.update(buf)
if hashAlgorithms['useSHA224']: hasherSHA224.update(buf)
if hashAlgorithms['useSHA256']: hasherSHA256.update(buf)
if hashAlgorithms['useSHA384']: hasherSHA384.update(buf)
if hashAlgorithms['useSHA512']: hasherSHA512.update(buf)
buf = afile.read(blocksize)
afile.close()
if hashAlgorithms['useMD5']: compositeHash = compositeHash + hasherMD5.hexdigest()
if hashAlgorithms['useSHA1']: compositeHash = compositeHash + hasherSHA1.hexdigest()
if hashAlgorithms['useSHA224']: compositeHash = compositeHash + hasherSHA224.hexdigest()
if hashAlgorithms['useSHA256']: compositeHash = compositeHash + hasherSHA256.hexdigest()
if hashAlgorithms['useSHA384']: compositeHash = compositeHash + hasherSHA384.hexdigest()
if hashAlgorithms['useSHA512']: compositeHash = compositeHash + hasherSHA512.hexdigest()
return compositeHash
except:
warnings.append('WARNING: Could not calculate the hash of ' + path)
return 0
def printResults(dict1, csvOutput):
if (not os.path.exists(os.path.dirname(csvOutput)) and csvOutput != ''):
if os.path.dirname(csvOutput) == '':
newCsvOutput = os.path.join(DEFAULT_CSV_FOLDER, csvOutput)
else:
newCsvOutput = csvOutput.replace(os.path.dirname(csvOutput), DEFAULT_CSV_FOLDER)
warnings.append('WARNING: The folder name "' + os.path.dirname(csvOutput)
+ '" for the CSV output file does not exist. '
+ 'Results will be saved in ' + newCsvOutput + ' instead.')
csvOutput = newCsvOutput
results = list(filter(lambda x: len(x) > 1, dict1.values()))
print('')
print('************************************************************')
if len(results) > 0:
if csvOutput !='': f = open(csvOutput, 'w+')
print('* RESULTS: DUPLICATES FOUND:')
if csvOutput !='': f.write('DUPLICATES FOUND:\nFile Name,File Size (bytes)')
print('* ---------------------------------------------------------')
for result in results:
if csvOutput !='': f.write('\n')
for subresult in result:
print('* \t' + subresult)
if csvOutput !='': f.write(subresult + ',' + str(os.path.getsize(subresult)) + '\n')
print('* ---------------------------------------------------------\n*')
if csvOutput !='': f.close()
else:
print('* RESULTS: NO DUPLICATE FILES FOUND')
print('************************************************************')
def loadDefaultScanOptions():
#These values will be used if they are not set through config file or command line parameters
scanOptions = {}
scanOptions['FilterMode'] = DEFAULT_FILTERMODE
scanOptions['FilterFile'] = DEFAULT_FILTERFILE
scanOptions['SubDirs'] = DEFAULT_SUBDIRS
scanOptions['MaxFileSize'] = DEFAULT_MAXFILESIZE
scanOptions['IncludeEmptyFiles'] = DEFAULT_INCLUDEEMPTYFILES
scanOptions['Blocksize'] = DEFAULT_BLOCKSIZE
scanOptions['HashAlgorithm'] = DEFAULT_HASHALGORITHM
scanOptions['CSVOutput'] = DEFAULT_CSV
return scanOptions
def loadConfigFileScanOptions(configFile):
#These values will override the defaults if they are set
scanOptions = {}
scanOptions = loadDefaultScanOptions()
if os.path.exists(configFile):
config = configparser.ConfigParser()
with open(configFile) as cf:
config.read_file(cf)
        if config.has_option('General', 'FilterMode') and config.get('General', 'FilterMode').upper() in ('NONE', 'INCLUDE', 'EXCLUDE'):
            scanOptions['FilterMode'] = config.get('General', 'FilterMode').upper()
        if (scanOptions['FilterMode'].upper() != 'NONE') and config.has_option('General', 'FilterFile') and (os.path.exists(config.get('General', 'FilterFile'))):
            scanOptions['FilterFile'] = config.get('General', 'FilterFile')
if config.has_option('Scan Options', 'SubDirs') and (config.get('Scan Options', 'SubDirs').upper() == 'TRUE' or config.get('Scan Options', 'SubDirs').upper() == 'FALSE'):
scanOptions['SubDirs'] = config.get('Scan Options', 'SubDirs').upper()
if config.has_option('Scan Options', 'MaxFileSize') and (config.get('Scan Options', 'MaxFileSize').isnumeric()):
scanOptions['MaxFileSize'] = int(config.get('Scan Options', 'MaxFileSize'))
        if config.has_option('Scan Options', 'IncludeEmptyFiles') and (config.get('Scan Options', 'IncludeEmptyFiles').upper() == 'TRUE' or config.get('Scan Options', 'IncludeEmptyFiles').upper() == 'FALSE'):
scanOptions['IncludeEmptyFiles'] = config.get('Scan Options', 'IncludeEmptyFiles').upper()
if config.has_option('Advanced', 'Blocksize') and (config.get('Advanced', 'Blocksize').isnumeric()):
scanOptions['Blocksize'] = abs(int(config.get('Advanced', 'Blocksize')))
if scanOptions['Blocksize'] <= MIN_BLOCKSIZE: scanOptions['Blocksize'] = MIN_BLOCKSIZE
if config.has_option('Advanced', 'HashAlgorithm') and (config.get('Advanced', 'HashAlgorithm').isnumeric()):
scanOptions['HashAlgorithm'] = int(config.get('Advanced', 'HashAlgorithm'))
if config.has_option('Scan Options', 'CSVOutput'):
scanOptions['CSVOutput'] = str(config.get('Scan Options', 'CSVOutput'))
return scanOptions
def loadFilters(filterFile):
if os.path.exists(filterFile):
with open(filterFile) as f:
filters = f.read().splitlines()
else:
filters = []
return filters
def printHashAlgorithms(hashAlgorithms):
print('* USING ALGORITHMS:')
print('* -----------------')
if hashAlgorithms['useMD5']: print(BULLET + 'MD5')
if hashAlgorithms['useSHA1']: print(BULLET + 'SHA1')
if hashAlgorithms['useSHA224']: print(BULLET + 'SHA224')
if hashAlgorithms['useSHA256']: print(BULLET + 'SHA256')
if hashAlgorithms['useSHA384']: print(BULLET + 'SHA384')
if hashAlgorithms['useSHA512']: print(BULLET + 'SHA512')
def loadCommandLineScanOptions(args, scanOptions):
if args['filterMode'] != None and (args['filterMode'].upper()=='INCLUDE' or args['filterMode'].upper()=='EXCLUDE' or args['filterMode'].upper()=='NONE'):
scanOptions['FilterMode'] = args['filterMode'].upper()
if args['filterFile'] != None:
if os.path.exists(args['filterFile']):
scanOptions['FilterFile'] = args['filterFile']
if args['subDirs'] != None and (args['subDirs'].upper()=='TRUE' or args['subDirs'].upper()=='FALSE'):
scanOptions['SubDirs'] = args['subDirs'].upper()
if args['maxFileSize'] != None:
scanOptions['MaxFileSize'] = int(abs(args['maxFileSize']))
if (args['includeEmptyFiles'] != None) and ((args['includeEmptyFiles'].upper()=='TRUE') or args['includeEmptyFiles'].upper()=='FALSE'):
scanOptions['IncludeEmptyFiles'] = args['includeEmptyFiles'].upper()
if args['blocksize'] != None and abs(args['blocksize']) >= MIN_BLOCKSIZE:
scanOptions['Blocksize'] = int(abs(args['blocksize']))
if args['hashAlgorithm'] != None:
scanOptions['HashAlgorithm'] = int(args['hashAlgorithm'])
if args['csvOutput'] != None:
scanOptions['CSVOutput'] = args['csvOutput']
return scanOptions
def shortenName(stringToShorten, lengthToShorten):
if stringToShorten == None: return ''
if lengthToShorten == None: return stringToShorten
if lengthToShorten < 5: lengthToShorten = 5
if len(stringToShorten) <= lengthToShorten:
shortenedString = stringToShorten
else:
splitSize = int(round((lengthToShorten-3) / 2,0))
shortenedString = stringToShorten[:splitSize] + '...' + stringToShorten[-splitSize:]
return shortenedString
def padSpaces(stringToPad, lengthToPad):
stringToPad = str(stringToPad)
while len(stringToPad) < lengthToPad:
stringToPad = stringToPad + ' '
return stringToPad
def printSettings(folders, scanOptions, filters):
print('')
print('************************************************************')
printHashAlgorithms(getHashAlgorithms(scanOptions['HashAlgorithm']))
print('* \n* FOLDER(S) TO SCAN:')
print('* ------------------')
for x in folders: print(BULLET + str(x))
print('* \n* SCAN OPTIONS USED:')
print('* ------------------')
for x in scanOptions: print(BULLET + padSpaces(str(x),20) + ': ' + str(scanOptions[x]))
if len(filters) > 0:
print('* FILTERS: ')
print('* --------')
for x in filters: print(BULLET + str(x))
print('*\n************************************************************')
print ('')
def printWarnings(warnings):
if len(warnings) > 0:
print('')
print('************************************************************')
print('* WARNINGS:')
for x in range(len(warnings)): print (BULLET + ' ' + warnings[x])
print('************************************************************')
print('')
def getConfigurations(cmdArgs):
#First load the default scan options
scanOptions = {}
scanOptions = loadDefaultScanOptions()
#Then over-write these default scan options with any values supplied in a configuration file
config = configparser.ConfigParser()
scanOptions['ConfigFile']=''
if cmdArgs['configFile'] != None: scanOptions['ConfigFile'] = cmdArgs['configFile']
configFile = scanOptions['ConfigFile']
if os.path.exists(configFile): scanOptions = loadConfigFileScanOptions(configFile)
#Finally over-write these scan options with any explicitly supplied in the command line itself
loadCommandLineScanOptions(cmdArgs, scanOptions)
return scanOptions
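# Illustrative precedence sketch for getConfigurations (hypothetical values): the defaults give
# Blocksize=65536; a config file entry Blocksize=131072 overrides that; and passing
# "-bs 262144" on the command line overrides both.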
def getFilters(filterFile, cmdFilters):
#If a filter has been set in the commandline, use that. Otherwise try to get it from the config file
if filterFile != None and filterFile != '' and cmdFilters != None:
warnings.append('INFO: Supplied --filters command line parameter will take precedence over supplied --filterMode parameter or config file settings')
if cmdFilters != None and cmdFilters != '':
filters = cmdFilters
elif filterFile != None and filterFile != '':
filters = loadFilters(filterFile)
else:
filters = []
return filters
def getDupsInFolders(folders):
#Iterate through each supplied folder name and start scanning for duplicates
for i in folders:
if i[-1] == ':' and platform.system() == 'Windows': i = i + '\\'
if os.path.exists(i):
# Find the duplicated files and append them to the dups
joinDicts(dups, findDup(i, filters, scanOptions))
else:
warnings.append('WARNING: ' + str(i) + ' is not a valid path, please verify')
return dups
if __name__ == '__main__':
dups = {}
#Read the command line parameters
parser = argparse.ArgumentParser(description='Search for duplicate files in one or more folders')
parser.add_argument('-cfg', '--configFile', help='Configuration File for script', required=False)
parser.add_argument('-fm', '--filterMode', help='Filter Mode', choices=['INCLUDE', 'EXCLUDE', 'NONE'], required=False)
parser.add_argument('-ff', '--filterFile', help='File containing list of filters to be applied if Filter Mode is not NONE', required=False)
parser.add_argument('-f', '--filters', nargs='+', help = 'List of filters', required=False)
parser.add_argument('-s', '--subDirs', help='Scan subdirectories of selected folders?', choices=['TRUE', 'FALSE'], required=False)
parser.add_argument('-ms', '--maxFileSize', type=int, help='Maximum size of files to be scanned', required=False)
parser.add_argument('-emp', '--includeEmptyFiles', help='Include files with no content in results?', choices=['TRUE', 'FALSE'], required=False)
parser.add_argument('-bs', '--blocksize', type=int, help='Blocksize for file reads', required=False)
parser.add_argument('-ha', '--hashAlgorithm', type=int, help='Algorithm(s) to be used for file hashing', required=False)
parser.add_argument('-csv', '--csvOutput', help='Path to output results in CSV format', required=False)
parser.add_argument('-dirs', '--directories', nargs='+', help = 'List of directories to scan', required=True)
args = vars(parser.parse_args())
#Construct the set of scan options from command line parameters (1st precedence), configuration file settings (2nd precedence), and default values (fallback)
scanOptions = getConfigurations(args)
#Get the filter list to be used, if any
filters = getFilters(scanOptions['FilterFile'], args['filters'])
#Get list of directories to be scanned (currently can only be a command line parameter)
folders = args['directories']
#Print the list of settings to the console
printSettings(folders, scanOptions, filters)
#Find all the duplicates
dups = getDupsInFolders(folders)
#Print the results to the console and any output file specified
printResults(dups, scanOptions['CSVOutput'])
#Print any errors / warnings and the duplicates found to the consoles
printWarnings(warnings)
|
the-stack_0_26549
|
#!/usr/bin/python3
import sys, os, re
regex_alpha = re.compile(r"[a-zA-ZäöüÄÖÜß]+")
charmap = {
'ä': 'a',
'ö': 'o',
'ü': 'u',
'ß': 'ss',
'Ä': 'A',
'Ö': 'O',
'Ü': 'U'
}
def key_function(t):
return "".join((charmap[c] if c in charmap else c) for c in t[0].lower())
def sift(s,n):
return set(text for text in regex_alpha.findall(s) if len(text)>n)
def get_indexlist():
f = open("indexlist.txt","r")
m = sift(f.read(),1)
f.close()
return m
class Index:
def __init__(self):
self.pool = {}
self.index = {}
def append_path(self, path):
if path in self.pool:
self.pool[path]["count"] += 1
else:
self.pool[path] = {"count": 0}
def append(self,text,name,path):
self.append_path(path)
if text in self.index:
self.index[text].append([name,path])
else:
self.index[text] = [[name,path]]
def calculate_pool(self):
a = list(self.pool.items())
        # The more frequently a path occurs, the smaller the pool index it is assigned,
        # so the references stored in the index stay short.
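        # Sketch with hypothetical paths: if '/a.htm' was appended more often than '/b.htm', the
        # descending sort by count below assigns '/a.htm' index 0 and '/b.htm' index 1.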
a.sort(key = lambda t: -t[1]["count"])
for index,t in enumerate(a):
t[1]["index"] = index
self.pool_data = [t[0] for t in a]
def finalize(self):
self.calculate_pool()
pool = self.pool
for record in self.index.items():
for t in record[1]:
t[1] = pool[t[1]]["index"]
def is_indexable(s):
return s[-4:]==".htm" or s[-4:]==".txt" or s[-4:]==".tex"
def generate_index():
a = list(os.walk("./"))
index = Index()
for t in a:
for name in t[2]:
if is_indexable(name):
path = os.path.join(t[0],name)
f = open(path,'r')
s = f.read()
f.close()
for text in sift(s,2):
index.append(text,name,path)
index.finalize()
pool = index.pool_data
for text, L in index.index.items():
L.sort(key=lambda t: t[0].lower())
index = list(index.index.items())
index.sort(key=key_function)
return index, pool
def generate_index_exclusive():
index_set = get_indexlist()
a = list(os.walk("./"))
d = {}
for t in a:
for name in t[2]:
if is_indexable(name):
path = os.path.join(t[0],name)
f = open(path,'r')
s = f.read()
f.close()
for text in index_set:
if text in s:
                        d.setdefault(text, []).append([name, path])
for text, L in d.items():
L.sort(key=lambda t: t[0].lower())
index = list(d.items())
index.sort(key=key_function)
return index
def file_list_to_html(a):
buffer = []
for name, path in a:
buffer.append("<a href=\""+path+"\">"+name+"</a>")
return ", ".join(buffer)
def index_to_html(index):
buffer = []
for text, a in index:
buffer.append("\n<dt>"+text)
buffer.append("\n<dd>"+file_list_to_html(a))
return "".join(buffer)
def wlist(index):
return "\n".join(text for text,a in index)
def file_list_to_json(a):
buffer = []
for name, path in a:
buffer.append('{}'.format(path))
return "[" + ",".join(buffer) + "]"
def index_to_json(index):
buffer = []
for text, a in index:
buffer.append("[\""+text+"\","+file_list_to_json(a)+"]")
return ",\n".join(buffer)
def pool_to_json(pool):
return ",\n".join("\"" + path + "\"" for path in pool)
def main():
cwd = os.getcwd()
os.chdir(sys.argv[1])
index, pool = generate_index()
os.chdir(cwd)
buffer = ["pool=[\n", pool_to_json(pool), "\n];\nindex=[\n",
index_to_json(index), "\n];\n"]
f = open("index.js", "w")
f.write("".join(buffer))
f.close()
# f = open("wlist.txt", "w")
# f.write(wlist(index))
# f.close()
main()
|
the-stack_0_26550
|
from distutils.core import setup
from os import path
# file_dir = path.abspath(path.dirname(__file__))
# Get the long description from the README file.
# try:
# import pypandoc
# long_description = pypandoc.convert('README.md', 'rst')
# long_description = long_description.replace('\r', '')
# except ImportError:
# print('Pandoc not found. Long_description conversion failure.')
# with open(path.join(file_dir, 'README.md'), encoding='utf-8') as f:
# long_description = f.read()
desc = '''Multitemporal denoising'''
setup(name='rabasar',
version='0.1dev',
description=desc,
long_description="", #long_description,
url='',
author='''Charlie Marshak, Marc Simard, Michael Denbina,''',
author_email='[email protected]',
keywords='rabasar despekling',
packages=['rabasar'], # setuptools.find_packages(exclude=['doc']),
# Required Packages
# We assume an environment specified by requirements.txt is provided We
# could take this approach:
# https://github.com/scikit-image/scikit-image/blob/master/setup.py#L117-L131
# but rather use the requirements.txt to specify a valid environment and
# not muddle the installation with pip and possibly conda.
install_requires=[],
)
|
the-stack_0_26551
|
import os
from ViTYOLO_simple import ViTYOLO
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from torch.autograd import Variable
from yoloLoss import yoloLoss
from datasetvit import yoloDataset
import numpy as np
use_gpu = torch.cuda.is_available()
file_root = 'allimgs/'
learning_rate = 0.001
num_epochs = 50
batch_size = 8
print('load pre-trained model')
print('cuda', torch.cuda.current_device(), torch.cuda.device_count())
criterion = yoloLoss(7,2,5,0.5)
net = ViTYOLO()
if use_gpu:
net.cuda()
net.train()
# different learning rate
params=[]
params_dict = dict(net.named_parameters())
for key,value in params_dict.items():
if key.startswith('features'):
params += [{'params':[value],'lr':learning_rate*1}]
else:
params += [{'params':[value],'lr':learning_rate}]
optimizer = torch.optim.SGD(params, lr=learning_rate, momentum=0.9, weight_decay=5e-4)
# optimizer = torch.optim.Adam(net.parameters(),lr=learning_rate,weight_decay=1e-4)
train_dataset = yoloDataset(root=file_root,list_file='listfile.txt',train=True,transform = [transforms.ToTensor()] )
train_loader = DataLoader(train_dataset,batch_size=batch_size,shuffle=True,num_workers=0)
test_dataset = yoloDataset(root=file_root,list_file='voc2007test.txt',train=False,transform = [transforms.ToTensor()] )
test_loader = DataLoader(test_dataset,batch_size=batch_size,shuffle=False,num_workers=0)
print('the dataset has %d images' % (len(train_dataset)))
print('the batch_size is %d' % (batch_size))
logfile = open('log.txt', 'w')
num_iter = 0
best_test_loss = np.inf
for epoch in range(num_epochs):
net.train()
# if epoch == 1:
# learning_rate = 0.0005
# if epoch == 2:
# learning_rate = 0.00075
# if epoch == 3:
# learning_rate = 0.001
if epoch == 30:
learning_rate=0.0001
if epoch == 40:
learning_rate=0.00001
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
print('\n\nStarting epoch %d / %d' % (epoch + 1, num_epochs))
print('Learning Rate for this epoch: {}'.format(learning_rate))
total_loss = 0.
for i,(images,target) in enumerate(train_loader):
images = Variable(images)
target = Variable(target)
if use_gpu:
images,target = images.cuda(),target.cuda()
pred = net(images)
loss = criterion(pred,target)
total_loss += loss.data.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i+1) % 50 == 0:
print ('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f, average_loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_loader), loss.data.item(), total_loss / (i+1)))
num_iter += 1
    #validation
    validation_loss = 0.0
    net.eval()
    # Variable(..., volatile=True) was removed in PyTorch 0.4+; torch.no_grad() is the modern equivalent
    with torch.no_grad():
        for i,(images,target) in enumerate(test_loader):
            if use_gpu:
                images,target = images.cuda(),target.cuda()
            pred = net(images)
            loss = criterion(pred,target)
            validation_loss += loss.data.item()
    validation_loss /= len(test_loader)
if best_test_loss > validation_loss:
best_test_loss = validation_loss
print('get best test loss %.5f' % best_test_loss)
torch.save(net.state_dict(),'best.pth')
logfile.writelines(str(epoch) + '\t' + str(validation_loss) + '\n')
logfile.flush()
torch.save(net.state_dict(),'yolo.pth')
|
the-stack_0_26553
|
from django.shortcuts import render
from django.contrib import messages
from django.shortcuts import render_to_response, redirect
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse, HttpResponse
from django.utils import timezone
from constance import config # For the explicitly user-configurable stuff
from .decorators import site_is_configured, login_if_required_for_dashboard
from . import device_forms, profile_forms, beer_forms, setup_forms
from . import setup_views, mdnsLocator, almost_json, git_integration, connection_debug, udev_integration
import json, datetime, pytz, os, random, sys, subprocess
import fermentrack_django.settings as settings
from app.models import BrewPiDevice, OldControlConstants, NewControlConstants, PinDevice, SensorDevice, BeerLogPoint, Beer
from external_push.models import GenericPushTarget
from django.contrib.auth.models import User
def error_notifications(request):
if config.GIT_UPDATE_TYPE != "none":
# TODO - Reset this to 18 hours
# Check the git status at least every 6 hours
now_time = timezone.now()
try:
if config.LAST_GIT_CHECK < now_time - datetime.timedelta(hours=6):
try:
if git_integration.app_is_current():
config.LAST_GIT_CHECK = now_time
else:
messages.info(request, "This app is not at the latest version! " +
'<a href="/upgrade"">Upgrade from GitHub</a> to receive the latest version.')
except:
# If we can't check for the latest version info, skip and move on
pass
except:
# So here's the deal. On Python3 conversion, any datetime.datetime objects stored in Constance end up
# getting unpickled poorly. It's truly quite a pickle! Ahhhahahahaha, I crack myself up. Anyways, just
# overwrite it. Git check can happen on next refresh.
config.LAST_GIT_CHECK = now_time - datetime.timedelta(hours=18)
config.FIRMWARE_LIST_LAST_REFRESHED = now_time - datetime.timedelta(hours=72)
if not config.ALLOW_GIT_BRANCH_SWITCHING:
        # The user is using one of the two "default" branches (dev or master). Make sure that the branch he/she is
# actually using is the same as the one that he/she wanted.
# Don't check if the user has custom branch switching though, as they should be allowed to pick whatever
# branch he/she wants.
if settings.GIT_BRANCH != config.GIT_UPDATE_TYPE:
if config.GIT_UPDATE_TYPE not in [x for x,_ in settings.CONSTANCE_ADDITIONAL_FIELDS['git_update_type_select'][1]['choices']]:
# TODO - Fix this to pick up the default
config.GIT_UPDATE_TYPE = "dev"
else:
messages.warning(request, "You selected to update from the {} code ".format(config.GIT_UPDATE_TYPE) +
"branch, but you are currently using the {} branch. ".format(settings.GIT_BRANCH) +
'Click <a href="/upgrade">here</a> to update to the correct branch.')
# This is a good idea to do, but unfortunately sshwarn doesn't get removed when the password is changed, only when
# the user logs in a second time. Once I have time to make a "help" page for this, I'll readd this check
# TODO - Readd this check
# if os.path.isfile("/var/run/sshwarn"):
# messages.warning(request, "You have SSH enabled on the Raspberry Pi, but the default (pi) user's password is "
# "unchanged! This is potentially a major security issue. Please SSH in, change the "
# "password, and SSH in one more time to test that it worked. Otherwise, we'll keep "
# "annoying you until you do.")
# Siteroot is a lazy way of determining where to direct the user when they go to http://devicename.local/
def siteroot(request):
# In addition to requiring the site to be configured, we require that there be a user account. Due to the
# setup workflow, the user will generally be created before constance configuration takes place, but if
# the user account gets deleted (for example, in the admin) we want the user to go through that portion
# of account setup.
num_users=User.objects.all().count()
if not config.USER_HAS_COMPLETED_CONFIGURATION or num_users <= 0:
# If things aren't configured, redirect to the guided setup workflow
return redirect('setup_splash')
else:
# Notify the user of things like git being out of date, issues with SSH, etc.
error_notifications(request)
# The default screen is the "lcd list" screen
return render(request, template_name="siteroot.html")
# return device_lcd_list(request=request)
@login_required
@site_is_configured # Checks if the user completed constance configuration
def add_device(request):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
if request.POST:
form = device_forms.DeviceForm(request.POST)
if form.is_valid():
# TODO - Add support for editing to this
new_device = BrewPiDevice(
device_name=form.cleaned_data['device_name'],
temp_format=form.cleaned_data['temp_format'],
data_point_log_interval=form.cleaned_data['data_point_log_interval'],
useInetSocket=form.cleaned_data['useInetSocket'],
socketPort=form.cleaned_data['socketPort'],
socketHost=form.cleaned_data['socketHost'],
serial_port=form.cleaned_data['serial_port'],
serial_alt_port=form.cleaned_data['serial_alt_port'],
board_type=form.cleaned_data['board_type'],
socket_name=form.cleaned_data['socket_name'],
connection_type=form.cleaned_data['connection_type'],
wifi_host=form.cleaned_data['wifi_host'],
wifi_port=form.cleaned_data['wifi_port'],
prefer_connecting_via_udev=form.cleaned_data['prefer_connecting_via_udev'],
)
new_device.save()
# Once the device is added, go ahead and autodetect the udev serial number (if we're connecting via serial)
if new_device.connection_type == BrewPiDevice.CONNECTION_SERIAL:
new_device.set_udev_from_port()
messages.success(request, u'Device {} Added.<br>Please wait a few seconds for controller to start'.format(new_device))
return redirect("/")
else:
return render(request, template_name='setup/device_add.html', context={'form': form})
else:
# We don't want two devices to have the same port, and the port number doesn't really matter. Just
# randomize it.
random_port = random.randint(2000,3000)
initial_values = {'socketPort': random_port, 'temp_format': config.TEMPERATURE_FORMAT}
form = device_forms.DeviceForm(initial=initial_values)
return render(request, template_name='setup/device_add.html', context={'form': form})
@site_is_configured
@login_if_required_for_dashboard
def device_lcd_list(request):
# This handles generating the list of LCD screens for each device.
# Loading the actual data for the LCD screens is handled by Vue.js which loads the data via calls to api/lcd.py
return render(request, template_name="device_lcd_list.html")
@login_required
@site_is_configured
def device_control_constants_legacy(request, device_id, control_constants):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
active_device = BrewPiDevice.objects.get(id=device_id)
if request.POST:
form = device_forms.OldCCModelForm(request.POST)
if form.is_valid():
# Generate the new_control_constants object from the form data
new_control_constants = form.save(commit=False)
# At this point, we have both the OLD control constants (control_constants) and the NEW control constants
# TODO - Modify the below to only send constants that have changed to the controller
if not new_control_constants.save_all_to_controller(active_device):
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
# TODO - Make it so if we added a preset name we save the new preset
# new_device.save()
messages.success(request, u'Control constants updated for device {}'.format(active_device))
return redirect("/")
else:
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
else:
form = device_forms.OldCCModelForm(instance=control_constants)
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_control_constants_modern(request, device_id, control_constants):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
active_device = BrewPiDevice.objects.get(id=device_id)
if request.POST:
form = device_forms.NewCCModelForm(request.POST)
if form.is_valid():
# Generate the new_control_constants object from the form data
new_control_constants = form.save(commit=False)
# At this point, we have both the OLD control constants (control_constants) and the NEW control constants
# TODO - Modify the below to only send constants that have changed to the controller
if not new_control_constants.save_all_to_controller(active_device):
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
# TODO - Make it so if we added a preset name we save the new preset
# new_device.save()
messages.success(request, u'Control constants updated for device {}'.format(active_device))
return redirect("/")
else:
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
else:
        form = device_forms.NewCCModelForm(instance=control_constants)
return render(request, template_name='device_control_constants_new.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_control_constants(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
control_constants, is_legacy = active_device.retrieve_control_constants()
if control_constants is None:
# We weren't able to retrieve the version from the controller.
messages.error(request, u"Unable to reach brewpi-script for device {}".format(active_device))
return redirect('device_dashboard', device_id=device_id)
elif is_legacy:
return device_control_constants_legacy(request, device_id, control_constants)
else:
return device_control_constants_modern(request, device_id, control_constants)
@login_required
@site_is_configured
def sensor_list(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
devices_loaded = active_device.load_sensors_from_device()
if devices_loaded:
for this_device in active_device.available_devices:
data = {'device_function': this_device.device_function, 'invert': this_device.invert,
'address': this_device.address, 'pin': this_device.pin}
this_device.device_form = device_forms.SensorFormRevised(data)
for this_device in active_device.installed_devices:
data = {'device_function': this_device.device_function, 'invert': this_device.invert,
'address': this_device.address, 'pin': this_device.pin, 'installed': True,
'perform_uninstall': True}
this_device.device_form = device_forms.SensorFormRevised(data)
else:
# If we weren't able to load devices, we should have set an error message instead. Display it.
# (we can't display it directly from load_sensors_from_device() because we aren't passing request)
messages.error(request, active_device.error_message)
return render(request, template_name="pin_list.html",
context={'available_devices': active_device.available_devices, 'active_device': active_device,
'installed_devices': active_device.installed_devices, 'devices_loaded': devices_loaded})
@login_required
@site_is_configured
def sensor_config(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
active_device.load_sensors_from_device()
if request.POST:
form = device_forms.SensorFormRevised(request.POST)
if form.is_valid():
# OK. Here is where things get a bit tricky - We can't just rely on the form to generate the sensor object
# as all the form really does is specify what about the sensor to change. Let's locate the sensor we need
# to update, then adjust it based on the sensor (device) type.
if form.data['installed']:
sensor_to_adjust = SensorDevice.find_device_from_address_or_pin(active_device.installed_devices,
address=form.cleaned_data['address'], pin=form.cleaned_data['pin'])
else:
sensor_to_adjust = SensorDevice.find_device_from_address_or_pin(active_device.available_devices,
address=form.cleaned_data['address'], pin=form.cleaned_data['pin'])
sensor_to_adjust.device_function = form.cleaned_data['device_function']
sensor_to_adjust.invert = form.cleaned_data['invert']
sensor_to_adjust.calibrate_adjust = form.cleaned_data['calibration']
if form.cleaned_data['perform_uninstall']:
write_succeeded = sensor_to_adjust.uninstall()
else:
write_succeeded = sensor_to_adjust.write_config_to_controller()
if write_succeeded:
if form.cleaned_data['perform_uninstall']:
messages.success(request, 'Device {} was uninstalled'.format(device_id))
else:
messages.success(request, 'Device definition saved for device {}'.format(device_id))
return redirect('sensor_list', device_id=device_id)
else:
# We failed to write the configuration to the controller. Show an error.
# TODO - Expand this error message to include instructions on resetting the EEPROM.
messages.error(request, "Failed to write the configuration to the controller. If this continues, try "
"resetting the EEPROM on the controller.")
return redirect('sensor_list', device_id=device_id)
else:
messages.error(request, "There was an error processing the form. Please review and resubmit.")
return redirect('sensor_list', device_id=device_id)
return redirect('sensor_list', device_id=device_id)
@login_required
@site_is_configured
def sensor_refresh(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
active_device.request_device_refresh()
return redirect('sensor_list', device_id=device_id)
@site_is_configured
@login_if_required_for_dashboard
def device_dashboard(request, device_id, beer_id=None):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
beer_create_form = beer_forms.BeerCreateForm()
if beer_id is None:
beer_obj = active_device.active_beer or None
available_beer_logs = Beer.objects.filter(device_id=active_device.id)
else:
try:
beer_obj = Beer.objects.get(id=beer_id, device_id=active_device.id)
except:
# If we are given an invalid beer log ID, let's return an error & drop back to the (valid) dashboard
messages.error(request, 'Unable to load beer log with ID {}'.format(beer_id))
return redirect('device_dashboard', device_id=device_id)
available_beer_logs = Beer.objects.filter(device_id=active_device.id).exclude(id=beer_id)
if beer_obj is None:
# TODO - Determine if we want to load some fake "example" data (similar to what brewpi-www does)
beer_file_url = "/data/fake.csv"
else:
beer_file_url = beer_obj.data_file_url('base_csv')
if beer_obj is None:
column_headers = {}
else:
column_headers = beer_obj.column_headers_to_graph_string('base_csv')
return render(request, template_name="device_dashboard.html",
context={'active_device': active_device, 'beer_create_form': beer_create_form,
'beer': beer_obj, 'temp_display_format': config.DATE_TIME_FORMAT_DISPLAY,
# 'column_headers': column_headers,
'beer_file_url': beer_file_url, 'available_beer_logs': available_beer_logs,
'selected_beer_id': beer_id})
@login_required
@site_is_configured
def device_temp_control(request, device_id):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
if request.POST:
form = device_forms.TempControlForm(request.POST)
if form.is_valid():
if form.cleaned_data['temp_control'] == 'off':
try:
success = active_device.set_temp_control(method=form.cleaned_data['temp_control'])
except ValueError as e:
messages.error(request, str(e))
return redirect('siteroot')
elif form.cleaned_data['temp_control'] == 'beer_constant' or form.cleaned_data['temp_control'] == 'fridge_constant':
try:
success = active_device.set_temp_control(method=form.cleaned_data['temp_control'],
set_temp=float(form.cleaned_data['temperature_setting']))
except ValueError as e:
messages.error(request, str(e))
return redirect('siteroot')
elif form.cleaned_data['temp_control'] == 'beer_profile':
if 'start_at' in form.cleaned_data:
start_at = form.cleaned_data['start_at']
else:
start_at = None
try:
success = active_device.set_temp_control(method=form.cleaned_data['temp_control'],
profile=form.cleaned_data['profile'], profile_startat=start_at)
except ValueError as e:
messages.error(request, str(e))
return redirect('siteroot')
else:
messages.error(request, "Invalid temperature control function specified.")
return redirect('siteroot')
if success:
messages.success(request, u'Temperature control settings updated for {}. Please wait a few seconds for settings to take effect.'.format(active_device))
if active_device.active_beer is None:
# We started temperature control, but aren't logging anything. Prompt the user.
messages.info(request, 'Temperature control enabled, but logging is off. Start a new beer from the device dashboard.')
elif active_device.logging_status != active_device.DATA_LOGGING_ACTIVE:
# We altered temperature control, but logging is paused. Prompt the user.
                    messages.info(request, 'Temperature control enabled, but logging is paused. Resume logging from the device dashboard.')
else:
messages.error(request, u'Unable to update temperature control settings for {}'.format(active_device))
return redirect('siteroot')
else:
messages.error(request, 'Unable to parse temperature control settings provided')
return redirect('siteroot')
else:
messages.error(request, 'No temperature control settings provided')
return redirect('siteroot')
@login_required
@site_is_configured
def github_trigger_upgrade(request, variant=""):
# TODO - Add permission check here
commit_info = git_integration.get_local_remote_commit_info()
allow_git_branch_switching = config.ALLOW_GIT_BRANCH_SWITCHING
app_is_current = git_integration.app_is_current()
git_update_type = config.GIT_UPDATE_TYPE
tags = git_integration.get_tag_info()
if allow_git_branch_switching:
branch_info = git_integration.get_remote_branch_info()
else:
branch_info = {}
if request.POST:
if app_is_current and 'new_branch' not in request.POST and 'tag' not in request.POST:
messages.error(request, "Nothing to upgrade - Local copy and GitHub are at same commit")
else:
cmds = {}
if not allow_git_branch_switching:
# I'm not doing "if git_update_type == config.GIT_UPDATE_TYPE" so users who have update set to 'none'
# can still update from the "master" branch.
if git_update_type == "dev":
branch_to_use = "dev"
else:
# Assume if they have anything other than "dev" they want master
branch_to_use = "master"
elif 'new_branch' not in request.POST:
# Branch switching is enabled, but we weren't provided with a branch. Use the current branch.
branch_to_use = commit_info['local_branch']
else:
# Branch switching is enabled & the user provided a branch. Use it.
branch_to_use = request.POST.get('new_branch', "master")
if variant == "":
if sys.version_info[0] < 3:
# TODO - After April 2018, delete the Python 2 option here
cmds['tag'] = "nohup utils/upgrade.sh -t \"{}\" -b \"master\" &".format(request.POST.get('tag', ""))
cmds['branch'] = "nohup utils/upgrade.sh -b \"{}\" &".format(branch_to_use)
messages.success(request, "Triggered an upgrade from GitHub")
else:
cmds['tag'] = "nohup utils/upgrade3.sh -t \"{}\" -b \"master\" &".format(request.POST.get('tag', ""))
cmds['branch'] = "nohup utils/upgrade3.sh -b \"{}\" &".format(branch_to_use)
messages.success(request, "Triggered an upgrade from GitHub")
elif variant == "force":
if sys.version_info[0] < 3:
# TODO - After April 2018, delete the Python 2 option here
cmds['tag'] = "nohup utils/force_upgrade.sh -t \"{}\" -b \"master\" &".format(request.POST.get('tag', ""))
cmds['branch'] = "nohup utils/force_upgrade.sh -b \"{}\" &".format(branch_to_use)
messages.success(request, "Triggered an upgrade from GitHub")
else:
cmds['tag'] = "nohup utils/force_upgrade3.sh -t \"{}\" -b \"master\" &".format(request.POST.get('tag', ""))
cmds['branch'] = "nohup utils/force_upgrade3.sh -b \"{}\" &".format(branch_to_use)
messages.success(request, "Triggered an upgrade from GitHub")
else:
cmds['tag'] = ""
cmds['branch'] = ""
messages.error(request, "Invalid upgrade variant '{}' requested".format(variant))
if 'tag' in request.POST:
# If we were passed a tag name, explicitly update to it. Assume (for now) all tags are within master
cmd = cmds['tag']
else:
cmd = cmds['branch']
subprocess.call(cmd, shell=True)
else:
# We'll display this error message if the page is being accessed and no form has been posted
if app_is_current:
messages.warning(request, "Nothing to upgrade - Local copy and GitHub are at same commit")
return render(request, template_name="github_trigger_upgrade.html",
context={'commit_info': commit_info, 'app_is_current': app_is_current,
'branch_info': branch_info, 'tags': tags, 'git_update_type': git_update_type,
'allow_git_branch_switching': allow_git_branch_switching})
@login_required
@site_is_configured
def github_trigger_force_upgrade(request):
# TODO - Get rid of this in favor of a better urlpattern
return github_trigger_upgrade(request, variant="force")
def login(request, next=None):
if not next:
if 'next' in request.GET:
next=request.GET['next']
elif 'next' in request.POST:
next=request.POST['next']
else:
next="/"
if 'username' in request.POST:
target_user = auth.authenticate(username=request.POST['username'], password=request.POST['password'])
if target_user: # If the user authenticated, process login & redirect
auth.login(request, target_user)
messages.success(request, "Logged in")
if 'next' in request.POST:
if len(request.POST['next']) > 1:
return redirect(request.POST['next'])
return redirect('siteroot')
else:
messages.error(request, "Login failed")
return render(request, template_name="site_login.html", context={'pagetitle': 'Log In', 'next': next})
# If we hit this, we just need to display the form (no valid form input received)
return render(request, template_name="site_login.html", context={'pagetitle': 'Log In', 'next': next})
def logout(request):
if request.user.is_authenticated():
auth.logout(request)
return redirect('siteroot')
else:
return redirect('login')
@login_required
@site_is_configured
def site_settings(request):
# TODO - Add user permissioning. The wizard creates the user and login so we can check for superuser here
if not config.USER_HAS_COMPLETED_CONFIGURATION:
return redirect('siteroot')
all_push_targets = GenericPushTarget.objects.all()
if request.POST:
form = setup_forms.GuidedSetupConfigForm(request.POST)
if form.is_valid():
f = form.cleaned_data
config.BREWERY_NAME = f['brewery_name']
config.DATE_TIME_FORMAT_DISPLAY = f['date_time_format_display']
config.REQUIRE_LOGIN_FOR_DASHBOARD = f['require_login_for_dashboard']
config.TEMPERATURE_FORMAT = f['temperature_format']
config.PREFERRED_TIMEZONE = f['preferred_timezone']
config.USER_HAS_COMPLETED_CONFIGURATION = True # Toggle once they've completed the configuration workflow
config.GRAVITY_SUPPORT_ENABLED = f['enable_gravity_support']
config.GIT_UPDATE_TYPE = f['update_preference']
if f['enable_sentry_support'] != settings.ENABLE_SENTRY:
# The user changed the "Enable Sentry" value - but this doesn't actually take effect until Fermentrack
# restarts.
# TODO - Queue a request to Huey to restart fermentrack
messages.warning(request, "Sentry status has changed - please restart Fermentrack for this to take "
"effect.")
# This sits outside the if check above in case the user updates the setting before Fermentrack was restarted
if f['enable_sentry_support']:
setup_views.set_sentry_status(enabled=True)
else:
setup_views.set_sentry_status(enabled=False)
messages.success(request, 'App configuration has been saved')
return redirect('siteroot')
else:
return render(request, template_name='site_config.html',
context={'form': form, 'all_push_targets': all_push_targets,
'completed_config': config.USER_HAS_COMPLETED_CONFIGURATION})
else:
form = setup_forms.GuidedSetupConfigForm()
return render(request, template_name='site_config.html',
context={'form': form, 'all_push_targets': all_push_targets,
'completed_config': config.USER_HAS_COMPLETED_CONFIGURATION})
@login_required
@site_is_configured
def device_control_constants_legacy(request, device_id, control_constants):
# TODO - Add user permissioning
# if not request.user.has_perm('app.add_device'):
# messages.error(request, 'Your account is not permissioned to add devices. Please contact an admin')
# return redirect("/")
active_device = BrewPiDevice.objects.get(id=device_id)
if request.POST:
form = device_forms.OldCCModelForm(request.POST)
if form.is_valid():
# Generate the new_control_constants object from the form data
new_control_constants = form.save(commit=False)
# At this point, we have both the OLD control constants (control_constants) and the NEW control constants
# TODO - Modify the below to only send constants that have changed to the controller
if not new_control_constants.save_all_to_controller(active_device):
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
# TODO - Make it so if we added a preset name we save the new preset
# new_device.save()
messages.success(request, u'Control constants updated for device {}'.format(active_device))
return redirect("/")
else:
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
else:
form = device_forms.OldCCModelForm(instance=control_constants)
return render(request, template_name='device_control_constants_old.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_eeprom_reset(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
    # This may be unnecessary for the EEPROM reset process, but using it as a proxy to check if we can connect
control_constants, is_legacy = active_device.retrieve_control_constants()
if control_constants is None:
# We weren't able to retrieve the version from the controller.
messages.error(request, u"Unable to reach brewpi-script for device {}".format(active_device))
return redirect('device_dashboard', device_id=device_id)
else:
active_device.reset_eeprom()
messages.success(request, "Device EEPROM reset")
return redirect("device_control_constants", device_id=device_id)
@login_required
@site_is_configured
def device_wifi_reset(request, device_id):
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
# Using this as a proxy to check if we can connect
control_constants, is_legacy = active_device.retrieve_control_constants()
if control_constants is None:
# We weren't able to retrieve the version from the controller.
messages.error(request, u"Unable to reach brewpi-script for device {}".format(active_device))
return redirect('device_dashboard', device_id=device_id)
else:
active_device.reset_wifi()
messages.success(request, "Device WiFi settings reset. Reset the device to access the configuration AP.")
return redirect("device_control_constants", device_id=device_id)
def site_help(request):
return render(request, template_name='site_help.html', context={})
@login_required
@site_is_configured
def device_manage(request, device_id):
# TODO - Add user permissioning
# if not request.user.has_perm('app.edit_device'):
# messages.error(request, 'Your account is not permissioned to edit devices. Please contact an admin')
# return redirect("/")
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
# Forms posted back to device_manage are explicitly settings update forms
if request.POST:
form = device_forms.DeviceForm(request.POST)
if form.is_valid():
# Update the device settings based on what we were passed via the form
active_device.device_name=form.cleaned_data['device_name']
active_device.temp_format=form.cleaned_data['temp_format']
active_device.data_point_log_interval=form.cleaned_data['data_point_log_interval']
active_device.useInetSocket=form.cleaned_data['useInetSocket']
active_device.socketPort=form.cleaned_data['socketPort']
active_device.socketHost=form.cleaned_data['socketHost']
active_device.serial_port=form.cleaned_data['serial_port']
active_device.serial_alt_port=form.cleaned_data['serial_alt_port']
# Not going to allow editing the board type. Can revisit if there seems to be a need later
# active_device.board_type=form.cleaned_data['board_type']
active_device.socket_name=form.cleaned_data['socket_name']
active_device.connection_type=form.cleaned_data['connection_type']
active_device.wifi_host=form.cleaned_data['wifi_host']
active_device.wifi_port=form.cleaned_data['wifi_port']
active_device.save()
messages.success(request, u'Device {} Updated.<br>Please wait a few seconds for the connection to restart'.format(active_device))
active_device.restart_process()
return render(request, template_name='device_manage.html',
context={'form': form, 'active_device': active_device})
else:
return render(request, template_name='device_manage.html',
context={'form': form, 'active_device': active_device})
else:
# This would probably be easier if I was to use ModelForm instead of Form, but at this point I don't feel like
# refactoring it. Project for later if need be.
initial_values = {
'device_name': active_device.device_name,
'temp_format': active_device.temp_format,
'data_point_log_interval': active_device.data_point_log_interval,
'connection_type': active_device.connection_type,
'useInetSocket': active_device.useInetSocket,
'socketPort': active_device.socketPort,
'socketHost': active_device.socketHost,
'serial_port': active_device.serial_port,
'serial_alt_port': active_device.serial_alt_port,
'board_type': active_device.board_type,
'socket_name': active_device.socket_name,
'wifi_host': active_device.wifi_host,
'wifi_port': active_device.wifi_port,
}
form = device_forms.DeviceForm(initial=initial_values)
return render(request, template_name='device_manage.html',
context={'form': form, 'active_device': active_device})
@login_required
@site_is_configured
def device_uninstall(request, device_id):
# TODO - Add user permissioning
# if not request.user.has_perm('app.delete_device'):
# messages.error(request, 'Your account is not permissioned to uninstall devices. Please contact an admin')
# return redirect("/")
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
if request.POST:
if 'remove_1' in request.POST and 'remove_2' in request.POST and 'remove_3' in request.POST:
if request.POST['remove_1'] == "on" and request.POST['remove_2'] == "on" and request.POST['remove_3'] == "on":
try: # Yes, there is probably a better way to do this than try/except, but this works.
if active_device.gravity_sensor is not None:
# If there's an associated gravity sensor, let's disassociate the sensor & stop it from logging
grav_sensor = active_device.gravity_sensor
if grav_sensor.active_log is not None:
                            # The gravity sensor is currently actively logging something. This is not ideal. Let's stop it.
grav_sensor.active_log = None
messages.warning(request,
u"Gravity sensor {} was actively logging, and has now been stopped.".format(
grav_sensor))
grav_sensor.assigned_brewpi_device = None
grav_sensor.save()
except:
pass
active_device.delete()
messages.success(request, u"The device '{}' was successfully uninstalled.".format(active_device))
return redirect("siteroot")
# If we get here, one of the switches wasn't toggled
messages.error(request, "All three switches must be set to 'yes' to uninstall a device.")
return redirect("device_manage", device_id=device_id)
else:
messages.error(request, "To uninstall a device, use the form on the 'Manage Device' page.")
return redirect("device_manage", device_id=device_id)
# So here's the deal -- If we want to write json files sequentially, we have to skip closing the array. If we want to
# then interpret them using JavaScript, however, we MUST have fully formed, valid json. To achieve that, we're going to
# wrap the json file and append the closing bracket after dumping its contents to the browser.
def almost_json_view(request, device_id, beer_id):
json_close = "\r\n]"
# beer_obj = Beer.objects.get(id=beer_id, device_id=device_id)
beer_obj = Beer.objects.get(id=beer_id)
filename = os.path.join(settings.BASE_DIR, settings.DATA_ROOT, beer_obj.full_filename("annotation_json"))
if os.path.isfile(filename): # If there are no annotations, return an empty JsonResponse
f = open(filename, 'r')
wrapper = almost_json.AlmostJsonWrapper(f, closing_string=json_close)
response = HttpResponse(wrapper, content_type="application/json")
response['Content-Length'] = os.path.getsize(filename) + len(json_close)
return response
else:
empty_array = []
return JsonResponse(empty_array, safe=False, json_dumps_params={'indent': 4})
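# Illustrative sketch of the idea above (the annotation layout shown here is an assumption for
# illustration, not copied from this project): a sequentially written annotation_json file might
# hold an opening bracket and comma-separated objects but no closing bracket, e.g.
#
#   [
#     {"series": "temp", "x": "2018/01/01 12:00:00", "shortText": "A", "text": "Note"},
#     {"series": "temp", "x": "2018/01/02 12:00:00", "shortText": "B", "text": "Other note"}
#
# Streaming that file and appending json_close ("\r\n]") is what turns it into valid JSON that
# the browser-side JavaScript can parse.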
@login_required
@site_is_configured
def debug_connection(request, device_id):
# TODO - Add user permissioning
# if not request.user.has_perm('app.delete_device'):
# messages.error(request, 'Your account is not permissioned to uninstall devices. Please contact an admin')
# return redirect("/")
try:
active_device = BrewPiDevice.objects.get(id=device_id)
except:
messages.error(request, "Unable to load device with ID {}".format(device_id))
return redirect('siteroot')
tests=[]
FAILED = 'Failed'
PASSED = 'Passed'
if active_device.status != BrewPiDevice.STATUS_ACTIVE:
test_result = {'name': 'Device Status Test', 'parameter': active_device.status, 'status': FAILED,
'result': 'Device not active'}
else:
test_result = {'name': 'Device Status Test', 'parameter': active_device.status, 'status': PASSED,
'result': 'Device active & managed by Circus'}
tests.append(test_result)
if active_device.connection_type == BrewPiDevice.CONNECTION_WIFI:
# For WiFi, the first thing we check is if we can resolve the mDNS hostname
mdns_lookup = connection_debug.dns_lookup(active_device.wifi_host)
if mdns_lookup is None:
test_result = {'name': 'DNS Lookup Test', 'parameter': active_device.wifi_host, 'status': FAILED,
'result': 'No DNS response'}
else:
test_result = {'name': 'DNS Lookup Test', 'parameter': active_device.wifi_host, 'status': PASSED,
'result': mdns_lookup}
tests.append(test_result)
# Once that's done, we'll test connecting to the mDNS hostname & cached IP address
# Start with the mDNS hostname (if mdns_lookup was successful)
if mdns_lookup is not None:
hostname = active_device.wifi_host
connection_check, version_check, version_string = connection_debug.test_telnet(hostname, active_device.wifi_port)
if connection_check:
# We were able to telnet into the hostname
test_result = {'name': 'Connection Test', 'parameter': hostname, 'status': PASSED,
'result': 'Connected'}
tests.append(test_result)
if version_check:
# We were able to get a version number from the host - this is a complete success
test_result = {'name': 'Controller Response Test', 'parameter': hostname, 'status': PASSED,
'result': version_string}
tests.append(test_result)
else:
# We weren't able to get a version number from the host
test_result = {'name': 'Controller Response Test', 'parameter': hostname, 'status': FAILED,
'result': ''}
tests.append(test_result)
else:
test_result = {'name': 'Connection Test', 'parameter': hostname, 'status': FAILED,
'result': 'Unable to connect'}
tests.append(test_result)
if len(active_device.wifi_host_ip) > 7:
hostname = active_device.wifi_host_ip
connection_check, version_check, version_string = connection_debug.test_telnet(hostname, active_device.wifi_port)
test_result = {'name': 'Cached IP Test', 'parameter': hostname, 'status': PASSED,
'result': 'Available'}
tests.append(test_result)
if connection_check:
# We were able to telnet into the hostname
test_result = {'name': 'Connection Test', 'parameter': hostname, 'status': PASSED,
'result': 'Connected'}
tests.append(test_result)
if version_check:
# We were able to get a version number from the host - this is a complete success
test_result = {'name': 'Controller Response Test', 'parameter': hostname, 'status': PASSED,
'result': version_string}
tests.append(test_result)
else:
# We weren't able to get a version number from the host
test_result = {'name': 'Controller Response Test', 'parameter': hostname, 'status': FAILED,
'result': ''}
tests.append(test_result)
else:
test_result = {'name': 'Connection Test', 'parameter': hostname, 'status': FAILED,
'result': 'Unable to connect'}
tests.append(test_result)
else:
test_result = {'name': 'Cached IP Test', 'parameter': active_device.wifi_host_ip, 'status': FAILED,
'result': 'Unavailable'}
tests.append(test_result)
elif active_device.connection_type == BrewPiDevice.CONNECTION_SERIAL:
if udev_integration.valid_platform_for_udev():
# Pyudev is available on this platform - let's see if we have a USB serial number set
test_result = {'name': 'Udev Availability Test', 'parameter': udev_integration.get_platform(), 'status': PASSED,
'result': 'pyudev is available & loaded'}
tests.append(test_result)
if active_device.prefer_connecting_via_udev:
# Let the user know they are using udev if available
test_result = {'name': 'USB Serial Number (SN) Usage Test', 'parameter': active_device.prefer_connecting_via_udev, 'status': PASSED,
'result': 'Will look up port via USB serial number'}
tests.append(test_result)
else:
# Let the user know they AREN'T using udev (and it's available)
test_result = {'name': 'USB SN Usage Test', 'parameter': active_device.prefer_connecting_via_udev, 'status': FAILED,
'result': 'Will NOT look up port via USB serial number'}
tests.append(test_result)
if len(active_device.udev_serial_number) > 1:
# The user has a seemingly valid USB serial number set on the device
test_result = {'name': 'USB SN Test', 'parameter': '', 'status': PASSED,
'result': active_device.udev_serial_number}
tests.append(test_result)
if active_device.prefer_connecting_via_udev:
# If the user has everything set up to use udev (and actually wants to use it) then check what
# we find if we look up the USB serial number
found_node = udev_integration.get_node_from_serial(active_device.udev_serial_number)
if found_node is None:
# We weren't able to find a matching USB device with that serial number
test_result = {'name': 'USB SN Availability Test', 'parameter': '(USB SN)', 'status': FAILED,
'result': 'No device with that serial number found'}
tests.append(test_result)
else:
# We found a device that matched that serial number
test_result = {'name': 'USB SN Availability Test', 'parameter': '(USB SN)', 'status': PASSED,
'result': found_node}
tests.append(test_result)
# Last test - Check if the serial port matches what we just found via the USB lookup
if found_node == active_device.serial_port:
# It matched!
test_result = {'name': 'Udev Matches Cached Port Test', 'parameter': found_node,
'status': PASSED, 'result': active_device.serial_port}
tests.append(test_result)
else:
# It... didn't match.
test_result = {'name': 'Udev Matches Cached Port Test', 'parameter': found_node,
'status': FAILED, 'result': active_device.serial_port}
tests.append(test_result)
else:
# There isn't a seemingly valid USB serial number set on the device
test_result = {'name': 'USB Serial Number Test', 'parameter': '', 'status': FAILED,
'result': active_device.udev_serial_number}
tests.append(test_result)
else:
# Pyudev isn't available on this platform
test_result = {'name': 'Udev Availability Test', 'parameter': udev_integration.get_platform(), 'status': FAILED,
'result': 'pyudev is not available, or isn\'t loaded'}
tests.append(test_result)
return render(request, template_name='device_debug_connection.html',
context={'tests': tests, 'active_device': active_device})
|
the-stack_0_26554
|
"""
Given two integers dividend and divisor, divide two integers without using multiplication, division and mod operator.
Return the quotient after dividing dividend by divisor.
The integer division should truncate toward zero.
Example 1:
Input: dividend = 10, divisor = 3
Output: 3
Example 2:
Input: dividend = 7, divisor = -3
Output: -2
Note:
Both dividend and divisor will be 32-bit signed integers.
The divisor will never be 0.
Assume we are dealing with an environment which could only store integers within the 32-bit signed integer range: [−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 2^31 − 1 when the division result overflows.
"""
class Solution(object):
def divide(self, dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
        positive = (dividend < 0) is (divisor < 0)
        dividend, divisor = abs(dividend), abs(divisor)
        res = 0
        while dividend >= divisor:
            temp, i = divisor, 1
            while dividend >= temp:
                dividend -= temp
                res += i
                i <<= 1
                temp <<= 1
        if not positive:
            res = -res
        return min(max(-2147483648, res), 2147483647)
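# Quick sanity check using the examples from the docstring above (a hypothetical driver block,
# not part of the original solution):
if __name__ == '__main__':
    sol = Solution()
    print(sol.divide(10, 3))   # expected: 3
    print(sol.divide(7, -3))   # expected: -2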
|
the-stack_0_26555
|
"""albatrosses_hub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("home.urls")),
path("forums/", include("forums.urls")),
path("auth/", include("authentication.urls")),
]
|
the-stack_0_26557
|
import argparse
import sys
from typing import Sequence
from exabel_data_sdk import ExabelClient
from exabel_data_sdk.client.api.data_classes.relationship import Relationship
from exabel_data_sdk.scripts.base_script import BaseScript
class CreateRelationship(BaseScript):
"""
Creates a new relationship.
"""
def __init__(self, argv: Sequence[str], description: str):
super().__init__(argv, description)
self.parser.add_argument(
"--relationship-type",
required=True,
type=str,
help="The resource name of the relationship type, "
"for example 'relationshipTypes/ns.relationshipTypeIdentifier'",
)
self.parser.add_argument(
"--from-entity",
required=True,
type=str,
help="The resource name of the entity the relationship goes from",
)
self.parser.add_argument(
"--to-entity",
required=True,
type=str,
help="The resource name of the entity the relationship goes to",
)
self.parser.add_argument(
"--description",
required=True,
type=str,
help="One or more paragraphs of text description",
)
def run_script(self, client: ExabelClient, args: argparse.Namespace) -> None:
relationship = client.relationship_api.create_relationship(
Relationship(
relationship_type=args.relationship_type,
from_entity=args.from_entity,
to_entity=args.to_entity,
description=args.description,
properties={},
)
)
print("Successfully created relationship:")
print(relationship)
if __name__ == "__main__":
CreateRelationship(sys.argv, "Create a new relationship.").run()
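# Example invocation (a sketch; the resource names below are placeholders, and any additional
# flags inherited from BaseScript, e.g. API-key options, are not shown):
#
#   python create_relationship.py \
#       --relationship-type "relationshipTypes/ns.relationshipTypeIdentifier" \
#       --from-entity "<from-entity resource name>" \
#       --to-entity "<to-entity resource name>" \
#       --description "One or more paragraphs of text description"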
|
the-stack_0_26558
|
"""
Tests for optimize routines
"""
from numpy.testing import assert_, assert_array_almost_equal, assert_array_almost_equal_nulp
import jax.numpy as jnp
from .line_search import line_search
from .bfgs_minimize import fmin_bfgs
from jax import jit
from scipy.optimize import minimize as smin
import numpy as onp
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1 * s * derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2 * derphi0), "Wolfe 2 failed: " + msg)
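# For reference, the two assertions above encode the strong Wolfe conditions for a step length s
# along the search direction, with phi the line function and derphi its derivative:
#   1. sufficient decrease:  phi(s) <= phi(0) + c1 * s * derphi(0)
#   2. curvature:            |derphi(s)| <= c2 * |derphi(0)|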
def assert_line_wolfe(x, p, s, f, fprime, **kw):
assert_wolfe(s, phi=lambda sp: f(x + p * sp),
derphi=lambda sp: jnp.dot(fprime(x + p * sp), p), **kw)
def assert_fp_equal(x, y, err_msg="", nulp=50):
"""Assert two arrays are equal, up to some floating-point rounding error"""
try:
assert_array_almost_equal_nulp(x, y, nulp)
except AssertionError as e:
raise AssertionError("%s\n%s" % (e, err_msg))
def value_and_grad(f, fprime):
def func(x):
return f(x), fprime(x)
return func
class TestLineSearch(object):
# -- scalar functions; must have dphi(0.) < 0
def _scalar_func_1(self, s):
self.fcount += 1
p = -s - s ** 3 + s ** 4
dp = -1 - 3 * s ** 2 + 4 * s ** 3
return p, dp
def _scalar_func_2(self, s):
self.fcount += 1
p = jnp.exp(-4 * s) + s ** 2
dp = -4 * jnp.exp(-4 * s) + 2 * s
return p, dp
def _scalar_func_3(self, s):
self.fcount += 1
p = -jnp.sin(10 * s)
dp = -10 * jnp.cos(10 * s)
return p, dp
    # -- n-d functions
def _line_func_1(self, x):
self.fcount += 1
f = jnp.dot(x, x)
df = 2 * x
return f, df
def _line_func_2(self, x):
self.fcount += 1
f = jnp.dot(x, jnp.dot(self.A, x)) + 1
df = jnp.dot(self.A + self.A.T, x)
return f, df
# --
def setup_method(self):
self.scalar_funcs = []
self.line_funcs = []
self.N = 20
self.fcount = 0
def bind_index(func, idx):
# Remember Python's closure semantics!
return lambda *a, **kw: func(*a, **kw)[idx]
for name in sorted(dir(self)):
if name.startswith('_scalar_func_'):
value = getattr(self, name)
self.scalar_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
elif name.startswith('_line_func_'):
value = getattr(self, name)
self.line_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
onp.random.seed(1234)
self.A = onp.random.randn(self.N, self.N)
def scalar_iter(self):
for name, phi, derphi in self.scalar_funcs:
for old_phi0 in onp.random.randn(3):
yield name, phi, derphi, old_phi0
def line_iter(self):
for name, f, fprime in self.line_funcs:
k = 0
while k < 9:
x = onp.random.randn(self.N)
p = onp.random.randn(self.N)
if jnp.dot(p, fprime(x)) >= 0:
# always pick a descent direction
continue
k += 1
old_fv = float(onp.random.randn())
yield name, f, fprime, x, p, old_fv
# -- Generic scalar searches
def test_scalar_search_wolfe2(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
res = line_search(value_and_grad(phi, derphi), 0., 1.)
s, phi1, derphi1 = res.a_k, res.f_k, res.g_k
# s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
# phi, derphi, phi(0), old_phi0, derphi(0))
assert_fp_equal(phi1, phi(s), name)
if derphi1 is not None:
assert_fp_equal(derphi1, derphi(s), name)
assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))
# -- Generic line searches
def test_line_search_wolfe2(self):
c = 0
smax = 512
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
res = line_search(value_and_grad(f, fprime), x, p, old_fval=f0, gfk=g0)
s = res.a_k
fc = res.nfev
gc = res.ngev
fv = res.f_k
gv = res.g_k
# s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
# g0, f0, old_f,
# amax=smax)
# assert_equal(self.fcount, fc+gc)
assert_array_almost_equal(fv, f(x + s * p), decimal=5)
if gv is not None:
assert_array_almost_equal(gv, fprime(x + s * p), decimal=5)
if s < smax:
c += 1
assert_(c > 3) # check that the iterator really works...
def test_line_search_wolfe2_bounds(self):
# See gh-7475
# For this f and p, starting at a point on axis 0, the strong Wolfe
# condition 2 is met if and only if the step length s satisfies
# |x + s| <= c2 * |x|
f = lambda x: jnp.dot(x, x)
fp = lambda x: 2 * x
p = jnp.array([1, 0])
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
x = -60 * p
c2 = 0.5
res = line_search(value_and_grad(f, fp), x, p, c2=c2)
s = res.a_k
# s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
assert_line_wolfe(x, p, s, f, fp)
assert s >= 30.
res = line_search(value_and_grad(f, fp), x, p, c2=c2, maxiter=5)
assert res.failed
# s=30 will only be tried on the 6th iteration, so this won't converge
def test_line_search(self):
import jax
import jax.numpy as np
def f(x):
return np.cos(np.sum(np.exp(-x)) ** 2)
        # assert not line_search(jax.value_and_grad(f), np.ones(2), np.array([-0.5, -0.25])).failed
xk = np.ones(2)
pk = np.array([-0.5, -0.25])
res = line_search(jax.value_and_grad(f), xk, pk, maxiter=100)
from scipy.optimize.linesearch import line_search_wolfe2
scipy_res = line_search_wolfe2(f, jax.grad(f), xk, pk)
# print(scipy_res[0], res.a_k)
# print(scipy_res[3], res.f_k)
assert np.isclose(scipy_res[0], res.a_k)
assert np.isclose(scipy_res[3], res.f_k)
# -- More specific tests
def rosenbrock(np):
def func(x):
return np.sum(100. * np.diff(x) ** 2 + (1. - x[:-1]) ** 2)
return func
def himmelblau(np):
def func(p):
x, y = p
return (x ** 2 + y - 11.) ** 2 + (x + y ** 2 - 7.) ** 2
return func
def matyas(np):
def func(p):
x, y = p
return 0.26 * (x ** 2 + y ** 2) - 0.48 * x * y
return func
def eggholder(np):
def func(p):
x, y = p
return - (y + 47) * np.sin(np.sqrt(np.abs(x / 2. + y + 47.))) - x * np.sin(
np.sqrt(np.abs(x - (y + 47.))))
return func
class TestBFGS(object):
# def __init__(self):
# pass
def test_minimize(self):
# Note, cannot compare step for step with scipy BFGS because our line search is _slightly_ different.
for maxiter in [None]:
for func_and_init in [(rosenbrock, jnp.zeros(2)),
(himmelblau, jnp.zeros(2)),
(matyas, jnp.ones(2) * 6.),
(eggholder, jnp.ones(2) * 100.)]:
func, x0 = func_and_init
def compare(func, x0):
@jit
def min_op(x0):
result = fmin_bfgs(func(jnp), x0,
options=dict(ls_maxiter=100, maxiter=maxiter, gtol=1e-6))
return result
jax_res = min_op(x0)
scipy_res = smin(func(onp), x0, method='BFGS')
assert onp.isclose(scipy_res.x, jax_res.x_k, atol=2e-5).all()
compare(func, x0)
if __name__ == '__main__':
TestBFGS().test_minimize()
|
the-stack_0_26559
|
# script to implement RPi shutdown and reboot on GPIO-connected button press
from subprocess import call
import RPi.GPIO as GPIO
import os
import time
# Define a function to run when an interrupt is called
def shutdown(pin):
print("shutting down")
os.system("sudo shutdown -h now") # Shutdown command
def reboot(pin):
print("rebooting")
os.system("sudo reboot") # Reboot command
GPIO.setmode(GPIO.BCM) # Use BCM (Broadcom) pin numbering rather than physical board numbering
GPIO.setup(23, GPIO.IN) # already have pullup
GPIO.add_event_detect(23, GPIO.FALLING, callback=shutdown, bouncetime=200) #Set up an interrupt to look for button presses
GPIO.setup(24, GPIO.IN) # already have pullup
GPIO.add_event_detect(24, GPIO.FALLING, callback=reboot, bouncetime=200) #Set up an interrupt to look for button presses
while True:
time.sleep(.5)
|
the-stack_0_26561
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Cisco Systems, Inc. and others. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Config class.
"""
import copy
import logging
import os
import warnings
from .. import path
logger = logging.getLogger(__name__)
DUCKLING_SERVICE_NAME = 'duckling'
DEFAULT_DUCKLING_URL = 'http://localhost:7151/parse'
CONFIG_DEPRECATION_MAPPING = {
'DOMAIN_CLASSIFIER_CONFIG': 'DOMAIN_MODEL_CONFIG',
'INTENT_CLASSIFIER_CONFIG': 'INTENT_MODEL_CONFIG',
'ENTITY_RECOGNIZER_CONFIG': 'ENTITY_MODEL_CONFIG',
'ROLE_CLASSIFIER_CONFIG': 'ROLE_MODEL_CONFIG',
'ENTITY_RESOLVER_CONFIG': 'ENTITY_RESOLUTION_CONFIG',
'get_entity_recognizer_config': 'get_entity_model_config',
'get_intent_classifier_config': 'get_intent_model_config',
'get_entity_resolver_config': 'get_entity_resolution_model_config',
'get_role_classifier_config': 'get_role_model_config'
}
DEFAULT_DOMAIN_CLASSIFIER_CONFIG = {
'model_type': 'text',
'model_settings': {
'classifier_type': 'logreg',
},
'param_selection': {
'type': 'k-fold',
'k': 10,
'grid': {
'fit_intercept': [True, False],
'C': [10, 100, 1000, 10000, 100000]
},
},
'features': {
'bag-of-words': {
'lengths': [1]
},
'freq': {'bins': 5},
'in-gaz': {}
}
}
DEFAULT_INTENT_CLASSIFIER_CONFIG = {
'model_type': 'text',
'model_settings': {
'classifier_type': 'logreg'
},
'param_selection': {
'type': 'k-fold',
'k': 10,
'grid': {
'fit_intercept': [True, False],
'C': [0.01, 1, 100, 10000, 1000000],
'class_bias': [1, 0.7, 0.3, 0]
}
},
'features': {
'bag-of-words': {
'lengths': [1]
},
'in-gaz': {},
'freq': {'bins': 5},
'length': {}
}
}
DEFAULT_ENTITY_RECOGNIZER_CONFIG = {
'model_type': 'tagger',
'label_type': 'entities',
'model_settings': {
'classifier_type': 'memm',
'tag_scheme': 'IOB',
'feature_scaler': 'max-abs'
},
'param_selection': {
'type': 'k-fold',
'k': 5,
'scoring': 'accuracy',
'grid': {
'penalty': ['l1', 'l2'],
'C': [0.01, 1, 100, 10000, 1000000, 100000000]
},
},
'features': {
'bag-of-words-seq': {
'ngram_lengths_to_start_positions': {
1: [-2, -1, 0, 1, 2],
2: [-2, -1, 0, 1]
}
},
'in-gaz-span-seq': {},
'sys-candidates-seq': {
'start_positions': [-1, 0, 1]
}
}
}
DEFAULT_ENTITY_RESOLVER_CONFIG = {
'model_type': 'text_relevance'
}
DOC_TYPE = 'document'
# ElasticSearch mapping to define text analysis settings for text fields.
# It defines specific index configuration for synonym indices. The common index configuration
# is in default index template.
DEFAULT_ES_SYNONYM_MAPPING = {
"mappings": {
DOC_TYPE: {
"properties": {
"sort_factor": {
"type": "double"
},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer"
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer"
}
},
"analyzer": "default_analyzer"
}
}
}
}
}
}
}
PHONETIC_ES_SYNONYM_MAPPING = {
"mappings": {
DOC_TYPE: {
"properties": {
"sort_factor": {
"type": "double"
},
"whitelist": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer"
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer"
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer"
}
},
"analyzer": "default_analyzer"
}
}
},
"cname": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer"
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer"
},
"double_metaphone": {
"type": "text",
"analyzer": "phonetic_analyzer"
}
}
}
}
}
},
"settings": {
"analysis": {
"filter": {
"phonetic_filter": {
"type": "phonetic",
"encoder": "doublemetaphone",
"replace": True,
"max_code_len": 7
}
},
"analyzer": {
"phonetic_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"token_shingle",
"phonetic_filter",
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3",
"remove_dot",
],
"type": "custom",
"tokenizer": "whitespace"
}
}
}
}
}
DEFAULT_ROLE_CLASSIFIER_CONFIG = {
'model_type': 'text',
'model_settings': {
'classifier_type': 'logreg'
},
'params': {
'C': 100,
'penalty': 'l1'
},
'features': {
'bag-of-words-before': {
'ngram_lengths_to_start_positions': {
1: [-2, -1],
2: [-2, -1]
}
},
'bag-of-words-after': {
'ngram_lengths_to_start_positions': {
1: [0, 1],
2: [0, 1]
}
},
'other-entities': {}
}
}
DEFAULT_ES_INDEX_TEMPLATE_NAME = "mindmeld_default"
# Default ES index template that contains the base index configuration shared across different
# types of indices. Currently all ES indices will be created using this template.
# - custom text analysis settings such as custom analyzers, token filters and character filters.
# - dynamic field mapping template for text fields
# - common fields, e.g. id.
DEFAULT_ES_INDEX_TEMPLATE = {
"template": "*",
"mappings": {
DOC_TYPE: {
"dynamic_templates": [
{
"default_text": {
"match": "*",
"match_mapping_type": "string",
"mapping": {
"type": "text",
"analyzer": "default_analyzer",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer"
},
"processed_text": {
"type": "text",
"analyzer": "english"
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer"
}
}
}
}
}
],
"properties": {
"id": {
"type": "keyword"
}
}
}
},
"settings": {
"analysis": {
"char_filter": {
"remove_loose_apostrophes": {
"pattern": " '|' ",
"type": "pattern_replace",
"replacement": ""
},
"space_possessive_apostrophes": {
"pattern": "([^\\p{N}\\s]+)'s ",
"type": "pattern_replace",
"replacement": "$1 's "
},
"remove_special_beginning": {
"pattern": "^[^\\p{L}\\p{N}\\p{Sc}&']+",
"type": "pattern_replace",
"replacement": ""
},
"remove_special_end": {
"pattern": "[^\\p{L}\\p{N}&']+$",
"type": "pattern_replace",
"replacement": ""
},
"remove_special1": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{N}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 "
},
"remove_special2": {
"pattern": "([\\p{N}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1 "
},
"remove_special3": {
"pattern": "([\\p{L}]+)[^\\p{L}\\p{N}&']+(?=[\\p{L}]+)",
"type": "pattern_replace",
"replacement": "$1 "
},
"remove_comma": {
"pattern": ",",
"type": "pattern_replace",
"replacement": ""
},
"remove_tm_and_r": {
"pattern": "™|®",
"type": "pattern_replace",
"replacement": ""
},
"remove_dot": {
"pattern": "([\\p{L}]+)[.]+(?=[\\p{L}\\s]+)",
"type": "pattern_replace",
"replacement": "$1"
},
},
"filter": {
"token_shingle": {
"max_shingle_size": "4",
"min_shingle_size": "2",
"output_unigrams": "true",
"type": "shingle"
},
"ngram_filter": {
"type": "ngram",
"min_gram": "3",
"max_gram": "3"
},
},
"analyzer": {
"default_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"token_shingle"
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3"
],
"type": "custom",
"tokenizer": "whitespace"
},
"keyword_match_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3"
],
"type": "custom",
"tokenizer": "keyword"
},
"char_ngram_analyzer": {
"filter": [
"lowercase",
"asciifolding",
"ngram_filter"
],
"char_filter": [
"remove_comma",
"remove_tm_and_r",
"remove_loose_apostrophes",
"space_possessive_apostrophes",
"remove_special_beginning",
"remove_special_end",
"remove_special1",
"remove_special2",
"remove_special3"
],
"type": "custom",
"tokenizer": "whitespace"
}
}
}
}
}
# Elasticsearch mapping to define knowledge base index specific configuration:
# - dynamic field mapping to index all synonym whitelist in fields with "$whitelist" suffix.
# - location field
#
# The common configuration is defined in default index template
DEFAULT_ES_QA_MAPPING = {
"mappings": {
DOC_TYPE: {
"dynamic_templates": [
{
"synonym_whitelist_text": {
"match": "*$whitelist",
"match_mapping_type": "object",
"mapping": {
"type": "nested",
"properties": {
"name": {
"type": "text",
"fields": {
"raw": {
"type": "keyword",
"ignore_above": 256
},
"normalized_keyword": {
"type": "text",
"analyzer": "keyword_match_analyzer"
},
"char_ngram": {
"type": "text",
"analyzer": "char_ngram_analyzer"
}
},
"analyzer": "default_analyzer"
}
}
}
}
}
],
"properties": {
"location": {
"type": "geo_point"
}
}
}
}
}
DEFAULT_PARSER_DEPENDENT_CONFIG = {
'left': True,
'right': True,
'min_instances': 0,
'max_instances': None,
'precedence': 'left',
'linking_words': frozenset()
}
DEFAULT_RANKING_CONFIG = {
'query_clauses_operator': 'or'
}
DEFAULT_NLP_CONFIG = {
'resolve_entities_using_nbest_transcripts': [],
'system_entity_recognizer': {'type': DUCKLING_SERVICE_NAME, 'url': DEFAULT_DUCKLING_URL}
}
def get_app_namespace(app_path):
"""Returns the namespace of the application at app_path"""
try:
_app_namespace = _get_config_module(app_path).APP_NAMESPACE
if 'JUPYTER_USER' in os.environ:
_app_namespace = '{}_{}'.format(os.environ['JUPYTER_USER'], _app_namespace)
return _app_namespace
except (OSError, IOError):
logger.debug('No app configuration file found')
except AttributeError:
logger.debug('App namespace not set in app configuration')
# If a relative path is passed in, we resolve to its abspath
app_path = os.path.abspath(app_path) if not os.path.isabs(app_path) else app_path
_app_namespace = os.path.split(app_path)[1]
if 'JUPYTER_USER' in os.environ:
_app_namespace = '{jupyter_user}_{app_namespace}'.format(
jupyter_user=os.environ['JUPYTER_USER'], app_namespace=_app_namespace)
return _app_namespace
def is_duckling_configured(app_path):
"""Returns True if the app config specifies that duckling should be run
as a system entity recognizer
Args:
app_path (str): A application path
Returns:
(bool): True if the app config specifies that the numerical parsing
should be run
"""
config = get_nlp_config(app_path).get('system_entity_recognizer')
if isinstance(config, dict):
# We get into this conditional when the app has specified the system_entity_recognizer
# nlp config
return config.get('type') == DUCKLING_SERVICE_NAME
else:
# We get into this conditional when the app has not specified the system_entity_recognizer
# nlp config, in which case, we default to the duckling API
return True
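# Illustrative sketch of the app-level override read above (keys mirror DEFAULT_NLP_CONFIG;
# the values shown are just the defaults from this module, used here as an example):
#
#   # in the app's config.py
#   NLP_CONFIG = {
#       'system_entity_recognizer': {'type': 'duckling', 'url': 'http://localhost:7151/parse'}
#   }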
def get_system_entity_url_config(app_path):
return get_nlp_config(app_path).get(
'system_entity_recognizer', {}).get('url', DEFAULT_DUCKLING_URL)
def get_classifier_config(clf_type, app_path=None, domain=None, intent=None, entity=None):
"""Returns the config for the specified classifier, with the
following order of precedence.
If the application contains a config.py file:
- Return the response from the get_*_model_config function in
config.py for the specified classifier type. E.g.
`get_intent_model_config`.
- If the function does not exist, or raise an exception, return the
config specified by *_MODEL_CONFIG in config.py, e.g.
INTENT_MODEL_CONFIG.
Otherwise, use the MindMeld default config for the classifier type
Args:
clf_type (str): The type of the classifier. One of 'domain',
'intent', 'entity', 'entity_resolution', or 'role'.
app_path (str, optional): The location of the app
domain (str, optional): The domain of the classifier
intent (str, optional): The intent of the classifier
entity (str, optional): The entity type of the classifier
Returns:
dict: A classifier config
"""
try:
module_conf = _get_config_module(app_path)
except (OSError, IOError):
logger.info('No app configuration file found. Using default %s model configuration',
clf_type)
return _get_default_classifier_config(clf_type)
func_name = {
'intent': 'get_intent_classifier_config',
'entity': 'get_entity_recognizer_config',
'entity_resolution': 'get_entity_resolver_config',
'role': 'get_role_classifier_config',
}.get(clf_type)
func_args = {
'intent': ('domain',),
'entity': ('domain', 'intent'),
'entity_resolution': ('domain', 'intent', 'entity'),
'role': ('domain', 'intent', 'entity'),
}.get(clf_type)
if func_name:
func = None
try:
func = getattr(module_conf, func_name)
except AttributeError:
try:
func = getattr(module_conf, CONFIG_DEPRECATION_MAPPING[func_name])
msg = '%s config key is deprecated. Please use the equivalent %s config ' \
'key' % (CONFIG_DEPRECATION_MAPPING[func_name], func_name)
warnings.warn(msg, DeprecationWarning)
except AttributeError:
pass
if func:
try:
raw_args = {'domain': domain, 'intent': intent, 'entity': entity}
args = {k: raw_args[k] for k in func_args}
return copy.deepcopy(func(**args))
except Exception as exc: # pylint: disable=broad-except
# Note: this is intentionally broad -- provider could raise any exception
logger.warning('%r configuration provider raised exception: %s', clf_type, exc)
attr_name = {
'domain': 'DOMAIN_CLASSIFIER_CONFIG',
'intent': 'INTENT_CLASSIFIER_CONFIG',
'entity': 'ENTITY_RECOGNIZER_CONFIG',
'entity_resolution': 'ENTITY_RESOLVER_CONFIG',
'role': 'ROLE_CLASSIFIER_CONFIG',
}[clf_type]
try:
return copy.deepcopy(getattr(module_conf, attr_name))
except AttributeError:
try:
result = copy.deepcopy(getattr(module_conf, CONFIG_DEPRECATION_MAPPING[attr_name]))
msg = '%s config is deprecated. Please use the equivalent %s config ' \
'key' % (CONFIG_DEPRECATION_MAPPING[attr_name], attr_name)
warnings.warn(msg, DeprecationWarning)
return result
except AttributeError:
logger.info('No %s model configuration set. Using default.', clf_type)
return _get_default_classifier_config(clf_type)
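# Illustrative sketch of the config.py hooks looked up above (only the function and attribute
# names come from this module; the bodies are hypothetical):
#
#   # in the app's config.py
#   INTENT_CLASSIFIER_CONFIG = {'model_type': 'text', ...}
#
#   def get_intent_classifier_config(domain):
#       # may return a per-domain config; otherwise fall back to the module-level dict
#       return INTENT_CLASSIFIER_CONFIG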
def _get_default_classifier_config(clf_type):
return copy.deepcopy({
'domain': DEFAULT_DOMAIN_CLASSIFIER_CONFIG,
'intent': DEFAULT_INTENT_CLASSIFIER_CONFIG,
'entity': DEFAULT_ENTITY_RECOGNIZER_CONFIG,
'entity_resolution': DEFAULT_ENTITY_RESOLVER_CONFIG,
'role': DEFAULT_ROLE_CLASSIFIER_CONFIG
}[clf_type])
def get_parser_config(app_path=None, config=None, domain=None, intent=None):
"""Gets the fully specified parser configuration for the app at the
given path.
Args:
app_path (str, optional): The location of the MindMeld app
config (dict, optional): A config object to use. This will
override the config specified by the app's config.py file.
If necessary, this object will be expanded to a fully
specified config object.
domain (str, optional): The domain of the parser
intent (str, optional): The intent of the parser
Returns:
        dict: A fully specified parser configuration
"""
if config:
return _expand_parser_config(config)
try:
module_conf = _get_config_module(app_path)
except (OSError, IOError):
logger.info('No app configuration file found. Not configuring parser.')
return _get_default_parser_config()
# Try provider first
config_provider = None
try:
config_provider = module_conf.get_parser_config
except AttributeError:
pass
if config_provider:
try:
config = config or config_provider(domain, intent)
return _expand_parser_config(config)
except Exception as exc: # pylint: disable=broad-except
# Note: this is intentionally broad -- provider could raise any exception
logger.warning('Parser configuration provider raised exception: %s', exc)
# Try object second
try:
config = config or module_conf.PARSER_CONFIG
return _expand_parser_config(config)
except AttributeError:
pass
return _get_default_parser_config()
def _get_default_parser_config():
return None
def _expand_parser_config(config):
# Replace with -- since | has a special meaning for parser
return {head.replace('|', '--'): _expand_group_config(group) for head, group in config.items()}
def _expand_group_config(group_config):
"""Expands a parser group configuration.
A group config can either be a list of dependents or a dictionary with a
field for each dependent.
In the list a dependent can be a string containing the name of the
entity-role type identifier or a dictionary with at least a type field.
In the dictionary the dependent must be another dictionary.
Some example parser configs follow below.
A very simple configuration:
{
'head': ['dependent']
}
A more realistic simple config:
{
'product|beverage': ['size', 'quantity', 'option|beverage'],
'product|baked-good': ['size', 'quantity', 'option|baked-good'],
'store': ['location'],
'option': ['size']
}
A fully specified config:
{
'product': {
'quantity': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 3
},
'size': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
},
'option': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
},
'store': {
'location': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
},
'option': {
'size': {
'left': True,
'right': True,
'precedence': 'left',
'min_instances': 0,
'max_instances': 1
}
}
}
"""
group_config = copy.deepcopy(group_config)
expanded = {}
if isinstance(group_config, (tuple, list, set)):
for dependent in group_config:
config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
try:
dep_type = dependent.pop('type')
config.update(dependent)
except (AttributeError, ValueError):
# simple style config -- dependent is a str
dep_type = dependent
# Replace with -- since | has a special meaning for parser
expanded[dep_type.replace('|', '--')] = config
else:
for dep_type, dep_config in group_config.items():
config = copy.copy(DEFAULT_PARSER_DEPENDENT_CONFIG)
dep_config.pop('type', None)
config.update(dep_config)
# Replace with -- since | has a special meaning for parser
expanded[dep_type.replace('|', '--')] = config
return expanded
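# For example, with the "simple style" list form from the docstring above,
#   _expand_group_config(['size', {'type': 'quantity', 'max_instances': 3}])
# returns a dict where 'size' maps to a copy of DEFAULT_PARSER_DEPENDENT_CONFIG and 'quantity'
# maps to the same defaults with 'max_instances' set to 3 (any '|' in type names becomes '--').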
def _get_config_module(app_path):
module_path = path.get_config_module_path(app_path)
import imp
config_module = imp.load_source(
'config_module_' + os.path.basename(app_path), module_path)
return config_module
def _get_default_nlp_config():
return copy.deepcopy(DEFAULT_NLP_CONFIG)
def get_nlp_config(app_path=None, config=None):
"""Gets the fully specified processor configuration for the app at the
given path.
Args:
app_path (str, optional): The location of the MindMeld app
config (dict, optional): A config object to use. This will
override the config specified by the app's config.py file.
If necessary, this object will be expanded to a fully
specified config object.
Returns:
dict: The nbest inference configuration
"""
if config:
return config
try:
module_conf = _get_config_module(app_path)
except (OSError, IOError):
logger.info('No app configuration file found.')
return _get_default_nlp_config()
# Try provider first
try:
return copy.deepcopy(module_conf.get_nlp_config())
except AttributeError:
pass
# Try object second
try:
config = config or module_conf.NLP_CONFIG
return config
except AttributeError:
pass
return _get_default_nlp_config()
|
the-stack_0_26562
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 28 12:54:17 2020
@author: rjovelin
"""
import pysam
import os
import subprocess
import itertools
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
import regex
import gzip
def read_panel(smmip_panel):
'''
(str) -> dict
Returns a dictionary storing information about the smmips using >mip_key as key
Precondition: smmip_panel is a tab delimited file and expected column names are present
Parameters
----------
- smmip_panel (str): Path to file with smmip information.
File generated by MIPGEN. Contains info about the smmips
included in the panel
'''
D = {}
infile = open(smmip_panel)
header = infile.readline().rstrip().split('\t')
for line in infile:
line = line.rstrip()
if line != '':
line = line.split('\t')
# get smmip key
mip_key = line[header.index('>mip_key')]
D[mip_key] = {i: line[header.index(i)] for i in header}
# convert types
for i in D[mip_key]:
if i == 'logistic_score':
D[mip_key][i] = float(D[mip_key][i])
# convert length to int
elif i in ['target length', 'lig arm length', 'ext arm length',
'for primer length', 'rev primer length', 'umi length',
'pcr product length']:
D[mip_key][i] = int(D[mip_key][i])
# convert coordinates to 0-based
elif i in ['ext_probe_start', 'ext_probe_stop', 'lig_probe_start',
'lig_probe_stop', 'mip_scan_start_position', 'mip_scan_stop_position',
'feature_start_position', 'feature_stop_position']:
if 'start' in i:
D[mip_key][i] = int(D[mip_key][i]) - 1
elif 'stop' in i:
D[mip_key][i] = int(D[mip_key][i])
# chromosome field is not a valid chromosome name
# WARNING. IT WOULD BE BETTER IF THE PANEL FILE HAS CORRECT CHR NAMES INSTEAD OF ADJUSTING
# FOR ALTERNATIVE CHROMOS, IT MAY CAUSE AN ERROR
elif i == 'chr':
if 'chr' not in D[mip_key][i].lower():
D[mip_key][i] = 'chr' + D[mip_key][i]
infile.close()
return D
def reverse_complement(S):
'''
(str) -> str
Returns the reverse complement of sequence S.
Nucleotides other than [ACGT] are labeled N
Parameters
----------
- S (str): A nucleotide sequence
Examples
--------
>>> reverse_complement('aaAA')
'TTtt'
>>> reverse_complement('aaaa')
'tttt'
>>> reverse_complement('Atcg')
'cgaT'
>>> reverse_complement('AtcX')
'NgaT'
>>> reverse_complement('TTCX')
'NGAA'
'''
MapNuc = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A',
'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
s = ''
for i in S:
if i in MapNuc:
s = MapNuc[i] + s
else:
s = 'N' + s
return s
def create_tree(outdir):
'''
(str) -> list
Creates a directory structure in directory outdir
and return a list with the paths of the subdirectories created
Parameters
----------
- outdir (str): Path to directory where directory structure is created
Examples
--------
>>> create_tree('foo')
['foo/out', 'foo/stats', 'foo/alignment']
'''
# create outdir if it doesn't exist
os.makedirs(outdir, exist_ok=True)
# create subdirectories if they don't exist
tree = []
for i in ['out', 'stats', 'alignment']:
j = os.path.join(outdir, i)
os.makedirs(j, exist_ok=True)
tree.append(j)
return tree
def align_fastqs(fastq1, fastq2, reference, outdir, bwa, prefix, remove):
'''
(str, str, str, str, str, str, str, bool) -> None
Align fastq1 and fastq2 using bwa mem into coordinate-sorted and indexed bam
in the out directory in outdir
Parameters
----------
- fastq1 (str): Path to Fastq1
- fastq2 (str): Path to Fastq2
- reference (str): Path to the reference genome
- outdir (str): Path to output directory where subdirectories are written
- bwa (str): Path to the bwa script
- prefix (str): Name of the aligned bam file
- remove (bool): Remove intermediate files if True
'''
# create subdirectories
finaldir, statsdir, aligndir = create_tree(outdir)
# save samfile in alignment directory
samfile = os.path.join(aligndir, os.path.basename(prefix) + '.sam')
# align fastqs
cmd = "{0} mem {1} {2} {3} > {4}".format(bwa, reference, fastq1, fastq2, samfile)
subprocess.call(cmd, shell=True)
# convert sam to bam
bamfile = os.path.join(aligndir, os.path.basename(prefix) + '.bam')
pysam.view('-b', '-h', '-o', bamfile, samfile, catch_stdout=False)
# sort bam on coordinates and index and write to out directory
sortedbam = os.path.join(finaldir, os.path.basename(prefix) + '.sorted.bam')
pysam.sort('-o', sortedbam, bamfile)
pysam.index(sortedbam)
# remove intermediate files
if remove == True:
# remove sam file and non-sorted bam
os.remove(samfile)
os.remove(bamfile)
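# Usage sketch (hedged): paths and the prefix below are illustrative assumptions.
# bwa must be installed and the reference indexed with bwa beforehand, since
# align_fastqs shells out to 'bwa mem'.
# align_fastqs('/path/to/sample_R1.fastq.gz', '/path/to/sample_R2.fastq.gz',
#              '/path/to/reference.fa', '/path/to/outdir', 'bwa', 'sample', remove=True)
# # expected output: outdir/out/sample.sorted.bam and its .bai index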
def track_read(read, d):
'''
(pysam.AlignedSegment, dict) -> None
Update d with read name, read key, value pair in place
Parameters
----------
- read (pysam.AlignedSegment): An aligned segment
- d (dict): Dictionary with read name, list of pysam reads key, value pairs
'''
if read.query_name not in d:
d[read.query_name] = [read]
else:
d[read.query_name].append(read)
def reorder_reads(L):
'''
(list) -> None
Order the list of paired reads L in place with read1, read2
Parameters
----------
- L (list): A list of paired pysam reads
'''
# check if paired reads
assert len(L) == 2
# reorder such that L = [read1, read2]
if L[0].is_read1 == False:
# insert read1 at the beginning
L.insert(0, L.pop(1))
assert L[0].is_read1 == True and L[1].is_read2 == True
def remove_read(d, readname):
'''
(dict, str) -> None
Remove readname from d if readname is present. Modifies d in place
Parameters
----------
- d (dict): Dictionary with read names as keys
- readname (str): The name of a pysam.AlignedSegment read
'''
if readname in d:
del d[readname]
def extract_from_regex(read_seq, p):
'''
(str, _regex.Pattern) -> str
Returns the UMI sequence extracted from read_seq if present or the empty string
Parameters
----------
- read_seq (str): Sequence of the read
- p (_regex.Pattern): Compiled regex pattern used for matching pattern in read sequence
Examples
--------
read_seq = 'TCATGTCTGCTAATGGGAAAGAGTGTCCTAACTGTCCCAGATCGTTTTTTCTCACGTCTTTTCTCCTTTCACTTCTCTTTTTCTTTTTCTTTCTTCTTCTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'
# UMI starts at the beginning of the read
>>> extract_from_regex(read_seq, regex.compile('(?<umi_1>.{12})(?<discard_1>ATGGGAAAGAGTGTCC)'))
'TCATGTCTGCTA'
# UMI does not start at the beginning of the read
read_seq = 'ATCATGTCTGCTAATGGGAAAGAGTGTCCTAACTGTCCCAGATCGTTTTTTCTCACGTCTTTTCTCCTTTCACTTCTCTTTTTCTTTTTCTTTCTTCTTCTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT'
# first nucleotide is part of the new read sequence
>>> extract_from_regex(read_seq, regex.compile('(?<umi_1>.{12})(?<discard_1>ATGGGAAAGAGTGTCC)'))
'TCATGTCTGCTA'
# force UMI to start at the beginning of the read sequence
>>> extract_from_regex(read_seq, regex.compile('(?<umi_1>^.{12})(?<discard_1>ATGGGAAAGAGTGTCC)'))
''
# discard nucleotides upstream of UMI
>>> extract_from_regex(read_seq, regex.compile('(?<discard_1>.*)(?<umi_1>.{12})(?<discard_2>ATGGGAAAGAGTGTCC)'))
'TCATGTCTGCTA'
'''
# initialize umi_seq
umi_seq = ''
# scan through the string looking for a match
m = p.search(read_seq)
# process if match is found
if m:
# collect umi positions
umi_pos = []
for i in m.groupdict():
if 'umi' in i:
umi_pos.append(m.span(i))
# sort umi and discard positions
umi_pos.sort()
# get umi sequences
umi_seq = ''.join([read_seq[i[0]:i[1]] for i in umi_pos])
return umi_seq
def get_template_positions(L):
'''
(list) -> tuple
Return a tuple with template start and end coordinates 0-based
Parameters
----------
- L (list): List of pysam reads
'''
# make a list of read coordinates
read_positions = []
for read in L:
# add start, end positions for each paired read
read_positions.append(read.reference_start)
read_positions.append(read.reference_end)
# sort list --> template coordinates are defined by 1st and last positions
read_positions.sort()
return read_positions[0], read_positions[-1]
def get_target_coordinates(panel, smmip):
'''
(dict, str) -> (int, int)
Return a tuple with the start and end positions (0-based) of the target
region for the given smmip
Parameters
----------
- panel (dict): Panel information
- smmip (str): A specific smmip in the panel
'''
# get expected target coordinates (including probes)
L = sorted([panel[smmip]['ext_probe_start'], panel[smmip]['ext_probe_stop'],
panel[smmip]['lig_probe_start'], panel[smmip]['lig_probe_stop']])
start, end = L[0], L[-1]
return start, end
def compute_overlap(template_start, template_end, target_start, target_end):
'''
(int, int, int, int) -> int
Return the length of overlap between a smMIP and a read pair
Precondition: The reads are mapped on the chromosome of the target region
Parameters
----------
- template_start (int): Start position of the template region delimited by read1 and read2 (0-based, included)
- template_end (int): End position of the template region delimited by read1 and read2 (0-based excluded)
Refers to position 1 nucleotide after the last nucleotide of the template
- target_start (int): Start position of the target region on the reference (0-based, included)
- target_end (int): End position of the target region of the reference (0-based excluded).
Refers to position 1 nucleotide after the last nucleotide of the target
'''
overlap = 0
if target_start > template_end or template_start > target_end:
overlap = 0
elif template_start <= target_start <= template_end:
if target_end >= template_end:
overlap = template_end - target_start
else:
overlap = target_end - target_start
elif target_start <= template_start <= target_end:
if template_end >= target_end:
overlap = target_end - template_start
else:
overlap = template_end - template_start
return overlap
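# Worked examples (hedged; coordinates chosen for illustration only):
# >>> compute_overlap(100, 250, 200, 300)   # template partially covers target
# 50
# >>> compute_overlap(100, 250, 300, 400)   # no overlap
# 0
# >>> compute_overlap(100, 250, 120, 180)   # target fully inside template
# 60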
def find_overlapping_candidates(chromo, panel, template_start, template_end):
'''
(str, dict, int, int) -> list
Return a list of candidate smmips overlapping with a read pair, reverse-sorted by the length of the overlap
Parameters
----------
- chromo (str): Chromosome name
- panel (dict): Panel information
- template_start (int): Start position of the template region delimited by read1 and read2 (0-based, included)
- template_end (int): End position of the template region delimited by read1 and read2 (0-based excluded)
Refers to position 1 nucleotide after the last nucleotide of the template
'''
# make a list of mips on chromo
mips = [i for i in panel if panel[i]['chr'] == chromo]
# find overlapping smmips on chromo
candidates = []
if len(mips) != 0:
for smmips in mips:
# get expected target coordinates (including probes)
Start, End = get_target_coordinates(panel, smmips)
# compute overlap
overlap = compute_overlap(template_start, template_end, Start, End)
if overlap > 0:
# found a smmip candidate
candidates.append([overlap, smmips])
# sort candidate smmips on overlap length
candidates.sort(key=lambda x:x[0])
# reverse sort candidate smmips by overlap length
candidates.reverse()
return candidates
def align_sequences(seq1, seq2, match, mismatch, gap_opening, gap_extension):
'''
(str, str, num, num, num, num) -> list
Return the possible local pairwise alignments between
seq1 and seq2 as a list of tuples each containing
the aligned sequences of seq1 and seq2, the alignment score,
the start and end of the alignment.
Parameters
----------
- seq1 (str): Sequence 1
- seq2 (str): Sequence 2
- match (float or int): Score of identical characters
- mismatch (float or int): Score of non-identical characters
- gap_opening (float or int): Score for opening a gap
- gap_extension (float or int): Score for extending an open gap
'''
alignments = pairwise2.align.localms(seq1, seq2, match, mismatch, gap_opening, gap_extension)
return alignments
def get_highest_scoring_aligment(alignments):
'''
(list) -> tuple
Return the alignment with highest score among the possible alignments.
Alignment is a tuple with the aligned sequences of seq1 and seq2,
the alignment score, the start and end of the alignment.
Parameters
----------
- alignments (list): List of possible pairwise alignments
generated by AlignSequences
'''
alignments.sort(key = lambda x: x[1])
best = alignments[-1]
return best
def is_empty_smmip(seq1, seq2, match, mismatch, gap_opening, gap_extension, overlap_threshold, matches_threshold):
'''
(str, str, float, float, float, float, float, float) -> bool
Return True if the read pair did not capture the target (i.e., the smmip is empty) and False otherwise
Parameters
----------
- seq1 (str): Sequence of read 1
- seq2 (str): Sequence of read 2
- match (float or int): Score of identical characters
- mismatch (float or int): Score of non-identical characters
- gap_opening (float or int): Score for opening a gap
- gap_extension (float or int): Score for extending an open gap
- overlap_threshold (float or int): Cut-off value for the length of
the de-gapped overlap between read1 and read2
- matches_threshold (float or int): Cut-off value for the number of matching
positions within the de-gapped overlap
between read1 and read2
'''
# initiate variable
empty = False
# align upper cases sequences
seq1, seq2 = seq1.strip().upper(), seq2.strip().upper()
# do a local alignment between r1 and rev. compl of r2
alignments = align_sequences(seq1, reverse_complement(seq2), match, mismatch, gap_opening, gap_extension)
# check if alignments exist
if len(alignments) == 0:
# consider empty smmip
empty = True
else:
# find the best scoring alignment
best = get_highest_scoring_aligment(alignments)
# get overlap length without gapped positions
ali1, matches, ali2, score, end = format_alignment(*best).split('\n')
overlap = matches.replace(' ', '')
# count matching positions within de-gapped overlap
matching = overlap.count('|')
# classify the read pair as an empty smmip based on the de-gapped overlap and match fraction
if len(overlap) < overlap_threshold and matching / len(overlap) > matches_threshold:
# consider empty smmip
empty = True
return empty
def add_custom_tag(read, tag, value):
'''
(pysam.AlignedSegment, str, str) -> None
Updates the read in place with tag: value
Parameters
----------
- read (pysam.AlignedSegment): An aligned segment
- tag (str): 2-letter code of the custom tag. See samtools tags for specs
- value (str): The value of the tag to be added. Type is string (Z datatype)
'''
# get the read tags
read_tags = read.get_tags()
# replace tags with tag: value (str)
read_tags += [(tag, value, 'Z')]
read.set_tags(read_tags)
def update_read_tags(read, smmip_name, umi_seq):
'''
(pysam.AlignedSegment, str, str) -> None
Update the read in place with tag: value
Parameters
----------
- read (pysam.AlignedSegment): An aligned segment
- smmip_name (str): Name of the smmip mapping to the read
- umi_seq (str): UMI sequence
'''
# add smmip tag
add_custom_tag(read, 'SP', smmip_name)
# add UMI tag
add_custom_tag(read, 'MI', umi_seq)
# add UMI sequence
add_custom_tag(read, 'OX', umi_seq)
def remove_bam_extension(bamfile):
'''
(str) -> str
Returns the path to bam file stripped from its file extension if extension
is ".sorted.bam", ".bam", ".assigned_reads.bam", ".unassigned_reads.bam",
".empty_reads.bam" or returns path to bam file
Parameters
----------
- bamfile (str): Path to bam file
'''
if '.temp.' in bamfile:
return bamfile[:bamfile.rfind('.temp.')]
else:
if bamfile[-len('.sorted.bam'):] == '.sorted.bam':
return bamfile[: -len('.sorted.bam')]
elif bamfile[-len('.assigned_reads.bam'):] == '.assigned_reads.bam':
return bamfile[: -len('.assigned_reads.bam')]
elif bamfile[-len('.unassigned_reads.bam'):] == '.unassigned_reads.bam':
return bamfile[: -len('.unassigned_reads.bam')]
elif bamfile[-len('.empty_reads.bam'):] == '.empty_reads.bam':
return bamfile[: -len('.empty_reads.bam')]
elif bamfile[-len('.bam'):] == '.bam':
return bamfile[: -len('.bam')]
else:
return bamfile
def get_bam_header(bamfile):
'''
(str) -> dict
Returns the header of the bam file as a dictionary
Parameters
----------
- bamfile (str): Path to the bam file
'''
# parse bam header
infile = pysam.AlignmentFile(bamfile)
header = dict(infile.header)
infile.close()
return header
def find_matching_smmip(candidates, panel, seq1, seq2, upstream_nucleotides, umi_length, max_subs):
'''
(list, dict, str, str, int, int, int) -> (list, list)
Returns a list of mip names matching a given read pair and a list of UMI sequences extracted from the matching reads
Parameters
----------
- candidates (list): List of candidate smMIPs (smMIPs overlapping read1 and read2) reverse-sorted by overlap length
Each element of the list is a 2-item list with overlap length and smMIP Id
- panel (dict): Panel information
- seq1 (str): Sequence of read 1
- seq2 (str): Sequence of read 2
- upstream_nucleotides (int): Maximum number of nucleotides upstream the UMI sequence
- umi_length (int): Length of the UMI
- max_subs (int): Maximum number of substitutions allowed in the probe sequence
'''
matching, umi_sequences = [], []
for i in candidates:
extension_probe = panel[i[1]]['ext_probe_sequence']
ligation_probe = panel[i[1]]['lig_probe_sequence']
p1 = regex.compile('(?P<discard_1>^.{0,%s})' %upstream_nucleotides + '(?P<umi_1>.{%s})' %umi_length + '(' + reverse_complement(ligation_probe) + ')' + '{s<=%s}.*' % max_subs)
p2 = regex.compile('(?P<discard_1>^.{0,%s})' %upstream_nucleotides + '(?P<umi_1>.{%s})' %umi_length + '(' + extension_probe + ')' + '{s<=%s}.*' % max_subs)
umi_seq1 = extract_from_regex(seq1, p1)
umi_seq2 = extract_from_regex(seq2, p2)
if umi_seq1 and umi_seq2:
# found matching probe
matching.append(panel[i[1]]['mip_name'])
umi_sequences.append(umi_seq1 + umi_seq2)
# stop searching for matching probes if found
break
return matching, umi_sequences
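# Illustration (hedged; probe and parameter values are hypothetical): with
# upstream_nucleotides=0, umi_length=4, max_subs=2 and a ligation probe 'AAACCC'
# (reverse complement 'GGGTTT'), p1 above is equivalent to
# regex.compile('(?P<discard_1>^.{0,0})(?P<umi_1>.{4})(GGGTTT){s<=2}.*')
# i.e. a 4-nt UMI at the start of read 1 followed by the ligation arm,
# allowing up to 2 substitutions within the arm sequence.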
def record_read(metrics, category, newbam, L):
'''
(dict, str, pysam.libcalignmentfile.AlignmentFile, list) -> None
Update read counter category in place for each read in L and write each read to newbam
Parameters
----------
- metrics (dict): Dictionary to track read counts
- category (str): read category, key in metrics. Values are: 'reads', 'assigned', 'not_assigned', 'assigned_empty', 'assigned_not_empty'
- newbam (pysam.libcalignmentfile.AlignmentFile): Bam file open for writing in binary mode
- L (list): list of pysam.AlignedSegment reads
'''
for read in L:
# update counter
metrics[category] += 1
# write read to newbam
newbam.write(read)
def sort_index_bam(filename, suffix):
'''
(str, str) -> None
Sort and index the bam file. Generates a coordinate-sorted bam named remove_bam_extension(filename) + suffix and its .bai index
Parameters
----------
- filename (str): Path to bam file to be sorted
- suffix (str): Suffix string to be part of the sorted file name, appended to filename
'''
sorted_file = remove_bam_extension(filename) + suffix
pysam.sort('-o', sorted_file, filename)
pysam.index(sorted_file)
def get_positions(start, end, chromo_length):
'''
(int | None, int | None, int) -> (int, int)
Returns a tuple with 0-based start and end positions of a contig region
Parameters
----------
- start (int or None): Start position if defined
- end (int or None): End position if defined
- chromo_length (int): Length of the reference (chromosome)
'''
if start:
start_pos = start
else:
start_pos = 0
if end:
# check if end position > chromosome length
if end > chromo_length:
end_pos = chromo_length
else:
end_pos = end
else:
end_pos = chromo_length
return start_pos, end_pos
def count_reads(bamfile, chromosome, start, end):
'''
(str, str, int | None, int | None) -> int
Returns the number of reads in a genomic region.
If chromosome is not defined, returns the number of reads mapped to the reference genome.
If chromosome is defined but start and end are not defined, returns the number of reads mapped to chromosome.
If region chromosome, start and end are defined, returns the number of reads on that region.
Parameters
----------
- bamfile (str): Path to the input bam sorted on coordinates
- chromosome (str | None): Specifies the genomic region in the alignment file where reads are mapped.
Examine reads on chromosome if used and on all chromosomes if None
Chromosome format must match format in the bam header
- start (int or None): Start position of region on chromosome if defined
- end (int or None): End position of region on chromosome if defined
'''
infile = pysam.AlignmentFile(bamfile, 'rb')
if chromosome:
chromo_length = infile.get_reference_length(chromosome)
start_pos, end_pos = get_positions(start, end, chromo_length)
read_count = infile.count(contig=chromosome,start=start_pos, end=end_pos, until_eof=False, read_callback='all')
else:
read_count = infile.count(until_eof=False, read_callback='all')
infile.close()
return read_count
def assign_reads_to_smmips(bamfile, assigned_file, empty_file, panel, upstream_nucleotides, umi_length, max_subs, match, mismatch, gap_opening, gap_extension, alignment_overlap_threshold, matches_threshold, chromosome, start, end):
'''
(str, pysam.AlignmentFile, pysam.AlignmentFile, dict, int, int, int, float, float, float, float, float, float, str | None, int | None, int | None) -> (dict, dict)
Return a tuple of dictionaries with read counts. The first dictionary counts
total reads, assigned and unassigned reads as well as empty smmips.
The second dictionary tracks the number of reads per smmip
Write assigned reads, assigned but empty smmips to 2 separate output bams
Assigned reads are tagged with the smMip name and the extracted UMI sequence
Pre-condition: bamfile is sorted on coordinates and indexed (a .bai exists in the same directory)
Parameters
----------
- bamfile (str): Path to the input bam sorted on coordinates
- assigned_file (pysam.AlignmentFile): Bam file opened to write assigned reads
- empty_file (pysam.AlignmentFile): Bam file opened to write empty reads
- panel (dict): Panel information
- upstream_nucleotides (int): Maximum number of nucleotides upstream the UMI sequence
- umi_length (int): Length of the UMI
- max_subs (int): Maximum number of substitutions allowed in the probe sequence
- match (float or int): Score of identical characters
- mismatch (float or int): Score of non-identical characters
- gap_opening (float or int): Score for opening a gap
- gap_extension (float or int): Score for extending an open gap
- alignment_overlap_threshold (float or int): Cut-off value for the length of the de-gapped overlap between read1 and read2
- matches_threshold (float or int): Cut-off value for the number of matching positions within the de-gapped overlap between read1 and read2
- chromosome (str | None): Specifies the genomic region in the alignment file where reads are mapped.
Examine reads on chromosome if used and on all chromosomes if None
Chromosome format must match format in the bam header
- start (int | None): Start position of region on chromosome if defined
- end (int | None): End position of region on chromosome if defined
'''
# count total reads in file, reads in region, assigned and unassigned reads
metrics = {'assigned': 0, 'assigned_empty': 0, 'assigned_not_empty': 0}
# count the total number of reads in file, excluding unmapped reads, secondary and supplementary alignments
with pysam.AlignmentFile(bamfile, 'rb') as infile:
total_count = infile.count(until_eof=False, read_callback='all')
metrics['total'] = total_count
# count the number of reads in region, ignoring unmapped reads, secondary and supplementary alignments
metrics['reads'] = count_reads(bamfile, chromosome, start, end)
# count smmips
smmip_counts = {panel[i]['mip_name'] : {'empty':0, 'not_empty':0} for i in panel}
# create AlignmentFile object to read input bam
infile = pysam.AlignmentFile(bamfile, 'rb')
# make a list of chromosomes in panel
panel_chromosomes = [panel[i]['chr'] for i in panel]
# make a list of chromosomes from the bam header
header = get_bam_header(bamfile)
bam_chromosomes = [i['SN'] for i in header['SQ']]
# use specific chromosome if defined
# check that chromosome is in the bam header
if chromosome:
bam_chromosomes = [chromosome] if chromosome in bam_chromosomes else []
# only look at reads expected to map on chromosomes in panel
# discard reads mapping to other chromosomes
for contig in bam_chromosomes:
# create dictionary to keep track of read pairs
D = {}
if contig in panel_chromosomes:
# get the start and end positions of region of interest. include all contig if start and end are not defined
chromo_length = infile.get_reference_length(contig)
start_pos, end_pos = get_positions(start, end, chromo_length)
# loop over all reads mapping to contig
for query in infile.fetch(contig=contig,start=start_pos, end=end_pos, until_eof=False):
qname = query.query_name
# ignore unmapped reads, secondary and supplementary alignments
if query.is_secondary == False and query.is_supplementary == False and query.is_unmapped == False:
# collect all reads with same query name in D until all are found
track_read(query, D)
if qname in D:
# process reads if paired reads have been found
if len(D[qname]) == 2:
# reorder reads
reorder_reads(D[qname])
# get template coordinates
template_start, template_end = get_template_positions(D[qname])
# find candidate overlapping smmips, reverse-sorted by overlap length
candidates = find_overlapping_candidates(contig, panel, template_start, template_end)
if len(candidates) == 0:
# no overlapping smmip. discard read
remove_read(D, qname)
else:
# get read sequences
seq1, seq2 = D[qname][0].get_forward_sequence(), D[qname][1].get_forward_sequence()
# find matching probes
# make a lists of smmips with matching probes and corresponding umis
matching, umi_sequences = find_matching_smmip(candidates, panel, seq1, seq2, upstream_nucleotides, umi_length, max_subs)
if len(matching) == 0:
# no matching probes. discard read
remove_read(D, qname)
else:
# get the name of matching smmip and corresponding umi
matching_smmip = matching[0]
umi_seq = umi_sequences[0]
# get the name of the smmip with greatest overlap
overlapping_smmip = panel[candidates[0][1]]['mip_name']
# check if the smmip with matching probes is the same as the smmip with the greatest overlap
if matching_smmip != overlapping_smmip:
# conflict between pre-assigned smmip and overlapping smmip. discard read
remove_read(D, qname)
else:
# assign read. add tags
for read in D[qname]:
update_read_tags(read, matching_smmip, umi_seq)
metrics['assigned'] += 1
# check if read is empty
if is_empty_smmip(seq1, seq2, match, mismatch, gap_opening, gap_extension, alignment_overlap_threshold, matches_threshold):
# consider an empty smmip. write reads to empty file
record_read(metrics, 'assigned_empty', empty_file, D[qname])
remove_read(D, qname)
smmip_counts[matching_smmip]['empty'] += 2
else:
# non empty reads. write to outputbam
record_read(metrics, 'assigned_not_empty', assigned_file, D[qname])
remove_read(D, qname)
smmip_counts[matching_smmip]['not_empty'] += 2
# close bams
infile.close()
# update metrics dict
# unassigned reads are reads not assigned in the region of interest
metrics['not_assigned'] = metrics['reads'] - metrics['assigned']
assigned_ratio = round(metrics['assigned'] / metrics['reads'] * 100, 4) if metrics['reads'] != 0 else 0
unassigned_ratio = round(metrics['not_assigned'] / metrics['reads'] * 100, 4) if metrics['reads'] != 0 else 0
empty_ratio = round(metrics['assigned_empty'] / metrics['assigned'] * 100, 4) if metrics['assigned'] != 0 else 0
metrics.update({'percent_assigned': assigned_ratio})
metrics.update({'percent_not_assigned': unassigned_ratio})
metrics.update({'percent_empty_smmips': empty_ratio})
return metrics, smmip_counts
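# Usage sketch (hedged): file names and parameter values are illustrative
# assumptions only. The output bams are opened in write mode with the input
# bam header as template, then sorted and indexed with sort_index_bam.
# panel = read_panel('/path/to/smmip_panel.txt')
# template = pysam.AlignmentFile('/path/to/sample.sorted.bam', 'rb')
# assigned = pysam.AlignmentFile('/path/to/sample.temp.assigned_reads.bam', 'wb', template=template)
# empty = pysam.AlignmentFile('/path/to/sample.temp.empty_reads.bam', 'wb', template=template)
# template.close()
# metrics, smmip_counts = assign_reads_to_smmips(
#     '/path/to/sample.sorted.bam', assigned, empty, panel,
#     upstream_nucleotides=0, umi_length=4, max_subs=0,
#     match=2, mismatch=-1, gap_opening=-5, gap_extension=-1,
#     alignment_overlap_threshold=60, matches_threshold=0.7,
#     chromosome=None, start=None, end=None)
# assigned.close()
# empty.close()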
def get_consecutive_items(L):
'''
(list) -> generator
Returns a generator with 1st and last item of consecutive items in L
Parameters
----------
- L (list): List
Examples
--------
>>> list(get_consecutive_items([1, 2, 3, 6, 7, 8, 10,11, 24, 25, 26, 27, 28, 30, 50]))
[(1, 3), (6, 8), (10, 11), (24, 28), (30, 30), (50, 50)]
'''
# remove duplicate and sort L
L = sorted(set(L))
for key, group in itertools.groupby(enumerate(L), lambda t: t[1] - t[0]):
group = list(group)
# return a generator with 1st and last item of consecutive items in L
yield group[0][1], group[-1][1]
def make_non_overlapping_regions(panel):
'''
(dict) -> dict
Returns a dictionary with lists of coordinates (0-based) for non-overlapping
regions on each chromosome
Parameters
----------
- panel (dict): Panel information
'''
# collect all positions per chromosome
D = {}
for i in panel:
chromo = panel[i]['chr']
# get target coordinates 0-based
Start, End = get_target_coordinates(panel, i)
if chromo not in D:
D[chromo] = []
# adjust End (+1) so the last target position is included; get_consecutive_items then reports first/last of consecutive runs
D[chromo].extend([j for j in range(Start, End+1)])
# remove duplicate positions
for chromo in D:
D[chromo] = sorted(list(set(D[chromo])))
# find non-overlapping regions on each chromosome
regions = {}
for chromo in D:
# get consecutive regions
regions[chromo] = [j for j in get_consecutive_items(D[chromo])]
return regions
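# Worked example (hedged; a minimal hypothetical panel with only the fields used here):
# >>> panel = {'m1': {'chr': 'chr1', 'ext_probe_start': 0, 'ext_probe_stop': 5,
# ...                 'lig_probe_start': 10, 'lig_probe_stop': 15},
# ...          'm2': {'chr': 'chr1', 'ext_probe_start': 12, 'ext_probe_stop': 20,
# ...                 'lig_probe_start': 25, 'lig_probe_stop': 30}}
# >>> make_non_overlapping_regions(panel)
# {'chr1': [(0, 30)]}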
def get_base_position(read, pos):
'''
(pysam.PileupRead, int) -> (str, str)
Returns a tuple with reference and alternative bases at position pos
Parameters
----------
- read (pysam.PileupRead): Representation of a read aligned to a particular position in the reference sequence.
- pos (int): Position in the reference genome (0-based)
'''
# get aligned read, ref pos and ref base
pairs = read.alignment.get_aligned_pairs(with_seq=True)
# read.indel looks ahead to see if indel at next position(s)
if read.indel == 0:
# no indel, record ref and alt
# get index of pileupcolumn pos in aligned pairs
j = [i[1] for i in pairs].index(pos)
# record base on ref and read
ref_base = pairs[j][-1].upper()
alt_base = read.alignment.query_sequence[read.query_position].upper()
elif read.indel > 0:
# next position is an insertion
# get index of pileupcolumn pos in aligned pairs
j = [i[1] for i in pairs].index(pos)
# record base on ref and insertion on read
ref_base = pairs[j][-1].upper()
alt_base = read.alignment.query_sequence[read.query_position:read.query_position + abs(read.indel) + 1].upper()
elif read.indel < 0:
# next position is deletion
# get index of pileupcolumn pos in aligned pairs
j = [i[1] for i in pairs].index(pos)
# record base on ref at pos + ref bases deleted on read and base on read
ref_base = ''.join([i[-1] for i in pairs[j: j + abs(read.indel) + 1]]).upper()
alt_base = read.alignment.query_sequence[read.query_position]
return ref_base, alt_base
def track_allele_counts(D, pos, mipname, UMI, allele):
'''
(dict, int, str, str, tuple) -> None
Update dictionary D, which tracks allele counts per position, smmip and UMI, with the allele
information at pos for a given read. Modify D in place
Parameters
----------
- D (dict): Dictionary with allele count per position and smmip
- pos (int): Position in the reference genome
- mipname (str): Name of a smmip
- UMI (str): UMI sequence
- allele (tuple): Tuple with reference nucleotide and base in read at position
'''
# get ref and alt bases
ref_base, alt_base = allele
# keep track of ref_base at pos
if pos not in D:
D[pos] = {}
if 'ref_base' not in D[pos]:
# check ref_base
# ref_base may include the deleted region when the only reads overlapping the position carry a deletion,
# so keep only the first reference base
if len(ref_base) == 1:
D[pos]['ref_base'] = ref_base
else:
D[pos]['ref_base'] = ref_base[0]
# count the number of reads supporting this allele
if 'smmips' not in D[pos]:
D[pos]['smmips'] = {}
if mipname not in D[pos]['smmips']:
D[pos]['smmips'][mipname] = {}
if UMI not in D[pos]['smmips'][mipname]:
D[pos]['smmips'][mipname][UMI] = {}
if allele in D[pos]['smmips'][mipname][UMI]:
D[pos]['smmips'][mipname][UMI][allele] += 1
else:
D[pos]['smmips'][mipname][UMI][allele] = 1
def get_allele_counts_target(bamfile, contig, region_start, region_end, max_depth, truncate, ignore_orphans, stepper):
'''
(str, str, int, int, int, bool, bool, str) -> dict
Returns a dictionary keeping track of allele counts at each position per smmip
Parameters
----------
- bamfile (str): Path to the coordinate-sorted and indexed bam file with annotated reads with smMIP and UMI tags
- contig (str): Chromosome name, e.g. chrN
- region_start (int): Start index of the region of interest. 0-based half opened
- region_end (int): End index of the region of interest. 0-based half opened
- max_depth (int): Maximum read depth
- truncate: Consider only pileup columns within interval defined by region start and end if True
- ignore_orphans: Ignore orphan reads (paired reads not in proper pair) if True
- stepper: Controls how the iterator advances. Accepted values:
'all': skip reads with following flags: BAM_FUNMAP, BAM_FSECONDARY, BAM_FQCFAIL, BAM_FDUP
'nofilter': uses every single read turning off any filtering
'''
# create a dict to store consensus seq info
# {pos: {'ref_base': ref_base, 'smmips': {smmip_name: {allele: count}}}}
D = {}
with pysam.AlignmentFile(bamfile, "rb") as reader:
# loop over pileup columns
for pileupcolumn in reader.pileup(contig, region_start, region_end, truncate=truncate, ignore_orphans=ignore_orphans, max_depth=max_depth, stepper=stepper):
# get column position
pos = int(pileupcolumn.reference_pos)
# restrict pileup columns to the genomic region
if region_start <= pos < region_end:
# loop over pileupreads in pileup column
for read in pileupcolumn.pileups:
# skip unmapped reads and reads mapping to multiple positions
if any([read.alignment.is_unmapped, read.alignment.is_secondary, read.alignment.is_supplementary]) == False:
# get smmip name from tags
tags = read.alignment.get_tags()
tags = {i[0]:i[1] for i in tags}
mipname = tags['SP']
# get UMI from tags
UMI = tags['MI']
# skip positions with deletions or ref not defined
# events are captured at the position before they occur
if not read.is_del and not read.is_refskip:
# read.indel looks ahead to see if indel at next position(s)
# 0 --> not indel; > 0 --> insertion; < 0 --> deletion
# get ref and alt bases at position pos
allele = get_base_position(read, pos)
ref_base, alt_base = allele
# count allele per smmip and position
track_allele_counts(D, pos, mipname, UMI, allele)
return D
def count_alleles_across_panel(bamfile, panel, max_depth, truncate, ignore_orphans, stepper):
'''
(str, dict, int, bool, bool, str) -> dict
Parameters
----------
- bamfile (str): Path to the coordinate-sorted and indexed bam with annotated read with smMIP and UMI
- panel (dict): Panel information
- max_depth (int): Maximum read depth
- truncate: Consider only pileup columns within interval defined by region start and end if True
- ignore_orphans: Ignore orphan reads (paired reads not in proper pair) if True
- stepper: Controls how the iterator advances. Accepted values:
'all': skip reads with following flags: BAM_FUNMAP, BAM_FSECONDARY, BAM_FQCFAIL, BAM_FDUP
'nofilter': uses every single read turning off any filtering
Return a dictionary with allele counts per smmips and UMI and target regions
'''
# get coordinates of non-overlapping regions on each chromosome
regions = make_non_overlapping_regions(panel)
# track allele counts for each region of all chromosomes in panel
D = {}
for chromo in regions:
D[chromo] = []
for target in regions[chromo]:
c = get_allele_counts_target(bamfile, chromo, target[0], target[1], max_depth, truncate, ignore_orphans, stepper)
D[chromo].append(c)
return D
def sort_chromos(L):
'''
(list) -> list
Return a list of chromosome names sorted numerically, with non-numeric
chromosomes sorted at the end of the list
Parameters
----------
- L (list): List of chromosome names
'''
# make lists of chromos
numerical, non_numerical, non_chr = [], [], []
for chromo in L:
if chromo.startswith('chr'):
chromo = chromo.replace('chr', '')
if chromo.isnumeric() == False:
non_numerical.append(chromo)
else:
numerical.append(int(chromo))
else:
non_chr.append(chromo)
# sort each list separately
non_chr.sort()
numerical.sort()
non_numerical.sort()
# add back chr
for i in range(len(non_numerical)):
non_numerical[i] = 'chr' + non_numerical[i]
for i in range(len(numerical)):
numerical[i] = 'chr' + str(numerical[i])
# concatenate lists
return numerical + non_numerical + non_chr
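# Worked example (hedged):
# >>> sort_chromos(['chrX', 'chr10', 'chr2', 'MT'])
# ['chr2', 'chr10', 'chrX', 'MT']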
def count_variants(d):
'''
(dict) -> dict
Returns a dictionary with single nucleotide and indel counts
Parameters
----------
- d (dict): Dictionary with allele counts
'''
# count each allele, initiate with single nucleotides
counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0, 'N': 0}
for allele in d:
# count single nucleotides
if len(allele[0]) == 1 and len(allele[1]) == 1:
# snv or no change
counts[allele[1]] += d[allele]
else:
# indel, record allele and its count
if allele in counts:
counts[allele] += d[allele]
else:
counts[allele] = d[allele]
return counts
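# Worked example (hedged; alleles are (ref, alt) tuples as built by get_base_position):
# >>> count_variants({('A', 'A'): 5, ('A', 'G'): 2, ('AT', 'A'): 1})
# {'A': 5, 'C': 0, 'G': 2, 'T': 0, 'N': 0, ('AT', 'A'): 1}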
def list_indel_counts(counts):
'''
(dict) -> (list, list)
Returns a tuple with lists of 2-item lists (counts and alleles) for insertions
and deletions sorted by allele counts
Parameters
----------
- counts (dict): Dictionary of allele counts
'''
# make lists of indels and indel counts
D, I = [], []
for allele in counts:
if len(allele) == 2:
# indel
if len(allele[1]) > 1:
# insertions
I.append([counts[allele], allele])
elif len(allele[0]) > 1:
# deletions
D.append([counts[allele], allele])
D.sort()
I.sort()
return I, D
def get_genomic_positions(D):
'''
(dict) -> dict
Returns a dictionary with all positions (1-based) on each chromosome with
variant information from regions in the panel
Parameters
----------
- D (dict): Dictionary with allele counts per smmips and UMI and target regions.
Output from function count_alleles_across_panel
'''
# create a dict {chromo: [positions]}
positions = {}
# make a list of positions for each chromosome
for chromo in D:
positions[chromo] = []
# loop dicts with allele counts in target on chromo
for C in D[chromo]:
# make a list of 1-based positions (as strings)
pos = list(map(lambda x: str(int(x) + 1), list(set(C.keys()))))
positions[chromo].extend(pos)
return positions
def parse_cosmic(reference, cosmicfile, positions):
'''
(str, str, dict) -> dict
Returns a dictionary with mutation information at each position in reference
Parameters
----------
- reference (str): Reference genome. Accepted values: 37 or 38
- cosmicfile (str): Cosmic file. Tab separated table of all COSMIC coding
point mutations from targeted and genome wide screens
- positions (dict): Dictionary with all positions (1-based) on each chromosome
with variant information from regions in the panel
'''
infile = gzip.open(cosmicfile, 'rt', errors='ignore')
header = infile.readline()
# create a dict {chromo: {pos: [{pos:x, gene_name: x, cosmic_id: x, cosmic_mutation: x, description: x}]
D = {}
for line in infile:
line = line.rstrip()
if line != '':
line = line.split('\t')
genome = line[24]
if genome == reference:
chromo, position = line[25].split(':')
chromo = 'chr' + chromo
position = position.split('-')[0]
if chromo in positions and position in positions[chromo]:
gene_name = line[0]
cosmic_id = line[17]
description = line[21]
mutation = line[39]
d = {'position': position, 'gene_name': gene_name,
'cosmic_id': cosmic_id, 'description': description, 'mutation': mutation}
if chromo not in D:
D[chromo] = {}
# records last mutation if multiple mutations at a given position
D[chromo][position] = d
infile.close()
return D
def write_table_variants(D, outputfile, cosmic):
'''
(dict, str, dict) -> None
Writes a summary table with all nucleotide and indel counts detected for each smMIP
Parameters
----------
- D (dict): Dictionary with allele counts per smmips and target regions
- outputfile (str): Path to output file
- cosmic (dict): Cosmic information about point mutations in reference GRCh37 or GRCh38
'''
# make a sorted list of chromosomes
chromosomes = sort_chromos([chromo for chromo in D])
# open file for writing
newfile = open(outputfile, 'w')
Header = ['CHROM', 'POS', 'SMMIP', 'UMI', 'REF', 'A', 'C', 'G', 'T', 'N', 'I_(ref,ins)', 'I_counts', 'D_(ref,del)', 'D_counts', 'RAWDP', 'REF_FREQ',
'GENE_NAME', 'COSMIC_ID', 'DESCRIPTION', 'MUTATION']
newfile.write('\t'.join(Header) + '\n')
# loop over chromosomes
for chromo in chromosomes:
# loop dicts with allele counts in target on chromo
for C in D[chromo]:
# make a sorted list of positions
positions = sorted(list(map(lambda x: int(x), list(set(C.keys())))))
# loop over positions
for pos in positions:
# loop over smmip
for smmip_name in sorted(C[pos]['smmips'].keys()):
# get the reference
ref_base = C[pos]['ref_base']
# count snvs and indels for each UMI
UMIs = sorted(list(C[pos]['smmips'][smmip_name].keys()))
umi_counts = [count_variants(C[pos]['smmips'][smmip_name][umi]) for umi in UMIs]
umi_insertions, umi_deletions = [], []
for i in umi_counts:
insertion, deletion = list_indel_counts(i)
umi_insertions.append(insertion)
umi_deletions.append(deletion)
# count snvs and indels at the mip level (adding counts from each UMI)
smmip_counts = {}
for i in umi_counts:
for j in i:
if j in smmip_counts:
smmip_counts[j] += i[j]
else:
smmip_counts[j] = i[j]
smmip_insertions, smmip_deletions = list_indel_counts(smmip_counts)
# compute raw depth
rawdepth = 0
for i in smmip_counts:
rawdepth += smmip_counts[i]
# compute ref frequency
ref_freq = smmip_counts[ref_base] / rawdepth
# write info for counts at smmip level
line = [chromo, pos + 1, smmip_name, 'all', ref_base, smmip_counts['A'], smmip_counts['C'],
smmip_counts['G'], smmip_counts['T'], smmip_counts['N'],
';'.join([str(i[1]) for i in smmip_insertions]), ';'.join([str(i[0]) for i in smmip_insertions]),
';'.join([str(i[1]) for i in smmip_deletions]), ';'.join([str(i[0]) for i in smmip_deletions]),
str(rawdepth), str(round(ref_freq, 5))]
# add cosmic info
if chromo in cosmic and str(pos+1) in cosmic[chromo]:
line.extend([cosmic[chromo][str(pos+1)]['gene_name'],
cosmic[chromo][str(pos+1)]['cosmic_id'],
cosmic[chromo][str(pos+1)]['description'],
cosmic[chromo][str(pos+1)]['mutation']])
else:
line.extend(['NA', 'NA', 'NA', 'NA'])
newfile.write('\t'.join(list(map(lambda x: str(x), line))) + '\n')
# write info for counts at umi level
for i in range(len(umi_counts)):
# compute raw depth
rawdepth = 0
for j in umi_counts[i]:
rawdepth += umi_counts[i][j]
ref_freq = umi_counts[i][ref_base] / rawdepth
line = [chromo, pos + 1, smmip_name, UMIs[i], ref_base, umi_counts[i]['A'], umi_counts[i]['C'],
umi_counts[i]['G'], umi_counts[i]['T'], umi_counts[i]['N'],
';'.join([str(k[1]) for k in umi_insertions[i]]), ';'.join([str(k[0]) for k in umi_insertions[i]]),
';'.join([str(k[1]) for k in umi_deletions[i]]), ';'.join([str(k[0]) for k in umi_deletions[i]]),
str(rawdepth), str(round(ref_freq, 5))]
# add cosmic info
if chromo in cosmic and str(pos+1) in cosmic[chromo]:
line.extend([cosmic[chromo][str(pos+1)]['gene_name'],
cosmic[chromo][str(pos+1)]['cosmic_id'],
cosmic[chromo][str(pos+1)]['description'],
cosmic[chromo][str(pos+1)]['mutation']])
else:
line.extend(['NA', 'NA', 'NA', 'NA'])
newfile.write('\t'.join(list(map(lambda x: str(x), line))) + '\n')
# close file after writing
newfile.close()
def merge_stats(L):
'''
(list) -> dict
Returns a dictionary with read counts over all chromosomes
Parameters
----------
- L (list): List of dictionaries with read counts for each chromosome
'''
# create a dict to count reads over all chromosomes
D = {"reads": 0, "assigned": 0, "assigned_empty": 0, "assigned_not_empty": 0}
for i in L:
for j in D.keys():
D[j] += i[j]
# compute unassigned read count
D["not_assigned"] = D['reads'] - D['assigned']
# add total number of reads in file
D['total'] = L[0]['total']
# add ratios
D['percent_assigned'] = round(D['assigned'] / D['reads'] * 100, 4) if D['reads'] != 0 else 0
D['percent_not_assigned'] = round(D['not_assigned'] / D['reads'] * 100, 4) if D['reads'] != 0 else 0
D['percent_empty_smmips'] = round(D['assigned_empty'] / D['assigned'] * 100, 4) if D['assigned'] != 0 else 0
return D
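# Worked example (hedged; counts are illustrative):
# >>> merge_stats([{'total': 20, 'reads': 10, 'assigned': 8, 'assigned_empty': 2, 'assigned_not_empty': 6},
# ...              {'total': 20, 'reads': 5, 'assigned': 4, 'assigned_empty': 1, 'assigned_not_empty': 3}])
# {'reads': 15, 'assigned': 12, 'assigned_empty': 3, 'assigned_not_empty': 9,
#  'not_assigned': 3, 'total': 20, 'percent_assigned': 80.0,
#  'percent_not_assigned': 20.0, 'percent_empty_smmips': 25.0}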
def merge_smmip_counts(L):
'''
(list) -> dict
Returns a dictionary with read counts supporting empty and non-empty smmips
Parameters
----------
- L (list): List of dictionaries with empty and not_empty read counts for each smmip
'''
# create a dict to record reads supporting empty and non-empty smmips for each smmip
D = {}
for i in L:
for smmip in i:
if smmip not in D:
D[smmip] = {'empty': 0, 'not_empty': 0}
D[smmip]['empty'] += i[smmip]['empty']
D[smmip]['not_empty'] += i[smmip]['not_empty']
return D
def merge_bams(outputfile, L):
'''
(str, list) -> None
Merge the input bams in list L to outputfile
Parameters
----------
- outputfile (str): Path to the merged bam
- L (list): List of input bam file names
'''
# merge bam files
args = ['-f', outputfile]
args.extend(L)
pysam.merge(*args)
|
the-stack_0_26564
|
from django.conf import settings
from shop.models import Product
from decimal import Decimal
from coupons.models import Coupon
class Cart(object):
def __init__(self, request):
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
self.coupon_id = self.session.get('coupon_id')
def add(self, product, quantity=1, update_quantity=False):
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0, 'price': str(product.price)}
if update_quantity:
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
self.save()
def save(self):
self.session.modified = True
def remove(self, product):
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
def clear(self):
del self.session[settings.CART_SESSION_ID]
self.save()
def __iter__(self):
product_ids = self.cart.keys()
cart = self.cart.copy()
products = Product.objects.filter(id__in=product_ids)
for product in products:
cart[str(product.id)]['product'] = product
for item in cart.values():
item['price'] = Decimal(item['price'])
item['total_price'] = item['quantity'] * item['price']
yield item
def get_total_price(self):
return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())
def __len__(self):
return sum(item['quantity'] for item in self.cart.values())
@property
def coupon(self):
if self.coupon_id:
return Coupon.objects.get(id=self.coupon_id)
return None
def get_discount(self):
if self.coupon:
return (self.coupon.discount / Decimal('100')) * self.get_total_price()
return Decimal('0')
def get_total_price_after_coupon(self):
return self.get_total_price() - self.get_discount()
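# Usage sketch (hedged): how this cart might be used in a hypothetical Django
# view; the view name and values are assumptions, not part of this module.
# def cart_add(request, product_id):
#     cart = Cart(request)
#     product = Product.objects.get(id=product_id)
#     cart.add(product, quantity=2, update_quantity=False)
#     total = cart.get_total_price_after_coupon()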
|
the-stack_0_26567
|
import pytest
from core import listdir, rename, Selection, changefn, FilenameCollisionError, prefixfn, suffixfn, insertfn, appendfn
def test_listdir(tmp_path):
fs, ds = {'f1', 'f2', 'f3'}, {'d1', 'd2', 'd3'}
for d in ds:
(tmp_path / d).mkdir()
for f in fs:
(tmp_path / f).write_text('')
assert set(listdir(tmp_path)) == fs
def test_rename(tmp_path):
for f in {'a', 'b', 'c', 'd', 'e', 'f'}:
(tmp_path / f).write_text('')
count = rename(tmp_path, [('a', 'A'), ('b', 'B'), ('c', 'c'), ('d', 'd')])
assert set(listdir(tmp_path)) == {'A', 'B', 'c', 'd', 'e', 'f'} and count == 2
@pytest.mark.parametrize("s, x, sx", [
('foo', 'd', 'dfoo'),
('foo', '', 'foo'),
])
def test_prefix(s, x, sx):
assert prefixfn(s, '', x) == sx
@pytest.mark.parametrize("s, x, sx", [
('foo', 'd', 'food'),
('foo', '', 'foo'),
])
def test_suffix(s, x, sx):
assert suffixfn(s, '', x) == sx
@pytest.mark.parametrize("s, ss, t, r", [
('abcd', 'bc', 'x', 'axbcd'),
('abcd', 'foo', 'x', 'abcd'),
('abcd', '', 'x', 'abcd'),
('abcd', 'bc', '', 'abcd'),
])
def test_insertfn(s, ss, t, r):
assert insertfn(s, ss, t) == r
@pytest.mark.parametrize("s, ss, t, r", [
('abcd', 'bc', 'x', 'axd'),
('abcd', 'foo', 'x', 'abcd'),
('abcd', '', 'x', 'abcd'),
('abcd', 'bc', '', 'ad'),
])
def test_changefn(s, ss, t, r):
assert changefn(s, ss, t) == r
@pytest.mark.parametrize("s, ss, t, r", [
('abcd', 'bc', 'x', 'abcxd'),
('abcd', 'foo', 'x', 'abcd'),
('abcd', '', 'x', 'abcd'),
('abcd', 'bc', '', 'abcd'),
])
def test_appendfn(s, ss, t, r):
assert appendfn(s, ss, t) == r
@pytest.fixture
def selection():
return Selection(['foo_123', 'foo_456', 'bar_123', 'baz_123'])
def test_everything_is_selected_by_default(selection):
assert selection.active() == [0, 1, 2, 3]
def test_selections_can_by_refined_using_patterns(selection):
selection.tighten('foo')
assert selection.active() == [0, 1]
def test_selections_can_be_progressively_refined(selection):
selection.tighten('123')
selection.tighten('ba')
assert selection.active() == [2, 3]
def test_invalid_patterns_result_in_empty_selections(selection):
selection.tighten('hello')
assert selection.active() == []
def test_selections_can_be_rolled_back(selection):
selection.tighten('123')
selection.tighten('ba')
selection.loosen()
assert selection.active() == [0, 2, 3]
def test_selections_can_be_resolved_to_filenames(selection):
selection.tighten('ba')
assert selection.peek() == [('bar_123', 'bar_123'), ('baz_123', 'baz_123')]
def test_selections_can_be_reset(selection):
selection.tighten('ba')
selection.tighten('z')
selection.clear()
assert selection.active() == [0, 1, 2, 3]
def test_selections_can_be_transformed(selection):
selection.tighten('foo')
selection.transform(lambda s: changefn(s, 'foo', 'FOO'))
assert selection.peek() == [('foo_123', 'FOO_123'), ('foo_456', 'FOO_456')]
def test_transformations_can_be_undone(selection):
selection.transform(lambda s: changefn(s, 'foo', 'FOO'))
selection.rollback()
assert [x for (_, x) in selection.peek()] == ['foo_123', 'foo_456', 'bar_123', 'baz_123']
def test_transformations_can_be_selectively_undone(selection):
selection.transform(lambda s: changefn(s, 'foo', 'FOO'))
selection.tighten('123')
selection.rollback()
selection.clear()
assert [x for (_, x) in selection.peek()] == ['foo_123', 'FOO_456', 'bar_123', 'baz_123']
def test_transformations_are_aborted_on_filename_collisions(selection):
with pytest.raises(FilenameCollisionError):
selection.transform(lambda s: changefn(s, '123', '456'))
assert [x for (_, x) in selection.peek()] == ['foo_123', 'foo_456', 'bar_123', 'baz_123']
def test_transformations_are_rolledback_only_for_modified_files(selection):
selection.transform(lambda s: changefn(s, 'foo', 'FOO'))
selection.tighten('foo')
with pytest.raises(FilenameCollisionError):
selection.transform(lambda s: changefn(s, 'FOO_123', 'bar_123'))
assert [x for (_, x) in selection.peek()] == ['FOO_123', 'FOO_456']
def test_file_cannot_be_renamed_to_the_empty_string(selection):
selection.transform(lambda s: changefn(s, 'foo_123', ''))
assert [x for (_, x) in selection.peek()] == ['foo_123', 'foo_456', 'bar_123', 'baz_123']
def test_uncommitted_filenames_shouldnot_be_considered_for_selection(selection):
selection.transform(lambda s: changefn(s, 'f', 'F'))
selection.tighten('f')
s1 = selection.peek()
selection.clear()
selection.tighten('F')
s2 = selection.peek()
assert s1 == [('foo_123', 'Foo_123'), ('foo_456', 'Foo_456')] and s2 == []
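# To run this test module (assuming pytest is installed and the `core` module
# under test is importable from the working directory):
#   pytest -q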
|
the-stack_0_26568
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Process data and Calc loss landscape."""
import os
import time
import json
import stat
import shutil
import numbers
from collections import defaultdict, namedtuple
from concurrent.futures import wait, ALL_COMPLETED, ProcessPoolExecutor
import numpy as np
from scipy import linalg, sparse
from mindspore import log as logger
from mindspore.common.tensor import Tensor
from mindspore.common.parameter import Parameter
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.train.summary_pb2 import LossLandscape
from mindspore.train.summary import SummaryRecord
from mindspore.train.summary.enums import PluginEnum
from mindspore.train.anf_ir_pb2 import DataType
from mindspore.train._utils import check_value_type, _make_directory
from mindspore.train.dataset_helper import DatasetHelper
from mindspore.nn.metrics import get_metrics
from mindspore import context
# if there is no path, you need to set to empty list
Points = namedtuple("Points", ["x", "y", "z"])
def nptype_to_prototype(np_value):
"""
Transform the np type to proto type.
Args:
np_value (Type): Numpy data type.
Returns:
Type, proto data type.
"""
np2pt_tbl = {
np.bool_: 'DT_BOOL',
np.int8: 'DT_INT8',
np.int16: 'DT_INT16',
np.int32: 'DT_INT32',
np.int64: 'DT_INT64',
np.uint8: 'DT_UINT8',
np.uint16: 'DT_UINT16',
np.uint32: 'DT_UINT32',
np.uint64: 'DT_UINT64',
np.float16: 'DT_FLOAT16',
np.float: 'DT_FLOAT64',
np.float32: 'DT_FLOAT32',
np.float64: 'DT_FLOAT64',
None: 'DT_UNDEFINED'
}
if np_value is None:
return None
np_type = np_value.dtype.type
proto = np2pt_tbl.get(np_type, None)
if proto is None:
raise TypeError("No match for proto data type.")
return proto
def fill_array_to_tensor(np_value, summary_tensor):
"""
Package the tensor summary.
Args:
np_value (Type): Summary data type.
summary_tensor (Tensor): The tensor of summary.
Returns:
Summary, return tensor summary content.
"""
# get tensor dtype
tensor_dtype = nptype_to_prototype(np_value)
summary_tensor.data_type = DataType.Value(tensor_dtype)
# get the value list
tensor_value_list = np_value.reshape(-1).tolist()
summary_tensor.float_data.extend(tensor_value_list)
# get the tensor dim
for vector in np_value.shape:
summary_tensor.dims.append(vector)
return summary_tensor
def transfer_tensor_to_tuple(inputs):
"""
If the input is a tensor, convert it to a tuple. If not, the output is unchanged.
"""
if isinstance(inputs, Tensor):
return (inputs,)
return inputs
class Landscape:
"""Return loss landscape."""
def __init__(self,
intervals,
decomposition,
landscape_points: Points,
convergence_point=None,
path_points=None):
self.landscape_points = landscape_points
self.decomposition = decomposition
self.intervals = intervals
self.num_samples = 2048
self.convergence_point = convergence_point
self.path_points = path_points
self.unit = 'step'
self.step_per_epoch = 1
def set_convergence_point(self, convergence_point: Points):
"""Set the convergence point."""
self.convergence_point = convergence_point
def transform_to_loss_landscape_msg(self, landscape_data):
"""Transform to loss landscape_msg."""
landscape_msg = LossLandscape()
# only save one dim in x and y
fill_array_to_tensor(landscape_data.landscape_points.x[0], landscape_msg.landscape.x)
fill_array_to_tensor(landscape_data.landscape_points.y[:, 0], landscape_msg.landscape.y)
fill_array_to_tensor(landscape_data.landscape_points.z, landscape_msg.landscape.z)
if landscape_data.path_points:
landscape_msg.loss_path.intervals.extend(landscape_data.intervals)
fill_array_to_tensor(landscape_data.path_points.x, landscape_msg.loss_path.points.x)
fill_array_to_tensor(landscape_data.path_points.y, landscape_msg.loss_path.points.y)
fill_array_to_tensor(landscape_data.path_points.z, landscape_msg.loss_path.points.z)
if landscape_data.convergence_point:
fill_array_to_tensor(landscape_data.convergence_point.x, landscape_msg.convergence_point.x)
fill_array_to_tensor(landscape_data.convergence_point.y, landscape_msg.convergence_point.y)
fill_array_to_tensor(landscape_data.convergence_point.z, landscape_msg.convergence_point.z)
landscape_msg.metadata.decomposition = landscape_data.decomposition
landscape_msg.metadata.unit = self.unit
landscape_msg.metadata.step_per_epoch = self.step_per_epoch
return landscape_msg
class SummaryLandscape:
"""
SummaryLandscape can help you to collect loss landscape information.
It can create landscape in PCA direction or random direction by calculating loss.
Note:
1. SummaryLandscape only supports Linux systems.
Args:
summary_dir (str): The path of summary is used to save the model weight,
metadata and other data required to create landscape.
Examples:
>>> import mindspore.nn as nn
>>> from mindspore import context
>>> from mindspore.train.callback import SummaryCollector, SummaryLandscape
>>> from mindspore import Model
>>> from mindspore.nn import Loss, Accuracy
>>>
>>> if __name__ == '__main__':
... # If the device_target is Ascend, set the device_target to "Ascend"
... context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
... # The detail of create_dataset method shown in model_zoo.official.cv.lenet.src.dataset.py
... ds_train = create_dataset(mnist_dataset_dir, 32)
... # The detail of LeNet5 shown in model_zoo.official.cv.lenet.src.lenet.py
... network = LeNet5(10)
... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
... net_opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
... model = Model(network, net_loss, net_opt, metrics={"Accuracy": Accuracy()})
... # Simple usage for collect landscape information:
... interval_1 = [1, 2, 3, 4, 5]
... summary_collector = SummaryCollector(summary_dir='./summary/lenet_interval_1',
... collect_specified_data={'collect_landscape':{"landscape_size": 4,
... "unit": "step",
... "create_landscape":{"train":True,
... "result":False},
... "num_samples": 2048,
... "intervals": [interval_1]}
... })
... model.train(1, ds_train, callbacks=[summary_collector], dataset_sink_mode=False)
...
... # Simple usage for visualization landscape:
... def callback_fn():
... network = LeNet5(10)
... net_loss = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
... metrics = {"Loss": Loss()}
... model = Model(network, net_loss, metrics=metrics)
... mnist_dataset_dir = '/path/to/mnist_dataset_directory'
... ds_eval = create_dataset(mnist_dataset_dir, 32)
... return model, network, ds_eval, metrics
...
... summary_landscape = SummaryLandscape('./summary/lenet_interval_1')
... # parameters of collect_landscape can be modified or unchanged
... summary_landscape.gen_landscapes_with_multi_process(callback_fn,
... collect_landscape={"landscape_size": 4,
... "create_landscape":{"train":False,
... "result":False},
... "num_samples": 2048,
... "intervals": [interval_1]},
... device_ids=[1])
"""
def __init__(self, summary_dir):
self._summary_dir = os.path.realpath(summary_dir)
self._ckpt_dir = os.path.join(self._summary_dir, 'ckpt_dir')
_make_directory(self._ckpt_dir)
# save the model params file, key is epoch, value is the ckpt file path
self._model_params_file_map = {}
self._epoch_group = defaultdict(list)
def _get_model_params(self, epochs):
"""Get the model params."""
parameters = []
for epoch in epochs:
file_path = self._model_params_file_map[str(epoch)]
parameters.append(load_checkpoint(file_path).values())
return parameters
def _create_epoch_group(self, intervals):
for i, interval in enumerate(intervals):
for j in interval:
self._epoch_group[i].append(j)
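# Illustration (hedged): with intervals = [[1, 2, 3], [4, 5, 6]] this builds
# self._epoch_group == {0: [1, 2, 3], 1: [4, 5, 6]}, i.e. one group of epochs
# per requested landscape interval.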
def clean_ckpt(self):
"""Clean the checkpoint."""
shutil.rmtree(self._ckpt_dir, ignore_errors=True)
def gen_landscapes_with_multi_process(self, callback_fn, collect_landscape=None,
device_ids=None, output=None):
"""
Use multiple processes to generate the landscape.
Args:
callback_fn (python function): A python function object. The user needs to write a function
that takes no input and returns the following.
- mindspore.train.Model: User's model object.
- mindspore.nn.Cell: User's network object.
- mindspore.dataset: User's dataset object used to create the loss landscape.
- mindspore.nn.Metrics: User's metrics object.
collect_landscape (Union[dict, None]): The parameters for creating the loss landscape have
the same meaning as the fields with the same name in SummaryCollector. Setting them here
allows users to freely modify the creation parameters. Default: None.
- landscape_size (int): Specifies the image resolution of the generated loss landscape.
For example, if it is set to 128, the resolution of the landscape is 128 * 128.
The calculation time increases as the resolution increases.
Default: 40. Optional values: between 3 and 256.
- create_landscape (dict): Selects how to create the loss landscape:
the training-process loss landscape (train) and the training-result loss landscape (result).
Default: {"train": True, "result": True}. Optional: True/False.
- num_samples (int): The number of samples used to create the loss landscape.
For example, for an image dataset you can set num_samples to 2048,
which means that 2048 images are used to create the loss landscape.
Default: 2048.
- intervals (List[List[int]]): Specifies the intervals in which the loss landscape is created.
For example, to create loss landscapes for two training processes,
covering epochs 1-5 and 6-10 respectively,
set [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]].
Note: Each interval must have at least three epochs.
device_ids (List(int)): Specifies which devices are used to create the loss landscape.
For example, [0, 1] means the loss landscape is created with device 0 and device 1.
Default: None.
output (str): Specifies the path to save the loss landscape.
Default: None. The default save path is the same as the summary file.
"""
output_path = os.path.realpath(output) if output is not None else self._summary_dir
summary_record = SummaryRecord(output_path)
self._check_device_ids(device_ids)
if collect_landscape is not None:
self._check_collect_landscape_data(collect_landscape)
json_path = os.path.join(self._ckpt_dir, 'train_metadata.json')
if not os.path.exists(json_path):
raise FileNotFoundError(f'json file {json_path} does not exist.')
with open(json_path, 'r') as file:
data = json.load(file)
for key, value in collect_landscape.items():
if key in data.keys():
data[key] = value
if "intervals" in collect_landscape.keys():
intervals = collect_landscape.get("intervals")
self._create_epoch_group(intervals)
data["epoch_group"] = self._epoch_group
with open(json_path, 'w') as file:
json.dump(data, file)
os.chmod(json_path, stat.S_IRUSR)
for interval, landscape in self._list_landscapes(callback_fn=callback_fn, device_ids=device_ids):
summary_record.add_value(PluginEnum.LANDSCAPE.value, f'landscape_{str(interval)}', landscape)
summary_record.record(0)
summary_record.flush()
summary_record.close()
def _list_landscapes(self, callback_fn, device_ids=None):
"""Create landscape with single device and list all landscape."""
json_path = os.path.join(self._ckpt_dir, 'train_metadata.json')
if not os.path.exists(json_path):
raise FileNotFoundError(f'train_metadata.json file {json_path} does not exist, '
f'please use SummaryCollector to collect the information needed to create it.')
with open(json_path, 'r') as file:
data = json.load(file)
self._check_json_file_data(data)
self._epoch_group = data['epoch_group']
self._model_params_file_map = data['model_params_file_map']
create_landscape = data['create_landscape']
landscape_size = data['landscape_size']
kwargs = dict(proz=0.2, landscape_size=landscape_size, device_ids=device_ids, callback_fn=callback_fn)
count = len(device_ids)
start = time.time()
with ProcessPoolExecutor(max_workers=count) as executor:
if count > 1:
futures = []
for device_id in device_ids:
future = executor.submit(self._set_context, device_id)
futures.append(future)
wait(futures, return_when=ALL_COMPLETED)
kwargs['executor'] = executor if count > 1 else None
if create_landscape['train']:
for i, epochs in enumerate(self._epoch_group.values()):
self._log_message(create_landscape, index=i, interval=epochs)
kwargs['epochs'] = epochs
mid_time = time.time()
landscape_data = self._create_landscape_by_pca(**kwargs)
logger.info("Create landscape end, use time: %s s." % (round(time.time() - mid_time, 6)))
landscape_data.unit = data['unit']
landscape_data.step_per_epoch = data['step_per_epoch']
landscape_data.num_samples = data['num_samples']
landscape_msg = landscape_data.transform_to_loss_landscape_msg(landscape_data)
yield [epochs[0], epochs[-1]], landscape_msg
if create_landscape['result']:
final_epochs = [list(self._epoch_group.values())[-1][-1]]
self._log_message(create_landscape, final_epochs=final_epochs)
kwargs['epochs'] = final_epochs
mid_time_2 = time.time()
landscape_data = self._create_landscape_by_random(**kwargs)
logger.info("Create landscape end, use time: %s s." % (round(time.time() - mid_time_2, 6)))
landscape_data.unit = data['unit']
landscape_data.step_per_epoch = data['step_per_epoch']
landscape_data.num_samples = data['num_samples']
landscape_msg = landscape_data.transform_to_loss_landscape_msg(landscape_data)
yield final_epochs, landscape_msg
logger.info("Total use time: %s s." % (round(time.time() - start, 6)))
def _log_message(self, create_landscape, index=None, interval=None, final_epochs=None):
"""Generate drawing information using log."""
if final_epochs is None:
if create_landscape['result']:
msg = f"Start to create the {index + 1}/{len(self._epoch_group) + 1} landscapes, " \
f"checkpoint is {interval}, decomposition is PCA."
else:
msg = f"Start to create the {index + 1}/{len(self._epoch_group)} landscapes, " \
f"checkpoint is {interval}, decomposition is PCA."
else:
if create_landscape['train']:
msg = f"Start to create the {len(self._epoch_group) + 1}/{len(self._epoch_group) + 1} landscapes, " \
f"checkpoint is {final_epochs}, decomposition is Random. "
else:
msg = f"Start to create the {1}/{1} landscapes, " \
f"checkpoint is {final_epochs}, decomposition is Random."
logger.info(msg)
@staticmethod
def _set_context(device_id):
"""Set context."""
context.set_context(device_id=device_id)
context.set_context(mode=context.GRAPH_MODE)
def _create_landscape_by_pca(self, epochs, proz, landscape_size, device_ids=None, callback_fn=None, executor=None):
"""Create landscape by PCA."""
multi_parameters = self._get_model_params(epochs)
param_matrixs = []
for parameters in multi_parameters:
parlis = []
for param in parameters:
if ("weight" in param.name or "bias" in param.name) and ("moment" not in param.name):
data = param.data.asnumpy().copy()
parlis = np.concatenate((parlis, data), axis=None)
else:
continue
param_matrixs.append(parlis)
param_matrixs = np.vstack(param_matrixs)
param_matrixs = param_matrixs[:-1] - param_matrixs[-1]
# Only 2 components are needed, as we reduce the high-dimensional parameters into 2D; the remaining axis is reserved for the loss value.
pca = _PCA(n_comps=2)
principal_components = pca.compute(param_matrixs.T)
v_ori, w_ori = np.array(principal_components[:, 0]), np.array(principal_components[:, -1])
final_params = list(multi_parameters[-1])
# Reshape PCA directions (covering all parameters) into the original shapes of the model parameters
v_ndarray = self._reshape_vector(v_ori, final_params)
w_ndarray = self._reshape_vector(w_ori, final_params)
# Reshape PCA directions (covering only weights and biases) into the original shapes of the model parameters
final_params_filtered = self._filter_weight_and_bias(final_params)
v_ndarray_filtered = self._reshape_vector(v_ori, final_params_filtered)
w_ndarray_filtered = self._reshape_vector(w_ori, final_params_filtered)
v_ndarray, w_ndarray = self._normalize_vector(final_params, v_ndarray, w_ndarray)
v_ndarray_filtered, w_ndarray_filtered = self._normalize_vector(final_params_filtered, v_ndarray_filtered,
w_ndarray_filtered)
# Flatten to a single vector and calculate alpha and beta
v_param = self._flat_ndarray(v_ndarray_filtered)
w_param = self._flat_ndarray(w_ndarray_filtered)
final_params_numpy = [param.data.asnumpy().copy() for param in final_params]
final_params_filtered_numpy = [param.data.asnumpy().copy() for param in final_params_filtered]
coefs = self._calc_coefs(multi_parameters, final_params_filtered_numpy, v_param, w_param)
# generate coordinates of loss landscape
coefs_x = coefs[:, 0][np.newaxis]
coefs_y = coefs[:, 1][np.newaxis]
boundaries_x = max(coefs_x[0]) - min(coefs_x[0])
boundaries_y = max(coefs_y[0]) - min(coefs_y[0])
x_axis = np.linspace(min(coefs_x[0]) - proz * boundaries_x, max(coefs_x[0]) +
proz * boundaries_x, landscape_size)
y_axis = np.linspace(min(coefs_y[0]) - proz * boundaries_y, max(coefs_y[0]) +
proz * boundaries_y, landscape_size)
x_points, y_points = np.meshgrid(x_axis, y_axis)
test_final_params = dict()
for param in final_params:
test_final_params[param.name] = param.data.asnumpy().copy()
if executor is not None:
coefs_parts, y_points_parts = [], []
count_per_parts = len(coefs) // len(device_ids)
start = 0
for i in range(len(device_ids)):
if i != len(device_ids) - 1:
coefs_parts.append(coefs[start:start + count_per_parts])
start = start + count_per_parts
else:
coefs_parts.append(coefs[start:])
count_per_parts = len(y_points) // len(device_ids)
start = 0
logger.info("Use multi process, device_id: %s." % (device_ids))
for i in range(len(device_ids)):
if i != len(device_ids) - 1:
y_points_parts.append(y_points[start:start + count_per_parts])
start = start + count_per_parts
else:
y_points_parts.append(y_points[start:])
futures = []
for i, _ in enumerate(device_ids):
future = executor.submit(self._cont_loss_wrapper, callback_fn, test_final_params, final_params_numpy,
v_ndarray, w_ndarray, x_points, y_points_parts[i], coefs=coefs_parts[i])
futures.append(future)
wait(futures, return_when=ALL_COMPLETED)
z_points, paths = [], []
for future in futures:
paths += future.result()[0]
z_points += future.result()[1]
else:
paths, z_points = self._cont_loss_wrapper(callback_fn, test_final_params, final_params_numpy,
v_ndarray, w_ndarray, x_points, y_points, coefs=coefs)
paths = np.array(paths)
landscape_points = Points(x_points, y_points, np.vstack(z_points))
path_points = Points(coefs_x[0], coefs_y[0], paths.T[0])
zero_index = int(np.argwhere(path_points.x == 0))
convergence_point = Points(np.array([0]), np.array([0]), np.array([path_points.z[zero_index]]))
landscape = Landscape(intervals=epochs, decomposition='PCA', landscape_points=landscape_points,
path_points=path_points, convergence_point=convergence_point)
return landscape
def _cont_loss_wrapper(self, callback_fn, test_final_params, final_params_numpy,
v_ndarray, w_ndarray, x_points, y_points, coefs=None):
"""Compute loss wrapper."""
model, network, valid_dataset, metrics = callback_fn()
with open(os.path.join(self._ckpt_dir, 'train_metadata.json'), 'r') as file:
data = json.load(file)
self._check_json_file_data(data)
num_samples = data['num_samples']
batch_size = valid_dataset.get_batch_size()
num_batches = num_samples // batch_size
valid_dataset = valid_dataset.take(num_batches)
paths, final_params = [], []
for (key, value) in test_final_params.items():
parameter = Parameter(Tensor(value), name=key, requires_grad=True)
final_params.append(parameter)
if coefs is not None:
for i, coef in enumerate(coefs):
loss_data = self._cont_loss(valid_dataset, network, model, metrics, final_params,
final_params_numpy, [coef[0]], coef[1], v_ndarray, w_ndarray, path=True)
paths.append(loss_data)
print("Drawing landscape path total progress is %s/%s, landscape path loss is %s."
% (i+1, len(coefs), loss_data[0]))
# Start to calculate the loss landscape
z_points = list()
# Compute loss landscape
for i, _ in enumerate(y_points):
print("Drawing landscape total progress: %s/%s." % (i+1, len(y_points)))
vals = self._cont_loss(valid_dataset, network, model, metrics, final_params,
final_params_numpy, x_points[i], y_points[i][0],
v_ndarray, w_ndarray)
z_points.append(vals)
return paths, z_points
def _create_landscape_by_random(self, epochs, proz, landscape_size, device_ids=None,
callback_fn=None, executor=None):
"""Create landscape by Random."""
multi_parameters = self._get_model_params(epochs)
final_params = list(multi_parameters[-1])
final_params_numpy = [param.data.asnumpy().copy() for param in final_params]
total_params = sum(np.size(p) for p in final_params_numpy)
v_rand = np.random.normal(size=total_params)
w_rand = np.random.normal(size=total_params)
# Reshape Random directions(include dimensions of all parameters) into original shape of Model parameters
v_ndarray = self._reshape_random_vector(v_rand, final_params_numpy)
w_ndarray = self._reshape_random_vector(w_rand, final_params_numpy)
v_ndarray, w_ndarray = self._normalize_vector(final_params, v_ndarray, w_ndarray)
boundaries_x, boundaries_y = 5, 5
x_axis = np.linspace(-proz * boundaries_x, proz * boundaries_x, landscape_size)
y_axis = np.linspace(-proz * boundaries_y, proz * boundaries_y, landscape_size)
x_points, y_points = np.meshgrid(x_axis, y_axis)
test_final_params = dict()
for param in final_params:
test_final_params[param.name] = param.data.asnumpy().copy()
if executor is not None:
logger.info("Use multi process, device_id: %s." % (device_ids))
y_points_parts = []
count_per_parts = len(y_points) // len(device_ids)
start = 0
for i in range(len(device_ids)):
if i != len(device_ids) - 1:
y_points_parts.append(y_points[start:start + count_per_parts])
start = start + count_per_parts
else:
y_points_parts.append(y_points[start:])
futures = []
for i in range(len(device_ids)):
future = executor.submit(self._cont_loss_wrapper, callback_fn, test_final_params, final_params_numpy,
v_ndarray, w_ndarray, x_points, y_points_parts[i])
futures.append(future)
wait(futures, return_when=ALL_COMPLETED)
z_points = []
for future in futures:
z_points += future.result()[1]
else:
_, z_points = self._cont_loss_wrapper(callback_fn, test_final_params, final_params_numpy,
v_ndarray, w_ndarray, x_points, y_points)
landscape_points = Points(x_points, y_points, np.vstack(z_points))
convergence_point = Points(np.array([x_axis[len(x_axis)//2]]), np.array([y_axis[len(y_axis)//2]]),
np.array([z_points[len(x_axis)//2][len(y_axis)//2]]))
landscape = Landscape(intervals=epochs, decomposition='Random', landscape_points=landscape_points,
convergence_point=convergence_point)
return landscape
@staticmethod
def _filter_weight_and_bias(parameters):
"""Filter the weight and bias of parameters."""
filter_params = []
for param in parameters:
if ('weight' not in param.name and 'bias' not in param.name) or ('moment' in param.name):
continue
filter_params.append(param)
return filter_params
@staticmethod
def _reshape_vector(vector, parameters):
"""Reshape vector into model shape."""
ndarray = list()
index = 0
for param in parameters:
data = param.data.asnumpy().copy()
if ("weight" not in param.name and "bias" not in param.name) or ("moment" in param.name):
ndarray.append(np.array(data, dtype=np.float32))
continue
vec_it = vector[index:(index + data.size)].reshape(data.shape)
ndarray.append(np.array(vec_it, dtype=np.float32))
index += data.size
return ndarray
@staticmethod
def _reshape_random_vector(vector, params_numpy):
""" Reshape random vector into model shape."""
ndarray = list()
index = 0
for param in params_numpy:
len_p = np.size(param)
p_size = np.shape(param)
vec_it = vector[index:(index + len_p)].reshape(p_size)
ndarray.append(np.array(vec_it, dtype=np.float32))
index += len_p
return ndarray
@staticmethod
def _normalize_vector(parameters, get_v, get_w):
"""
Normalizes the vectors spanning the 2D space, to make trajectories comparable to each other.
"""
for i, param in enumerate(parameters):
# Here as MindSpore ckpt has hyperparameters, we should skip them to make sure
# PCA calculation is correct.
data = param.data.asnumpy().copy()
if ("weight" in param.name or "bias" in param.name) and ("moment" not in param.name):
factor_v = np.linalg.norm(data) / np.linalg.norm(get_v[i])
factor_w = np.linalg.norm(data) / np.linalg.norm(get_w[i])
get_v[i] = get_v[i] * factor_v
get_w[i] = get_w[i] * factor_w
else:
get_v[i] = get_v[i] * 0
get_w[i] = get_w[i] * 0
return get_v, get_w
@staticmethod
def _flat_ndarray(ndarray_vector):
"""Concatenates a python array of numpy arrays into a single, flat numpy array."""
return np.concatenate([item.flatten() for item in ndarray_vector], axis=None)
def _calc_coefs(self, parameter_group, final_param_ndarray, v_vector, w_vector):
"""
Calculates the scale factors for plotting points
in the 2D space spanned by the vectors v and w.
"""
matris = [v_vector, w_vector]
matris = np.vstack(matris)
matris = matris.T
pas = self._flat_ndarray(final_param_ndarray)
coefs = list()
for parameters in parameter_group:
testi = list()
for param in parameters:
# Here as MindSpore ckpt has hyperparameters,
# we should skip them to make sure PCA calculation is correct
if ('weight' not in param.name and 'bias' not in param.name) or ('moment' in param.name):
continue
testi.append(param.data.asnumpy().copy())
st_vec = self._flat_ndarray(testi)
b_vec = st_vec - pas
# Use the least-squares method to solve the equation system and obtain alpha and beta.
coefs.append(np.hstack(np.linalg.lstsq(matris, b_vec, rcond=None)[0]))
return np.array(coefs)
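# Illustrative check of the least-squares step above (values are assumed, not from the source):
# with v_vector = [1, 0, 0], w_vector = [0, 1, 0] and b_vec = [0.3, -0.2, 0.0], lstsq recovers
# alpha = 0.3 and beta = -0.2, i.e. the coordinates of that checkpoint in the plane spanned by v and w.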
def _cont_loss(self, ds_eval, network, model, metrics, parameters,
final_params_numpy, alph, beta, get_v, get_w, path=False):
"""
Calculates the loss landscape based on vectors v and w (which can be principal components).
Changes the internal state of model. Executes model.
"""
logger.info("start to cont loss")
vals = list()
al_item = 0
for i, _ in enumerate(alph):
# calculate new parameters for model
parameters_dict = dict()
for j, param in enumerate(parameters):
parameters_dict[param.name] = self._change_parameter(j, param, final_params_numpy,
alph[al_item], beta,
get_v, get_w)
al_item += 1
# load parameters into model and calculate loss
load_param_into_net(network, parameters_dict)
del parameters_dict
loss = self._loss_compute(model, ds_eval, metrics)
if path is False:
print("Current local landscape progress is %s/%s, landscape loss is %s."
% (i+1, len(alph), loss['Loss']))
vals = np.append(vals, loss['Loss'])
return vals
@staticmethod
def _change_parameter(index, parameter, final_params_numpy, alpha, beta, get_v, get_w):
"""Function for changing parameter value with map and lambda."""
data = final_params_numpy[index]
data_target = data + alpha * get_v[index] + beta * get_w[index]
data_target = Tensor(data_target.astype(np.float32))
parameter.set_data(Tensor(data_target))
return parameter
def _loss_compute(self, model, data, metrics):
"""Compute loss."""
dataset_sink_mode = False
self._metric_fns = get_metrics(metrics)
for metric in self._metric_fns.values():
metric.clear()
network = model.train_network
dataset_helper = DatasetHelper(data, dataset_sink_mode)
network.set_train(True)
network.phase = 'train'
for inputs in dataset_helper:
inputs = transfer_tensor_to_tuple(inputs)
outputs = network(*inputs)
self._update_metrics(outputs)
metrics = self._get_metrics()
return metrics
def _update_metrics(self, outputs):
"""Update metrics local values."""
if isinstance(outputs, Tensor):
outputs = (outputs,)
if not isinstance(outputs, tuple):
raise ValueError(f"The argument 'outputs' should be tuple, but got {type(outputs)}. "
f"Modify 'output' to Tensor or tuple. ")
for metric in self._metric_fns.values():
metric.update(outputs[0])
def _get_metrics(self):
"""Get metrics local values."""
metrics = dict()
for key, value in self._metric_fns.items():
metrics[key] = value.eval()
return metrics
@staticmethod
def _check_landscape_size(landscape_size):
"""Check landscape size type and value."""
check_value_type('landscape_size', landscape_size, int)
# landscape_size should be between 3 and 256.
if landscape_size < 3 or landscape_size > 256:
raise ValueError(f'Landscape size should be between 3 and 256, but got: {landscape_size}')
@staticmethod
def _check_unit(unit):
"""Check unit type and value."""
check_value_type('unit', unit, str)
if unit not in ["step", "epoch"]:
raise ValueError(f'Unit should be "step" or "epoch", but got: {unit}')
@staticmethod
def _check_create_landscape(create_landscape):
"""Check create landscape type and value."""
check_value_type('create_landscape', create_landscape, dict)
for param, value in create_landscape.items():
if param not in ["train", "result"]:
raise ValueError(f'The key to create landscape should be in ["train", "result"], '
f'but got the: {param}')
if len(create_landscape) < 2:
raise ValueError(f'The key to create landscape should be train and result, '
f'but only got the: {param}')
check_value_type(param, value, bool)
@staticmethod
def _check_intervals(intervals):
"""Check intervals type and value."""
check_value_type('intervals', intervals, list)
for _, interval in enumerate(intervals):
check_value_type('each interval in intervals', interval, list)
# Each interval must have at least three epochs.
if len(interval) < 3:
raise ValueError(f'Each landscape interval should contain at least three epochs, '
f'but got: {interval}.')
for j in interval:
if not isinstance(j, int):
raise TypeError(f'Landscape interval value type should be int, '
f'but got: {type(j)}.')
@staticmethod
def _check_device_ids(device_ids):
"""Check device_ids type and value."""
check_value_type('device_ids', device_ids, list)
for i in device_ids:
if not isinstance(i, int):
raise TypeError(f'Landscape device_ids type should be int, '
f'but got: {type(i)}.')
# Each device_id should be between 0 and 7.
if i < 0 or i > 7:
raise ValueError(f'Landscape device_ids value should be between 0 and 7, but got {i}.')
def _check_collect_landscape_data(self, collect_landscape):
"""Check collect landscape data type and value."""
for param in collect_landscape.keys():
if param not in ["landscape_size", "unit", "num_samples", "create_landscape", "intervals"]:
raise ValueError(f'The keys of collect_landscape should be landscape_size, unit, num_samples, '
f'create_landscape or intervals, but got: {param}.')
if "landscape_size" in collect_landscape:
landscape_size = collect_landscape.get("landscape_size")
self._check_landscape_size(landscape_size)
if "unit" in collect_landscape:
unit = collect_landscape.get("unit")
self._check_unit(unit)
if "num_samples" in collect_landscape:
num_samples = collect_landscape.get("num_samples")
check_value_type("num_samples", num_samples, int)
if "create_landscape" in collect_landscape:
create_landscape = collect_landscape.get("create_landscape")
self._check_create_landscape(create_landscape)
if "intervals" in collect_landscape:
intervals = collect_landscape.get("intervals")
self._check_intervals(intervals)
def _check_json_file_data(self, json_file_data):
"""Check json file data."""
file_key = ["epoch_group", "model_params_file_map", "step_per_epoch", "unit",
"num_samples", "landscape_size", "create_landscape"]
for key in json_file_data.keys():
if key not in file_key:
raise ValueError(f'"train_metadata" json file should be {file_key}, but got the: {key}')
epoch_group = json_file_data["epoch_group"]
model_params_file_map = json_file_data["model_params_file_map"]
step_per_epoch = json_file_data["step_per_epoch"]
unit = json_file_data["unit"]
num_samples = json_file_data["num_samples"]
landscape_size = json_file_data["landscape_size"]
create_landscape = json_file_data["create_landscape"]
for _, epochs in enumerate(epoch_group.values()):
# Each epoch group must have at least three epochs.
if len(epochs) < 3:
raise ValueError(f'Each epoch group should contain at least three epochs, '
f'but got: {len(epochs)}.')
for epoch in epochs:
if str(epoch) not in model_params_file_map.keys():
raise ValueError(f'The model_params_file_map does not contain the checkpoint for epoch {epoch} in intervals.')
check_value_type('step_per_epoch', step_per_epoch, int)
self._check_landscape_size(landscape_size)
self._check_unit(unit)
check_value_type("num_samples", num_samples, int)
self._check_create_landscape(create_landscape)
class _PCA:
r"""
The internal class for computing PCA vectors.
.. math::
u, s, vt = svd(x - mean(x)),
u_i = u_i * s_i,
where :math:`mean` is the mean operator, :math:`svd` is the singular value decomposition operator.
:math:`u_i` is line :math:`i` of the :math:`u`, :math:`s_i` is column :math:`i` of the :math:`s`,
:math:`i` ranges from :math:`0` to :math:`n\_comps`.
Args:
n_comps (int): Number of principal components needed.
"""
def __init__(self, n_comps):
self._n_comps = n_comps
self._random_status = None
self._iterated_power = "auto"
self._n_oversamples = 10
def compute(self, x):
"""Main method for computing principal components."""
n_components = self._n_comps
# For small matrices (maximum dimension at most 500), the full SVD is computed.
if max(x.shape) <= 500:
u, s, _ = self._fit_few(x)
# When the dimension of x is large, truncated SVD is used for the calculation.
elif 1 <= n_components < 0.8 * min(x.shape):
u, s, _ = self._fit_much(x, n_components)
# A case of n_components in (0, 1)
else:
u, s, _ = self._fit_few(x)
for i, _ in enumerate(s):
# To prevent s from being equal to 0, a small fixed value is added.
# 1e-19 was found to be a good compromise for s.
if s[i] == 0:
s[i] = 1e-19
u = u[:, :self._n_comps]
u *= s[:self._n_comps]
return u
def _fit_few(self, x):
"""Compute principal components with full SVD on x, when dimension of x is few."""
mean_ = np.mean(x, axis=0)
x -= mean_
u, s, vt = linalg.svd(x, full_matrices=False)
u, vt = self._svd_turn(u, vt)
return u, s, vt
def _fit_much(self, x, n_components):
"""Compute principal components with truncated SVD on x, when dimension of x is much."""
random_state = self._check_random_status(self._random_status)
mean_ = np.mean(x, axis=0)
x -= mean_
u, s, vt = self._random_svd(x, n_components, n_oversamples=self._n_oversamples, random_state=random_state)
return u, s, vt
def _random_svd(self, m, n_components, n_oversamples=10, random_state="warn"):
"""Compute a truncated randomized SVD."""
n_random = n_components + n_oversamples
n_samples, n_features = m.shape
# 7 or 4 iterations were found to be a good compromise for randomized SVD.
n_iter = 7 if n_components < 0.1 * min(m.shape) else 4
transpose = n_samples < n_features
if transpose:
m = m.T
q = self._random_range_finder(m, size=n_random, n_iter=n_iter, random_state=random_state)
# Project m to the low dimensional space using the basis vectors (q vector).
b = self._safe_dot(q.T, m)
# Compute the svd on this matrix (b matrix)
uhat, s, vt = linalg.svd(b, full_matrices=False)
del b
u = np.dot(q, uhat)
if not transpose:
u, vt = self._svd_turn(u, vt)
else:
u, vt = self._svd_turn(u, vt, u_decision=False)
if transpose:
return vt[:n_components, :].T, s[:n_components], u[:, :n_components].T
return u[:, :n_components], s[:n_components], vt[:n_components, :]
def _random_range_finder(self, a, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A."""
random_state = self._check_random_status(random_state)
# Generate normal random vectors.
q = random_state.normal(size=(a.shape[1], size))
if a.dtype.kind == "f":
# Ensure f32 is retained as f32
q = q.astype(a.dtype, copy=False)
if n_iter <= 2:
power_iteration_normalizer = "none"
else:
power_iteration_normalizer = "LU"
# use power iterations with q to further compute the top singular vectors of a in q
for _ in range(n_iter):
if power_iteration_normalizer == "none":
q = self._safe_dot(a, q)
q = self._safe_dot(a.T, q)
elif power_iteration_normalizer == "LU":
q, _ = linalg.lu(self._safe_dot(a, q), permute_l=True)
q, _ = linalg.lu(self._safe_dot(a.T, q), permute_l=True)
# Extract an orthonormal basis via QR decomposition of the projection, sampling the range of a.
q, _ = linalg.qr(self._safe_dot(a, q), mode="economic")
return q
def _safe_dot(self, a, b):
"""Dot product that handle the matrix case correctly."""
if a.ndim > 2 or b.ndim > 2:
if sparse.issparse(b):
# Sparse is always 2 dimensional. Implies a is above 3 dimensional.
# [n, ..., o, p] @ [l, m] -> [n, ..., o, m]
a_2d = a.reshape(-1, a.shape[-1])
ret = a_2d @ b
ret = ret.reshape(*a.shape[:-1], b.shape[1])
elif sparse.issparse(a):
# Sparse is always 2 dimensional. Implies b is above 3 dimensional.
# [l, m] @ [n, ..., o, p, q] -> [l, n, ..., o, q]
b_ = np.rollaxis(b, -2)
b_2d = b_.reshape((b.shape[-2], -1))
ret = a @ b_2d
ret = ret.reshape(a.shape[0], *b_.shape[1:])
else:
ret = np.dot(a, b)
else:
ret = a @ b
return ret
def _svd_turn(self, u, v, u_decision=True):
"""Confirm correction to ensure deterministic output from SVD."""
if u_decision:
# rows of v, columns of u
max_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_cols, range(u.shape[1])])
v *= signs[:, np.newaxis]
u *= signs
else:
# rows of u, columns of v
max_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[range(v.shape[0]), max_rows])
v *= signs[:, np.newaxis]
u *= signs
return u, v
def _check_random_status(self, seed):
"""Transform seed into a np.random.RandomState instance."""
if isinstance(seed, np.random.RandomState):
return seed
if seed is None or seed is np.random:
return np.random.RandomState()
if isinstance(seed, numbers.Integral):
return np.random.RandomState(seed)
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState instance" % seed
)
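if __name__ == '__main__':
    # Minimal sanity check of _PCA (an illustrative sketch, not part of the original module):
    # project a small random matrix onto its top-2 principal directions.
    demo_x = np.random.randn(100, 5)
    demo_components = _PCA(n_comps=2).compute(demo_x)
    print(demo_components.shape)  # expected: (100, 2)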
|
the-stack_0_26569
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
import pytest
from astropy.coordinates import SkyCoord
import astropy.units as u
from regions import CircleSkyRegion
from ...data import DataStore, ObservationTableSummary, ObservationSummary
from ...data import ObservationStats
from ...utils.testing import requires_data, requires_dependency, mpl_plot_check
from ...background import ReflectedRegionsBackgroundEstimator
@requires_data("gammapy-extra")
class TestObservationSummaryTable:
@classmethod
def setup_class(cls):
data_store = DataStore.from_dir("$GAMMAPY_EXTRA/datasets/hess-dl3-dr1/")
obs_table = data_store.obs_table
obs_table = obs_table[obs_table["TARGET_NAME"] == "Crab"]
target_pos = SkyCoord(83.633083, 22.0145, unit="deg")
cls.table_summary = ObservationTableSummary(obs_table, target_pos)
def test_str(self):
text = str(self.table_summary)
assert "Observation summary" in text
def test_offset(self):
offset = self.table_summary.offset
assert len(offset) == 4
assert_allclose(offset.degree.mean(), 1., rtol=0.01)
assert_allclose(offset.degree.std(), 0.5, rtol=0.01)
@requires_dependency("matplotlib")
def test_plot_zenith(self):
with mpl_plot_check():
self.table_summary.plot_zenith_distribution()
@requires_dependency("matplotlib")
def test_plot_offset(self):
with mpl_plot_check():
self.table_summary.plot_offset_distribution()
@requires_data("gammapy-extra")
@requires_dependency("scipy")
class TestObservationSummary:
"""
Test observation summary.
"""
@classmethod
def setup_class(cls):
datastore = DataStore.from_dir("$GAMMAPY_EXTRA/datasets/hess-dl3-dr1/")
obs_ids = [23523, 23526]
on_region = CircleSkyRegion(
SkyCoord(83.63 * u.deg, 22.01 * u.deg, frame="icrs"), 0.3 * u.deg
)
obs_stats_list = []
for obs_id in obs_ids:
obs = datastore.obs(obs_id)
bkg = ReflectedRegionsBackgroundEstimator(
on_region=on_region, obs_list=[obs]
)
bkg.run()
bg_estimate = bkg.result[0]
obs_stats = ObservationStats.from_obs(obs, bg_estimate)
obs_stats_list.append(obs_stats)
cls.obs_summary = ObservationSummary(obs_stats_list)
@pytest.mark.xfail
def test_results(self):
# TODO: add assertions on the result numbers
# from pprint import pprint
# pprint(self.obs_summary.__dict__)
assert 0
def test_obs_str(self):
text = str(self.obs_summary)
assert "Observation summary" in text
@requires_dependency("matplotlib")
def test_plot_significance(self):
with mpl_plot_check():
self.obs_summary.plot_significance_vs_livetime()
@requires_dependency("matplotlib")
def test_plot_excess(self):
with mpl_plot_check():
self.obs_summary.plot_excess_vs_livetime()
@requires_dependency("matplotlib")
def test_plot_background(self):
with mpl_plot_check():
self.obs_summary.plot_background_vs_livetime()
@requires_dependency("matplotlib")
def test_plot_gamma_rate(self):
with mpl_plot_check():
self.obs_summary.plot_gamma_rate()
@requires_dependency("matplotlib")
def test_plot_background_rate(self):
with mpl_plot_check():
self.obs_summary.plot_background_rate()
|
the-stack_0_26571
|
# -*- coding: utf-8 -*-
# Copyright 2018-2021 releng-tool
from releng_tool.tool.hg import HG
from releng_tool.util.log import err
from releng_tool.util.log import log
def extract(opts):
"""
Support extraction (checkout) of a Mercurial cache into a build directory.
With provided extraction options (``RelengExtractOptions``), the extraction
stage will be processed. A Mercurial extraction process will populate a
working tree based off the cached Mercurial repository acquired from the
fetch stage.
Args:
opts: the extraction options
Returns:
``True`` if the extraction stage is completed; ``False`` otherwise
"""
assert opts
cache_dir = opts.cache_dir
revision = opts.revision
work_dir = opts.work_dir
if not HG.exists():
err('unable to extract package; mercurial (hg) is not installed')
return None
log('checking out target revision into work tree')
if not HG.execute(['--verbose', 'clone', '--rev', revision,
cache_dir, work_dir],
cwd=work_dir):
err('unable to checkout revision')
return False
return True
|
the-stack_0_26574
|
# -*- coding: utf-8 -*-
import random
def sort_by_quick(array: list) -> list:
"""
Hoare's quicksort.
Idea: the array is split into two parts as follows. Place one cursor at the left boundary
of the array and the other at the right boundary, then move the cursors towards each other
until they cross. While moving the cursors, compare the current element values with the
"middle" element. Find the left current element that is greater than the "middle" and the
right current element that is smaller than the "middle" (i.e. elements that are "out of place")
and swap them.
Worst-case time complexity: O(n^2)
Average time complexity: O(n*log n)
Best-case time complexity: O(n*log n)
Space complexity: O(n)
(*) The sort is NOT stable.
:param array: the input array
:return array: the sorted input array
"""
if len(array) <= 1:
return array
else:
q = random.choice(array)
L = []
M = []
R = []
for elem in array:
if elem < q:
L.append(elem)
elif elem > q:
R.append(elem)
else:
M.append(elem)
return sort_by_quick(L) + M + sort_by_quick(R)
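if __name__ == "__main__":
    # Usage sketch (illustrative values, not part of the original file): sort a small list
    # and cross-check the result against Python's built-in sorted().
    sample = [5, 2, 9, 1, 5, 6]
    print(sort_by_quick(sample))  # [1, 2, 5, 5, 6, 9]
    assert sort_by_quick(sample) == sorted(sample)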
|
the-stack_0_26576
|
from enum import auto
from src.common.meta import CommonEnumMeta
class PipelineStages(CommonEnumMeta):
EXTRACT_FEATURES = auto()
FEATURE_SCALER = auto()
IMPUTER = auto()
RESAMPLER = auto()
MODEL = auto()
DENSE = auto()
FEATURE_SELECTION = auto()
def get_prefix(self):
return str(self) + "__"
class FeatureExtractionStages(CommonEnumMeta):
SELECT = auto()
FEATURES_FROM_DATA = auto()
LANGUAGE = auto()
VECTORIZER = auto()
WORD_EMBEDDINGS = auto()
SEQUENCE = auto()
INCREMENTAL_PREDICTIONS = auto()
|
the-stack_0_26579
|
import argparse
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import numpy as np
from textwrap import wrap
from rlo import experiment_result
from rlo import expr_sets
from rlo import plotting
from rlo import utils
def check_expansions(expansions):
min_time_left = None
for _budget, time_left, _cost in expansions:
assert min_time_left is None or time_left >= min_time_left - 1
min_time_left = min(
time_left, float("inf") if min_time_left is None else min_time_left
)
def plot_time_lefts_during_search_from_config(
config, events, phase="train", repetition=0, cmap="plasma"
):
if phase == "train":
expr_set = expr_sets.get_expression_set(config["train_exprs"])
search_alg = config["train_search"]
expr_key = "expr"
max_gnn = config["max_gnn_train"]
else:
expr_set = expr_sets.get_expression_set(config["train_exprs"]).union(
expr_sets.get_expression_set(
config["test_exprs"], take_defs=config.get("test_on_defs")
)
)
search_alg = config["eval_search"]
expr_key = "eval_expr"
max_gnn = config["max_gnn_eval"]
expressions = dict(expr_set.named_exprenvs())
try:
expert = expr_set.get_expert(config["rules"])
def sort_order(expr_name):
expr = expressions[expr_name]
return len(list(expert.optimize(expr)))
except (ValueError, NotImplementedError):
# cannot find an expert
sort_order = lambda expr_name: expr_name
events = [
e
for e in events
if e["event"] == "rollout_end_verbose"
and expr_key in e
and e["repetition"] == repetition
]
generations = sorted(set([e["generation"] for e in events]))
exprs = sorted(set([e[expr_key] for e in events]), key=sort_order)
if len(generations) * len(exprs) > 1000:
gen_every = int(np.ceil((len(generations) * len(exprs)) / 1000))
generations = [g for i, g in enumerate(generations) if (i % gen_every) == 0]
print(generations)
print(exprs)
fig_width = 8 * len(generations)
fig_height = 4 if len(exprs) == 1 else 2 * len(exprs)
# matplotlib has a maximum figure size of 2^16 dots in each direction
dpi = min(100, 0.999 * 2 ** 16 / max(fig_width, fig_height))
fig, axs = plt.subplots(
len(exprs),
len(generations),
figsize=(fig_width, fig_height),
dpi=dpi,
squeeze=False,
)
# Generations may be 0, 1, ..., NUM_GENERATIONS or 1, 2, ..., NUM_GENERATIONS
axs = [dict(zip(generations, axrs)) for axrs in axs]
for i, expr_name in enumerate(exprs):
original_cost = expressions[expr_name].cost()
best_cost = expr_set.best_cost_for_expression(
expr_name, config["rules"], config[f"simulation_depth_{phase}"] + 1
).cost
for generation in generations:
ax = axs[i][generation]
if generation == generations[0]:
ax.set_ylabel("{}\ntime_left".format(utils.format_expr(expr_name, 25)))
if i == 0:
ax.set_title("Generation {}".format(generation))
if i == len(exprs) - 1:
ax.set_xlabel("Remaining GNN budget")
expansions = [
e
for e in events
if e[expr_key] == expr_name and e["generation"] == generation
]
if len(expansions) == 0:
# skip if expression did not appear in the current generation
continue
expansions = utils.single_elem(expansions)["expansions"]
check_expansions(expansions)
gnn_evals, time_left, cost = zip(*expansions)
norm = Normalize(0.0, original_cost - best_cost, clip=True)
ax.scatter(
max_gnn - np.array(gnn_evals),
time_left,
c=original_cost - np.array(cost),
marker="x",
cmap=plt.get_cmap(cmap),
norm=norm,
linewidth=2,
)
ax.invert_xaxis()
ax.grid(True)
ax.set_ylim((0, config[f"simulation_depth_{phase}"]))
plt.tight_layout(rect=[0, 0, 1, (fig_height - 1.0) / fig_height])
fig.suptitle(
"\n".join(
wrap(
"Time left vs. remaining GNN budget, repetition={}, search={}, {} (cmap={})".format(
repetition, search_alg, plotting.config_suffix(config), cmap
),
80 * len(generations),
)
),
y=(fig_height - 0.5) / fig_height,
)
output_file = plotting.format_figure_filename(
config, "time_lefts_{}.png".format(phase)
)
plt.savefig(output_file, dpi="figure")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("run_id", type=str, help="A run ID")
parser.add_argument("--phase", choices=["train", "eval"], default="train")
parser.add_argument("--repetition", type=int, default=0, help="Repetition")
parser.add_argument("--cmap", type=str, default="plasma")
args = parser.parse_args()
config, events = experiment_result.load_config_events(args.run_id, verbosity=1)
plot_time_lefts_during_search_from_config(
config, events, args.phase, args.repetition, args.cmap
)
if __name__ == "__main__":
main()
|
the-stack_0_26580
|
"""Test user login and logout module."""
from unittest.mock import patch
from django.test import TestCase
from mspray.apps.main.views.user import get_form_users
FORM_DATA = {
"users": [
{
"first_name": "Paul",
"last_name": "Green",
"user": "paulG",
"role": "owner",
"is_org": False
},
{
"first_name": "Cynthia",
"last_name": "Sale",
"user": "CSale",
"role": "owner",
"is_org": False
},
{
"first_name": "Philip",
"last_name": "Khali",
"user": "PKhali",
"role": "dataentry",
"is_org": False
},
{
"first_name": "Mary",
"last_name": "Rose",
"user": "MRose",
"role": "owner",
"is_org": False
},
{
"first_name": "Lucy",
"last_name": "",
"user": "lucy",
"role": "readonly",
"is_org": False
},
{
"first_name": "Ken",
"last_name": "Larry",
"user": "larryK",
"role": "owner",
"is_org": False
},
{
"first_name": "Mitchelle",
"last_name": "Jones",
"user": "Mjones",
"role": "readonly",
"is_org": False
}]}
class TestUser(TestCase):
"""Test get_form_owners to retrieve form instance."""
@patch('mspray.apps.main.views.user.fetch_form')
def test_get_form_users(self, fetch_form_mock):
"""Test fetch form users."""
fetch_form_mock.return_value = FORM_DATA
form_id = 344713
result = [
'paulG', 'CSale', 'MRose', 'lucy', 'larryK', 'Mjones']
users = get_form_users(form_id)
fetch_form_mock.assert_called()
self.assertEqual(users, result)
|
the-stack_0_26583
|
"""
:copyright: © 2019 by the Lin team.
:license: MIT, see LICENSE for more details.
"""
from app.app import create_app
from tests.utils import get_token
app = create_app(environment='development')
def test_create():
with app.test_client() as c:
rv = c.post('/v1/book/', json={
'title': '论如何做单测',
'author': 'pedro',
'summary': '在写这章之前,笔者一直很踌躇,因为我并没有多年的开发经验,甚至是一年都没有。换言之,我还没有一个良好的软件开发习惯,没有一个标准的开发约束,如果你和我一样,那么请你一定要仔细阅读本小节,并且开始尝试认真,仔细的做单测,它将会让你受益匪浅。',
'image': 'https://img3.doubanio.com/lpic/s1470003.jpg'
})
json_data = rv.get_json()
print(json_data)
assert json_data['msg'] == '新建图书成功'  # "Book created successfully"
assert rv.status_code == 201
def test_update():
with app.test_client() as c:
rv = c.put('/v1/book/7', json={
'title': '论如何做单测',
'author': 'pedro & erik',
'summary': '在写这章之前,笔者一直很踌躇,因为我并没有多年的开发经验,甚至是一年都没有。换言之,我还没有一个良好的软件开发习惯,没有一个标准的开发约束,如果你和我一样',
'image': 'https://img3.doubanio.com/lpic/s1470003.jpg'
})
json_data = rv.get_json()
print(json_data)
assert json_data['msg'] == '更新图书成功'  # "Book updated successfully"
assert rv.status_code == 201
def test_delete():
with app.test_client() as c:
rv = c.delete('/v1/book/7', headers={
'Authorization': 'Bearer ' + get_token()
})
json_data = rv.get_json()
print(json_data)
assert json_data['msg'] == '删除图书成功'  # "Book deleted successfully"
assert rv.status_code == 201
def test_get_books():
with app.test_client() as c:
rv = c.get('/v1/book/')
json_data = rv.get_json()
print(json_data)
assert rv.status_code == 200
|
the-stack_0_26584
|
"""
Check wheel files for appropriate contents
Getting the right files into your wheel is tricky, and sometimes we mess up and
publish a wheel containing ``__pycache__`` directories or ``tests/``. Do we
have to manually check the contents of every wheel we build before uploading it
to PyPI? How about letting this program check for you? Just run
``check-wheel-contents`` on your wheel, and it'll fail and notify you if any of
several common errors & mistakes are detected. The errors are described in the
README, along with common causes and corresponding fixes.
Visit <https://github.com/jwodder/check-wheel-contents> for more information.
"""
__version__ = "0.4.0.dev1"
__author__ = "John Thorvald Wodder II"
__author_email__ = "[email protected]"
__license__ = "MIT"
__url__ = "https://github.com/jwodder/check-wheel-contents"
|
the-stack_0_26585
|
"""
Offsite Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import re
import logging
import warnings
from scrapy import signals
from scrapy.http import Request
from scrapy.utils.httpobj import urlparse_cached
logger = logging.getLogger(__name__)
class OffsiteMiddleware:
def __init__(self, stats):
self.stats = stats
@classmethod
def from_crawler(cls, crawler):
o = cls(crawler.stats)
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
async def process_spider_output(self, response, result, spider):
async for x in result:
if isinstance(x, Request):
if x.dont_filter or self.should_follow(x, spider):
yield x
else:
domain = urlparse_cached(x).hostname
if domain and domain not in self.domains_seen:
self.domains_seen.add(domain)
logger.debug(
"Filtered offsite request to %(domain)r: %(request)s",
{'domain': domain, 'request': x}, extra={'spider': spider})
self.stats.inc_value('offsite/domains', spider=spider)
self.stats.inc_value('offsite/filtered', spider=spider)
else:
yield x
def should_follow(self, request, spider):
regex = self.host_regex
# hostname can be None for wrong urls (like javascript links)
host = urlparse_cached(request).hostname or ''
return bool(regex.search(host))
def get_host_regex(self, spider):
"""Override this method to implement a different offsite policy"""
allowed_domains = getattr(spider, 'allowed_domains', None)
if not allowed_domains:
return re.compile('') # allow all by default
url_pattern = re.compile(r"^https?://.*$")
port_pattern = re.compile(r":\d+$")
domains = []
for domain in allowed_domains:
if domain is None:
continue
elif url_pattern.match(domain):
message = ("allowed_domains accepts only domains, not URLs. "
f"Ignoring URL entry {domain} in allowed_domains.")
warnings.warn(message, URLWarning)
elif port_pattern.search(domain):
message = ("allowed_domains accepts only domains without ports. "
f"Ignoring entry {domain} in allowed_domains.")
warnings.warn(message, PortWarning)
else:
domains.append(re.escape(domain))
regex = fr'^(.*\.)?({"|".join(domains)})$'
return re.compile(regex)
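# Example (illustrative, not from the source): allowed_domains = ['example.com', 'example.org']
# yields the pattern r'^(.*\.)?(example\.com|example\.org)$', which matches 'example.com' and
# 'docs.example.com' but rejects 'notexample.com'; URL entries and entries with ports are
# skipped with a warning before the pattern is built.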
def spider_opened(self, spider):
self.host_regex = self.get_host_regex(spider)
self.domains_seen = set()
class URLWarning(Warning):
pass
class PortWarning(Warning):
pass
|
the-stack_0_26586
|
import torch
import numpy as np
import random
import os
import pickle
command_list = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go']
def Apply_cmvn(inputs): # apply cepstral mean and variance normalization
batch_size, time, dim = inputs.shape
mu = torch.mean(inputs, dim=1).repeat(1, time).reshape(batch_size, time, dim)
sigma = torch.pow(torch.mean(torch.pow(inputs, 2), dim=1).repeat(1, time).reshape(batch_size, time, dim) - torch.pow(mu, 2), 0.5)
return (inputs - mu) / sigma
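# Usage sketch (illustrative shapes, not part of the original file): given a batch of MFCC
# features with shape (batch, time, dim), Apply_cmvn standardizes each utterance along the time axis:
#   feats = torch.randn(8, 100, 13)
#   normed = Apply_cmvn(feats)  # same shape, zero mean / unit variance per utterance and coefficient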
def insert_index_descending_order(query, num_list):
matching_list = list(filter(lambda x: x < query, num_list))  # equivalent to [x for x in num_list if x < query]
if len(matching_list) == 0:
return len(num_list)
else:
return num_list.index(matching_list[0])
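# Example (illustrative): insert_index_descending_order(5, [9, 7, 3]) returns 2, i.e.
# inserting 5 at index 2 keeps the length list sorted in descending order.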
mfcc_dict = None
def Batch_generator(mfcc_root, dataset, batch_size): # data batch generator
global mfcc_dict
if mfcc_dict is None:
with open(mfcc_root+'/mfcc.pkl', 'rb') as f:
mfcc_dict = pickle.load(f)
datalist_txt = open(dataset, 'r')
#OUTDIR = mfcc_root + '/'
datalist = datalist_txt.read().strip().split('\n')
shuffled_data = random.sample(datalist, len(datalist))
datalist_txt.close()
epoch = 1
while True:
data_batch = np.array([], dtype=np.float32)
label_batch = []
length_batch = []
MAX_LEN = 0
for i in range(batch_size):
sample = shuffled_data.pop() # pop data from shuffled dataset
label = sample.split('/')[0]
mfcc = mfcc_dict[sample]
MAX_LEN = len(mfcc) if MAX_LEN < len(mfcc) else MAX_LEN # find max len in a batch
index = insert_index_descending_order(len(mfcc), length_batch) # insert data to keep a descending length order (for the later pack_padded_sequence)
if i == 0:
data_batch = np.asarray([mfcc])
else:
data_batch = np.pad(data_batch, ((0, 0), (0, MAX_LEN - data_batch.shape[1]), (0, 0)), mode='constant', constant_values=0)
data_batch = np.insert(data_batch, index, np.pad(mfcc, ((0, MAX_LEN - len(mfcc)), (0, 0)), mode='constant', constant_values=0), axis=0)
label_batch.insert(index, command_list.index(label)) # add to current batch
length_batch.insert(index, len(mfcc))
data_batch = np.asarray(data_batch, dtype=np.float32) # format change
label_batch = np.asarray(label_batch, dtype=np.int64)
if len(shuffled_data) < batch_size: # if the remaining data (waiting to be popped into a batch) is not enough, extend it with a reshuffled copy
shuffled_data = random.sample(datalist, len(datalist)) + shuffled_data
epoch += 1
yield data_batch, label_batch, length_batch, epoch
|
the-stack_0_26587
|
from collections import defaultdict
from pathlib import Path
# DEBUG
from pprint import pprint
def parse_map(text_map):
orbits = text_map.split()
orbit_map = defaultdict(set)
for orbit in orbits:
parent, child = orbit.split(")")
orbit_map[parent].add(child)
return orbit_map
def count_orbits(orbit_map, graph=dict(), orbits=None, chain=list(), direct=0,
indirect=0):
if orbits is None:
orbits = orbit_map.get("COM")
if orbits is None:
return direct, indirect
for orbit in orbits:
direct = direct + 1
indirect = indirect + len(chain)
satellites = orbit_map.get(orbit)
if satellites is not None:
chain_orbit = chain + [orbit]
args = orbit_map, graph, satellites, chain_orbit, direct, indirect
graph, direct, indirect = count_orbits(*args)
else:
graph[orbit] = ["COM"] + chain
return graph, direct, indirect
def find_shortest_distance(graph):
you = graph.get("YOU")
san = graph.get("SAN")
if None in [you, san]:
return -1
closest_intersection = None
you_r = you[::-1]
san_r = san[::-1]
for orbit in you_r:
if orbit in san_r:
closest_intersection = orbit
break
you_i = you_r.index(closest_intersection)
san_i = san.index(closest_intersection)
path = you_r[1:you_i] + san[san_i:]
return len(path)
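# Worked example (based on the commented-out sample map in main below): parse_map/count_orbits
# give 54 total orbits for that map, and find_shortest_distance returns 4, the number of
# orbital transfers needed to move YOU next to SAN.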
if __name__ == "__main__":
orbit_map = parse_map(Path("../etc/aoc6.txt").read_text())
#orbit_map = parse_map("COM)B\nB)C\nC)D\nD)E\nE)F\nB)G\nG)H\nD)I\nE)J\nJ)K\nK)L\nK)YOU\nI)SAN")
graph, direct_orbits, indirect_orbits = count_orbits(orbit_map)
total_orbits = direct_orbits + indirect_orbits
print("Part 1:", total_orbits)
distance = find_shortest_distance(graph)
print("Part 2:", distance)
|
the-stack_0_26590
|
from django.test import TestCase
from dojo.models import Test
from dojo.tools.harbor_vulnerability.parser import HarborVulnerabilityParser
class TestHarborVulnerabilityParser(TestCase):
def test_parse_without_file_has_no_findings(self):
parser = HarborVulnerabilityParser(None, Test())
self.assertEqual(0, len(parser.items))
def test_parse_file_with_no_vuln_has_no_findings(self):
testfile = open("dojo/unittests/scans/harbor_vulnerability/harbor-0-vuln.json")
parser = HarborVulnerabilityParser(testfile, Test())
self.assertEqual(0, len(parser.items))
# Sample with one vulnerability
# + also verify the parsed data of that finding
def test_parse_file_with_one_vuln_has_one_findings(self):
testfile = open("dojo/unittests/scans/harbor_vulnerability/harbor-1-vuln.json")
parser = HarborVulnerabilityParser(testfile, Test())
self.assertEqual(1, len(parser.items))
findings = parser.items[0]
self.assertEqual(findings.title, "CVE-YYYY-NNN - package (exploitable-version)")
self.assertEqual(
findings.description,
"This is a sample description for sample description from Harbor API.",
)
self.assertEqual(
findings.severity, "Informational"
) # Negligible is translated to Informational
# Sample with multiple vulnerabilities
def test_parse_file_with_multiple_vuln_has_multiple_findings(self):
testfile = open("dojo/unittests/scans/harbor_vulnerability/harbor-5-vuln.json")
parser = HarborVulnerabilityParser(testfile, Test())
self.assertEqual(5, len(parser.items))
|
the-stack_0_26591
|
import torch.nn as nn
from mmcv.cnn import ConvModule, build_conv_layer
from mmcv.runner import load_checkpoint
from mmedit.models.common import generation_init_weights
from mmedit.models.registry import COMPONENTS
from mmedit.utils import get_root_logger
@COMPONENTS.register_module()
class PatchDiscriminator(nn.Module):
"""A PatchGAN discriminator.
Args:
in_channels (int): Number of channels in input images.
base_channels (int): Number of channels at the first conv layer.
Default: 64.
num_conv (int): Number of stacked intermediate convs (excluding input
and output conv). Default: 3.
norm_cfg (dict): Config dict to build norm layer. Default:
`dict(type='BN')`.
init_cfg (dict): Config dict for initialization.
`type`: The name of our initialization method. Default: 'normal'.
`gain`: Scaling factor for normal, xavier and orthogonal.
Default: 0.02.
"""
def __init__(self,
in_channels,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='normal', gain=0.02)):
super(PatchDiscriminator, self).__init__()
assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but"
f'got {type(norm_cfg)}')
assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
# We use norm layers in the patch discriminator.
# Only for IN, use bias since it does not have affine parameters.
use_bias = norm_cfg['type'] == 'IN'
kernel_size = 4
padding = 1
# input layer
sequence = [
ConvModule(
in_channels=in_channels,
out_channels=base_channels,
kernel_size=kernel_size,
stride=2,
padding=padding,
bias=True,
norm_cfg=None,
act_cfg=dict(type='LeakyReLU', negative_slope=0.2))
]
# stacked intermediate layers,
# gradually increasing the number of filters
multiple_now = 1
multiple_prev = 1
for n in range(1, num_conv):
multiple_prev = multiple_now
multiple_now = min(2**n, 8)
sequence += [
ConvModule(
in_channels=base_channels * multiple_prev,
out_channels=base_channels * multiple_now,
kernel_size=kernel_size,
stride=2,
padding=padding,
bias=use_bias,
norm_cfg=norm_cfg,
act_cfg=dict(type='LeakyReLU', negative_slope=0.2))
]
multiple_prev = multiple_now
multiple_now = min(2**num_conv, 8)
sequence += [
ConvModule(
in_channels=base_channels * multiple_prev,
out_channels=base_channels * multiple_now,
kernel_size=kernel_size,
stride=1,
padding=padding,
bias=use_bias,
norm_cfg=norm_cfg,
act_cfg=dict(type='LeakyReLU', negative_slope=0.2))
]
# output one-channel prediction map
sequence += [
build_conv_layer(
dict(type='Conv2d'),
base_channels * multiple_now,
1,
kernel_size=kernel_size,
stride=1,
padding=padding)
]
self.model = nn.Sequential(*sequence)
self.init_type = 'normal' if init_cfg is None else init_cfg.get(
'type', 'normal')
self.init_gain = 0.02 if init_cfg is None else init_cfg.get(
'gain', 0.02)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
return self.model(x)
def init_weights(self, pretrained=None):
"""Initialize weights for the model.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Default: None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
generation_init_weights(
self, init_type=self.init_type, init_gain=self.init_gain)
else:
raise TypeError("'pretrained' must be a str or None. "
f'But received {type(pretrained)}.')
|
the-stack_0_26598
|
TRANSACTION_CREATE_REAL = 1
TRANSACTION_CREATE_FAKE = 2
TRANSACTION_PARSE = 3
INS_NONE = 0x00
INS_RESET = 0x02
INS_GET_KEY = 0x20
INS_PUT_KEY = 0x22
INS_GET_CHACHA8_PREKEY = 0x24
INS_VERIFY_KEY = 0x26
INS_MANAGE_SEEDWORDS = 0x28
INS_SECRET_KEY_TO_PUBLIC_KEY = 0x30
INS_GEN_KEY_DERIVATION = 0x32
INS_DERIVATION_TO_SCALAR = 0x34
INS_DERIVE_PUBLIC_KEY = 0x36
INS_DERIVE_SECRET_KEY = 0x38
INS_GEN_KEY_IMAGE = 0x3A
INS_SECRET_KEY_ADD = 0x3C
INS_SECRET_KEY_SUB = 0x3E
INS_GENERATE_KEYPAIR = 0x40
INS_SECRET_SCAL_MUL_KEY = 0x42
INS_SECRET_SCAL_MUL_BASE = 0x44
INS_DERIVE_SUBADDRESS_PUBLIC_KEY = 0x46
INS_GET_SUBADDRESS = 0x48
INS_GET_SUBADDRESS_SPEND_PUBLIC_KEY = 0x4A
INS_GET_SUBADDRESS_SECRET_KEY = 0x4C
INS_OPEN_TX = 0x70
INS_SET_SIGNATURE_MODE = 0x72
INS_GET_ADDITIONAL_KEY = 0x74
INS_STEALTH = 0x76
INS_BLIND = 0x78
INS_UNBLIND = 0x7A
INS_VALIDATE = 0x7C
INS_MLSAG = 0x7E
INS_CLOSE_TX = 0x80
INS_GET_RESPONSE = 0xC0
IN_OPTION_MASK = 0x000000FF
OUT_OPTION_MASK = 0x0000FF00
IN_OPTION_MORE_COMMAND = 0x00000080
SW_OK = 0x9000
SW_ALGORITHM_UNSUPPORTED = 0x9484
SW_BYTES_REMAINING_00 = 0x6100
SW_WARNING_STATE_UNCHANGED = 0x6200
SW_STATE_TERMINATED = 0x6285
SW_MORE_DATA_AVAILABLE = 0x6310
SW_WRONG_LENGTH = 0x6700
SW_LOGICAL_CHANNEL_NOT_SUPPORTED = 0x6881
SW_SECURE_MESSAGING_NOT_SUPPORTED = 0x6882
SW_LAST_COMMAND_EXPECTED = 0x6883
SW_COMMAND_CHAINING_NOT_SUPPORTED = 0x6884
SW_SECURITY_LOAD_KEY = 0x6900
SW_SECURITY_COMMITMENT_CONTROL = 0x6911
SW_SECURITY_AMOUNT_CHAIN_CONTROL = 0x6912
SW_SECURITY_COMMITMENT_CHAIN_CONTROL = 0x6913
SW_SECURITY_STATUS_NOT_SATISFIED = 0x6982
SW_FILE_INVALID = 0x6983
SW_PIN_BLOCKED = 0x6983
SW_DATA_INVALID = 0x6984
SW_CONDITIONS_NOT_SATISFIED = 0x6985
SW_COMMAND_NOT_ALLOWED = 0x6986
SW_APPLET_SELECT_FAILED = 0x6999
SW_WRONG_DATA = 0x6A80
SW_FUNC_NOT_SUPPORTED = 0x6A81
SW_FILE_NOT_FOUND = 0x6A82
SW_RECORD_NOT_FOUND = 0x6A83
SW_FILE_FULL = 0x6A84
SW_INCORRECT_P1P2 = 0x6A86
SW_REFERENCED_DATA_NOT_FOUND = 0x6A88
SW_WRONG_P1P2 = 0x6B00
SW_CORRECT_LENGTH_00 = 0x6C00
SW_INS_NOT_SUPPORTED = 0x6D00
SW_CLA_NOT_SUPPORTED = 0x6E00
SW_UNKNOWN = 0x6F00
|
the-stack_0_26602
|
import _plotly_utils.basevalidators
class HoverinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoverinfo", parent_name="image", **kwargs):
super(HoverinfoValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
extras=kwargs.pop("extras", ["all", "none", "skip"]),
flags=kwargs.pop("flags", ["x", "y", "z", "color", "name", "text"]),
**kwargs
)
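# Usage sketch (not part of the generated module): plotly instantiates this
# validator internally when validating `image.hoverinfo`; constructing it
# directly would look roughly like the following, assuming plotly is installed.
#   validator = HoverinfoValidator()
#   validator.validate_coerce("x+y+color")   # a valid flaglist combination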
|
the-stack_0_26604
|
import string
class cipher_vigenere:
def __init__(self):
self.alphabet = string.ascii_lowercase
def new_alph(self, char):
char = char.lower()
new_alph = self.alphabet[self.alphabet.index(char):] + self.alphabet[:self.alphabet.index(char)]
return new_alph
    def encrypt(self, cleartext, key):
        if key is None:
            raise ValueError('No key given')
        if not isinstance(key, str):
            if isinstance(key, int):
                raise ValueError('Key must be a string, not an int')
            else:
                raise ValueError('Key needs to be a string')
        if len(key) < len(cleartext):
            raise ValueError('Key shorter than cleartext - would lose data')
        ciphertext = ''
        for char in key:
            new = self.new_alph(char)
            for t in cleartext:
                if self.alphabet.count(t) == 1:
                    ciphertext += new[self.alphabet.index(t)]
                elif self.alphabet.count(t.lower()) == 1:
                    ciphertext += new[self.alphabet.index(t.lower())].upper()
                else:
                    # pass non-alphabetic characters through unchanged
                    ciphertext += t
                # consume exactly one cleartext character per key letter
                cleartext = cleartext[1:]
                break
        return ciphertext
    def decrypt(self, ciphertext, key):
        if key is None:
            raise ValueError('No key given')
        if not isinstance(key, str):
            if isinstance(key, int):
                raise ValueError('Key must be a string, not an int')
            else:
                raise ValueError('Key needs to be a string')
        if len(key) < len(ciphertext):
            raise ValueError('Key shorter than ciphertext - would lose data')
        cleartext = ''
        for char in key:
            new = self.new_alph(char)
            for t in ciphertext:
                if self.alphabet.count(t) == 1:
                    cleartext += self.alphabet[new.index(t)]
                elif self.alphabet.count(t.lower()) == 1:
                    cleartext += self.alphabet[new.index(t.lower())].upper()
                else:
                    # pass non-alphabetic characters through unchanged
                    cleartext += t
                # consume exactly one ciphertext character per key letter
                ciphertext = ciphertext[1:]
                break
        return cleartext
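# Example usage (not part of the original module); the key must be at least as
# long as the text, as enforced by encrypt()/decrypt() above.
if __name__ == '__main__':
    cipher = cipher_vigenere()
    secret = cipher.encrypt("attackatdawn", "lemonlemonlemon")
    print(secret)
    print(cipher.decrypt(secret, "lemonlemonlemon"))  # recovers "attackatdawn"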
|
the-stack_0_26605
|
import keras
import cv2
import numpy as np
import argparse
from glob import glob
import copy
# GPU config
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from keras import backend as K
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list="0"
sess = tf.Session(config=config)
K.set_session(sess)
# network
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Input, BatchNormalization, concatenate, AveragePooling2D, Add, SeparableConv2D
num_classes = 2
img_height, img_width = 128, 128
channel = 3
def Xception():
def Entry_flow(x, dim):
x = SeparableConv2D(dim, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = SeparableConv2D(dim, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = MaxPooling2D([3, 3], strides=2, padding='same')(x)
return x
def Middle_flow(x, dim=728):
x_sc = x
for _ in range(3):
x = Activation("relu")(x)
x = SeparableConv2D(dim, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = Add()([x, x_sc])
return x
inputs = Input((img_height, img_width, channel))
x = inputs
# Entry flow
x = Conv2D(32, [3, 3], strides=2, padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = Conv2D(64, [3, 3], padding='same', activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x_sc = Conv2D(128, [1,1], strides=2, padding="same", activation=None)(x)
x_sc = BatchNormalization()(x_sc)
x = Entry_flow(x, 128)
x = Add()([x, x_sc])
x_sc = Conv2D(256, [1,1], strides=2, padding="same", activation=None)(x_sc)
x_sc = BatchNormalization()(x_sc)
x = Activation("relu")(x)
x = Entry_flow(x, 256)
x = Add()([x, x_sc])
x = Activation("relu")(x)
x_sc = Conv2D(728, [1,1], strides=2, padding="same", activation=None)(x)
x_sc = BatchNormalization()(x_sc)
x = Activation("relu")(x)
x = Entry_flow(x, 728)
x = Add()([x, x_sc])
# Middle flow
for _ in range(8):
x = Middle_flow(x)
# Exit flow
x_sc = Conv2D(1024, [1, 1], strides=2, padding="same", activation=None)(x)
x_sc = BatchNormalization()(x_sc)
x = Activation("relu")(x)
x = SeparableConv2D(728, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = SeparableConv2D(1024, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = MaxPooling2D([3, 3], strides=2, padding='same')(x)
x = Add()([x, x_sc])
x = SeparableConv2D(1536, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = SeparableConv2D(2048, [3, 3], strides=1, padding='same', depth_multiplier=1, activation=None)(x)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = AveragePooling2D([img_height // 32, img_width // 32], strides=1, padding='valid')(x)
x = Flatten()(x)
x = Dense(num_classes, activation='softmax')(x)
model = Model(inputs=inputs, outputs=x)
return model
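# Quick sanity check (illustrative, not part of the original script): building
# the network and printing its layer summary; the final layer should be a
# Dense(num_classes) softmax over 128x128x3 inputs.
#   model = Xception()
#   model.summary()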
CLS = ['akahara', 'madara']
# get train data
def data_load(path, hf=False, vf=False, rot=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
x = x[..., ::-1]
xs.append(x)
t = [0 for _ in range(num_classes)]
for i, cls in enumerate(CLS):
if cls in path:
t[i] = 1
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
if rot != False:
angle = rot
scale = 1
# show
a_num = 360 // rot
w_num = np.ceil(np.sqrt(a_num))
h_num = np.ceil(a_num / w_num)
count = 1
#plt.subplot(h_num, w_num, count)
#plt.axis('off')
#plt.imshow(x)
#plt.title("angle=0")
while angle < 360:
_h, _w, _c = x.shape
max_side = max(_h, _w)
tmp = np.zeros((max_side, max_side, _c))
tx = int((max_side - _w) / 2)
ty = int((max_side - _h) / 2)
tmp[ty: ty+_h, tx: tx+_w] = x.copy()
M = cv2.getRotationMatrix2D((max_side/2, max_side/2), angle, scale)
_x = cv2.warpAffine(tmp, M, (max_side, max_side))
                    _x = _x[ty:ty+_h, tx:tx+_w]  # crop rows by height, columns by width
xs.append(_x)
ts.append(t)
paths.append(path)
# show
#count += 1
#plt.subplot(h_num, w_num, count)
#plt.imshow(_x)
#plt.axis('off')
#plt.title("angle={}".format(angle))
angle += rot
#plt.show()
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=np.int)
return xs, ts, paths
# train
def train():
model = Xception()
for layer in model.layers:
layer.trainable = True
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True),
metrics=['accuracy'])
xs, ts, paths = data_load('../Dataset/train/images', hf=True, vf=True, rot=1)
# training
mb = 32
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(500):
if mbi + mb > len(xs):
mb_ind = copy.copy(train_ind)[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = xs[mb_ind]
t = ts[mb_ind]
        loss, acc = model.train_on_batch(x=x, y=t)  # the output layer is unnamed, so pass targets directly
if (i+1) % 10 == 0:
print("iter >>", i+1, ", loss >>", loss_total, ', accuracy >>', acc)
model.save('model.h5')
# test
def test():
# load trained model
model = Xception()
model.load_weights('model.h5')
xs, ts, paths = data_load("../Dataset/test/images/")
for i in range(len(paths)):
x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(x, axis=0)
pred = model.predict_on_batch(x)[0]
print("in {}, predicted probabilities >> {}".format(path, pred))
def arg_parse():
parser = argparse.ArgumentParser(description='CNN implemented with Keras')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
|
the-stack_0_26606
|
import os
import dnaweaver as dw
from dnaweaver.biotools import gc_content
import matplotlib.pyplot as plt
oligo_com = dw.CommercialDnaOffer(
name="Oligo.com",
sequence_constraints=[dw.SequenceLengthConstraint(max_length=200)],
pricing=dw.PerBasepairPricing(0.10),
lead_time=7,
)
deluxe_dna_com = dw.CommercialDnaOffer(
name="DeluxeDNA.com",
sequence_constraints=[dw.SequenceLengthConstraint(max_length=10000)],
pricing=dw.PerBasepairPricing(0.25),
lead_time=7,
)
cheap_dna_com = dw.CommercialDnaOffer(
name="CheapDNA.com",
sequence_constraints=[
dw.SequenceLengthConstraint(max_length=4000),
dw.NoPatternConstraint(enzyme="AarI"),
dw.NoPatternConstraint(enzyme="BsaI"),
lambda seq: (0.4 < gc_content(seq) < 0.6),
],
pricing=dw.PerBasepairPricing(0.10),
lead_time=15,
)
# OLIGOS TO BLOCKS ASSEMBLY
oligo_assembly_station = dw.DnaAssemblyStation(
name="Oligo Assembly Station",
assembly_method=dw.OligoAssemblyMethod(
overhang_selector=dw.TmSegmentSelector(
min_size=15, max_size=25, min_tm=50, max_tm=70
),
min_segment_length=40,
max_segment_length=200,
sequence_constraints=[dw.SequenceLengthConstraint(max_length=1500)],
duration=8,
cost=2,
),
supplier=oligo_com,
coarse_grain=20,
fine_grain=False,
a_star_factor="auto",
memoize=True,
)
# BLOCKS TO CHUNKS ASSEMBLY
blocks_sources_comparator = dw.DnaSuppliersComparator(
name="bs_comparator",
suppliers=[oligo_assembly_station, cheap_dna_com, deluxe_dna_com],
memoize=True,
)
gibson_blocks_assembly_station = dw.DnaAssemblyStation(
name="Gibson Blocks Assembly",
assembly_method=dw.GibsonAssemblyMethod(
overhang_selector=dw.FixedSizeSegmentSelector(80),
min_segment_length=1000,
max_segment_length=4000,
duration=8,
cost=16,
),
supplier=blocks_sources_comparator,
coarse_grain=300,
fine_grain=False,
memoize=True,
a_star_factor="auto",
)
goldengate_blocks_assembly_station = dw.DnaAssemblyStation(
name="Golden Gate Blocks Assembly",
assembly_method=dw.GoldenGateAssemblyMethod(
enzyme="BsmBI",
wildcard_basepair="A",
min_segment_length=1000,
max_segment_length=4000,
duration=5,
cost=6,
),
supplier=blocks_sources_comparator,
coarse_grain=400,
fine_grain=False,
memoize=True,
a_star_factor="auto",
)
ECOLI_DB_PATH = os.path.join("..", "..", "data", "ecoli_blast_db", "ecoli")
ecoli_genome = dw.PcrExtractionStation(
"E. coli Genome (PCR)",
primers_supplier=oligo_com,
homology_selector=dw.TmSegmentSelector(
min_size=18, max_size=22, min_tm=55, max_tm=65
),
blast_database=ECOLI_DB_PATH,
max_amplicon_length=10000,
extra_time=3,
extra_cost=1,
)
# CHUNKS TO MEGACHUNKS ASSEMBLY
chunks_assembly_station = dw.DnaAssemblyStation(
name="Chunks assembly (Gibson)",
assembly_method=dw.GibsonAssemblyMethod(
overhang_selector=dw.FixedSizeSegmentSelector(300),
min_segment_length=7000,
max_segment_length=25000,
duration=8,
),
supplier=dw.DnaSuppliersComparator(
[
ecoli_genome,
goldengate_blocks_assembly_station,
gibson_blocks_assembly_station,
deluxe_dna_com,
]
),
coarse_grain=1000,
fine_grain=None,
memoize=True,
a_star_factor="auto",
)
with open("50kb_sequence.txt", "r") as f:
sequence = f.read()
fig, axes = plt.subplots(1, 4, figsize=(16, 3), sharey=True)
chunks_assembly_station.prepare_network_on_sequence(sequence)
for ax, max_lead_time in zip(axes, [20, 25, 30, 35]):
quote = chunks_assembly_station.get_quote(
sequence, max_lead_time=max_lead_time - 1, with_assembly_plan=True
)
print("Computing plan for lead time under:", max_lead_time)
report = quote.to_assembly_plan_report(refine_fragments_locations=False)
report.plot_assembly_blocks(
ax=ax, parts_offset=0.1, legend=False, plot_top_assembly=False
)
ax.set_title(
"Best plan under %d days\n\n£%d, %d days"
% (max_lead_time, quote.price, quote.lead_time)
)
ax.set_ylim(top=1)
fig.savefig("different_max_lead_times.pdf")
print ("Done! See different_max_lead_times.pdf")
|
the-stack_0_26609
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.results import page_test_results
from telemetry.page import page
from telemetry.web_perf.metrics import layout
from collections import namedtuple
FakeEvent = namedtuple('Event', 'name, start, end')
class LayoutMetricUnitTest(unittest.TestCase):
def testAvgStddev(self):
results = page_test_results.PageTestResults()
results.WillRunPage(page.Page('file://blank.html'))
events = map(FakeEvent._make, [(name, 42, 43) for name in
layout.LayoutMetric.EVENTS])
layout.LayoutMetric()._AddResults(events, results)
expected = set()
for name in layout.LayoutMetric.EVENTS.itervalues():
expected.add((name + '_avg', 1))
expected.add((name + '_stddev', 0))
actual = set((value.name, value.value) for value in
results.current_page_run.values)
self.assertEquals(expected, actual)
|
the-stack_0_26613
|
from dataclasses import dataclass
from math import prod
from typing import List
import aocd
from . import aoc_year
from loguru import logger
aoc_day = 2
@dataclass
class Box:
l: int
w: int
h: int
def paper_required(self):
sides = [self.l * self.w, self.w * self.h, self.h * self.l]
return sum(2 * s for s in sides) + min(sides)
def ribbon_required(self):
sorted_sides = sorted([self.l, self.w, self.h])
ribbon = 2 * sorted_sides[0] + 2 * sorted_sides[1]
bow = prod(sorted_sides)
return ribbon + bow
def preprocess():
dimensions = []
for line in aocd.get_data(day=aoc_day, year=aoc_year).splitlines():
l, w, h = line.split("x")
dimensions.append(Box(l=int(l), w=int(w), h=int(h)))
return dimensions
def part1(measurements: List[Box]):
return str(sum(b.paper_required() for b in measurements))
def part2(measurements: List[Box]):
return str(sum(b.ribbon_required() for b in measurements))
tests = [
(
"""2x3x4
1x1x10
""",
34 + 14,
part2,
),
]
def test(start: int = 0, finish: int = len(tests)):
for i, t in enumerate(tests[start:finish]):
def gd(*args, **kwargs):
return t[0]
aocd.get_data = gd
result = t[2](preprocess())
if result != f"{t[1]}":
logger.error(f"Test {start + i + 1} failed: got {result}, expected {t[1]}")
break
else:
logger.success(f"Test {start + i + 1}: {t[1]}")
if __name__ == "__main__":
test()
|
the-stack_0_26614
|
#!/usr/bin/env python
from bs4 import BeautifulSoup
from bs4 import Tag
from collections import OrderedDict, defaultdict
import copy
from http import client
import gettext
import hashlib
import json
from le_utils.constants import licenses, content_kinds, file_formats
import logging
import os
from pathlib import Path
import re
import requests
from ricecooker.classes.licenses import get_license
from ricecooker.chefs import JsonTreeChef
from ricecooker.utils.caching import CacheForeverHeuristic, FileCache, CacheControlAdapter
from ricecooker.utils import downloader, html_writer
from ricecooker.utils.jsontrees import write_tree_to_json_tree, SUBTITLES_FILE
import sys
import time
from urllib.error import URLError
from urllib.parse import urljoin, urlencode
from utils import save_thumbnail, if_file_exists, load_tree
from utils import if_dir_exists, get_name_from_url, get_name_from_url_no_ext
from utils import build_path, remove_links, remove_iframes, check_shorter_url
from utils import get_level_map, get_node_from_channel
import urllib.parse as urlparse
import youtube_dl
# Additional Constants
################################################################################
LOGGER = logging.getLogger()
__logging_handler = logging.StreamHandler()
LOGGER.addHandler(__logging_handler)
LOGGER.setLevel(logging.INFO)
# BASE_URL is used to identify when a resource is owned by TESS-India
BASE_URL = "http://www.tess-india.edu.in/learning-materials"
# If False then no videos are downloaded (useful for debugging purposes)
DOWNLOAD_VIDEOS = True
# time.sleep between requests, for debugging purposes; it helps when checking log messages
TIME_SLEEP = .8
DATA_DIR = "chefdata"
COPYRIGHT_HOLDER = "The Open University"
# Curricular units with their lessons
CURRICULAR_UNITS_MAP = defaultdict(OrderedDict)
# Lessons related to curricular units
LESSONS_CURRICULAR_MAP = defaultdict(set)
# webcache
###############################################################
sess = requests.Session()
cache = FileCache('.webcache')
basic_adapter = CacheControlAdapter(cache=cache)
forever_adapter = CacheControlAdapter(heuristic=CacheForeverHeuristic(), cache=cache)
sess.mount('http://', basic_adapter)
sess.mount(BASE_URL, forever_adapter)
# Main Scraping Method
################################################################################
def test():
"""
Test individual resources
"""
url = "http://www.tess-india.edu.in/learning-materials?course_tid=136&subject_tid=181&educational_level_tid=226"
global channel_tree
channel_tree = dict(
source_domain=TESSIndiaChef.HOSTNAME,
source_id='tessindia',
title='TESSIndia',
description="""TESS-India is led by The Open University and Save The Children India, funded by UK Aid it is a multilingual teacher professional development programme whose aim is to support India’s national educational policy by enhancing the classroom practice of primary and secondary school teachers through the provision of freely available, adaptable Open Educational Resources (OER)."""[:400], #400 UPPER LIMIT characters allowed
thumbnail=None,
language="en",
children=[],
license=TESSIndiaChef.LICENSE,
)
try:
resource = Resource(source_id=url,
lang="en",
state="All India - English",
subject="English",
level="Elementary")
resource.scrape()
resource.to_tree(channel_tree)
except requests.exceptions.HTTPError as e:
LOGGER.info("Error: {}".format(e))
return channel_tree
def test_lesson():
lesson_url = "http://www.tess-india.edu.in/learning-resource-1001"
lesson = Lesson(name="test", key_resource_id=lesson_url, lang="en",
extra_resources=None, path=["A", "B"])
lesson.download()
lesson_node = lesson.to_node()
print(lesson_node)
class ResourceBrowser(object):
def __init__(self, resource_url):
self.resource_url = resource_url
def build_url(self, course_tid=None, subject_tid=None, educational_level_tid=None):
if educational_level_tid is not None:
params = dict(course_tid=course_tid, subject_tid=subject_tid,
educational_level_tid=educational_level_tid)
else:
params = dict(course_tid=course_tid, subject_tid=subject_tid)
url_parts = list(urlparse.urlparse(self.resource_url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update(params)
url_parts[4] = urlencode(query)
return urlparse.urlunparse(url_parts)
def get_total_items(self, text):
string = re.search(r"\d+\-\d+ of \d+", text).group()
return int(string.split("of")[-1].strip())
def run(self, limit_page=1, page_number=1):
total_items = None
counter = 0
try:
page_contents = downloader.read(self.resource_url, loadjs=False)
except requests.exceptions.HTTPError as e:
LOGGER.info("Error: {}".format(e))
else:
page = BeautifulSoup(page_contents, 'html.parser')
states = page.find("div", class_=["lm-filter-course"])
states_tree = self.get_state_lang(states)
subjects = page.find("div", class_=["lm-filter-subject"])
subjects_tree = self.get_subjects(subjects)
levels = page.find("div", class_=["lm-filter-level"])
levels_tree = self.get_levels(levels)
pages_params = self.build_page_params(states_tree, subjects_tree, levels_tree)
for page_params in pages_params:
url = self.build_url(page_params["course_tid"],
page_params["subject_tid"],
page_params.get("educational_level_tid", None))
yield dict(url=url,
subject_name=page_params["subject_name"],
state_lang=page_params["state_lang"],
level_name=page_params.get("level_name", None))
LOGGER.info("CRAWLING : URL {}".format(url))
time.sleep(TIME_SLEEP)
def get_state_lang(self, items):
tree = {}
for state_data in items.findAll("button"):
tree[state_data["data-tid"]] = state_data.text.strip()
return tree
def get_subjects(self, items):
tree = {}
for subject_data in items.findAll("button"):
if subject_data["data-course"] == "all":
continue
tree.setdefault(subject_data["data-course"], {})
tree[subject_data["data-course"]][subject_data["data-tid"]] = (subject_data.text.strip(), bool(int(subject_data.get("data-hide-level", "0"))))
return tree
def get_levels(self, items):
tree = {}
for subject_data in items.findAll("button"):
tree.setdefault(subject_data["data-course"], {})
tree[subject_data["data-course"]][subject_data["data-tid"]] = subject_data.text.strip()
return tree
def build_page_params(self, states, subjects, levels):
pages = []#course_tid, subject_tid, educational_level_tid
for course_tid in subjects:
for subjects_tid in subjects[course_tid]:
subject_name = subjects[course_tid][subjects_tid][0]
not_has_levels = subjects[course_tid][subjects_tid][1]
info = {"course_tid": course_tid, "subject_tid": subjects_tid,
"state_lang": states[course_tid], "subject_name": subject_name}
if not_has_levels is False:
for level_tid in levels[course_tid]:
info_tmp = info.copy()
info_tmp["educational_level_tid"] = level_tid
info_tmp["level_name"] = levels[course_tid][level_tid]
pages.append(info_tmp)
else:
pages.append(info)
return pages
class Resource(object):
def __init__(self, source_id, lang="en", state=None, subject=None, level=None):
self.source_id = source_id
self.lang = lang
self.state = state
self.subject = subject
self.level = level
self.nodes = []
self.ids = set([])
def scrape(self):
page = download(self.source_id)
for material in page.findAll("div", class_=["node-learning-material"]):
resource = material.find(lambda tag: tag.name == "a" and tag.findParent("h2"))
if resource is not None:
lesson_name = resource.text
lesson_url = resource["href"]
else:
lesson_name = material.find("h2").text
lesson_url = material.attrs.get("about", "")
extra_resources = material.findAll(lambda tag: tag.name == "a" and \
tag.findParent("div", class_=["lmat-download"]))
extra_resources_urls = set([])
for extra_resource in extra_resources:
extra_resources_urls.add(extra_resource["href"])
if not lesson_url in self.ids:
lesson = Lesson(name=lesson_name, key_resource_id=lesson_url, lang=self.lang,
extra_resources=extra_resources_urls, path=[self.state, self.subject, self.level])
lesson.download()
lesson_node = lesson.to_node()
if len(lesson_node["children"]) > 0:
self.nodes.append(lesson_node)
self.ids.add(lesson_url)
def empty_state_node(self):
return dict(
kind=content_kinds.TOPIC,
source_id=self.state,
title=self.state,
description="",
license=None,
language=self.lang,
children=[]
)
def empty_subject_node(self):
return dict(
kind=content_kinds.TOPIC,
source_id=self.subject,
title=self.subject,
description="",
license=None,
language=self.lang,
children=[]
)
def empty_level_node(self):
return dict(
kind=content_kinds.TOPIC,
source_id=self.level,
title=self.level,
description="",
license=None,
language=self.lang,
children=[]
)
def build_tree(self, nodes, subtree=None, tree_level=0):
if tree_level == 0:
if subtree is None:
root = self.empty_state_node()
else:
root = subtree
subject = self.empty_subject_node()
if self.level is not None:
level = self.empty_level_node()
level["children"].extend(nodes)
subject["children"].append(level)
else:
subject["children"].extend(nodes)
root["children"].append(subject)
return root
elif tree_level == 1:
subject = subtree
if self.level is not None:
level = self.empty_level_node()
level["children"].extend(nodes)
subject["children"].append(level)
else:
subject["children"].extend(nodes)
elif tree_level == 2:
level = subtree
level["children"].extend(nodes)
def get_tree_level(self, channel_tree):
subtree = get_level_map(channel_tree, [self.state, self.subject, self.level])
level = 2
if subtree is None:
subtree = get_level_map(channel_tree, [self.state, self.subject])
level -= 1
if subtree is None:
subtree = get_level_map(channel_tree, [self.state])
level -= 1
return subtree, level
def to_tree(self, channel_tree):
subtree, tree_level = self.get_tree_level(channel_tree)
root = self.build_tree(self.nodes, subtree, tree_level=tree_level)
if subtree is None and root is not None:
channel_tree["children"].append(root)
class Lesson(object):
def __init__(self, name=None, key_resource_id=None, extra_resources=None,
path=None, lang="en"):
self.key_resource_id = urljoin(BASE_URL, key_resource_id.strip())
self.filename = hashlib.sha1(name.encode("utf-8")).hexdigest()
self.title = name if len(name) < 80 else name[:80]
self.path_levels = path
self.lang = lang
self.file = None
self.video = None
self.ids = set([])
LOGGER.info("Collecting: {}".format(self.key_resource_id))
LOGGER.info(" - Name: {}".format(self.title))
LOGGER.info(" - Lang: {}".format(self.lang))
self.html = HTMLLesson(source_id=self.key_resource_id, name=self.title,
lang=self.lang)
if self.path_levels[-1] is None:
self.base_path = build_path([DATA_DIR] + self.path_levels[:-1] + [self.filename])
else:
self.base_path = build_path([DATA_DIR] + self.path_levels + [self.filename])
if extra_resources is not None:
LOGGER.info(" - Extra resources: {}".format(len(extra_resources)))
self.set_extra_resources(extra_resources)
def set_extra_resources(self, extra_resources):
for resource in extra_resources:
LOGGER.info(" - Resource: {}".format(resource))
if resource.endswith(".pdf"):
self.file = File(resource, lang=self.lang, name=self.title)
elif resource.endswith(".doc") or resource.endswith(".docx"):
pass
else:
resource = urljoin(BASE_URL, resource.strip())
if resource != self.key_resource_id:
self.video = HTMLLesson(source_id=resource,
name=self.title + " - Videos", lang=self.lang)
def download(self):
self.html.scrape(self.base_path, name="index")
if self.file:
self.file.download(self.base_path)
if self.video:
self.video.scrape(self.base_path, name="video")
def to_node(self):
topic_node = dict(
kind=content_kinds.TOPIC,
source_id=self.key_resource_id,
title=self.title,
description="",
language=self.lang,
license=None,
children=[]
)
for html_node in self.html.to_nodes():
if html_node is not None and html_node["source_id"] not in self.ids:
topic_node["children"].append(html_node)
self.ids.add(html_node["source_id"])
if self.file is not None:
file_node = self.file.to_node()
if file_node is not None and file_node["source_id"] not in self.ids:
topic_node["children"].append(file_node)
self.ids.add(file_node["source_id"])
if self.video is not None:
videos_nodes = self.video.to_nodes()
for video_node in videos_nodes:
if video_node is not None and video_node["source_id"] not in self.ids:
topic_node["children"].append(video_node)
self.ids.add(video_node["source_id"])
return topic_node
class File(object):
def __init__(self, source_id, lang="en", lincese="", name=None):
self.filename = get_name_from_url(source_id)
self.source_id = urljoin(BASE_URL, source_id) if source_id.startswith("/") else source_id
self.filepath = None
self.lang = lang
self.name = "{}_{}".format(name, self.filename)
self.license = get_license(licenses.CC_BY_NC_SA, copyright_holder=COPYRIGHT_HOLDER).as_dict()
def download(self, base_path):
PDFS_DATA_DIR = build_path([base_path, 'pdfs'])
try:
response = sess.get(self.source_id)
content_type = response.headers.get('content-type')
if 'application/pdf' in content_type:
self.filepath = os.path.join(PDFS_DATA_DIR, self.filename)
with open(self.filepath, 'wb') as f:
for chunk in response.iter_content(10000):
f.write(chunk)
LOGGER.info(" - Get file: {}, node name: {}".format(self.filename, self.name))
except requests.exceptions.HTTPError as e:
LOGGER.info("Error: {}".format(e))
except requests.exceptions.ConnectionError:
            ### this is a weird error; it may be raised when the webpage
            ### is slow to respond to requested resources
            LOGGER.info("Connection error, waiting 3s before skipping this resource...")
            time.sleep(3)
except requests.exceptions.ReadTimeout as e:
LOGGER.info("Error: {}".format(e))
except requests.exceptions.TooManyRedirects as e:
LOGGER.info("Error: {}".format(e))
def to_node(self):
if self.filepath is not None:
node = dict(
kind=content_kinds.DOCUMENT,
source_id=self.source_id,
title=self.name,
description='',
files=[dict(
file_type=content_kinds.DOCUMENT,
path=self.filepath
)],
language=self.lang,
license=self.license)
return node
class HTMLLesson(object):
def __init__(self, source_id=None, lang="en", name=None):
self.source_id = source_id
self.filepath = None
self.name = name
self.lang = lang
self.menu = Menu(lang=self.lang, name=name)
self.license = get_license(licenses.CC_BY_NC_SA, copyright_holder=COPYRIGHT_HOLDER).as_dict()
def sections_to_menu(self):
page = download(self.source_id)
if page:
content = page.find("main", class_="content-main")
ul = content.find(lambda tag: tag.name == "ul" and tag.findParent("div", class_="content"))
self.menu.index_content = ul
href = None
for link in content.findAll("a"):
href = link.get("href", "")
links_class = link.get("class", [])
if href:# and "active" not in links_class:
self.menu.add_item(title=link.text, url=urljoin(self.source_id, href))
def scrape(self, base_path, name="htmlapp"):
self.filepath = "{path}/{name}.zip".format(path=base_path, name=name)
self.sections_to_menu()
self.menu.to_file(self.filepath, base_path)
def to_nodes(self):
if self.menu.is_valid:
menu_node = self.menu.to_nodes()
node = dict(
kind=content_kinds.HTML5,
source_id=self.source_id,
title=self.name,
description="",
thumbnail=None,
author="",
files=[dict(
file_type=content_kinds.HTML5,
path=self.filepath
)],
language=self.lang,
license=self.license)
return [node] + menu_node
else:
return []
class Menu(object):
def __init__(self, lang="en", name=None):
self.items = OrderedDict()
self.index_content = None
self.images = {}
self.pdfs_url = set([])
self.nodes = []
self.ids = set([])
self.is_valid = False
self.lang = lang
self.name = name
def build_index(self, directory="files/"):
items = iter(self.items.values())
if self.index_content is not None:
self.index_content["class"] = "sidebar-items"
for ul in self.index_content:
if hasattr(ul, 'findAll'):
for a in ul.findAll("a"):
item = next(items)
a["href"] = "{}{}".format(directory, item["filename"])
a["class"] = "sidebar-link"
else:
return
self.is_valid = True
return str(self.index_content)
def add_item(self, title=None, url=None):
filename = self.item_to_filename(title)
if url not in self.items:
content = self.get_sections_content(url)
self.items[url] = {"title": title, "filename": filename, "content": content}
def clean_content(self, content):
content.find("div", class_="addthis").decompose()
obj_tags = content.find_all("div", class_="oucontent-media")#oucontent-embedtemplate")
if obj_tags is not None:
for obj_tag in obj_tags:
obj_tag.decompose()
if content is not None:
for link in content.find_all("a"):
if "active" not in link.attrs.get("class", []):
link.replaceWithChildren()
def pager(self, content, index):
ul = content.find("ul", class_="pager")
first_page = ul.find(lambda tag: tag.name == "a" and tag.findParent("li", class_="pager-first"))
last_page = ul.find(lambda tag: tag.name == "a" and tag.findParent("li", class_="pager-last"))
previous = ul.find(lambda tag: tag.name == "a" and tag.findParent("li", class_="pager-previous"))
next = ul.find(lambda tag: tag.name == "a" and tag.findParent("li", class_="pager-next"))
if first_page is not None:
first_page["href"] = "../index.html"
items = list(self.items.values())
if last_page is not None:
last_page["href"] = items[-1]["filename"]
if previous is not None:
if index > 0:
previous["href"] = items[index - 1]["filename"]
else:
previous["href"] = first_page["href"]
if next is not None:
if index < len(items) - 1:
next["href"] = items[index + 1]["filename"]
else:
next["href"] = last_page["href"]
def get_sections_content(self, url):
page = download(url)
content = page.find("section", class_="main-content")
return content
def get_images(self, content):
for img in content.findAll("img"):
if img["src"].startswith("/"):
img_src = urljoin(BASE_URL, img["src"])
else:
img_src = img["src"]
filename = get_name_from_url(img_src)
if img_src not in self.images and img_src:
img["src"] = filename
self.images[img_src] = filename
def write_pdfs(self, base_path, content):
for tag_a in content.findAll(lambda tag: tag.name == "a" and tag.attrs.get("href", "").endswith(".pdf")):
pdf_url = tag_a.get("href", "")
if pdf_url not in self.pdfs_url and pdf_url:
self.pdfs_url.add(pdf_url)
pdf_file = File(pdf_url, lang=self.lang, name=self.name)
pdf_file.download(base_path)
node = pdf_file.to_node()
if node is not None and node["source_id"] not in self.ids:
self.nodes.append(node)
self.ids.add(node["source_id"])
def write_video(self, base_path, content):
        videos = content.find_all(lambda tag: tag.name == "a" and (tag.attrs.get("href", "").find("youtube") != -1 or tag.attrs.get("href", "").find("youtu.be") != -1 or tag.text.lower() == "youtube"))
VIDEOS_DATA_DIR = build_path([base_path, 'videos'])
for video in videos:
youtube = YouTubeResource(video.get("href", ""), lang=self.lang)
node = get_node_from_channel(youtube.resource_url, channel_tree)
if node is None:
youtube.to_file(filepath=VIDEOS_DATA_DIR)
node = youtube.node
if node is not None:
if video.parent.name == 'li':
video.parent.replace_with("Video name: " + node["title"])
if node["source_id"] not in self.ids:
self.nodes.append(node)
self.ids.add(node["source_id"])
def write_index(self, filepath, content):
with html_writer.HTMLWriter(filepath, "w") as zipper:
zipper.write_index_contents(content)
def write_contents(self, filepath_index, filename, content, directory="files"):
with html_writer.HTMLWriter(filepath_index, "a") as zipper:
content = '<html><head><meta charset="utf-8"><link rel="stylesheet" href="../css/styles.css"></head><body>{}<script src="../js/scripts.js"></script></body></html>'.format(content)
zipper.write_contents(filename, content, directory=directory)
def write_images(self, filepath, content):
self.get_images(content)
with html_writer.HTMLWriter(filepath, "a") as zipper:
for img_src, img_filename in self.images.items():
try:
zipper.write_url(img_src, img_filename, directory="files")
except requests.exceptions.HTTPError:
pass
def write_css_js(self, filepath):
with html_writer.HTMLWriter(filepath, "a") as zipper, open("chefdata/styles.css") as f:
content = f.read()
zipper.write_contents("styles.css", content, directory="css/")
with html_writer.HTMLWriter(filepath, "a") as zipper, open("chefdata/scripts.js") as f:
content = f.read()
zipper.write_contents("scripts.js", content, directory="js/")
def item_to_filename(self, name):
name = "_".join(name.lower().split(" "))
hash_name = hashlib.sha1(name.encode("utf-8")).hexdigest()
return "{}.html".format(hash_name)
def to_file(self, filepath, base_path):
index_content_str = self.build_index()
if index_content_str is not None:
self.write_index(filepath, '<html><head><meta charset="utf-8"><link rel="stylesheet" href="css/styles.css"></head><body><div class="main-content-with-sidebar">{}</div><script src="js/scripts.js"></script></body></html>'.format(index_content_str))
self.write_css_js(filepath)
for i, item in enumerate(self.items.values()):
self.write_images(filepath, item["content"])
file_nodes = self.write_pdfs(base_path, item["content"])
video_nodes = self.write_video(base_path, item["content"])
self.pager(item["content"], i)
self.clean_content(item["content"])
content = '<div class="sidebar"><a class="sidebar-link toggle-sidebar-button" href="javascript:void(0)" onclick="javascript:toggleNavMenu();">☰</a>'+\
self.build_index(directory="./") +"</div>"+\
'<div class="main-content-with-sidebar">'+str(item["content"])+'</div>'
self.write_contents(filepath, item["filename"], content)
def to_nodes(self):
return self.nodes
class ResourceType(object):
"""
Base class for File, WebPage, Video, Audio resources
"""
def __init__(self, type_name=None, source_id=None):
LOGGER.info("Resource Type: {} [{}]".format(type_name, source_id))
self.type_name = type_name
self.node = None
self.resource_url = source_id
def to_file(self, filepath=None):
pass
class YouTubeResource(ResourceType):
def __init__(self, resource_url, type_name="Youtube", lang="en"):
super(YouTubeResource, self).__init__(type_name=type_name,
source_id=self.clean_url(resource_url))
self.file_format = file_formats.MP4
self.lang = lang
self.filename = None
self.filepath = None
def clean_url(self, url):
if url[-1] == "/":
url = url[:-1]
return url.strip()
@classmethod
def is_youtube(self, url, get_channel=False):
youtube = url.find("youtube") != -1 or url.find("youtu.be") != -1
if get_channel is False:
youtube = youtube and url.find("user") == -1 and url.find("/c/") == -1
return youtube
@classmethod
def transform_embed(self, url):
url = "".join(url.split("?")[:1])
return url.replace("embed/", "watch?v=").strip()
def get_video_info(self, download_to=None, subtitles=True):
ydl_options = {
'writesubtitles': subtitles,
'allsubtitles': subtitles,
'no_warnings': True,
'restrictfilenames':True,
'continuedl': True,
'quiet': False,
'format': "bestvideo[height<={maxheight}][ext=mp4]+bestaudio[ext=m4a]/best[height<={maxheight}][ext=mp4]".format(maxheight='480'),
'outtmpl': '{}/%(id)s'.format(download_to),
'noplaylist': False
}
with youtube_dl.YoutubeDL(ydl_options) as ydl:
try:
ydl.add_default_info_extractors()
info = ydl.extract_info(self.resource_url, download=(download_to is not None))
return info
except(youtube_dl.utils.DownloadError, youtube_dl.utils.ContentTooShortError,
youtube_dl.utils.ExtractorError) as e:
                LOGGER.info('An error occurred: ' + str(e))
LOGGER.info(self.resource_url)
except KeyError as e:
LOGGER.info(str(e))
def subtitles_dict(self):
subs = []
video_info = self.get_video_info()
if video_info is not None:
video_id = video_info["id"]
if 'subtitles' in video_info:
subtitles_info = video_info["subtitles"]
LOGGER.info("Subtitles: {}".format(",".join(subtitles_info.keys())))
for language in subtitles_info.keys():
subs.append(dict(file_type=SUBTITLES_FILE, youtube_id=video_id, language=language))
return subs
def process_file(self, download=False, filepath=None):
self.download(download=download, base_path=filepath)
if self.filepath:
files = [dict(file_type=content_kinds.VIDEO, path=self.filepath)]
files += self.subtitles_dict()
self.node = dict(
kind=content_kinds.VIDEO,
source_id=self.resource_url,
title=self.filename,
description='',
files=files,
language=self.lang,
license=get_license(licenses.CC_BY, copyright_holder=COPYRIGHT_HOLDER).as_dict())
def download(self, download=True, base_path=None):
if not "watch?" in self.resource_url or "/user/" in self.resource_url or\
download is False:
return
download_to = base_path
for i in range(4):
try:
info = self.get_video_info(download_to=download_to, subtitles=False)
if info is not None:
LOGGER.info("Video resolution: {}x{}".format(info.get("width", ""), info.get("height", "")))
self.filepath = os.path.join(download_to, "{}.mp4".format(info["id"]))
self.filename = info["title"]
if self.filepath is not None and os.stat(self.filepath).st_size == 0:
LOGGER.info("Empty file")
self.filepath = None
except (ValueError, IOError, OSError, URLError, ConnectionResetError) as e:
LOGGER.info(e)
LOGGER.info("Download retry")
time.sleep(.8)
except (youtube_dl.utils.DownloadError, youtube_dl.utils.ContentTooShortError,
youtube_dl.utils.ExtractorError, OSError) as e:
LOGGER.info("An error ocurred, may be the video is not available.")
return
except OSError:
return
else:
return
def to_file(self, filepath=None):
if "watch?" in self.resource_url or not "/user/" in self.resource_url:
self.process_file(download=DOWNLOAD_VIDEOS, filepath=filepath)
def download(source_id):
tries = 0
while tries < 4:
try:
document = downloader.read(source_id, loadjs=False, session=sess)
except requests.exceptions.HTTPError as e:
LOGGER.info("Error: {}".format(e))
except requests.exceptions.ConnectionError:
            ### this is a weird error; it may be raised when the webpage
            ### is slow to respond to requested resources
            LOGGER.info("Connection error, the request will be retried in 3s...")
            time.sleep(3)
except requests.exceptions.TooManyRedirects as e:
LOGGER.info("Error: {}".format(e))
else:
return BeautifulSoup(document, 'html.parser') #html5lib
tries += 1
return False
# When a node has only one child and that child is a leaf object (file, video, etc.),
# the child is moved up one level to replace its parent
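# Illustrative example (not from the original source): a subtree like
#   {"title": "Lesson", "children": [{"title": "Doc", "source_id": "a.pdf", ...}]}
# is collapsed so that the single document node takes the place of its wrapper topic.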
def clean_leafs_nodes_plus(channel_tree):
children = channel_tree.get("children", None)
if children is None:
return
elif len(children) == 1 and not "children" in children[0]:
return channel_tree["children"][0]
elif len(children) == 0:
return -1
else:
del_nodes = []
for i, node in enumerate(children):
leaf_node = clean_leafs_nodes_plus(node)
if leaf_node is not None and leaf_node != -1:
if leaf_node["source_id"].endswith(".js"):
levels = leaf_node["source_id"].split("/")
parent_dir = levels[-2] #dirname
leaf_node["title"] = "{}_{}".format(parent_dir, leaf_node["title"])
children[i] = leaf_node
elif leaf_node == -1:
del children[i]
elif leaf_node is None:
try:
if len(node["children"]) == 0:
del children[i]
elif len(node["children"]) == 1:
children[i] = node["children"][0]
except KeyError:
pass
def language_map(subject):
lang_map = {
"All India - English": "en",
"अखिल भारतीय हिंदी": "hi",
"उत्तर प्रदेश": "hi",
"बिहार": "hi",
"मध्य प्रदेश": "hi",
"অসম": "as",
"পশ্চিমবঙ্গ": "bn",
"ଓଡ଼ିଶା": "or",
"ಕರ್ನಾಟಕ": "kn"
}
return lang_map.get(subject, "en")
class TESSIndiaChef(JsonTreeChef):
HOSTNAME = BASE_URL
TREES_DATA_DIR = os.path.join(DATA_DIR, 'trees')
CRAWLING_STAGE_OUTPUT_TPL = 'web_resource_tree.json'
SCRAPING_STAGE_OUTPUT_TPL = 'ricecooker_json_tree.json'
LICENSE = get_license(licenses.CC_BY_NC_SA, copyright_holder=COPYRIGHT_HOLDER).as_dict()
THUMBNAIL = ""
def __init__(self):
build_path([TESSIndiaChef.TREES_DATA_DIR])
self.scrape_stage = os.path.join(TESSIndiaChef.TREES_DATA_DIR,
TESSIndiaChef.SCRAPING_STAGE_OUTPUT_TPL)
self.crawling_stage = os.path.join(TESSIndiaChef.TREES_DATA_DIR,
TESSIndiaChef.CRAWLING_STAGE_OUTPUT_TPL)
super(TESSIndiaChef, self).__init__()
def pre_run(self, args, options):
css = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chefdata/styles.css")
js = os.path.join(os.path.dirname(os.path.realpath(__file__)), "chefdata/scripts.js")
if not if_file_exists(css) or not if_file_exists(js):
LOGGER.info("Downloading styles")
self.download_css_js()
self.crawl(args, options)
channel_tree = self.scrape(args, options)
clean_leafs_nodes_plus(channel_tree)
self.write_tree_to_json(channel_tree, "en")
def download_css_js(self):
r = requests.get("https://raw.githubusercontent.com/learningequality/html-app-starter/master/css/styles.css")
with open("chefdata/styles.css", "wb") as f:
f.write(r.content)
r = requests.get("https://raw.githubusercontent.com/learningequality/html-app-starter/master/js/scripts.js")
with open("chefdata/scripts.js", "wb") as f:
f.write(r.content)
def crawl(self, args, options):
web_resource_tree = dict(
kind='TESSIndiaResourceTree',
title='TESSIndia',
children=[]
)
crawling_stage = os.path.join(TESSIndiaChef.TREES_DATA_DIR,
TESSIndiaChef.CRAWLING_STAGE_OUTPUT_TPL)
resource_browser = ResourceBrowser(BASE_URL)
for data in resource_browser.run(limit_page=None, page_number=1):
web_resource_tree["children"].append(data)
with open(crawling_stage, 'w') as f:
json.dump(web_resource_tree, f, indent=2)
return web_resource_tree
def scrape(self, args, options):
cache_tree = options.get('cache_tree', '1')
download_video = options.get('--download-video', "1")
with open(self.crawling_stage, 'r') as f:
web_resource_tree = json.load(f)
assert web_resource_tree['kind'] == 'TESSIndiaResourceTree'
if int(download_video) == 0:
global DOWNLOAD_VIDEOS
DOWNLOAD_VIDEOS = False
return self._build_scraping_json_tree(cache_tree, web_resource_tree)
def write_tree_to_json(self, channel_tree, lang):
write_tree_to_json_tree(self.scrape_stage, channel_tree)
def _build_scraping_json_tree(self, cache_tree, web_resource_tree):
LANG = 'mul'
global channel_tree
channel_tree = dict(
source_domain=TESSIndiaChef.HOSTNAME,
source_id='tessindia',
title='TESSIndia',
description="""TESS-India is led by The Open University and Save The Children India, funded by UK Aid it is a multilingual teacher professional development programme whose aim is to support India’s national educational policy by enhancing the classroom practice of primary and secondary school teachers through the provision of freely available, adaptable Open Educational Resources (OER)."""[:400], #400 UPPER LIMIT characters allowed
thumbnail=None,
language=LANG,
children=[],
license=TESSIndiaChef.LICENSE,
)
counter = 0
types = set([])
total_size = len(web_resource_tree["children"])
copyrights = []
for resource in web_resource_tree["children"]:
if 0 <= counter <= total_size:
LOGGER.info("{} of {}".format(counter, total_size))
LOGGER.info("Resource: {}".format(resource["url"]))
resource = Resource(source_id=resource["url"],
lang=language_map(resource["state_lang"].strip()),
state=resource["state_lang"],
subject=resource["subject_name"],
level=resource["level_name"])
resource.scrape()
resource.to_tree(channel_tree)
counter += 1
return channel_tree
# CLI: This code will run when `souschef.py` is called on the command line
################################################################################
if __name__ == '__main__':
chef = TESSIndiaChef()
chef.main()
|
the-stack_0_26617
|
def displayGrid(src):
state = src.copy()
state[state.index(-1)] = ' '
print(state[0], state[1], state[2])
print(state[3], state[4], state[5])
print(state[6], state[7], state[8])
print()
def gen(state, m, b):
temp = state.copy()
if m == 'u':
temp[b-3], temp[b] = temp[b], temp[b-3]
if m == 'd':
temp[b+3], temp[b] = temp[b], temp[b+3]
if m == 'r':
temp[b+1], temp[b] = temp[b], temp[b+1]
if m == 'l':
temp[b-1], temp[b] = temp[b], temp[b-1]
return temp
def possible_moves(state, visited_states):
b = state.index(-1)
d = []
if b not in [0,1,2]:
d += 'u'
if b not in [6,7,8]:
d += 'd'
if b not in [2,5,8]:
d += 'r'
if b not in [0,3,6]:
d += 'l'
pos_moves = []
for move in d:
pos_moves.append(gen(state,move,b))
return [move for move in pos_moves if tuple(move) not in visited_states]
def bfs(src,target):
queue = [src]
visited_states = set()
while len(queue):
state = queue.pop(0)
displayGrid(state)
if state == target:
print(f"Success")
return
for move in possible_moves(state, visited_states):
if tuple(move) not in queue and tuple(move) not in visited_states:
queue.append(move)
visited_states.add(tuple(state))
print("Fail")
src = [ 1, 2, 3,
-1, 4, 5,
6, 7, 8 ]
target = [ 1, 2, 3,
4, 5,-1,
6, 7, 8 ]
bfs(src, target)
|
the-stack_0_26620
|
"""
__author__ = Yash Patel
__name__ = app.py
__description__ = Main file for creating test SBM models and running clustering
"""
import numpy as np
import networkx as nx
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sklearn.cluster
from collections import defaultdict
import pickle
import time
import sys, getopt
import subprocess
from prettytable import PrettyTable
from algorithms import get_algorithms
from analysis.pca import plot_pca
from analysis.spectral import spectral_analysis, kmeans_analysis, cluster_analysis
from analysis.deanonymize import write_results, draw_results, calc_accuracy, calc_accuracies
from analysis.streaming import create_stream, streaming_analysis
from blockchain.read import get_data
from blockchain.metis import format_metis, run_metis
from coarsen.contract import contract_edges, contract_edges_matching, reconstruct_contracted
from setup.sbm import create_sbm, create_clusters
DELINEATION = "**********************************************************************"
def _cmd_graph(argv):
"""Parses arguments as specified by argv and returns as a dictionary. Entries
are parsed as specified in the help menu (visible by running "python3 app.py -h")
Returns parameters dictionary
"""
params = {
"byte_percent" : .01,
"cluster_size" : 10,
"pca" : False,
"guess_clusters" : False,
"run_metis" : True,
"run_spectral" : True,
"num_clusters" : 2,
"run_test" : True,
"weighted" : False,
"p" : 0.75,
"q" : 0.25,
"cs" : None,
"graph_coarsen" : None,
"lib" : "matplotlib",
"multi_run" : 1
}
USAGE_STRING = """eigenvalues.py
-b <byte_percent> [(float) percent of bytes in full data to be analyzed]
-c <cluster_size> [(int) size of each cluster (assumed to be same for all)]
-d <display_bool> [(y/n) for whether to show PCA projections]
-g <guess_bool> [(y/n) to guess the number of clusters vs. take it as known]
-m <run_metis> [(y/n) to additionally enable METIS clustering]
-n <num_cluster> [(int) number of clusters (distinct people)]
-p <p_value> [(0,1) float for in-cluster probability]
-q <q_value> [(0,1) float for non-cluster probability]
-r <run_test_bool> [(y/n) for whether to create SBM to run test or run on actual data]
-s <run_spectral> [(y/n) to enable spectral clustering]
-w <weighted_graph> [(y/n) for whether to have weights on edges (randomized)]
--cs <cluster_sizes> [(int list) size of each cluster (comma delimited)]
--gc <graph_coarsen> [(int) iterations of matchings found to be coarsened (default 0)]
--lib [('matplotlib','plotly') for plotting library]
--mr [(int) indicates how many trials to be run in testing]"""
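    # Example invocation (illustrative, not from the original source):
    #   python3 app.py -r y -n 2 -c 10 -p 0.75 -q 0.25 --mr 5
    # runs five SBM test trials with two clusters of ten nodes each.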
opts, args = getopt.getopt(argv,"hb:c:d:g:m:n:p:q:r:s:w:",['lib=','cs=','gc=','mr='])
for opt, arg in opts:
if opt in ('-h'):
print(USAGE_STRING)
sys.exit()
elif opt in ("-b"): params["byte_percent"] = float(arg)
elif opt in ("-c"): params["cluster_size"] = int(arg)
elif opt in ("-d"): params["pca"] = (arg == "y")
elif opt in ("-g"): params["guess_clusters"] = (arg == "y")
elif opt in ("-m"): params["run_metis"] = (arg == "y")
elif opt in ("-n"): params["num_clusters"] = int(arg)
elif opt in ("-r"): params["run_test"] = (arg == "y")
elif opt in ("-s"): params["run_spectral"] = (arg == "y")
elif opt in ("-w"): params["weighted"] = (arg == "y")
elif opt in ("-p"): params["p"] = float(arg)
elif opt in ("-q"): params["q"] = float(arg)
elif opt in ("--cs"): params["cs"] = arg
elif opt in ("--gc"): params["graph_coarsen"] = int(arg)
elif opt in ("--lib"): params["lib"] = arg
elif opt in ("--mr"): params["multi_run"] = int(arg)
if params["run_test"]:
if params["cs"] is not None:
params["cluster_sizes"] = [int(cluster) for cluster in params["cs"].split(",")]
else:
params["cluster_sizes"] = [cluster_size] * num_clusters
params["clusters"] = create_clusters(params["cluster_sizes"])
return params
def _pretty_format(d, header):
t = PrettyTable(header)
sorted_keys = sorted(d.keys())
for key in sorted_keys:
t.add_row([key, d[key]])
return str(t)
def _update_accuracies(updates, purity, nmi, rand_ind, weighted_rand_ind, alg_name):
purity[alg_name] += updates["purity"]
nmi[alg_name] += updates["nmi"]
rand_ind[alg_name] += updates["rand_ind"]
weighted_rand_ind[alg_name] += updates["weighted_ri"]
def main(argv):
"""Main application method that parses command line arguments and runs hierarchical
and kmeans clustering. CMD-line arguments are specified in the help menu (run with -h).
Final clustering outputs are provided in the output/ folder as eigen_guess and
kmeans_guess respectively. Intermediate results are available in output/eigen/
Returns void
"""
params = _cmd_graph(argv)
produce_figures = True
# algorithms to be used in the clustering runs (BOTH in testing and full analysis)
to_run = set(["DBSCAN"])
if params["run_test"]:
clusters = params["clusters"]
else:
clusters = None
# change the line below if the remote source of the data is updated
data_src = "https://s3.amazonaws.com/bitcoinclustering/cluster_data.dat"
S, index_to_id = get_data(data_src, percent_bytes=params["byte_percent"])
if params["run_test"]:
purity = defaultdict(lambda: 0.0)
nmi = defaultdict(lambda: 0.0)
rand_ind = defaultdict(lambda: 0.0)
weighted_rand_ind = defaultdict(lambda: 0.0)
accuracy_measures = [# ("purity",purity), ("nmi",nmi),
("rand_ind",rand_ind), ("weighted_ri",weighted_rand_ind)]
timeElapsed = defaultdict(lambda: 0.0)
for _ in range(params["multi_run"]):
G = create_sbm(clusters, params["p"], params["q"], params["weighted"])
if params["pca"]:
plot_pca(G, clusters, plot_2d=True, plot_3d=True, plot_lib=params["lib"])
spring_pos = nx.spring_layout(G)
n = sum([len(cluster) for cluster in clusters])
num_clusters = len(clusters)
weigh_edges = False
if params["graph_coarsen"] is not None:
params_fn = "p-{}_q-{}_gc-{}_n-{}".format(params["p"],
params["q"], params["graph_coarsen"], n)
else:
params_fn = "p-{}_q-{}_n-{}".format(params["p"], params["q"], n)
if params["run_spectral"]:
if params["graph_coarsen"] is not None:
# contracted_G, identified_nodes = contract_edges(G, num_edges=to_contract)
contracted_G, identified_nodes = contract_edges_matching(G,
num_iters=params["graph_coarsen"])
print("Edges removed: {}".format(len(G.edges) - len(contracted_G.edges)))
start = time.time()
hier_cont_partitions = spectral_analysis(G, k=num_clusters)
hier_partitions = reconstruct_contracted(identified_nodes, hier_cont_partitions)
timeElapsed["ManualHierarchical"] += time.time() - start
start = time.time()
kmeans_cont_partitions = kmeans_analysis(G, k=num_clusters)
kmeans_partitions = reconstruct_contracted(identified_nodes, kmeans_cont_partitions)
timeElapsed["ManualKmeans"] += time.time() - start
if produce_figures:
contracted_spring_pos = nx.spring_layout(contracted_G)
draw_results(contracted_G, contracted_spring_pos, hier_cont_partitions,
"ManualHierarchical_cont_{}.png".format(params_fn), weigh_edges=weigh_edges)
draw_results(contracted_G, contracted_spring_pos, kmeans_cont_partitions,
"ManualKmeans_cont_{}.png".format(params_fn), weigh_edges=weigh_edges)
else:
# if params["guess_clusters"]:
# num_clusters = None
# else:
# num_clusters = len(clusters)
start = time.time()
hier_partitions = spectral_analysis(G, k=num_clusters)
timeElapsed["ManualHierarchical"] += time.time() - start
start = time.time()
kmeans_partitions = kmeans_analysis(G, k=num_clusters)
timeElapsed["ManualKmeans"] += time.time() - start
_update_accuracies(calc_accuracies(clusters, hier_partitions, n),
purity, nmi, rand_ind, weighted_rand_ind, "ManualHierarchical")
_update_accuracies(calc_accuracies(clusters, kmeans_partitions, n),
purity, nmi, rand_ind, weighted_rand_ind, "ManualKmeans")
if produce_figures:
draw_results(G, spring_pos, clusters,
"truth_{}.png".format(params_fn), weigh_edges=weigh_edges)
draw_results(G, spring_pos, hier_partitions,
"ManualHierarchical_{}.png".format(params_fn), weigh_edges=weigh_edges)
draw_results(G, spring_pos, kmeans_partitions,
"ManualKmeans_{}.png".format(params_fn), weigh_edges=weigh_edges)
algorithms = get_algorithms(num_clusters)
if params["graph_coarsen"] is not None:
S = nx.adjacency_matrix(contracted_G)
else:
S = nx.adjacency_matrix(G)
if params["run_metis"]:
metis_fn = "output/test_metis.graph"
format_metis(nx.adjacency_matrix(G), metis_fn)
metis_partitions, time_elapsed = run_metis(metis_fn, num_clusters)
_update_accuracies(calc_accuracies(clusters, metis_partitions, n),
purity, nmi, rand_ind, weighted_rand_ind, "Metis")
timeElapsed["Metis"] += time_elapsed
if produce_figures:
draw_results(G, spring_pos, metis_partitions,
"Metis_{}.png".format(params_fn), weigh_edges=weigh_edges)
for alg_name in algorithms:
if alg_name in to_run:
algorithm, args, kwds = algorithms[alg_name]
print(DELINEATION)
print("Running {} partitioning (coarsened: {})...".format(
alg_name, params["graph_coarsen"]))
start = time.time()
if params["graph_coarsen"] is not None:
cont_partitions, _ = cluster_analysis(S, algorithm, args, kwds)
partitions = reconstruct_contracted(identified_nodes, cont_partitions)
outliers = None # TODO : handles outliers for contracted graphs
else:
partitions, outliers = cluster_analysis(S, algorithm, args, kwds)
end = time.time()
if params["graph_coarsen"] is not None:
if produce_figures:
draw_results(contracted_G, contracted_spring_pos, cont_partitions,
"{}_contracted_{}.png".format(alg_name, params_fn),
weigh_edges=weigh_edges, outliers=outliers)
_update_accuracies(calc_accuracies(clusters, partitions, n),
purity, nmi, rand_ind, weighted_rand_ind, alg_name)
timeElapsed[alg_name] += end - start
if produce_figures:
draw_results(G, spring_pos, partitions,
"{}_{}.png".format(alg_name, params_fn),
weigh_edges=weigh_edges, outliers=outliers)
print(DELINEATION)
print(timeElapsed)
for accuracy_name, accuracies in accuracy_measures:
for alg_name in accuracies.keys():
accuracies[alg_name] /= params["multi_run"]
timeElapsed[alg_name] /= params["multi_run"]
with open("output/{}_{}.txt".format(accuracy_name, params_fn),"w") as f:
f.write(_pretty_format(accuracies, ["algorithm","accuracy"]))
f.write("\n")
f.write(_pretty_format(timeElapsed, ["algorithm","time (s)"]))
else:
num_clusters = params["num_clusters"]
algorithms = get_algorithms(num_clusters)
weigh_edges = False
print("Creating NetworkX graph...")
# G = nx.from_scipy_sparse_matrix(S)
# spring_pos = nx.spring_layout(G)
for alg_name in algorithms:
if alg_name in to_run:
algorithm, args, kwds = algorithms[alg_name]
print("Running {} partitioning...".format(alg_name))
partitions = cluster_analysis(S, algorithm, args, kwds)
write_results(partitions, index_to_id, "{}_guess".format(alg_name))
# draw_results(G, spring_pos, partitions,
# "{}_guess.png".format(alg_name), weigh_edges=weigh_edges)
if params["run_metis"]:
metis_fn = "blockchain/data_{0:f}.pickle".format(percent_bytes)
metis_partitions = metis_from_pickle(metis_fn, num_clusters)
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_26622
|
import math
from functools import partial
from shutterstock.exceptions import APIResponseError
class ResourceObjectInitializer:
def __init__(self, cls, func, instance):
self.func = func
self.instance = instance or cls()
def __call__(self, *args, **kwargs):
self.instance.reset()
self.instance.set_data(**self.func(*args, **kwargs))
return self.instance
class Collection(list):
def __init__(self, *args, total_count=None, page=None, per_page=None, **kwargs):
super().__init__(*args, **kwargs)
self.total_count = total_count
self.page = page
self.per_page = per_page
if not self.total_count or not self.per_page:
self.pages = 0
else:
self.pages = math.ceil(self.total_count / self.per_page)
class ResourceCollectionInitializer:
def __init__(self, cls, func, instance):
self.cls = cls
self.func = func
def __call__(self, *args, **kwargs):
data = self.func(*args, **kwargs)
if 'errors' in data:
response_message = 'No error response provided'
response_data = {}
code = 200
if 'data' in data:
response_data = data['data']
raise APIResponseError(response_message, code, data['errors'], response_data)
collection = Collection(total_count=data.get('total_count', None),
page=data.get('page', None),
per_page=data.get('per_page', None))
for item in data.get('data', []):
collection.append(self.cls(**item))
return collection
class ResourceMethodAccessor:
def __init__(self, func, initializer, resource=None, **params):
self.func = func
self.initializer = initializer
self.resource = resource
self.params = params
def configure_for_client(self, client):
if self.resource is None:
return self
resource = getattr(client, self.resource.__name__)
return ResourceMethodAccessor(self.func, self.initializer, resource, **self.params)
def __get__(self, instance, cls):
params = {}
if instance:
for key, val in self.params.items():
params[key] = getattr(instance, val)
return self.initializer(self.resource or cls, partial(self.func, cls=cls, **params), instance)
class ResourceMethod:
def __init__(self, initializer, resource=None, **params):
self.initializer = initializer
self.resource = resource
self.params = params
def __call__(self, func):
return ResourceMethodAccessor(func, self.initializer, resource=self.resource, **self.params)
class ResourceObjectMethod(ResourceMethod):
def __init__(self, **params):
super().__init__(ResourceObjectInitializer, **params)
class ResourceCollectionMethod(ResourceMethod):
def __init__(self, **params):
super().__init__(ResourceCollectionInitializer, **params)
class Resource:
API = None
LIST = None
GET = None
CREATE = None
UPDATE = None
DELETE = None
def __init__(self, **kwargs):
self.set_data(**kwargs)
def reset(self):
pass
def set_data(self, **params):
for key, val in params.items():
setattr(self, key, val)
@ResourceCollectionMethod()
def all(cls, **params):
return cls.API.get(cls.LIST, **params)
@ResourceCollectionMethod()
def list(cls, **params):
return cls.API.get(cls.LIST, **params)
@ResourceObjectMethod(id='id')
def get(cls, **params):
return cls.API.get(cls.GET, **params)
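# --- Illustrative sketch (not part of the original module) ---
# A hypothetical Resource subclass showing how the LIST/GET endpoint templates
# and the ResourceCollectionMethod/ResourceObjectMethod descriptors are meant to
# be combined. The "ExampleImage" name and both endpoint paths are assumptions.
class ExampleImage(Resource):
    LIST = '/images'          # hypothetical collection endpoint
    GET = '/images/{id}'      # hypothetical detail endpoint
# With an API client attached (ExampleImage.API = <client>), one would then call
# e.g. ExampleImage.list(per_page=20) or ExampleImage.get(id='12345'); the
# parameter names used here are assumptions as well.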
|
the-stack_0_26624
|
from django import forms
from django.forms import widgets
from .models import Topic, Entry
class TopicForm(forms.ModelForm):
""" Build forms to enter the information by the user."""
class Meta:
model = Topic
fields = ['text']
labels = {'text': ''}
class EntryForm(forms.ModelForm):
class Meta:
model = Entry
fields = ['text']
labels = {'text': 'Entry:'}
widgets = {'text': forms.Textarea(attrs={'cols': 80})}
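# --- Illustrative usage sketch (assumption, not part of this forms module) ---
# How a view might pair TopicForm with a template; the URL name and template
# path below are hypothetical.
# from django.shortcuts import render, redirect
# def new_topic(request):
#     if request.method != 'POST':
#         form = TopicForm()                      # blank form on GET
#     else:
#         form = TopicForm(data=request.POST)     # bound form on POST
#         if form.is_valid():
#             form.save()
#             return redirect('learning_logs:topics')
#     return render(request, 'learning_logs/new_topic.html', {'form': form})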
|
the-stack_0_26625
|
import sys
sys.path.append('..')
sys.path.append('../..')
import argparse
import utils
from student_utils import *
import input_validator
import tqdm
import re
import os
def validate_output(input_file, output_file, params=[]):
# print('Processing', input_file)
input_data = utils.read_file(input_file)
output_data = utils.read_file(output_file)
cost = tests(input_data, output_data, params=params)
return cost
def validate_all_outputs(input_directory, output_directory, check_dir, params=[]):
input_files = utils.get_files_with_extension(input_directory, '.in')
output_files = utils.get_files_with_extension(output_directory, '.out')
# if os.path.isfile(check_dir):
# print('ERROR: ' + check_dir + ' already exist!')
# return
check_file = open(check_dir, 'w')
for input_file in tqdm.tqdm(input_files):
output_file = utils.input_to_output(input_file, output_directory)
# print(input_file, output_file)
if output_file not in output_files:
print(f'No corresponding .out file for {input_file}')
else:
cost = validate_output(input_file, output_file, params=params)
name = input_file.split('/')[-1]
check_file.write(name.replace('in', 'out') + ':' + str(cost) + '\n')
check_file.close()
def tests(input_data, output_data, params=[]):
number_of_locations, number_of_houses, list_of_locations, list_of_houses, starting_location, adjacency_matrix = data_parser(input_data)
try:
G, message = adjacency_matrix_to_graph(adjacency_matrix)
except Exception:
        print('Your adjacency matrix is not well formed.')
        return 'infinite'
message = ''
cost = -1
car_cycle = output_data[0]
num_dropoffs = int(output_data[1][0])
targets = []
dropoffs = {}
for i in range(num_dropoffs):
dropoff = output_data[i + 2]
dropoff_index = list_of_locations.index(dropoff[0])
dropoffs[dropoff_index] = convert_locations_to_indices(dropoff[1:], list_of_locations)
car_cycle = convert_locations_to_indices(car_cycle, list_of_locations)
if cost != 'infinite':
cost, solution_message = cost_of_solution(G, car_cycle, dropoffs)
return cost
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parsing arguments')
parser.add_argument('input', type=str, help='The path to the input file or directory')
parser.add_argument('output', type=str, help='The path to the output file or directory')
parser.add_argument('check', type=str, help='the path to the output file check log')
parser.add_argument('params', nargs=argparse.REMAINDER, help='Extra arguments passed in')
args = parser.parse_args()
input_directory, output_directory = args.input, args.output
check_dir = args.check
validate_all_outputs(input_directory, output_directory, check_dir, params=args.params)
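# Example invocation (hypothetical script name and paths, shown only as a usage hint):
#   python output_validator.py inputs/ outputs/ output/check.txt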
|
the-stack_0_26626
|
from os.path import basename, splitext
from numpy import zeros, pi, roll, cos, sin
# from scipy.interpolate import interp1d
from ....Classes._FEMMHandler import FEMMHandler
from ....Functions.FEMM.update_FEMM_simulation import update_FEMM_simulation
from ....Functions.FEMM.comp_FEMM_torque import comp_FEMM_torque
from ....Functions.FEMM.comp_FEMM_Phi_wind import comp_FEMM_Phi_wind
def solve_FEMM(
self,
femm,
output,
out_dict,
sym,
Nt,
angle,
Is,
Ir,
angle_rotor,
is_close_femm,
filename=None,
start_t=0,
end_t=None,
):
"""
    Solve FEMM model to calculate airgap flux density, instantaneous/average/ripple torque values,
    flux induced in stator windings, and flux density, field and permeability maps
Parameters
----------
self: MagFEMM
A MagFEMM object
femm: FEMMHandler
Object to handle FEMM
output: Output
An Output object
out_dict: dict
Dict containing the following quantities to update for each time step:
Br : ndarray
Airgap radial flux density (Nt,Na) [T]
Bt : ndarray
Airgap tangential flux density (Nt,Na) [T]
Tem : ndarray
Electromagnetic torque over time (Nt,) [Nm]
Phi_wind_stator : ndarray
Stator winding flux (qs,Nt) [Wb]
sym: int
Spatial symmetry factor
Nt: int
Number of time steps for calculation
angle: ndarray
Angle vector for calculation
Is : ndarray
Stator current matrix (qs,Nt) [A]
Ir : ndarray
        Rotor current matrix (qr,Nt) [A]
angle_rotor: ndarray
Rotor angular position vector (Nt,)
is_close_femm: bool
True to close FEMM handler in the end
filename: str
Path to FEMM model to open
start_t: int
Index of first time step (0 by default, used for parallelization)
end_t: int
Index of last time step (Nt by default, used for parallelization)
Returns
-------
B: ndarray
3D Magnetic flux density for all time steps and each element (Nt, Nelem, 3) [T]
H : ndarray
3D Magnetic field for all time steps and each element (Nt, Nelem, 3) [A/m]
mu : ndarray
Magnetic relative permeability for all time steps and each element (Nt, Nelem) []
mesh: MeshMat
Object containing magnetic mesh at first time step
groups: dict
Dict whose values are group label and values are array of indices of related elements
"""
# Open FEMM file if not None, else it is already open
if filename is not None:
try:
# Open the document
femm.openfemm(1)
except:
# Create a new FEMM handler in case of parallelization on another FEMM instance
femm = FEMMHandler()
# Open the document
femm.openfemm(1)
# Import FEMM file
femm.opendocument(filename)
# Take last time step at Nt by default
if end_t is None:
end_t = Nt
    # Init mesh solution as None since array allocation can only be done once the
    # number of elements is known, i.e. after the first time step resolution
B_elem, H_elem, mu_elem, meshFEMM, groups = None, None, None, None, None
# Number of angular steps
Na = angle.size
    # Loading parameters for readability
Rag = output.simu.machine.comp_Rgap_mec()
L1 = output.simu.machine.stator.comp_length()
save_path = self.get_path_save(output)
FEMM_dict = output.mag.FEA_dict
is_internal_rotor = output.simu.machine.rotor.is_internal
if "Phi_wind_stator" in out_dict:
qs = output.simu.machine.stator.winding.qs # Winding phase number
Npcpp = output.simu.machine.stator.winding.Npcpp
# Account for initial angular shift of stator and rotor and apply it to the sliding band
angle_shift = self.angle_rotor_shift - self.angle_stator_shift
# Compute the data for each time step
for ii in range(start_t, end_t):
self.get_logger().debug("Solving step " + str(ii + 1) + " / " + str(Nt))
# Update rotor position and currents
update_FEMM_simulation(
femm=femm,
circuits=FEMM_dict["circuits"],
is_sliding_band=self.is_sliding_band,
is_internal_rotor=is_internal_rotor,
angle_rotor=angle_rotor + angle_shift,
Is=Is,
Ir=Ir,
ii=ii,
)
# try "previous solution" for speed up of FEMM calculation
if self.is_sliding_band:
try:
base = basename(self.get_path_save_fem(output))
ans_file = splitext(base)[0] + ".ans"
femm.mi_setprevious(ans_file, 0)
except:
pass
# Run the computation
femm.mi_analyze()
# Load results
femm.mi_loadsolution()
# Get the flux result
if self.is_sliding_band:
for jj in range(Na):
out_dict["Br"][ii, jj], out_dict["Bt"][ii, jj] = femm.mo_getgapb(
"bc_ag2", angle[jj] * 180 / pi
)
else:
for jj in range(Na):
B = femm.mo_getb(Rag * cos(angle[jj]), Rag * sin(angle[jj]))
out_dict["Br"][ii, jj] = B[0] * cos(angle[jj]) + B[1] * sin(angle[jj])
out_dict["Bt"][ii, jj] = -B[0] * sin(angle[jj]) + B[1] * cos(angle[jj])
# Compute the torque
out_dict["Tem"][ii] = comp_FEMM_torque(femm, FEMM_dict, sym=sym)
if "Phi_wind_stator" in out_dict:
# Phi_wind computation
out_dict["Phi_wind_stator"][ii, :] = comp_FEMM_Phi_wind(
femm,
qs,
Npcpp,
is_stator=True,
Lfemm=FEMM_dict["Lfemm"],
L1=L1,
sym=sym,
)
# Load mesh data & solution
if (self.is_sliding_band or Nt == 1) and (self.is_get_mesh or self.is_save_FEA):
# Get mesh data and magnetic quantities from .ans file
tmpmeshFEMM, tmpB, tmpH, tmpmu, tmpgroups = self.get_meshsolution(
femm,
save_path,
j_t0=ii,
id_worker=start_t,
is_get_mesh=ii == start_t,
)
# Initialize mesh and magnetic quantities for first time step
if ii == start_t:
meshFEMM = [tmpmeshFEMM]
groups = [tmpgroups]
Nelem = meshFEMM[0].cell["triangle"].nb_cell
Nt0 = end_t - start_t
B_elem = zeros([Nt0, Nelem, 3])
H_elem = zeros([Nt0, Nelem, 3])
mu_elem = zeros([Nt0, Nelem])
# Shift time index ii in case start_t is not 0 (parallelization)
ii0 = ii - start_t
# Store magnetic flux density, field and relative permeability for the current time step
B_elem[ii0, :, 0:2] = tmpB
H_elem[ii0, :, 0:2] = tmpH
mu_elem[ii0, :] = tmpmu
# Shift to take into account stator position
if self.angle_stator_shift != 0:
roll_id = int(self.angle_stator_shift * Na / (2 * pi))
out_dict["Br"] = roll(out_dict["Br"], roll_id, axis=1)
out_dict["Bt"] = roll(out_dict["Bt"], roll_id, axis=1)
# # Interpolate on updated angular position # TODO to improve accuracy
# angle_new = (angle - self.angle_stator_shift) % (2 * pi / sym)
# out_dict["Br"] = interp1d(append(angle, 2 * pi / sym), append(out_dict["Br"], out_dict["Br"][:,0]), axis=1)[angle_new]
# out_dict["Bt"] = interp1d(append(angle, 2 * pi / sym), append(out_dict["Bt"], out_dict["Bt"][:,0]), axis=1)[angle_new]
# Close FEMM handler
if is_close_femm:
femm.closefemm()
# Store FEMM_dict in OutMag if FEMM file is not imported
if filename is None:
output.mag.FEA_dict = FEMM_dict
return B_elem, H_elem, mu_elem, meshFEMM, groups
|
the-stack_0_26627
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.gcodelexer
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for the G Code Language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Comment, Name, Text, Keyword, Number
__all__ = ['GcodeLexer']
class GcodeLexer(RegexLexer):
"""
For gcode source code.
.. versionadded:: 2.9
"""
name = 'g-code'
aliases = ['gcode']
filenames = ['*.gcode']
tokens = {
'root': [
(r';.*\n', Comment),
(r'^[gmGM]\d{1,4}\s', Name.Builtin), # M or G commands
(r'([^gGmM])([+-]?\d*[.]?\d+)', bygroups(Keyword, Number)),
(r'\s', Text.Whitespace),
(r'.*\n', Text),
]
}
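# --- Illustrative self-check (not part of the original lexer module) ---
# Runs the lexer on a tiny made-up G-code snippet when executed directly; the
# snippet and the choice of formatter are assumptions.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = "G1 X10 Y20 F1500 ; linear move\nM104 S200 ; set hotend temperature\n"
    print(highlight(sample, GcodeLexer(), TerminalFormatter()))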
|
the-stack_0_26628
|
import pygame
import constants as c
import math
from particle import Spark
class Player:
def __init__(self, game):
self.game = game
self.x = c.WINDOW_WIDTH//2
self.y = c.WINDOW_HEIGHT//2
self.shield_angle = 0
self.shield_vis_angle = 0
self.shield_surf = pygame.image.load(c.image_path("shield.png")).convert()
self.shield_bonk = pygame.image.load(c.image_path("shield_bonk.png")).convert()
self.shield_surf.set_colorkey(c.BLACK)
self.shield_bonk.set_colorkey(c.BLACK)
self.shield_surf.set_alpha(0)
self.shield_bonk.set_alpha(0)
self.radius = 20
self.shield_radius = 50
self.shield_spread = 100 # in degrees
self.has_shield = False
self.move_disabled = False
self.surf = pygame.image.load(c.image_path("player.png")).convert()
self.surf.set_colorkey(c.BLACK)
self.bonk_time = 0.08
self.bonk_timer = self.bonk_time
self.recoil = 0
self.age = 0
self.mortal = False
self.health = 100
self.dead = False
def take_damage(self):
if not self.mortal:
self.health = int(max(self.health*0.8, 1))
else:
self.health = int(max(self.health - 15, 0))
def draw(self, surface):
if self.dead:
return
x, y = self.game.xy_transform(self.x, self.y)
self.surf.set_alpha((220 + 20 * math.sin(self.age * 2)) * (self.health+30)/130)
r = int((self.radius * 1.2 + 5 * math.sin(self.age * 2)) * (self.health + 50)/120)
glow = pygame.Surface((r*2, r*2))
pygame.draw.circle(glow, (200, 255, 215), (r, r), r)
glow.set_alpha(60 * (self.health/100))
glow.set_colorkey(c.BLACK)
surface.blit(glow, (x - glow.get_width()//2, y - glow.get_height()//2))
r = int((self.radius * 1.6 + 8 * math.sin(self.age * 2)) * (self.health + 50)/120)
glow = pygame.Surface((r*2, r*2))
pygame.draw.circle(glow, c.GREEN, (r, r), r)
glow.set_alpha(30 * (self.health/100))
glow.set_colorkey(c.BLACK)
surface.blit(glow, (x - glow.get_width()//2, y - glow.get_height()//2))
scale = int((self.health+30)/120 * self.surf.get_height())
surf = pygame.transform.scale(self.surf, (scale, scale))
surface.blit(surf, (x - surf.get_width()//2, y - surf.get_height()//2))
self.draw_shield(surface)
pass
def die(self):
self.dead = True
for i in range(40):
Spark(self.game, (self.x, self.y), c.WHITE, speed=800)
def update_shield(self, dt):
self.recoil *= 0.025**dt
self.bonk_timer += dt
d = self.shield_angle - self.shield_vis_angle
d2 = self.shield_angle - self.shield_vis_angle + 360
d3 = self.shield_angle - self.shield_vis_angle - 360
true_d = d
for item in [d2, d3]:
if abs(item) < abs(true_d):
true_d = item
if self.shield_surf.get_alpha() < 255 and self.has_shield:
self.shield_surf.set_alpha(min(255, self.shield_surf.get_alpha() + dt * 600))
self.shield_bonk.set_alpha(self.shield_surf.get_alpha())
if self.shield_surf.get_alpha() > 0 and not self.has_shield:
self.shield_surf.set_alpha(max(0, self.shield_surf.get_alpha() - dt * 600))
diff = 20*true_d*dt
if true_d > 0:
diff = min(diff, true_d)
else:
diff = max(diff, true_d)
self.shield_vis_angle += diff
def is_blocking(self, other):
if not self.has_shield:
return False
a1 = other.direction
a2 = self.shield_vis_angle
d = a1 - a2
d1 = a1 - a2 + 360
d2 = a1 - a2 - 360
for item in [d, d1, d2]:
if abs(item) <= self.shield_spread//2:
return True
return False
def draw_shield(self, surface):
shield_surf = self.shield_surf if self.bonk_timer > self.bonk_time else self.shield_bonk
if self.shield_surf.get_alpha() < 0:
return
ssurf = pygame.transform.rotate(shield_surf, self.shield_vis_angle)
x = self.x - ssurf.get_width()//2
y = self.y - ssurf.get_height()//2
x, y = self.game.xy_transform(x, y)
rad = self.shield_vis_angle * math.pi / 180
xoff = int(self.recoil * -math.cos(rad))
yoff = int(self.recoil * math.sin(rad))
surface.blit(ssurf, (x + xoff, y + yoff))
def update(self, dt, events):
if self.mortal and self.health == 0:
self.die()
if self.health < 100:
self.health += 7 * dt
self.age += dt
old = self.shield_angle
for event in events:
if event.type == pygame.KEYDOWN and self.has_shield and not self.dead and not self.move_disabled:
if event.key == pygame.K_UP:
self.shield_angle = c.UP
elif event.key == pygame.K_RIGHT:
self.shield_angle = c.RIGHT
elif event.key == pygame.K_LEFT:
self.shield_angle = c.LEFT
elif event.key == pygame.K_DOWN:
self.shield_angle = c.DOWN
if self.shield_angle != old:
self.game.change_direction_sound.play()
self.update_shield(dt)
self.shield_vis_angle %= 360
|
the-stack_0_26629
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
# 80x80x2 to 38x38x4
        # 2 channels from the stacked frames
self.conv1 = nn.Conv2d(2, 4, kernel_size=6, stride=2, bias=False)
        # 38x38x4 to 9x9x16
self.conv2 = nn.Conv2d(4, 16, kernel_size=6, stride=4)
self.size=9*9*16
# two fully connected layer
self.fc1 = nn.Linear(self.size, 256)
self.fc2 = nn.Linear(256, 1)
        # Sigmoid to squash the single output into a probability
self.sig = nn.Sigmoid()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = x.view(-1,self.size)
x = F.relu(self.fc1(x))
return self.sig(self.fc2(x))
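# --- Illustrative sanity check (assumption: 80x80 inputs with 2 stacked frames) ---
# Not part of the original module; just verifies the expected output shape of a
# forward pass on a dummy batch.
if __name__ == '__main__':
    policy = Policy()
    dummy = torch.zeros(1, 2, 80, 80)   # (batch, stacked frames, height, width)
    prob = policy(dummy)
    print(prob.shape)                   # expected: torch.Size([1, 1])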
|
the-stack_0_26633
|
import cv2, os, argparse, math
from PIL import Image
import random
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument(
'-d',
type = str,
help = 'The dataset directory.',
required = True
)
parser.add_argument(
'-r',
type=int,
help='The number of rows',
default=3,
required=False,
)
parser.add_argument(
'-c',
type=int,
help='No of columns',
default=4,
required=False
)
parser.add_argument(
'-sz',
    type=float,
help='resize multiple. 2x reduces image size by 2.',
default=1.5,
required=False
)
parser.add_argument(
'-w',
type=int,
help='Width of bbox to draw',
default=2,
required=False
)
opt = parser.parse_args()
IMG_DIR = opt.d + 'images'
LAB_DIR = opt.d + 'labels'
# rows and cols of image to view
nrows = opt.r
ncols = opt.c
imgs = []
for img in os.listdir(IMG_DIR):
imgs.append(
(IMG_DIR + '/' + img,
LAB_DIR + '/' + img[:-4] + '.txt')
)
image = cv2.imread(imgs[0][0])
imagew = image.shape[0] * opt.r  # total mosaic height in pixels (image height x rows)
imageh = image.shape[1] * opt.c  # total mosaic width in pixels (image width x columns)
image = np.empty((imagew, imageh, 3), dtype=np.uint8)
classes = {}
imgsel = np.random.choice(len(imgs), nrows*ncols, replace=False)
for i in range(nrows):
for j in range(ncols):
# ipath, lpath = imgs[random.randint(0, len(imgs)-1)]
ipath, lpath = imgs[imgsel[i*ncols+j]]
img = cv2.imread(ipath)
dh, dw, _ = img.shape
fl = open(lpath, 'r')
data = fl.readlines()
fl.close()
for dt in data:
# Split string to float
objclass, x, y, w, h = map(float, dt.split(' '))
if objclass not in classes:
classes[objclass] = (random.randint(0,255), random.randint(0,255), random.randint(0,255))
l = int((x - w / 2) * dw)
r = int((x + w / 2) * dw)
t = int((y - h / 2) * dh)
b = int((y + h / 2) * dh)
if l < 0:
l = 0
if r > dw - 1:
r = dw - 1
if t < 0:
t = 0
if b > dh - 1:
b = dh - 1
alpha = 0.2
cv2.rectangle(img, (l, t), (r, b), classes[objclass], opt.w)
i1 = i * img.shape[0]
i2 = i * img.shape[0] + img.shape[0]
j1 = j * img.shape[1]
j2 = j * img.shape[1] + img.shape[1]
image[i1:i2, j1:j2] = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image = Image.fromarray(image)
image = image.resize(
(math.ceil(imageh/opt.sz), math.ceil(imagew/opt.sz))
)
image.show()
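# Example invocation (hypothetical script name; assumes a dataset layout with
# images/ and labels/ subdirectories, and -d must end with a trailing slash):
#   python view_dataset.py -d dataset/ -r 3 -c 4 -sz 2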
|
the-stack_0_26636
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs: Any
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs: Any
) -> "_models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_07_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
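# --- Illustrative usage sketch (assumption, not part of this generated module) ---
# These operations are normally reached through the generated management client
# rather than instantiated directly; the resource names below are placeholders.
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.network.aio import NetworkManagementClient
# async def show_ip_configs():
#     async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         async for ip_config in client.network_interface_ip_configurations.list(
#                 "<resource-group>", "<network-interface>"):
#             print(ip_config.name)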
|
the-stack_0_26637
|
from torch import nn
import torch
class MemoryUpdater(nn.Module):
def update_memory(self, unique_node_ids, unique_messages, timestamps):
pass
class SequenceMemoryUpdater(MemoryUpdater):
def __init__(self, memory, message_dimension, memory_dimension, device):
super(SequenceMemoryUpdater, self).__init__()
self.memory = memory
self.layer_norm = torch.nn.LayerNorm(memory_dimension)
self.message_dimension = message_dimension
self.device = device
def update_memory(self, unique_node_ids, unique_messages, timestamps):
if len(unique_node_ids) <= 0:
return
        assert (
            (self.memory.get_last_update(unique_node_ids) <= timestamps).all().item()
        ), "Trying to update memory to time in the past"
memory = self.memory.get_memory(unique_node_ids)
self.memory.last_update[unique_node_ids] = timestamps
updated_memory = self.memory_updater(unique_messages, memory)
self.memory.set_memory(unique_node_ids, updated_memory)
def get_updated_memory(self, unique_node_ids, unique_messages, timestamps):
if len(unique_node_ids) <= 0:
return self.memory.memory.data.clone(), self.memory.last_update.data.clone()
        assert (
            (self.memory.get_last_update(unique_node_ids) <= timestamps).all().item()
        ), "Trying to update memory to time in the past"
updated_memory = self.memory.memory.data.clone()
updated_memory[unique_node_ids] = self.memory_updater(
unique_messages, updated_memory[unique_node_ids]
)
updated_last_update = self.memory.last_update.data.clone()
updated_last_update[unique_node_ids] = timestamps
return updated_memory, updated_last_update
class GRUMemoryUpdater(SequenceMemoryUpdater):
def __init__(self, memory, message_dimension, memory_dimension, device):
super(GRUMemoryUpdater, self).__init__(
memory, message_dimension, memory_dimension, device
)
self.memory_updater = nn.GRUCell(
input_size=message_dimension, hidden_size=memory_dimension
)
class RNNMemoryUpdater(SequenceMemoryUpdater):
def __init__(self, memory, message_dimension, memory_dimension, device):
super(RNNMemoryUpdater, self).__init__(
memory, message_dimension, memory_dimension, device
)
self.memory_updater = nn.RNNCell(
input_size=message_dimension, hidden_size=memory_dimension
)
def get_memory_updater(
module_type, memory, message_dimension, memory_dimension, device
):
if module_type == "gru":
return GRUMemoryUpdater(memory, message_dimension, memory_dimension, device)
elif module_type == "rnn":
return RNNMemoryUpdater(memory, message_dimension, memory_dimension, device)
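# --- Illustrative sketch (assumptions: toy dimensions, memory object omitted) ---
# The factory only wires an nn.GRUCell / nn.RNNCell to the requested sizes, so the
# underlying cell can be exercised directly on random messages and memory vectors.
if __name__ == "__main__":
    updater = get_memory_updater(
        "gru", memory=None, message_dimension=32, memory_dimension=64, device="cpu"
    )
    messages = torch.randn(5, 32)   # one message per node
    memories = torch.randn(5, 64)   # current memory of the same 5 nodes
    print(updater.memory_updater(messages, memories).shape)  # expected: torch.Size([5, 64])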
|
the-stack_0_26638
|
import dataclasses
import queue
import time
from typing import List, Callable
import colorama
from colorama import Fore, Style
@dataclasses.dataclass
class Exercise:
name: str
reps: int
hold_time: float
rep_delay: float = 2
def _perform(self) -> None:
for i in range(self.reps):
print(
f"\tNow doing rep: {i + 1}, hold for {self.hold_time} seconds")
time.sleep(self.hold_time)
print("\tGet ready to do the next rep!")
time.sleep(self.rep_delay)
print(
f"\tExercise done! Press {Fore.GREEN}ENTER{Style.RESET_ALL} to continue."
)
def exec_buf(self) -> List[Callable[[], None]]:
return [
lambda: print(
f"Press {Fore.GREEN}ENTER{Style.RESET_ALL} when ready to "
f"do exercise {Fore.YELLOW}'{self.name}'{Style.RESET_ALL}"),
self._perform
]
def welcome_msg():
print(f"Press {Fore.GREEN}ENTER{Style.RESET_ALL} to begin")
def main():
colorama.init()
exercises = map(lambda name: Exercise(name, 10, 10),
["Push up", "Pull down", "Push left", "Push right"])
exercise_funcs = [
func for exercise in exercises for func in exercise.exec_buf()
]
exec_queue = queue.SimpleQueue()
exec_queue.put(welcome_msg)
for exercise_func in exercise_funcs:
exec_queue.put(exercise_func)
while not exec_queue.empty():
exec_queue.get()()
input()
if __name__ == "__main__":
main()
|
the-stack_0_26640
|
from asynctest import (
mock as async_mock,
TestCase as AsyncTestCase,
)
from ......messaging.request_context import RequestContext
from ......messaging.responder import MockResponder
from ......transport.inbound.receipt import MessageReceipt
from ...handlers import transaction_cancel_handler as handler
from ...messages.cancel_transaction import CancelTransaction
from ......connections.models.conn_record import ConnRecord
class TestTransactionCancelHandler(AsyncTestCase):
async def test_called(self):
request_context = RequestContext.test_context()
request_context.message_receipt = MessageReceipt()
with async_mock.patch.object(
handler, "TransactionManager", autospec=True
) as mock_tran_mgr:
mock_tran_mgr.return_value.receive_cancel_transaction = (
async_mock.CoroutineMock()
)
request_context.message = CancelTransaction()
request_context.connection_record = ConnRecord(
connection_id="b5dc1636-a19a-4209-819f-e8f9984d9897"
)
request_context.connection_ready = True
handler_inst = handler.TransactionCancelHandler()
responder = MockResponder()
await handler_inst.handle(request_context, responder)
mock_tran_mgr.return_value.receive_cancel_transaction.assert_called_once_with(
request_context.message, request_context.connection_record.connection_id
)
assert not responder.messages
async def test_called_not_ready(self):
request_context = RequestContext.test_context()
request_context.message_receipt = MessageReceipt()
request_context.connection_record = async_mock.MagicMock()
with async_mock.patch.object(
handler, "TransactionManager", autospec=True
) as mock_tran_mgr:
mock_tran_mgr.return_value.receive_cancel_transaction = (
async_mock.CoroutineMock()
)
request_context.message = CancelTransaction()
request_context.connection_ready = False
handler_inst = handler.TransactionCancelHandler()
responder = MockResponder()
with self.assertRaises(handler.HandlerException):
await handler_inst.handle(request_context, responder)
assert not responder.messages
|
the-stack_0_26643
|
import pytest
import pytest_check as check
from numbers_parser import Document, FormulaCell, ErrorCell
TABLE_1_FORMULAS = [
[None, "A1", "$B$1=1"],
[None, "A1+A2", "A$2&B2"],
[None, "A1×A2", "NOW()"],
[None, "A1-A2", "NOW()+0.1"],
[None, "A1÷A2", "$C4-C3"],
[None, "SUM(A1:A2)", "IF(A6>6,TRUE,FALSE)"],
[None, "MEDIAN(A1:A2)", "IF(A7>0,TRUE,FALSE)"],
[None, "AVERAGE(A1:A2)", "A8≠10"],
["A9", None, None],
]
TABLE_2_FORMULAS = [
[None, "A1&A2&A3"],
[None, "LEN(A2)+LEN(A3)"],
[None, "LEFT(A3,1)"],
[None, "MID(A4,2,2)"],
[None, "RIGHT(A5,2)"],
[None, 'FIND("_",A6)'],
[None, 'FIND("YYY",A7)'],
[None, 'IF(FIND("_",A8)>2,A1,A2)'],
[None, "100×(A9×2)%"],
[None, 'IF(A10<5,"smaller","larger")'],
[None, 'IF(A11≤5,"smaller","larger")'],
]
def compare_tables(table, ref):
for row_num in range(table.num_rows):
for col_num in range(table.num_cols):
if ref[row_num][col_num] is None:
check.is_none(
table.cell(row_num, col_num).formula,
f"!existsy@[{row_num},{col_num}]",
)
else:
check.is_not_none(
table.cell(row_num, col_num).formula,
f"exists@[{row_num},{col_num}]",
)
check.equal(
table.cell(row_num, col_num).formula,
ref[row_num][col_num],
f"formula@[{row_num},{col_num}]",
)
def test_table_functions():
doc = Document("tests/data/test-10.numbers")
sheets = doc.sheets()
table = sheets[0].tables()[0]
compare_tables(table, TABLE_1_FORMULAS)
table = sheets[1].tables()[0]
compare_tables(table, TABLE_2_FORMULAS)
|
the-stack_0_26645
|
from plumbum import local
from benchbuild.project import Project
from benchbuild.environments import container
from benchbuild.source import HTTP
from benchbuild.utils.cmd import make, tar
from benchbuild.utils import compiler, run, wrapping  # provides compiler.cc/cxx, run.watch and wrapping.wrap used below
class Python(Project):
""" python benchmarks """
NAME: str = 'python'
DOMAIN: str = 'compilation'
GROUP: str = 'benchbuild'
SOURCE = [
HTTP(remote={
'3.4.3':
'https://www.python.org/ftp/python/3.4.3/Python-3.4.3.tar.xz'
},
local='python.tar.xz')
]
CONTAINER = container.Buildah().from_('debian:buster-slim')
def compile(self):
python_source = local.path(self.source_of('python.tar.xz'))
python_version = self.version_of('python.tar.xz')
tar("xfJ", python_source)
unpack_dir = local.path(f'Python-{python_version}')
clang = compiler.cc(self)
clang_cxx = compiler.cxx(self)
with local.cwd(unpack_dir):
configure = local["./configure"]
configure = run.watch(configure)
with local.env(CC=str(clang), CXX=str(clang_cxx)):
configure("--disable-shared", "--without-gcc")
make_ = run.watch(make)
make_()
def run_tests(self):
python_version = self.version_of('python.tar.xz')
unpack_dir = local.path(f'Python-{python_version}')
wrapping.wrap(unpack_dir / "python", self)
with local.cwd(unpack_dir):
make_ = run.watch(make)
make_("-i", "test")
|
the-stack_0_26649
|
from __future__ import print_function, division
import os
import json
import time
from utils import command_parser
from utils.class_finder import model_class, agent_class
from main_eval import main_eval
from tqdm import tqdm
from tabulate import tabulate
from tensorboardX import SummaryWriter
os.environ["OMP_NUM_THREADS"] = "1"
def full_eval(args=None):
if args is None:
args = command_parser.parse_arguments()
create_shared_model = model_class(args.model)
init_agent = agent_class(args.agent_type)
args.phase = 'eval'
args.episode_type = 'TestValEpisode'
args.test_or_val = 'val'
start_time = time.time()
local_start_time_str = time.strftime(
'%Y_%m_%d_%H_%M_%S', time.localtime(start_time)
)
tb_log_dir = args.log_dir + "/" + args.title + '_' + args.phase + '_' + local_start_time_str
log_writer = SummaryWriter(log_dir=tb_log_dir)
# Get all valid saved_models for the given title and sort by train_ep.
checkpoints = [(f, f.split("_")) for f in os.listdir(args.save_model_dir)]
checkpoints = [
(f, int(s[-7]))
for (f, s) in checkpoints
if len(s) >= 4 and f.startswith(args.title) and int(s[-7]) >= args.test_start_from
]
checkpoints.sort(key=lambda x: x[1])
best_model_on_val = None
best_performance_on_val = 0.0
for (f, train_ep) in tqdm(checkpoints, desc="Checkpoints."):
# break
model = os.path.join(args.save_model_dir, f)
args.load_model = model
args.present_model =f
args.test_or_val = "test"
main_eval(args, create_shared_model, init_agent)
# check if best on val.
with open(args.results_json, "r") as f:
results = json.load(f)
if results["success"] > best_performance_on_val:
best_model_on_val = model
best_performance_on_val = results["success"]
log_writer.add_scalar("val/success", results["success"], train_ep)
log_writer.add_scalar("val/spl", results["spl"], train_ep)
# best models
# HOZ_38909040_3300000_2021_09_16_15_12_10.dat
# TPNHOZ_29208145_2500000_2021_09_16_15_12_33.dat
args.test_or_val = "test"
args.load_model = best_model_on_val
# args.load_model = "./trained_models/HOZ_38909040_3300000_2021_09_16_15_12_10.dat"
# args.load_model = "./trained_models/TPNHOZ_29208145_2500000_2021_09_16_15_12_33.dat"
main_eval(args, create_shared_model, init_agent)
with open(args.results_json, "r") as f:
results = json.load(f)
print(
tabulate(
[
["SPL >= 1:", results["GreaterThan/1/spl"]],
["Success >= 1:", results["GreaterThan/1/success"]],
["SPL >= 5:", results["GreaterThan/5/spl"]],
["Success >= 5:", results["GreaterThan/5/success"]],
],
headers=["Metric", "Result"],
tablefmt="orgtbl",
)
)
print("Best model:", args.load_model)
if __name__ == "__main__":
full_eval()
|
the-stack_0_26650
|
# from PIL import Image
# import glob, os
#
# size = 128, 128
#
# for infile in glob.glob("*.jpg"):
# file, ext = os.path.splitext(infile)
# im = Image.open(infile)
# im.thumbnail(size)
# im.save(file + ".thumbnail", "JPEG")
import os
import subprocess
for root, dirs, files in os.walk('images'):
images = []
for f in files:
if f.endswith('.jpg') and not '.thumbnail' in f:
if not os.path.exists(os.path.join(root, f.replace('.jpg', '.thumbnail.jpg'))):
images.append(os.path.join(root, f))
for f in images:
        outbase = f[:-4]  # simply remove '.jpg'
        out = outbase + '.thumbnail.jpg'
args = ['convert', f, '-scale', '250x250', out]
subprocess.call(args)
|
the-stack_0_26651
|
import _k4a
import _k4arecord
import _k4atypes
import record
from kinectBodyTracker import kinectBodyTracker, _k4abt
import numpy as np
import cv2
import sys
import ctypes
from config import config
import postProcessing
import platform
class pyKinectAzure:
def __init__(self,modulePath=None):
if modulePath is None:
if platform.system().lower() == 'linux':
modulePath = r'/usr/lib/x86_64-linux-gnu/libk4a.so'
else:
modulePath = 'C:\\Program Files\\Azure Kinect SDK v1.4.0\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll'
self.modulePath = modulePath
_k4a.k4a.setup_library(modulePath)
self.k4a = _k4a.k4a()
self.device_handle = _k4a.k4a_device_t()
self.capture_handle = _k4a.k4a_capture_t()
self.config = config()
self.imu_sample = _k4a.k4a_imu_sample_t()
self.cameras_running = False
self.imu_running = False
self.recording = False
def update(self):
# Get capture
self.device_get_capture()
# Write capture if recording
if self.recording:
self.write_frame()
def bodyTracker_start(self, bodyTrackerModulePath, modelType = _k4abt.K4ABT_DEFAULT_MODEL):
# Get depth sensor calibration
depthSensorCalibration = _k4a.k4a_calibration_t()
self.getDepthSensorCalibration(depthSensorCalibration)
# Initialize the body tracker
self.body_tracker = kinectBodyTracker(bodyTrackerModulePath, depthSensorCalibration, modelType)
def bodyTracker_update(self):
# Add capture to the body tracker processing queue
self.body_tracker.enqueue_capture(self.capture_handle)
# Perform body detection
self.body_tracker.detectBodies()
def bodyTracker_get_body_segmentation(self):
# Get the body segmentation image
body_image = self.image_convert_to_numpy(self.body_tracker.segmented_body_img).astype(np.uint8)
# Add color to the segmentation based on the id value of each pixel
body_image_color = np.dstack([cv2.LUT(body_image, _k4abt.body_colors[:,i]) for i in range(3)])
		# Return the colored body segmentation image
return body_image_color
def bodyTracker_project_skeleton(self, skeleton, dest_camera=None):
if dest_camera is None:
dest_camera = _k4a.K4A_CALIBRATION_TYPE_DEPTH
# Project using the calibration of the camera for the image
position_2d = _k4a.k4a_float2_t()
valid = ctypes.c_int()
skeleton2D = _k4abt.k4abt_skeleton2D_t()
for jointID,joint in enumerate(skeleton.joints):
_k4a.VERIFY(self.k4a.k4a_calibration_3d_to_2d(
self.body_tracker.sensor_calibration,
joint.position,
_k4a.K4A_CALIBRATION_TYPE_DEPTH,
dest_camera,
position_2d,
valid),
"Project skeleton failed")
skeleton2D.joints2D[jointID].position = position_2d
skeleton2D.joints2D[jointID].confidence_level = joint.confidence_level
return skeleton2D
def device_get_installed_count(self):
"""Gets the number of connected devices
Parameters:
None
Returns:
int: Number of sensors connected to the PC.
Remarks:
This API counts the number of Azure Kinect devices connected to the host PC.
"""
return int(self.k4a.k4a_device_get_installed_count())
def device_open(self, index=0):
"""Open an Azure Kinect device.
Parameters:
		index (int): The index of the device to open, starting with 0.
Returns:
None
Remarks:
If successful, k4a_device_open() will return a device handle in the device_handle parameter.
This handle grants exclusive access to the device and may be used in the other Azure Kinect API calls.
When done with the device, close the handle with k4a_device_close()
"""
_k4a.VERIFY(self.k4a.k4a_device_open(index,self.device_handle),"Open K4A Device failed!")
def device_close(self):
"""Closes an Azure Kinect device.
Parameters:
None
Returns:
None
Remarks:
Once closed, the handle is no longer valid.
Before closing the handle to the device, ensure that all captures have been released with
k4a_capture_release().
"""
self.k4a.k4a_device_close(self.device_handle)
def device_get_serialnum(self):
"""Get the Azure Kinect device serial number.
Parameters:
None
Returns:
A return of ::K4A_BUFFER_RESULT_SUCCEEDED means that the serial_number has been filled in. If the buffer is too
small the function returns ::K4A_BUFFER_RESULT_TOO_SMALL and the size of the serial number is
returned in the serial_number_size parameter. All other failures return ::K4A_BUFFER_RESULT_FAILED.
Remarks:
Queries the device for its serial number. If the caller needs to know the size of the serial number to allocate
memory, the function should be called once with a NULL serial_number to get the needed size in the
serial_number_size output, and then again with the allocated buffer.
Only a complete serial number will be returned. If the caller's buffer is too small, the function will return
::K4A_BUFFER_RESULT_TOO_SMALL without returning any data in serial_number.
"""
# First call to get the size of the buffer
serial_number_size = ctypes.c_size_t()
result = self.k4a.k4a_device_get_serialnum(self.device_handle, None, serial_number_size)
if result == _k4a.K4A_BUFFER_RESULT_TOO_SMALL:
serial_number = ctypes.create_string_buffer(serial_number_size.value)
_k4a.VERIFY(self.k4a.k4a_device_get_serialnum(self.device_handle,serial_number,serial_number_size),"Read serial number failed!")
return serial_number.value.decode("utf-8")
def device_start_cameras(self, device_config=None):
"""Starts color and depth camera capture.
Parameters:
device_config (k4a_device_configuration_t): The configuration we want to run the device in. This can be initialized with ::K4A_DEVICE_CONFIG_INIT_DEFAULT.
Returns:
None
Remarks:
Individual sensors configured to run will now start to stream captured data..
It is not valid to call k4a_device_start_cameras() a second time on the same k4a_device_t until
k4a_device_stop_cameras() has been called.
"""
if device_config is not None:
self.config = device_config
if not self.cameras_running:
_k4a.VERIFY(self.k4a.k4a_device_start_cameras(self.device_handle,self.config.current_config),"Start K4A cameras failed!")
self.cameras_running = True
def device_stop_cameras(self):
"""Stops the color and depth camera capture..
Parameters:
None
Returns:
None
Remarks:
The streaming of individual sensors stops as a result of this call. Once called, k4a_device_start_cameras() may
be called again to resume sensor streaming.
"""
if self.cameras_running:
self.k4a.k4a_device_stop_cameras(self.device_handle)
self.cameras_running = False
def device_start_imu(self):
"""Starts the IMU sample stream.
Parameters:
None
Returns:
None
Remarks:
Call this API to start streaming IMU data. It is not valid to call this function a second time on the same
k4a_device_t until k4a_device_stop_imu() has been called.
This function is dependent on the state of the cameras. The color or depth camera must be started before the IMU.
K4A_RESULT_FAILED will be returned if one of the cameras is not running.
"""
if self.cameras_running:
if not self.imu_running:
_k4a.VERIFY(self.k4a.k4a_device_start_imu(self.device_handle),"Start K4A IMU failed!")
self.imu_running = True
else:
print("\nTurn on cameras before running IMU.\n")
def device_stop_imu(self):
"""Stops the IMU capture.
Parameters:
None
Returns:
None
Remarks:
The streaming of the IMU stops as a result of this call. Once called, k4a_device_start_imu() may
be called again to resume sensor streaming, so long as the cameras are running.
This function may be called while another thread is blocking in k4a_device_get_imu_sample().
Calling this function while another thread is in that function will result in that function returning a failure.
"""
if self.imu_running:
self.k4a.k4a_device_stop_imu(self.device_handle)
self.imu_running = False
def device_get_capture(self, timeout_in_ms=_k4a.K4A_WAIT_INFINITE):
"""Reads a sensor capture.
		Parameters:
timeout_in_ms (int):Specifies the time in milliseconds the function should block waiting for the capture. If set to 0, the function will
return without blocking. Passing a value of #K4A_WAIT_INFINITE will block indefinitely until data is available, the
device is disconnected, or another error occurs.
Returns:
None
Remarks:
Gets the next capture in the streamed sequence of captures from the camera. If a new capture is not currently
available, this function will block until the timeout is reached. The SDK will buffer at least two captures worth
of data before dropping the oldest capture. Callers needing to capture all data need to ensure they read the data as
fast as the data is being produced on average.
Upon successfully reading a capture this function will return success and populate capture.
If a capture is not available in the configured timeout_in_ms, then the API will return ::K4A_WAIT_RESULT_TIMEOUT.
"""
if self.cameras_running:
_k4a.VERIFY(self.k4a.k4a_device_get_capture(self.device_handle,self.capture_handle,timeout_in_ms),"Get capture failed!")
def device_get_imu_sample(self, timeout_in_ms=_k4a.K4A_WAIT_INFINITE):
"""Reads an IMU sample.
		Parameters:
timeout_in_ms (int):Specifies the time in milliseconds the function should block waiting for the capture. If set to 0, the function will
return without blocking. Passing a value of #K4A_WAIT_INFINITE will block indefinitely until data is available, the
device is disconnected, or another error occurs.
Returns:
None
Remarks:
Gets the next sample in the streamed sequence of IMU samples from the device. If a new sample is not currently
available, this function will block until the timeout is reached. The API will buffer at least two camera capture
intervals worth of samples before dropping the oldest sample. Callers needing to capture all data need to ensure they
read the data as fast as the data is being produced on average.
Upon successfully reading a sample this function will return success and populate imu_sample.
If a sample is not available in the configured timeout_in_ms, then the API will return ::K4A_WAIT_RESULT_TIMEOUT.
"""
if self.imu_running:
_k4a.VERIFY(self.k4a.k4a_device_get_imu_sample(self.device_handle,self.imu_sample,timeout_in_ms),"Get IMU failed!")
def device_get_calibration(self, depth_mode, color_resolution,calibration):
"""Get the camera calibration for the entire Azure Kinect device.
Parameters:h
depth_mode(k4a_depth_mode_t): Mode in which depth camera is operated.
color_resolution(k4a_color_resolution_t): Resolution in which color camera is operated.
calibration(k4a_calibration_t):Location to write the calibration
Returns:
K4A_RESULT_SUCCEEDED if calibration was successfully written. ::K4A_RESULT_FAILED otherwise.
Remarks:
The calibration represents the data needed to transform between the camera views and may be
different for each operating depth_mode and color_resolution the device is configured to operate in.
The calibration output is used as input to all calibration and transformation functions.
"""
_k4a.VERIFY(self.k4a.k4a_device_get_calibration(self.device_handle,depth_mode,color_resolution,calibration),"Get calibration failed!")
def capture_get_color_image(self):
"""Get the color image associated with the given capture.
Parameters:
None
Returns:
k4a_image_t: Handle to the Image
Remarks:
Call this function to access the color image part of this capture. Release the ref k4a_image_t with
k4a_image_release();
"""
return self.k4a.k4a_capture_get_color_image(self.capture_handle)
def capture_get_depth_image(self):
"""Get the depth image associated with the given capture.
Parameters:
None
Returns:
k4a_image_t: Handle to the Image
Remarks:
Call this function to access the depth image part of this capture. Release the k4a_image_t with
k4a_image_release();
"""
return self.k4a.k4a_capture_get_depth_image(self.capture_handle)
def capture_get_ir_image(self):
"""Get the IR image associated with the given capture.
Parameters:
None
Returns:
k4a_image_t: Handle to the Image
Remarks:
Call this function to access the IR image part of this capture. Release the k4a_image_t with
k4a_image_release();
"""
return self.k4a.k4a_capture_get_ir_image(self.capture_handle)
def image_create(self,image_format,width_pixels,height_pixels,stride_bytes,image_handle):
"""Create an image.
Parameters:
image_format(k4a_image_format_t): The format of the image that will be stored in this image container.
width_pixels(int): Width in pixels.
height_pixels(int): Height in pixels.
stride_bytes(int): The number of bytes per horizontal line of the image.
If set to 0, the stride will be set to the minimum size given the format and width_pixels.
image_handle(k4a_image_t): Pointer to store image handle in.
Returns:
Returns #K4A_RESULT_SUCCEEDED on success. Errors are indicated with #K4A_RESULT_FAILED.
Remarks:
This function is used to create images of formats that have consistent stride. The function is not suitable for
compressed formats that may not be represented by the same number of bytes per line.
For most image formats, the function will allocate an image buffer of size height_pixels * stride_bytes.
Buffers #K4A_IMAGE_FORMAT_COLOR_NV12 format will allocate an additional height_pixels / 2 set of lines (each of
stride_bytes). This function cannot be used to allocate #K4A_IMAGE_FORMAT_COLOR_MJPG buffers.
"""
_k4a.VERIFY(self.k4a.k4a_image_create(image_format,width_pixels,height_pixels,stride_bytes,image_handle),"Create image failed!")
def image_get_buffer(self, image_handle):
"""Get the image buffer.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
ctypes.POINTER(ctypes.c_uint8): The function will return NULL if there is an error, and will normally return a pointer to the image buffer.
Since all k4a_image_t instances are created with an image buffer, this function should only return NULL if the
image_handle is invalid.
Remarks:
Use this buffer to access the raw image data.
"""
return self.k4a.k4a_image_get_buffer(image_handle)
def image_get_size(self, image_handle):
"""Get the image buffer size.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
int: The function will return 0 if there is an error, and will normally return the image size.
Since all k4a_image_t instances are created with an image buffer, this function should only return 0 if the
image_handle is invalid.
Remarks:
Use this function to know what the size of the image buffer is returned by k4a_image_get_buffer().
"""
return int(self.k4a.k4a_image_get_size(image_handle))
def image_get_format(self, image_handle):
"""Get the format of the image.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
int: This function is not expected to fail, all k4a_image_t's are created with a known format. If the
image_handle is invalid, the function will return ::K4A_IMAGE_FORMAT_CUSTOM.
Remarks:
Use this function to determine the format of the image buffer.
"""
return int(self.k4a.k4a_image_get_format(image_handle))
def image_get_width_pixels(self, image_handle):
"""Get the image width in pixels.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
		int: This function is not expected to fail, all k4a_image_t's are created with a known width. If the
		image_handle is invalid, the function will return 0.
"""
return int(self.k4a.k4a_image_get_width_pixels(image_handle))
def image_get_height_pixels(self, image_handle):
"""Get the image height in pixels.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
		int: This function is not expected to fail, all k4a_image_t's are created with a known height. If the
		image_handle is invalid, the function will return 0.
"""
return int(self.k4a.k4a_image_get_height_pixels(image_handle))
def image_get_stride_bytes(self,image_handle):
"""Get the image stride in bytes.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
int: This function is not expected to fail, all k4a_image_t's are created with a known stride. If the
image_handle is invalid, or the image's format does not have a stride, the function will return 0.
"""
return int(self.k4a.k4a_image_get_stride_bytes(image_handle))
def transformation_create(self, calibration):
"""Get handle to transformation handle.
Parameters:
calibration(k4a_calibration_t): A calibration structure obtained by k4a_device_get_calibration().
Returns:
k4a_transformation_t: A transformation handle. A NULL is returned if creation fails.
Remarks:
The transformation handle is used to transform images from the coordinate system of one camera into the other. Each
transformation handle requires some pre-computed resources to be allocated, which are retained until the handle is
destroyed.
The transformation handle must be destroyed with k4a_transformation_destroy() when it is no longer to be used.
"""
return self.k4a.k4a_transformation_create(calibration)
def transformation_destroy(self, transformation_handle):
"""Destroy transformation handle.
Parameters:
transformation_handle(k4a_transformation_t): Transformation handle to destroy.
Returns:
None
Remarks:
None
"""
self.k4a.k4a_transformation_destroy(transformation_handle)
def transform_depth_image_to_point_cloud(self, depth_image_handle: _k4a.k4a_image_t):
"""Transforms the depth image into a point cloud.
Parameters:
depth_image_handle (k4a_image_t): Handle to the Image
Returns:
point_cloud (k4a_image_t): Handle to point cloud
"""
calibration = _k4a.k4a_calibration_t()
self.getDepthSensorCalibration(calibration)
transformation_handle = self.transformation_create(calibration)
point_cloud = _k4atypes.k4a_image_t()
self.image_create(
_k4atypes.K4A_IMAGE_FORMAT_CUSTOM,
self.image_get_width_pixels(depth_image_handle),
self.image_get_height_pixels(depth_image_handle),
self.image_get_width_pixels(depth_image_handle) * 6,
point_cloud
)
_k4a.VERIFY(self.k4a.k4a_transformation_depth_image_to_point_cloud(
transformation_handle,
depth_image_handle,
_k4atypes.K4A_CALIBRATION_TYPE_DEPTH,
point_cloud
), "Error occurred while creating the point cloud")
return point_cloud
def transformation_depth_image_to_color_camera(self,transformation_handle,input_depth_image_handle, transformed_depth_image_handle):
"""Transforms the depth map into the geometry of the color camera.
Parameters:
transformation_handle (k4a_transformation_t): Transformation handle.
input_depth_image_handle (k4a_image_t): Handle to input depth image.
transformed_depth_image_handle (k4a_image_t): Handle to output transformed depth image.
Returns:
K4A_RESULT_SUCCEEDED if transformed_depth_image was successfully written and K4A_RESULT_FAILED otherwise.
Remarks:
This produces a depth image for which each pixel matches the corresponding pixel coordinates of the color camera.
transformed_depth_image must have a width and height matching the width and height of the color camera in the mode
specified by the k4a_calibration_t used to create the transformation_handle with k4a_transformation_create().
"""
_k4a.VERIFY(self.k4a.k4a_transformation_depth_image_to_color_camera(transformation_handle,input_depth_image_handle,transformed_depth_image_handle),"Transformation from depth to color failed!")
def image_convert_to_numpy(self, image_handle):
"""Get the image data as a numpy array
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
numpy.ndarray: Numpy array with the image data
"""
# Get the pointer to the buffer containing the image data
buffer_pointer = self.image_get_buffer(image_handle)
# Get the size of the buffer
image_size = self.image_get_size(image_handle)
image_width = self.image_get_width_pixels(image_handle)
image_height = self.image_get_height_pixels(image_handle)
# Get the image format
image_format = self.image_get_format(image_handle)
# Read the data in the buffer
buffer_array = np.ctypeslib.as_array(buffer_pointer,shape=(image_size,))
# Parse buffer based on image format
if image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_MJPG:
return cv2.imdecode(np.frombuffer(buffer_array, dtype=np.uint8), -1)
elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_NV12:
yuv_image = np.frombuffer(buffer_array, dtype=np.uint8).reshape(int(image_height*1.5),image_width)
return cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR_NV12)
elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_YUY2:
yuv_image = np.frombuffer(buffer_array, dtype=np.uint8).reshape(image_height,image_width,2)
return cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR_YUY2)
elif image_format == _k4a.K4A_IMAGE_FORMAT_COLOR_BGRA32:
return np.frombuffer(buffer_array, dtype=np.uint8).reshape(image_height,image_width,4)
elif image_format == _k4a.K4A_IMAGE_FORMAT_DEPTH16:
return np.frombuffer(buffer_array, dtype="<u2").reshape(image_height,image_width)  # little-endian 16-bit unsigned depth data
elif image_format == _k4a.K4A_IMAGE_FORMAT_IR16:
return np.frombuffer(buffer_array, dtype="<u2").reshape(image_height,image_width)  # little-endian 16-bit unsigned IR data. For more details see: https://microsoft.github.io/Azure-Kinect-Sensor-SDK/release/1.2.x/namespace_microsoft_1_1_azure_1_1_kinect_1_1_sensor_a7a3cb7a0a3073650bf17c2fef2bfbd1b.html
elif image_format == _k4a.K4A_IMAGE_FORMAT_CUSTOM8:
return np.frombuffer(buffer_array, dtype="<u1").reshape(image_height,image_width)
def transform_depth_to_color(self,input_depth_image_handle, color_image_handle):
calibration = _k4a.k4a_calibration_t()
# Get desired image format
image_format = self.image_get_format(input_depth_image_handle)
image_width = self.image_get_width_pixels(color_image_handle)
image_height = self.image_get_height_pixels(color_image_handle)
image_stride = 0
# Get the camera calibration
self.device_get_calibration(self.config.depth_mode,self.config.color_resolution,calibration)
# Create transformation
transformation_handle = self.transformation_create(calibration)
# Create the image handle
transformed_depth_image_handle = _k4a.k4a_image_t()
self.image_create(image_format,image_width,image_height,image_stride,transformed_depth_image_handle)
# Transform the depth image to the color image format
self.transformation_depth_image_to_color_camera(transformation_handle,input_depth_image_handle, transformed_depth_image_handle)
# Get transformed image data
transformed_image = self.image_convert_to_numpy(transformed_depth_image_handle)
# Close transformation
self.transformation_destroy(transformation_handle)
return transformed_image
def getDepthSensorCalibration(self, calibration):
self.device_get_calibration(self.config.depth_mode, self.config.color_resolution, calibration)
def image_release(self, image_handle):
"""Remove a reference from the k4a_image_t.
Parameters:
image_handle (k4a_image_t): Handle to the Image
Returns:
None
Remarks:
References manage the lifetime of the object. When the references reach zero the object is destroyed. A caller must
not access the object after its reference is released.
"""
self.k4a.k4a_image_release(image_handle)
def capture_release(self):
"""Release a capture.
Parameters:
None
Returns:
None
Remarks:
Call this function when finished using the capture.
"""
self.k4a.k4a_capture_release(self.capture_handle)
def get_imu_sample(self, timeout_in_ms=_k4a.K4A_WAIT_INFINITE):
# Get the sample from the device
self.device_get_imu_sample(timeout_in_ms)
# Read the raw data from the buffer pointer
buffer_array = np.array(np.ctypeslib.as_array(self.imu_sample,shape=(_k4a.IMU_SAMPLE_SIZE,)).tolist())
imu_results = self.imu_results
imu_results.temperature = buffer_array[0]
imu_results.acc_sample = buffer_array[1][1]
imu_results.acc_timestamp_usec = buffer_array[2]
imu_results.gyro_sample = buffer_array[3][1]
imu_results.gyro_timestamp_usec = buffer_array[4]
return imu_results
def start_recording(self, filepath="output.mkv"):
self.record = record.record(self.modulePath, self.device_handle, self.config.current_config, filepath)
self.recording = True
def stop_recording(self):
self.record = None
self.recording = False
def write_frame(self):
self.record.write_capture(self.capture_handle)
class imu_results:
def __init__(self):
self.temperature = None
self.acc_sample = None
self.acc_timestamp_usec = None
self.gyro_sample = None
self.gyro_timestamp_usec = None
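# Hedged usage sketch (not part of the original module): how the wrapper methods
# above might be strung together. Here "device" stands for whatever instance of
# this wrapper class the caller created, and "depth_handle"/"color_handle" are
# image handles pulled from a capture elsewhere in the module; all three names
# are placeholders for illustration only.
def _example_depth_pipeline(device, depth_handle, color_handle):
    # Raw depth as an HxW uint16 numpy array (millimetres for DEPTH16 frames).
    depth_np = device.image_convert_to_numpy(depth_handle)
    # Depth re-projected into the color camera geometry, returned as numpy.
    aligned_depth = device.transform_depth_to_color(depth_handle, color_handle)
    # Depth converted into an XYZ point-cloud image handle.
    point_cloud_handle = device.transform_depth_image_to_point_cloud(depth_handle)
    # Release the handles and the capture once finished with them.
    device.image_release(depth_handle)
    device.image_release(color_handle)
    device.capture_release()
    return depth_np, aligned_depth, point_cloud_handle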
|
the-stack_0_26653
|
import re
from collections import Counter
def count_words(sentence):
pattern = re.compile(
"'+[_,;:.!?&@$%^\\s]+'*|[_,;:.!?&@$%^\\s]+'+|[_,;:.!?&@$%^\\s]+|'+$|^'+")
words = [word for word in pattern.split(sentence.lower()) if word]
return Counter(words)
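# Hedged usage sketch (not part of the original module): a quick check of how the
# regex split plus Counter behaves on a small sentence.
if __name__ == "__main__":
    counts = count_words("Don't stop, don't STOP!")
    # Apostrophes inside words are kept, so "don't" counts twice.
    assert counts["don't"] == 2
    assert counts["stop"] == 2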
|
the-stack_0_26656
|
#!/usr/bin/env python
import math
import time
class WatchedProcess:
""" MINIMAL wrapper around multiprocessing.Process so we can more easily track/time them. """
def __init__(self, proc):
self.process = proc
self.state = 0 # 0=not-yet-started; 1=started; 2=finished-or-terminated
self._start_time = None
def start_process(self):
if self.state:
raise RuntimeError("Already started: " + str(self.process))
self._start_time = time.time()
self.process.start()
self.state = 1
def join_process(self):
if self.state < 1:
raise RuntimeError("Not started: " + str(self.process))
self.process.join()
self.state = 2
def time_since_started(self):
if self.state <= 0:
raise RuntimeError("Not yet started: " + str(self.process))
return time.time() - self._start_time
def __repr__(self):
return "WatchedProcess for: "+str(self.process)+', state='+str(self.state)
def launch_and_wait(mp_proc_list, pool_size):
""" Given a list of multiprocessing.Process objects which have not yet
been started, this function launches them and blocks until the last
finishes. This makes sure that only <pool_size> processes are ever
working at any one time (this number does not include the main process
which called this function, since that will not tax the CPU).
The idea here is roughly analogous to multiprocessing.Pool
with the exceptions that:
1 - The caller will get to use the multiprocessing.Process model of
using shared memory (inheritance) to pass arg data to the child,
2 - maxtasksperchild is always 1,
3 - no function return value is kept/transferred (not yet implemented)
"""
# Sanity check
if len(mp_proc_list) < 1:
return
# Create our own list with easy state watching
procs = []
for p in mp_proc_list:
procs.append(WatchedProcess(p))
# Launch all of them, but only so pool_size are running at any time
keep_going = True
while (keep_going):
# Before we start any more, find out how many are running. First go
# through the list of those started and see if alive. Update state.
for p in procs:
if p.state == 1: # been started
if not p.process.is_alive():
p.state = 2 # process has finished or been terminated
if p.process.exitcode is None:
raise RuntimeError(
"Process is not alive but has no exitcode? " +
str(p.process))
# now figure num_running
num_running = len([p for p in procs if p.state == 1])
# Start some. Only as many as pool_size should ever be running.
num_avail_cpus = pool_size - num_running
num_to_start = len([p for p in procs if p.state == 0])
if num_to_start < 1:
# all have been started, can finally leave loop and go wait
break
if num_avail_cpus > 0 and num_to_start > 0:
num_to_start_now = min(num_avail_cpus, num_to_start)
started_now = 0
for p in procs:
if started_now < num_to_start_now and p.state == 0:
p.start_process()
# debug "launch_and_wait: started: "+str(p.process)
started_now += 1
# else: otherwise, all cpus are in use, just wait ...
# sleep to tame loop activity, but also must sleep a bit after each
# start call so that the call to is_alive() works correctly
time.sleep(1)
# Out of the launching loop, can now wait on all procs left.
for p in procs:
p.join_process()
# Check all exit codes before returning
for p in procs:
if 0 != p.process.exitcode:
raise RuntimeError("Problem during: "+str(p.process.name)+ \
', exitcode: '+str(p.process.exitcode)+'. Check log.')
# all is well, can return
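# Hedged usage sketch (not part of the original module): how a caller might hand
# un-started multiprocessing.Process objects to launch_and_wait(). The worker and
# driver names below are made up for illustration only.
def _example_worker(n):
    """Toy CPU-bound task used only for this sketch."""
    sum(i * i for i in range(n))

def _example_launch(pool_size=2):
    import multiprocessing
    procs = [multiprocessing.Process(target=_example_worker, args=(100000,))
             for _ in range(4)]
    # Blocks until all four processes finish, never running more than
    # pool_size of them at the same time.
    launch_and_wait(procs, pool_size)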
def best_tile_layout(pool_size):
""" Determine and return the best layout of "tiles" for fastest
overall parallel processing of a rectangular image broken up into N
smaller equally-sized rectangular tiles, given as input the number
of processes/chunks which can be run/worked at the same time (pool_size).
This attempts to return a layout whose total number of tiles is as
close as possible to pool_size, without going over (and thus not
really taking advantage of pooling). Since we can vary the
size of the rectangles, there is not much (any?) benefit to pooling.
Returns a tuple of ( <num tiles in X dir>, <num in Y direction> )
This assumes the image in question is relatively close to square, and
so the returned tuple attempts to give a layout which is as
squarishly-blocked as possible, except in cases where speed would be
sacrificed.
EXAMPLES:
For pool_size of 4, the best result is 2x2.
For pool_size of 6, the best result is 2x3.
For pool_size of 5, a result of 1x5 is better than a result of
2x2 (which would leave one core unused), and 1x5 is also better than
a result of 2x3 (which would require one core to work twice while all
others wait).
For higher, odd pool_size values (say 39), it is deemed best to
sacrifice a few unused cores to satisfy our other constraints, and thus
the result of 6x6 is best (giving 36 tiles and 3 unused cores).
"""
# Easy answer sanity-checks
if pool_size < 2:
return (1, 1)
# Next, use a small mapping of hard-coded results. While we agree
# that many of these are unlikely pool_size values, they are easy
# to accommodate.
mapping = { 0:(1,1), 1:(1,1), 2:(1,2), 3:(1,3), 4:(2,2), 5:(1,5),
6:(2,3), 7:(2,3), 8:(2,4), 9:(3,3), 10:(2,5), 11:(2,5),
14:(2,7), 18:(3,6), 19:(3,6), 28:(4,7), 29:(4,7),
32:(4,8), 33:(4,8), 34:(4,8), 40:(4,10), 41:(4,10) }
if pool_size in mapping:
return mapping[pool_size]
# Next, take a guess using the square root and (for the sake of
# simplicity), go with it. We *could* get much fancier here...
# Use floor-rounding (not ceil) so that the total number of resulting
# tiles is <= pool_size.
xnum = int(math.sqrt(pool_size))
ynum = int((1.*pool_size)/xnum)
return (xnum, ynum)
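# Illustration (not part of the original module): the layouts described in the
# best_tile_layout() docstring, checked directly.
if __name__ == "__main__":
    assert best_tile_layout(4) == (2, 2)
    assert best_tile_layout(6) == (2, 3)
    assert best_tile_layout(5) == (1, 5)
    # 39 is not in the hard-coded mapping, so the square-root fallback gives 6x6
    # (36 tiles, leaving 3 cores unused).
    assert best_tile_layout(39) == (6, 6)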
|
the-stack_0_26661
|
# Copyright © 2020 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File processing rules and actions for the incorporation of a business."""
import copy
from contextlib import suppress
from http import HTTPStatus
from typing import Dict
import requests
import sentry_sdk
from entity_queue_common.service_utils import QueueException
from flask import current_app
from legal_api.models import Business, Document, Filing, RegistrationBootstrap
from legal_api.models.document import DocumentType
from legal_api.services.bootstrap import AccountService
from entity_filer.filing_processors.filing_components import aliases, business_info, business_profile, shares
from entity_filer.filing_processors.filing_components.offices import update_offices
from entity_filer.filing_processors.filing_components.parties import update_parties
def get_next_corp_num(legal_type: str):
"""Retrieve the next available sequential corp-num from COLIN."""
try:
# TODO: update this to grab the legal 'class' after legal classes have been defined in lear
if legal_type == Business.LegalTypes.BCOMP.value:
business_type = 'BC'
else:
business_type = legal_type
resp = requests.post(f'{current_app.config["COLIN_API"]}/{business_type}')
except requests.exceptions.ConnectionError:
current_app.logger.error(f'Failed to connect to {current_app.config["COLIN_API"]}')
return None
if resp.status_code == 200:
new_corpnum = int(resp.json()['corpNum'])
if new_corpnum and new_corpnum <= 9999999:
# TODO: Fix endpoint
return f'{business_type}{new_corpnum:07d}'
return None
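# Worked illustration (not part of the original module): the f-string above zero-pads
# the COLIN number to seven digits behind the business-type prefix, e.g.
#   f'BC{42:07d}'      -> 'BC0000042'
#   f'BC{1234567:07d}' -> 'BC1234567'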
def update_affiliation(business: Business, filing: Filing):
"""Create an affiliation for the business and remove the bootstrap."""
try:
bootstrap = RegistrationBootstrap.find_by_identifier(filing.temp_reg)
rv = AccountService.create_affiliation(
account=bootstrap.account,
business_registration=business.identifier,
business_name=business.legal_name,
corp_type_code=business.legal_type
)
if rv not in (HTTPStatus.OK, HTTPStatus.CREATED):
deaffiliation = AccountService.delete_affiliation(bootstrap.account, business.identifier)
sentry_sdk.capture_message(
f'Queue Error: Unable to affiliate business:{business.identifier} for filing:{filing.id}',
level='error'
)
else:
# flip the registration
# recreate the bootstrap, but point to the new business in the name
old_bs_affiliation = AccountService.delete_affiliation(bootstrap.account, bootstrap.identifier)
new_bs_affiliation = AccountService.create_affiliation(
account=bootstrap.account,
business_registration=bootstrap.identifier,
business_name=business.identifier,
corp_type_code='TMP'
)
reaffiliate = bool(new_bs_affiliation in (HTTPStatus.OK, HTTPStatus.CREATED)
and old_bs_affiliation == HTTPStatus.OK)
if rv not in (HTTPStatus.OK, HTTPStatus.CREATED) \
or ('deaffiliation' in locals() and deaffiliation != HTTPStatus.OK)\
or ('reaffiliate' in locals() and not reaffiliate):
raise QueueException
except Exception as err:  # pylint: disable=broad-except; report any exception to Sentry, but don't fail the call
sentry_sdk.capture_message(
f'Queue Error: Affiliation error for filing:{filing.id}, with err:{err}',
level='error'
)
def _update_cooperative(incorp_filing: Dict, business: Business, filing: Filing):
cooperative_obj = incorp_filing.get('cooperative', None)
if cooperative_obj:
business.association_type = cooperative_obj.get('cooperativeAssociationType')
document = Document()
document.type = DocumentType.COOP_RULES.value
document.file_key = cooperative_obj.get('rulesFileKey')
document.file_name = cooperative_obj.get('rulesFileName')
document.content_type = document.file_name.split('.')[-1]
document.business_id = business.id
document.filing_id = filing.id
business.documents.append(document)
document = Document()
document.type = DocumentType.COOP_MEMORANDUM.value
document.file_key = cooperative_obj.get('memorandumFileKey')
document.file_name = cooperative_obj.get('memorandumFileName')
document.content_type = document.file_name.split('.')[-1]
document.business_id = business.id
document.filing_id = filing.id
business.documents.append(document)
return business
def process(business: Business, filing: Dict, filing_rec: Filing): # pylint: disable=too-many-branches
"""Process the incoming incorporation filing."""
# Extract the filing information for incorporation
incorp_filing = filing.get('filing', {}).get('incorporationApplication')
is_correction = filing_rec.filing_type == 'correction'
if not incorp_filing:
raise QueueException(f'IA legal_filing:incorporationApplication missing from {filing_rec.id}')
if business and not is_correction:
raise QueueException(f'Business Already Exists: IA legal_filing:incorporationApplication {filing_rec.id}')
business_info_obj = incorp_filing.get('nameRequest')
if is_correction:
business_info.set_legal_name(business.identifier, business, business_info_obj)
else:
if filing_rec.colin_event_ids:
corp_num = filing['filing']['business']['identifier']
else:
# Reserve the Corp Number for this entity
corp_num = get_next_corp_num(business_info_obj['legalType'])
if not corp_num:
raise QueueException(
f'incorporationApplication {filing_rec.id} unable to get a business registration number.')
# Initial insert of the business record
business = Business()
business = business_info.update_business_info(corp_num, business, business_info_obj, filing_rec)
business = _update_cooperative(incorp_filing, business, filing_rec)
if not business:
raise QueueException(f'IA incorporationApplication {filing_rec.id}, Unable to create business.')
if offices := incorp_filing['offices']:
update_offices(business, offices)
if parties := incorp_filing.get('parties'):
update_parties(business, parties)
if share_structure := incorp_filing.get('shareStructure'):
shares.update_share_structure(business, share_structure)
if name_translations := incorp_filing.get('nameTranslations'):
aliases.update_aliases(business, name_translations)
if not is_correction and not filing_rec.colin_event_ids:
# Update the filing json with identifier and founding date.
ia_json = copy.deepcopy(filing_rec.filing_json)
ia_json['filing']['business']['identifier'] = business.identifier
ia_json['filing']['business']['foundingDate'] = business.founding_date.isoformat()
filing_rec._filing_json = ia_json # pylint: disable=protected-access; bypass to update filing data
return business, filing_rec
def post_process(business: Business, filing: Filing):
"""Post processing activities for incorporations.
THIS SHOULD NOT ALTER THE MODEL
"""
with suppress(IndexError, KeyError, TypeError):
if err := business_profile.update_business_profile(
business,
filing.json['filing']['incorporationApplication']['contactPoint']
):
sentry_sdk.capture_message(
f'Queue Error: Update Business for filing:{filing.id}, error:{err}',
level='error')
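# Hedged illustration (not part of the original module): a minimal sketch of the
# filing payload shape that process() reads above. The key names come from the
# lookups in this file; every value below is an invented placeholder.
EXAMPLE_INCORPORATION_FILING = {
    'filing': {
        'business': {'identifier': 'Txxxxxxx'},      # placeholder temporary identifier
        'incorporationApplication': {
            'nameRequest': {'legalType': 'XX'},      # placeholder; drives get_next_corp_num()
            'offices': {},                           # passed to update_offices() when non-empty
            'parties': [],                           # passed to update_parties() when non-empty
            'shareStructure': {},                    # passed to shares.update_share_structure() when non-empty
            'nameTranslations': [],                  # passed to aliases.update_aliases() when non-empty
            # 'cooperative': {...},                  # optional block handled by _update_cooperative()
        },
    },
}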
|
the-stack_0_26662
|
"""Converts media files in specified sub directories of parent_dir to x265 video and opus audio. Keeps only the
first video and audio stream found. Doesn't attempt to retain subtitles or attachment streams. Attempts to
calculate the bits/pixel of each file and uses a user-specified CRF quality for each bits/pixel interval, otherwise
uses a user-specified default CRF and audio bitrate & channel setup. Prints helpful stats and can save a log
file to each sub directory."""
import datetime
import os
import sys
import time
from tympeg import MediaConverter, makeMediaObjectsInDirectory, calc_bits_per_pixel, split_ext, get_dir_size
# This will convert all files in /media/folder1 and /media/folder2 (non-recursive) and will place a log file in each folder
# parent_dir = '/media/'
# dirs_to_convert = ['folder1', 'folder2']
speed = 'veryfast' # Reminder: this is x265, don't expect x264 speeds
log_file = True
# Quality intervals for quality dicts: X & Y are bits/pixel thresholds; a, b, & c are crfs corresponding to intervals
# Bits/pixel X Y
# <----------------](----------](----------->
# CRF a b c
# Upper bounds are always inclusive, lower bounds are exclusive
# Some example quality dicts. They can be any number of intervals, but threshold[0] == 0, and each entry
# except 'default' must be equal lengths. see save_bits_per_pixel_dist() in tools.py for help visualizing bits/pixel
# distribution for defining your own intervals
s = 'stereo'
m = 'mono'
qualities_HQ = { # HQ
'threshold': [0, 0.08, 0.11],
'video': [25, 23, 20],
'audio': [(64, m), (96, m), (128, s)],
'default': [23, (96, s)]} # default to stereo here 96k
qualities_LQ = {
'threshold': [0, 0.10, 0.14],
'video': [27, 25, 23],
'audio': [(64, m), (82, m), (96, m)],
'default': [23, (96, s)]}
qualities_HQ_high_min = {
'threshold': [0, 0.08, 0.11],
'video': [23, 21, 20],
'audio': [(96, s), (96, s), (128, s)],
'default': [23, (96, s)]} # default to stereo here 96k
qualities = qualities_HQ_high_min
class PrintLogger:
"""
Simple class that can write and print the same string.
"""
def __init__(self, log_file_path, logging):
self.log_file_path = log_file_path
self.logging = logging
def pl(self, st):
print(st)
if self.logging:
with open(self.log_file_path, 'a', encoding='utf8') as log:
log.write(st + "\n")
log.close()
def convert_folder_x265(dir_path, qualities, speed, autodelete=False, log=True):
"""
Does the converting of sub directories. A lot of the stuff in here is for reporting stats back to user/log.
:param dir_path: string, path to directory
:param qualities: dict, qualities dict, see note at top of file
:param speed: str, x265 speed parameter
:param autodelete: bool, sets if original files should be deleted after conversion
:param log: bool, True writes log file into directory, False doesn't
:return:
"""
# declare some media arguments
codec = 'x265'
# start the log
sep = "{:-^60}".format('--')
now = datetime.datetime.now()
log_file_name = 'converter_log.txt'
log_file_path = os.path.join(dir_path, log_file_name)
lo = PrintLogger(log_file_path, log)
lo.pl("Log of \"{}\" conversion".format(dir_path))
lo.pl("Run on: {}".format(now.strftime("%Y %b %d")))
lo.pl("Started at: {}".format(now.strftime("%I:%M:%S %p")))
lo.pl(sep)
# Figure out what files need to be converted to h265
all_files = makeMediaObjectsInDirectory(dir_path)
files_to_move = []
for media in all_files:
if media.videoCodec != 'hevc':
files_to_move.append(media)
# move files
original_files_dir = os.path.join(dir_path, 'original_files')
# if len(files_to_move) == 0:
# print("\n\nNo files to convert in {}, breaking...\n\n".format(dir_path))
# return
if not os.path.isdir(original_files_dir):
os.mkdir(original_files_dir)
for media in files_to_move:
try:
os.rename(media.filePath, os.path.join(original_files_dir, media.fileName))
except FileExistsError:
lo.pl("\nFile: {}\n\tAlready exists! Skipping this one...".format(media.filePath))
continue
# convert files
files_to_convert = makeMediaObjectsInDirectory(original_files_dir)
# print(original_files_dir)
# print(files_to_convert)
output_file_size = 0
input_file_size = 0
count = 1
time_start = time.time()
total_files = len(files_to_convert)
total_input_size = get_dir_size(original_files_dir)/1000000
for media in files_to_convert:
video_rate, audio_rate, channels = decide_quality(qualities, media)
name, ext = split_ext(media.fileName)
output_file_path = os.path.join(dir_path, name + '.mkv')
media_size = media.file_size/1000000 # MB
now = datetime.datetime.now()
lo.pl("\nBeginning to convert file {} of {}:".format(count, total_files))
lo.pl("\t{}".format(media.fileName))
lo.pl("\tFile is {:0,.2f} MB".format(media_size))
lo.pl("\tFile bits/pixel: {:0,.4f}".format(calc_bits_per_pixel(media)))
lo.pl("\tVideo quality of {} and audio rate of {} kb/s".format(video_rate, audio_rate))
lo.pl("\tStarted at {}\n".format(now.strftime("%I:%M:%S %p")))
if os.path.isfile(output_file_path):
lo.pl("Output file already exists!!! Skipping...")
lo.pl("{:{align}{width}}".format("------- DONE -------", align='^', width=len(sep)))
count += 1
total_input_size -= media_size
continue
lo.pl("\t...converting...")
print("Using profile")
cvt = MediaConverter(media, output_file_path)
audio = True
video = True
try:
cvt.createVideoStream(codec, 'crf', video_rate, speed)
except IndexError:
lo.pl("NO VIDEO FOUND")
video = False
try:
cvt.createAudioStream(media.audioStreams[0], 'opus', audioBitrate=audio_rate, audioChannels=channels)
except IndexError:
lo.pl("NO AUDIO FOUND")
audio = False
if not audio and not video:
print("\nNo audio or video found, skipping...")
continue
sec = cvt.convert()
end = time.time()
output_file_size += os.path.getsize(output_file_path)/1000000
input_file_size += media_size
minutes = sec/60
input_rate = media_size/minutes
avg_rate = input_file_size/((end - time_start)/60)
eta_hours, eta_min = divmod(round((total_input_size - input_file_size)/avg_rate, 0), 60)
if autodelete:
if os.path.getsize(media.filePath) > os.path.getsize(output_file_path):
os.remove(media.filePath)
else:
print("Output size is larger than input size!")
lo.pl('\nCompleted file {0} of {1} at {2} in {3:,.2f} min'.format(count, total_files,
now.strftime("%I:%M:%S %p"), minutes))
lo.pl('Completed file at input rate of: {0:,.2f} MB/min'.format(input_rate))
lo.pl('Average rate of: {0:,.2f} MB/min so far'.format(avg_rate))
lo.pl('Estimated time for remaining files: {0}:{1}'.format(int(eta_hours), int(eta_min)))
lo.pl('Total input converted: {0:,.2f} MB of {1:,.2f} MB'.format(input_file_size, total_input_size))
lo.pl('Total output size: {0:,.2f} MB'.format(output_file_size))
lo.pl('Output/Input ratio: {0:,.3f}\n'.format(output_file_size/input_file_size))
lo.pl(sep)
count += 1
if autodelete:
try:
os.rmdir(original_files_dir)
except OSError:
print("{} could not be removed. Most likely because a file wasn't converted because "
"it already exists in the parent directory and the original file is present "
"for your review. An input file could be smaller than an output file, in which "
"case deletion is not done.".format(original_files_dir))
lo.pl("{:{align}{width}}".format("------- DONE -------", align='^', width=len(sep)))
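# Hedged usage sketch (not part of the original script): how the module-level
# settings above (speed, log_file, qualities) might drive convert_folder_x265()
# over the commented-out parent_dir / dirs_to_convert example near the top of
# the file. The paths here are placeholders only.
def _example_run():
    parent_dir = '/media/'                    # placeholder
    dirs_to_convert = ['folder1', 'folder2']  # placeholder
    for d in dirs_to_convert:
        convert_folder_x265(os.path.join(parent_dir, d), qualities, speed,
                            autodelete=False, log=log_file)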
def decide_quality(qualities, media_object):
"""Chooses the crf quality of the video as well as the bitrate and channels of the audio files from the
supplied qualities dict.
:param qualities: dict, see notes at top of file
:param media_object: MediaObject
:return: Int, crf level
Int or Float, audio bitrate
Int, audio channels
"""
q = qualities
bits_pixel = calc_bits_per_pixel(media_object)
# Making sure qualities is valid
n = len(q['threshold'])
if (len(q['video']) != n) or (len(q['audio']) != n):
print("\n\nYour qualities variable isn't set up correctly!")
print("'threshold', 'video', and audio values need to have equal length.")
print("Additionally, 'threshold'[0] needs to be 0")
print("Exiting...")
sys.exit()
# Set defaults up front
crf = q['default'][0]
audio_bitrate = q['default'][1][0]
audio_channels = q['default'][1][1]
if bits_pixel <= 0: # Print warning if it looks like defaults will be used
print("Unable to calculate bits per pixel, defaulting to: "
"crf = {}, audio = {}k, channels = {}".format(crf, audio_bitrate, audio_channels))
for x in range(0, n):
if bits_pixel > q['threshold'][x]:
crf = q['video'][x]
audio_bitrate = q['audio'][x][0]
audio_channels = q['audio'][x][1]
return crf, audio_bitrate, audio_channels
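# Worked illustration (not part of the original script): how the interval rule in
# decide_quality() maps bits/pixel onto the qualities_HQ table above. The selection
# loop is repeated here on a plain number so it can be checked without a MediaObject.
def _example_quality_pick(q, bits_pixel):
    crf = q['default'][0]
    audio_bitrate, audio_channels = q['default'][1]
    for x in range(len(q['threshold'])):
        if bits_pixel > q['threshold'][x]:
            crf = q['video'][x]
            audio_bitrate, audio_channels = q['audio'][x]
    return crf, audio_bitrate, audio_channels

if __name__ == "__main__":
    assert _example_quality_pick(qualities_HQ, 0.05) == (25, 64, 'mono')     # falls in (0, 0.08]
    assert _example_quality_pick(qualities_HQ, 0.09) == (23, 96, 'mono')     # falls in (0.08, 0.11]
    assert _example_quality_pick(qualities_HQ, 0.20) == (20, 128, 'stereo')  # above 0.11
    assert _example_quality_pick(qualities_HQ, 0.0) == (23, 96, 'stereo')    # default when bits/pixel unknown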
|
the-stack_0_26664
|
import argparse
import os
import requests
import zipfile
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, List, Any
import datetime
import rockset # type: ignore[import]
import boto3 # type: ignore[import]
PYTORCH_REPO = "https://api.github.com/repos/pytorch/pytorch"
GITHUB_TOKEN = os.environ["GITHUB_TOKEN"]
REQUEST_HEADERS = {
"Accept": "application/vnd.github.v3+json",
"Authorization": "token " + GITHUB_TOKEN,
}
S3_RESOURCE = boto3.resource("s3")
TEMP_DIR = Path(os.environ["RUNNER_TEMP"]) / "tmp-test-stats"
def parse_xml_report(report: Path, workflow_id: int) -> List[Dict[str, Any]]:
"""Convert a test report xml file into a JSON-serializable list of test cases."""
# Retrieve the job id from the report path. In our GHA workflows, we append
# the job id to the end of the report name, so `report` looks like:
# unzipped-test-reports-foo_5596745227/test/test-reports/foo/TEST-foo.xml
# and we want to get `5596745227` out of it.
job_id = int(report.parts[0].rpartition("_")[2])
print(f"Parsing test report: {report}, job id: {job_id}")
root = ET.parse(
report,
ET.XMLParser(target=ET.TreeBuilder(insert_comments=True)), # type: ignore[call-arg]
)
test_cases = []
for test_case in root.findall("testcase"):
case = process_xml_element(test_case)
case["workflow_id"] = workflow_id
case["job_id"] = job_id
test_cases.append(case)
return test_cases
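# Hedged illustration (not part of the original script): the job-id extraction
# described in the comment above, checked on the made-up report path from that
# comment.
def _example_job_id() -> int:
    report = Path("unzipped-test-reports-foo_5596745227/test/test-reports/foo/TEST-foo.xml")
    job_id = int(report.parts[0].rpartition("_")[2])
    assert job_id == 5596745227
    return job_id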
def process_xml_element(element: ET.Element) -> Dict[str, Any]:
"""Convert a test suite element into a JSON-serializable dict."""
ret: Dict[str, Any] = {}
# Convert attributes directly into dict elements.
# e.g.
# <testcase name="test_foo" classname="test_bar"></testcase>
# becomes:
# {"name": "test_foo", "classname": "test_bar"}
ret.update(element.attrib)
# By default, all attributes are strings. Apply a few special conversions
# here for well-known attributes so that they are the right type in Rockset.
if line := ret.get("line"):
ret["line"] = int(line)
if time := ret.get("time"):
ret["time"] = float(time)
if timestamp := ret.get("timestamp"):
# Timestamps reported are not valid ISO8601 because they have no timezone. Add one.
# This assumes that the reported timestamp is in the machine's local timezone, which is what astimezone() attaches to a naive datetime.
ret["timestamp"] = (
datetime.datetime.fromisoformat(timestamp).astimezone().isoformat()
)
# Convert inner and outer text into special dict elements.
# e.g.
# <testcase>my_inner_text</testcase> my_tail
# becomes:
# {"text": "my_inner_text", "tail": " my_tail"}
if element.text and element.text.strip():
ret["text"] = element.text
if element.tail and element.tail.strip():
ret["tail"] = element.tail
# Convert child elements recursively, placing them at a key:
# e.g.
# <testcase>
# <foo>hello</foo>
# </testcase>
# becomes
# {"foo": {"text": "hello"}}
for child in element:
# Special handling for comments.
if child.tag is ET.Comment: # type: ignore[comparison-overlap]
ret["comment"] = child.text
else:
ret[child.tag] = process_xml_element(child)
return ret
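# Hedged illustration (not part of the original script): what process_xml_element()
# produces for a tiny hand-written <testcase>, mirroring the comments above.
def _example_flatten() -> Dict[str, Any]:
    element = ET.fromstring('<testcase name="test_foo" classname="test_bar" time="0.01">hello</testcase>')
    flattened = process_xml_element(element)
    # Attributes become keys, "time" is coerced to float, inner text lands under "text".
    assert flattened == {"name": "test_foo", "classname": "test_bar", "time": 0.01, "text": "hello"}
    return flattened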
def get_artifact_urls(workflow_run_id: int) -> Dict[Path, str]:
"""Get all workflow artifacts with 'test-report' in the name."""
response = requests.get(
f"{PYTORCH_REPO}/actions/runs/{workflow_run_id}/artifacts?per_page=100",
headers=REQUEST_HEADERS,
)
artifacts = response.json()["artifacts"]
while "next" in response.links.keys():
response = requests.get(response.links["next"]["url"], headers=REQUEST_HEADERS)
artifacts.extend(response.json()["artifacts"])
artifact_urls = {}
for artifact in artifacts:
if "test-report" in artifact["name"]:
artifact_urls[Path(artifact["name"])] = artifact["archive_download_url"]
return artifact_urls
def unzip(p: Path) -> None:
"""Unzip the provided zipfile to a similarly-named directory.
Returns None if `p` is not a zipfile.
Looks like: /tmp/test-reports.zip -> /tmp/unzipped-test-reports/
"""
assert p.is_file()
unzipped_dir = p.with_name("unzipped-" + p.stem)
with zipfile.ZipFile(p, "r") as zip:
zip.extractall(unzipped_dir)
def download_and_extract_artifact(artifact_name: Path, artifact_url: str) -> None:
response = requests.get(artifact_url, headers=REQUEST_HEADERS)
print(f"Downloading and extracting {artifact_name}")
with open(artifact_name, "wb") as f:
f.write(response.content)
unzip(artifact_name)
def download_and_extract_s3_reports(workflow_run_id: int) -> None:
bucket = S3_RESOURCE.Bucket("gha-artifacts")
objs = bucket.objects.filter(
Prefix=f"pytorch/pytorch/{workflow_run_id}/artifact/test-reports"
)
for obj in objs:
p = Path(Path(obj.key).name)
print(f"Downloading and extracting {p}")
with open(p, "wb") as f:
f.write(obj.get()["Body"].read())
unzip(p)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload test stats to Rockset")
parser.add_argument(
"--workflow-run-id",
required=True,
help="id of the workflow to get artifacts from",
)
args = parser.parse_args()
print("mkdir: ", TEMP_DIR)
TEMP_DIR.mkdir()
print("cd to ", TEMP_DIR)
os.chdir(TEMP_DIR)
# Download and extract all the reports (both GHA and S3)
download_and_extract_s3_reports(args.workflow_run_id)
artifact_urls = get_artifact_urls(args.workflow_run_id)
for name, url in artifact_urls.items():
download_and_extract_artifact(Path(name), url)
# Parse the reports and transform them to JSON
test_cases = []
for xml_report in Path(".").glob("**/*.xml"):
test_cases.extend(parse_xml_report(xml_report, int(args.workflow_run_id)))
# Write the JSON to rockset
print(f"Writing {len(test_cases)} test cases to Rockset")
client = rockset.Client(
api_server="api.rs2.usw2.rockset.com", api_key=os.environ["ROCKSET_API_KEY"]
)
client.Collection.retrieve("test_run").add_docs(test_cases)
print("Done!")
|
the-stack_0_26665
|
from unittest import TestCase, skipIf
import hcl
import requests
from nose.tools import *
from time import sleep
from hvac import Client, exceptions
from hvac.tests import util
def create_client(**kwargs):
return Client(url='https://localhost:8200',
cert=('test/client-cert.pem', 'test/client-key.pem'),
verify='test/server-cert.pem',
**kwargs)
class IntegrationTest(TestCase):
@classmethod
def setUpClass(cls):
cls.manager = util.ServerManager(config_path='test/vault-tls.hcl', client=create_client())
cls.manager.start()
cls.manager.initialize()
cls.manager.unseal()
@classmethod
def tearDownClass(cls):
cls.manager.stop()
def root_token(self):
cls = type(self)
return cls.manager.root_token
def setUp(self):
self.client = create_client(token=self.root_token())
def test_unseal_multi(self):
cls = type(self)
self.client.seal()
keys = cls.manager.keys
result = self.client.unseal_multi(keys[0:2])
assert result['sealed']
assert result['progress'] == 2
result = self.client.unseal_reset()
assert result['progress'] == 0
result = self.client.unseal_multi(keys[1:3])
assert result['sealed']
assert result['progress'] == 2
result = self.client.unseal_multi(keys[0:1])
result = self.client.unseal_multi(keys[2:3])
assert not result['sealed']
def test_seal_unseal(self):
cls = type(self)
assert not self.client.is_sealed()
self.client.seal()
assert self.client.is_sealed()
cls.manager.unseal()
assert not self.client.is_sealed()
def test_ha_status(self):
assert 'ha_enabled' in self.client.ha_status
def test_generic_secret_backend(self):
self.client.write('secret/foo', zap='zip')
result = self.client.read('secret/foo')
assert result['data']['zap'] == 'zip'
self.client.delete('secret/foo')
def test_list_directory(self):
self.client.write('secret/test-list/bar/foo', value='bar')
self.client.write('secret/test-list/foo', value='bar')
result = self.client.list('secret/test-list')
assert result['data']['keys'] == ['bar/', 'foo']
self.client.delete('secret/test-list/bar/foo')
self.client.delete('secret/test-list/foo')
def test_write_with_response(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
plaintext = 'test'
self.client.write('transit/keys/foo')
result = self.client.write('transit/encrypt/foo', plaintext=plaintext)
ciphertext = result['data']['ciphertext']
result = self.client.write('transit/decrypt/foo', ciphertext=ciphertext)
assert result['data']['plaintext'] == plaintext
def test_wrap_write(self):
if 'approle/' not in self.client.list_auth_backends():
self.client.enable_auth_backend("approle")
self.client.write("auth/approle/role/testrole")
result = self.client.write('auth/approle/role/testrole/secret-id', wrap_ttl="10s")
assert 'token' in result['wrap_info']
self.client.unwrap(result['wrap_info']['token'])
self.client.disable_auth_backend("approle")
def test_read_nonexistent_key(self):
assert not self.client.read('secret/I/dont/exist')
def test_auth_backend_manipulation(self):
assert 'github/' not in self.client.list_auth_backends()
self.client.enable_auth_backend('github')
assert 'github/' in self.client.list_auth_backends()
self.client.token = self.root_token()
self.client.disable_auth_backend('github')
assert 'github/' not in self.client.list_auth_backends()
def test_secret_backend_manipulation(self):
assert 'test/' not in self.client.list_secret_backends()
self.client.enable_secret_backend('generic', mount_point='test')
assert 'test/' in self.client.list_secret_backends()
self.client.tune_secret_backend('generic', mount_point='test', default_lease_ttl='3600s', max_lease_ttl='8600s')
assert 'max_lease_ttl' in self.client.get_secret_backend_tuning('generic', mount_point='test')
assert 'default_lease_ttl' in self.client.get_secret_backend_tuning('generic', mount_point='test')
self.client.remount_secret_backend('test', 'foobar')
assert 'test/' not in self.client.list_secret_backends()
assert 'foobar/' in self.client.list_secret_backends()
self.client.token = self.root_token()
self.client.disable_secret_backend('foobar')
assert 'foobar/' not in self.client.list_secret_backends()
def test_audit_backend_manipulation(self):
assert 'tmpfile/' not in self.client.list_audit_backends()
options = {
'path': '/tmp/vault.audit.log'
}
self.client.enable_audit_backend('file', options=options, name='tmpfile')
assert 'tmpfile/' in self.client.list_audit_backends()
self.client.token = self.root_token()
self.client.disable_audit_backend('tmpfile')
assert 'tmpfile/' not in self.client.list_audit_backends()
def prep_policy(self, name):
text = """
path "sys" {
policy = "deny"
}
path "secret" {
policy = "write"
}
"""
obj = {
'path': {
'sys': {
'policy': 'deny'},
'secret': {
'policy': 'write'}
}
}
self.client.set_policy(name, text)
return text, obj
def test_policy_manipulation(self):
assert 'root' in self.client.list_policies()
assert self.client.get_policy('test') is None
policy, parsed_policy = self.prep_policy('test')
assert 'test' in self.client.list_policies()
assert policy == self.client.get_policy('test')
assert parsed_policy == self.client.get_policy('test', parse=True)
self.client.delete_policy('test')
assert 'test' not in self.client.list_policies()
def test_json_policy_manipulation(self):
assert 'root' in self.client.list_policies()
policy = {
"path": {
"sys": {
"policy": "deny"
},
"secret": {
"policy": "write"
}
}
}
self.client.set_policy('test', policy)
assert 'test' in self.client.list_policies()
self.client.delete_policy('test')
assert 'test' not in self.client.list_policies()
def test_auth_token_manipulation(self):
result = self.client.create_token(lease='1h', renewable=True)
assert result['auth']['client_token']
lookup = self.client.lookup_token(result['auth']['client_token'])
assert result['auth']['client_token'] == lookup['data']['id']
renew = self.client.renew_token(lookup['data']['id'])
assert result['auth']['client_token'] == renew['auth']['client_token']
self.client.revoke_token(lookup['data']['id'])
try:
lookup = self.client.lookup_token(result['auth']['client_token'])
assert False
except exceptions.Forbidden:
assert True
except exceptions.InvalidPath:
assert True
except exceptions.InvalidRequest:
assert True
def test_userpass_auth(self):
if 'userpass/' in self.client.list_auth_backends():
self.client.disable_auth_backend('userpass')
self.client.enable_auth_backend('userpass')
self.client.write('auth/userpass/users/testuser', password='testpass', policies='not_root')
result = self.client.auth_userpass('testuser', 'testpass')
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
self.client.token = self.root_token()
self.client.disable_auth_backend('userpass')
def test_create_userpass(self):
if 'userpass/' not in self.client.list_auth_backends():
self.client.enable_auth_backend('userpass')
self.client.create_userpass('testcreateuser', 'testcreateuserpass', policies='not_root')
result = self.client.auth_userpass('testcreateuser', 'testcreateuserpass')
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
# Test ttl:
self.client.token = self.root_token()
self.client.create_userpass('testcreateuser', 'testcreateuserpass', policies='not_root', ttl='10s')
self.client.token = result['auth']['client_token']
result = self.client.auth_userpass('testcreateuser', 'testcreateuserpass')
assert result['auth']['lease_duration'] == 10
self.client.token = self.root_token()
self.client.disable_auth_backend('userpass')
def test_delete_userpass(self):
if 'userpass/' not in self.client.list_auth_backends():
self.client.enable_auth_backend('userpass')
self.client.create_userpass('testcreateuser', 'testcreateuserpass', policies='not_root')
result = self.client.auth_userpass('testcreateuser', 'testcreateuserpass')
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
self.client.token = self.root_token()
self.client.delete_userpass('testcreateuser')
assert_raises(exceptions.InvalidRequest, self.client.auth_userpass, 'testcreateuser', 'testcreateuserpass')
def test_app_id_auth(self):
if 'app-id/' in self.client.list_auth_backends():
self.client.disable_auth_backend('app-id')
self.client.enable_auth_backend('app-id')
self.client.write('auth/app-id/map/app-id/foo', value='not_root')
self.client.write('auth/app-id/map/user-id/bar', value='foo')
result = self.client.auth_app_id('foo', 'bar')
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
self.client.token = self.root_token()
self.client.disable_auth_backend('app-id')
def test_create_app_id(self):
if 'app-id/' not in self.client.list_auth_backends():
self.client.enable_auth_backend('app-id')
self.client.create_app_id('testappid', policies='not_root', display_name='displayname')
result = self.client.read('auth/app-id/map/app-id/testappid')
lib_result = self.client.get_app_id('testappid')
del result['request_id']
del lib_result['request_id']
assert result == lib_result
assert result['data']['key'] == 'testappid'
assert result['data']['display_name'] == 'displayname'
assert result['data']['value'] == 'not_root'
self.client.delete_app_id('testappid')
assert self.client.get_app_id('testappid')['data'] is None
self.client.token = self.root_token()
self.client.disable_auth_backend('app-id')
def test_create_user_id(self):
if 'app-id/' not in self.client.list_auth_backends():
self.client.enable_auth_backend('app-id')
self.client.create_app_id('testappid', policies='not_root', display_name='displayname')
self.client.create_user_id('testuserid', app_id='testappid')
result = self.client.read('auth/app-id/map/user-id/testuserid')
lib_result = self.client.get_user_id('testuserid')
del result['request_id']
del lib_result['request_id']
assert result == lib_result
assert result['data']['key'] == 'testuserid'
assert result['data']['value'] == 'testappid'
result = self.client.auth_app_id('testappid', 'testuserid')
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
self.client.token = self.root_token()
self.client.delete_user_id('testuserid')
assert self.client.get_user_id('testuserid')['data'] is None
self.client.token = self.root_token()
self.client.disable_auth_backend('app-id')
def test_create_role(self):
if 'approle/' in self.client.list_auth_backends():
self.client.disable_auth_backend('approle')
self.client.enable_auth_backend('approle')
self.client.create_role('testrole')
result = self.client.read('auth/approle/role/testrole')
lib_result = self.client.get_role('testrole')
del result['request_id']
del lib_result['request_id']
assert result == lib_result
self.client.token = self.root_token()
self.client.disable_auth_backend('approle')
def test_create_delete_role_secret_id(self):
if 'approle/' in self.client.list_auth_backends():
self.client.disable_auth_backend('approle')
self.client.enable_auth_backend('approle')
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo': 'bar'})
secret_id = create_result['data']['secret_id']
result = self.client.get_role_secret_id('testrole', secret_id)
assert result['data']['metadata']['foo'] == 'bar'
self.client.delete_role_secret_id('testrole', secret_id)
try:
self.client.get_role_secret_id('testrole', secret_id)
assert False
except (exceptions.InvalidPath, ValueError):
assert True
self.client.token = self.root_token()
self.client.disable_auth_backend('approle')
def test_auth_approle(self):
if 'approle/' in self.client.list_auth_backends():
self.client.disable_auth_backend('approle')
self.client.enable_auth_backend('approle')
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo': 'bar'})
secret_id = create_result['data']['secret_id']
role_id = self.client.get_role_id('testrole')
result = self.client.auth_approle(role_id, secret_id)
assert result['auth']['metadata']['foo'] == 'bar'
assert self.client.token == result['auth']['client_token']
assert self.client.is_authenticated()
self.client.token = self.root_token()
self.client.disable_auth_backend('approle')
def test_auth_approle_dont_use_token(self):
if 'approle/' in self.client.list_auth_backends():
self.client.disable_auth_backend('approle')
self.client.enable_auth_backend('approle')
self.client.create_role('testrole')
create_result = self.client.create_role_secret_id('testrole', {'foo':'bar'})
secret_id = create_result['data']['secret_id']
role_id = self.client.get_role_id('testrole')
result = self.client.auth_approle(role_id, secret_id, use_token=False)
assert result['auth']['metadata']['foo'] == 'bar'
assert self.client.token != result['auth']['client_token']
self.client.token = self.root_token()
self.client.disable_auth_backend('approle')
def test_transit_read_write(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
result = self.client.transit_read_key('foo')
assert not result['data']['exportable']
self.client.transit_create_key('foo_export', exportable=True, key_type="ed25519")
result = self.client.transit_read_key('foo_export')
assert result['data']['exportable']
assert result['data']['type'] == 'ed25519'
self.client.enable_secret_backend('transit', mount_point='bar')
self.client.transit_create_key('foo', mount_point='bar')
result = self.client.transit_read_key('foo', mount_point='bar')
assert not result['data']['exportable']
def test_transit_list_keys(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo1')
self.client.transit_create_key('foo2')
self.client.transit_create_key('foo3')
result = self.client.transit_list_keys()
assert result['data']['keys'] == ["foo1", "foo2", "foo3"]
def test_transit_update_delete_keys(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
self.client.transit_update_key('foo', deletion_allowed=True)
result = self.client.transit_read_key('foo')
assert result['data']['deletion_allowed']
self.client.transit_delete_key('foo')
try:
self.client.transit_read_key('foo')
except exceptions.InvalidPath:
assert True
else:
assert False
def test_transit_rotate_key(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
self.client.transit_rotate_key('foo')
response = self.client.transit_read_key('foo')
assert '2' in response['data']['keys']
self.client.transit_rotate_key('foo')
response = self.client.transit_read_key('foo')
assert '3' in response['data']['keys']
def test_transit_export_key(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo', exportable=True)
response = self.client.transit_export_key('foo', key_type='encryption-key')
assert response is not None
def test_transit_encrypt_data(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
ciphertext_resp = self.client.transit_encrypt_data('foo', 'abbaabba')['data']['ciphertext']
plaintext_resp = self.client.transit_decrypt_data('foo', ciphertext_resp)['data']['plaintext']
assert plaintext_resp == 'abbaabba'
def test_transit_rewrap_data(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
ciphertext_resp = self.client.transit_encrypt_data('foo', 'abbaabba')['data']['ciphertext']
self.client.transit_rotate_key('foo')
response_wrap = self.client.transit_rewrap_data('foo', ciphertext=ciphertext_resp)['data']['ciphertext']
plaintext_resp = self.client.transit_decrypt_data('foo', response_wrap)['data']['plaintext']
assert plaintext_resp == 'abbaabba'
def test_transit_generate_data_key(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
response_plaintext = self.client.transit_generate_data_key('foo', key_type='plaintext')['data']['plaintext']
assert response_plaintext
response_ciphertext = self.client.transit_generate_data_key('foo', key_type='wrapped')['data']
assert 'ciphertext' in response_ciphertext
assert 'plaintext' not in response_ciphertext
def test_transit_generate_rand_bytes(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
response_data = self.client.transit_generate_rand_bytes(data_bytes=4)['data']['random_bytes']
assert response_data
def test_transit_hash_data(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
response_hash = self.client.transit_hash_data('abbaabba')['data']['sum']
assert len(response_hash) == 64
response_hash = self.client.transit_hash_data('abbaabba', algorithm="sha2-512")['data']['sum']
assert len(response_hash) == 128
def test_transit_generate_verify_hmac(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo')
response_hmac = self.client.transit_generate_hmac('foo', 'abbaabba')['data']['hmac']
assert response_hmac
verify_resp = self.client.transit_verify_signed_data('foo', 'abbaabba', hmac=response_hmac)['data']['valid']
assert verify_resp
response_hmac = self.client.transit_generate_hmac('foo', 'abbaabba', algorithm='sha2-512')['data']['hmac']
assert response_hmac
verify_resp = self.client.transit_verify_signed_data('foo', 'abbaabba',
algorithm='sha2-512', hmac=response_hmac)['data']['valid']
assert verify_resp
def test_transit_sign_verify_signature_data(self):
if 'transit/' in self.client.list_secret_backends():
self.client.disable_secret_backend('transit')
self.client.enable_secret_backend('transit')
self.client.transit_create_key('foo', key_type='ed25519')
signed_resp = self.client.transit_sign_data('foo', 'abbaabba')['data']['signature']
assert signed_resp
verify_resp = self.client.transit_verify_signed_data('foo', 'abbaabba', signature=signed_resp)['data']['valid']
assert verify_resp
signed_resp = self.client.transit_sign_data('foo', 'abbaabba', algorithm='sha2-512')['data']['signature']
assert signed_resp
verify_resp = self.client.transit_verify_signed_data('foo', 'abbaabba',
algorithm='sha2-512',
signature=signed_resp)['data']['valid']
assert verify_resp
def test_missing_token(self):
client = create_client()
assert not client.is_authenticated()
def test_invalid_token(self):
client = create_client(token='not-a-real-token')
assert not client.is_authenticated()
def test_illegal_token(self):
client = create_client(token='token-with-new-line\n')
try:
client.is_authenticated()
except ValueError as e:
assert 'Invalid header value' in str(e)
def test_broken_token(self):
client = create_client(token='\x1b')
try:
client.is_authenticated()
except exceptions.InvalidRequest as e:
assert "invalid header value" in str(e)
def test_client_authenticated(self):
assert self.client.is_authenticated()
def test_client_logout(self):
self.client.logout()
assert not self.client.is_authenticated()
def test_revoke_self_token(self):
if 'userpass/' in self.client.list_auth_backends():
self.client.disable_auth_backend('userpass')
self.client.enable_auth_backend('userpass')
self.client.write('auth/userpass/users/testuser', password='testpass', policies='not_root')
result = self.client.auth_userpass('testuser', 'testpass')
self.client.revoke_self_token()
assert not self.client.is_authenticated()
def test_rekey_multi(self):
cls = type(self)
assert not self.client.rekey_status['started']
self.client.start_rekey()
assert self.client.rekey_status['started']
self.client.cancel_rekey()
assert not self.client.rekey_status['started']
result = self.client.start_rekey()
keys = cls.manager.keys
result = self.client.rekey_multi(keys, nonce=result['nonce'])
assert result['complete']
cls.manager.keys = result['keys']
cls.manager.unseal()
def test_rotate(self):
status = self.client.key_status
self.client.rotate()
assert self.client.key_status['term'] > status['term']
def test_tls_auth(self):
self.client.enable_auth_backend('cert')
with open('test/client-cert.pem') as fp:
certificate = fp.read()
self.client.write('auth/cert/certs/test', display_name='test',
policies='not_root', certificate=certificate)
result = self.client.auth_tls()
def test_gh51(self):
key = 'secret/http://test.com'
self.client.write(key, foo='bar')
result = self.client.read(key)
assert result['data']['foo'] == 'bar'
def test_token_accessor(self):
# Create token, check accessor is provided
result = self.client.create_token(lease='1h')
token_accessor = result['auth'].get('accessor', None)
assert token_accessor
# Look up token by accessor, make sure token is excluded from results
lookup = self.client.lookup_token(token_accessor, accessor=True)
assert lookup['data']['accessor'] == token_accessor
assert not lookup['data']['id']
# Revoke token using the accessor
self.client.revoke_token(token_accessor, accessor=True)
# Look up by accessor should fail
with self.assertRaises(exceptions.InvalidRequest):
lookup = self.client.lookup_token(token_accessor, accessor=True)
# As should regular lookup
with self.assertRaises(exceptions.Forbidden):
lookup = self.client.lookup_token(result['auth']['client_token'])
def test_wrapped_token_success(self):
wrap = self.client.create_token(wrap_ttl='1m')
# Unwrap token
result = self.client.unwrap(wrap['wrap_info']['token'])
assert result['auth']['client_token']
# Validate token
lookup = self.client.lookup_token(result['auth']['client_token'])
assert result['auth']['client_token'] == lookup['data']['id']
def test_wrapped_token_intercept(self):
wrap = self.client.create_token(wrap_ttl='1m')
# Intercept wrapped token
_ = self.client.unwrap(wrap['wrap_info']['token'])
# Attempt to retrieve the token after it's been intercepted
with self.assertRaises(exceptions.Forbidden):
result = self.client.unwrap(wrap['wrap_info']['token'])
def test_wrapped_token_cleanup(self):
wrap = self.client.create_token(wrap_ttl='1m')
_token = self.client.token
_ = self.client.unwrap(wrap['wrap_info']['token'])
assert self.client.token == _token
def test_wrapped_token_revoke(self):
wrap = self.client.create_token(wrap_ttl='1m')
# Revoke token before it's unwrapped
self.client.revoke_token(wrap['wrap_info']['wrapped_accessor'], accessor=True)
# Unwrap token anyway
result = self.client.unwrap(wrap['wrap_info']['token'])
assert result['auth']['client_token']
# Attempt to validate token
with self.assertRaises(exceptions.Forbidden):
lookup = self.client.lookup_token(result['auth']['client_token'])
def test_create_token_explicit_max_ttl(self):
token = self.client.create_token(ttl='30m', explicit_max_ttl='5m')
assert token['auth']['client_token']
assert token['auth']['lease_duration'] == 300
# Validate token
lookup = self.client.lookup_token(token['auth']['client_token'])
assert token['auth']['client_token'] == lookup['data']['id']
def test_create_token_max_ttl(self):
token = self.client.create_token(ttl='5m')
assert token['auth']['client_token']
assert token['auth']['lease_duration'] == 300
# Validate token
lookup = self.client.lookup_token(token['auth']['client_token'])
assert token['auth']['client_token'] == lookup['data']['id']
def test_create_token_periodic(self):
token = self.client.create_token(period='30m')
assert token['auth']['client_token']
assert token['auth']['lease_duration'] == 1800
# Validate token
lookup = self.client.lookup_token(token['auth']['client_token'])
assert token['auth']['client_token'] == lookup['data']['id']
assert lookup['data']['period'] == 1800
def test_token_roles(self):
# No roles, list_token_roles == None
before = self.client.list_token_roles()
assert not before
# Create token role
assert self.client.create_token_role('testrole').status_code == 204
# List token roles
during = self.client.list_token_roles()['data']['keys']
assert len(during) == 1
assert during[0] == 'testrole'
# Delete token role
self.client.delete_token_role('testrole')
# No roles, list_token_roles == None
after = self.client.list_token_roles()
assert not after
def test_create_token_w_role(self):
# Create policy
self.prep_policy('testpolicy')
# Create token role w/ policy
assert self.client.create_token_role('testrole',
allowed_policies='testpolicy').status_code == 204
# Create token against role
token = self.client.create_token(lease='1h', role='testrole')
assert token['auth']['client_token']
assert token['auth']['policies'] == ['default', 'testpolicy']
# Cleanup
self.client.delete_token_role('testrole')
self.client.delete_policy('testpolicy')
def test_ec2_role_crud(self):
if 'aws-ec2/' in self.client.list_auth_backends():
self.client.disable_auth_backend('aws-ec2')
self.client.enable_auth_backend('aws-ec2')
# create a policy to associate with the role
self.prep_policy('ec2rolepolicy')
# attempt to get a list of roles before any exist
no_roles = self.client.list_ec2_roles()
# doing so should succeed and return None
assert (no_roles is None)
# test binding by AMI ID (the old way, to ensure backward compatibility)
self.client.create_ec2_role('foo',
'ami-notarealami',
policies='ec2rolepolicy')
# test binding by Account ID
self.client.create_ec2_role('bar',
bound_account_id='123456789012',
policies='ec2rolepolicy')
# test binding by IAM Role ARN
self.client.create_ec2_role('baz',
bound_iam_role_arn='arn:aws:iam::123456789012:role/mockec2role',
policies='ec2rolepolicy')
# test binding by instance profile ARN
self.client.create_ec2_role('qux',
bound_iam_instance_profile_arn='arn:aws:iam::123456789012:instance-profile/mockprofile',
policies='ec2rolepolicy')
roles = self.client.list_ec2_roles()
assert ('foo' in roles['data']['keys'])
assert ('bar' in roles['data']['keys'])
assert ('baz' in roles['data']['keys'])
assert ('qux' in roles['data']['keys'])
foo_role = self.client.get_ec2_role('foo')
assert (foo_role['data']['bound_ami_id'] == 'ami-notarealami')
assert ('ec2rolepolicy' in foo_role['data']['policies'])
bar_role = self.client.get_ec2_role('bar')
assert (bar_role['data']['bound_account_id'] == '123456789012')
assert ('ec2rolepolicy' in bar_role['data']['policies'])
baz_role = self.client.get_ec2_role('baz')
assert (baz_role['data']['bound_iam_role_arn'] == 'arn:aws:iam::123456789012:role/mockec2role')
assert ('ec2rolepolicy' in baz_role['data']['policies'])
qux_role = self.client.get_ec2_role('qux')
assert (
qux_role['data']['bound_iam_instance_profile_arn'] == 'arn:aws:iam::123456789012:instance-profile/mockprofile')
assert ('ec2rolepolicy' in qux_role['data']['policies'])
# teardown
self.client.delete_ec2_role('foo')
self.client.delete_ec2_role('bar')
self.client.delete_ec2_role('baz')
self.client.delete_ec2_role('qux')
self.client.delete_policy('ec2rolepolicy')
self.client.disable_auth_backend('aws-ec2')
|
the-stack_0_26666
|
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import pathlib
from collections import defaultdict
from collections.abc import Iterable as CollectionIterable
from typing import Dict, Iterable, Union, List
import yaml
from pydcop.dcop.objects import (
VariableDomain,
Variable,
ExternalVariable,
VariableWithCostFunc,
VariableNoisyCostFunc,
AgentDef,
)
from pydcop.dcop.scenario import EventAction, DcopEvent, Scenario
from pydcop.dcop.dcop import DCOP
from pydcop.dcop.relations import (
relation_from_str,
RelationProtocol,
NAryMatrixRelation,
assignment_matrix,
generate_assignment_as_dict, constraint_from_str,
constraint_from_external_definition,
)
from pydcop.utils.expressionfunction import ExpressionFunction
from pydcop.distribution.objects import DistributionHints
class DcopInvalidFormatError(Exception):
pass
def load_dcop_from_file(filenames: Union[str, Iterable[str]]):
"""
load a dcop from one or several files
Parameters
----------
filenames: str or iterable of str
        The dcop can be given as a single file or as several files. When
        passing an iterable of file names, their content is concatenated
        before parsing. This can be useful when you want to define the
agents in a separate file.
Returns
-------
A DCOP object built by parsing the files
"""
content = ""
main_dir = None
if not isinstance(filenames, CollectionIterable):
filenames = [filenames]
for filename in filenames:
p = pathlib.Path(filename)
if main_dir is None:
main_dir = p.parent
content += p.read_text(encoding="utf-8")
if content:
return load_dcop(content, main_dir)
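# Illustrative usage sketch, not part of the original module: the file names
# below are hypothetical. A DCOP split across two YAML files (for instance the
# problem definition and the agents) is concatenated before parsing, as the
# docstring above describes.
#
#     dcop = load_dcop_from_file(["graph_coloring.yaml", "agents.yaml"])
#     print(dcop.name, sorted(dcop.variables), sorted(dcop.agents))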
def load_dcop(dcop_str: str, main_dir=None) -> DCOP:
loaded = yaml.load(dcop_str, Loader=yaml.FullLoader)
if "name" not in loaded:
raise ValueError("Missing name in dcop string")
if "objective" not in loaded or loaded["objective"] not in ["min", "max"]:
raise ValueError("Objective is mandatory and must be min or max")
dcop = DCOP(
loaded["name"],
loaded["objective"],
loaded["description"] if "description" in loaded else "",
)
dcop.domains = _build_domains(loaded)
dcop.variables = _build_variables(loaded, dcop)
dcop.external_variables = _build_external_variables(loaded, dcop)
dcop._constraints = _build_constraints(loaded, dcop, main_dir)
dcop._agents_def = _build_agents(loaded)
dcop.dist_hints = _build_dist_hints(loaded, dcop)
return dcop
def dcop_yaml(dcop: DCOP) -> str:
dcop_dict = {"name": dcop.name, "objective": dcop.objective}
dcop_str = yaml.dump(dcop_dict, default_flow_style=False)
dcop_str += "\n"
dcop_str += _yaml_domains(dcop.domains.values())
dcop_str += "\n"
dcop_str += _yaml_variables(dcop.variables.values())
dcop_str += "\n"
dcop_str += _yaml_constraints(dcop.constraints.values())
dcop_str += "\n"
dcop_str += yaml_agents(dcop.agents.values())
return dcop_str
def _yaml_domains(domains):
d_dict = {}
for domain in domains:
d_dict[domain.name] = {"values": list(domain.values), "type": domain.type}
return yaml.dump({"domains": d_dict}) # , default_flow_style=False)
def _build_domains(loaded) -> Dict[str, VariableDomain]:
domains = {}
if "domains" in loaded:
for d_name in loaded["domains"]:
d = loaded["domains"][d_name]
values = d["values"]
if len(values) == 1 and ".." in values[0]:
values = str_2_domain_values(d["values"][0])
d_type = d["type"] if "type" in d else ""
domains[d_name] = VariableDomain(d_name, d_type, values)
return domains
def _yaml_variables(variables):
var_dict = {}
for v in variables:
var_dict[v.name] = {"domain": v.domain.name}
if v.initial_value is not None:
var_dict[v.name]["initial_value"] = v.initial_value
return yaml.dump({"variables": var_dict}, default_flow_style=False)
def _build_variables(loaded, dcop) -> Dict[str, Variable]:
variables = {}
if "variables" in loaded:
for v_name in loaded["variables"]:
v = loaded["variables"][v_name]
domain = dcop.domain(v["domain"])
initial_value = v["initial_value"] if "initial_value" in v else None
if initial_value and initial_value not in domain.values:
raise ValueError(
"initial value {} is not in the domain {} "
"of the variable {}".format(initial_value, domain.name, v_name)
)
if "cost_function" in v:
cost_expression = v["cost_function"]
cost_func = ExpressionFunction(cost_expression)
if "noise_level" in v:
variables[v_name] = VariableNoisyCostFunc(
v_name,
domain,
cost_func,
initial_value,
noise_level=v["noise_level"],
)
else:
variables[v_name] = VariableWithCostFunc(
v_name, domain, cost_func, initial_value
)
else:
variables[v_name] = Variable(v_name, domain, initial_value)
return variables
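# Illustrative sketch, not part of the original module: a "variables" section
# that _build_variables can parse. The keys mirror the code above; the concrete
# names and expressions are hypothetical.
#
#     variables:
#       v1:
#         domain: d1
#         initial_value: 0
#         cost_function: 0.2 * v1
#         noise_level: 0.1    # optional, yields a VariableNoisyCostFunc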
def _build_external_variables(loaded, dcop) -> Dict[str, ExternalVariable]:
ext_vars = {}
if "external_variables" in loaded:
for v_name in loaded["external_variables"]:
v = loaded["external_variables"][v_name]
domain = dcop.domain(v["domain"])
initial_value = v["initial_value"] if "initial_value" in v else None
if initial_value and initial_value not in domain.values:
raise ValueError(
"initial value {} is not in the domain {} "
"of the variable {}".format(initial_value, domain.name, v_name)
)
ext_vars[v_name] = ExternalVariable(v_name, domain, initial_value)
return ext_vars
def _build_constraints(loaded, dcop, main_dir) -> Dict[str, RelationProtocol]:
constraints = {}
if "constraints" in loaded:
for c_name in loaded["constraints"]:
c = loaded["constraints"][c_name]
if "type" not in c:
                raise ValueError(
                    "Error in constraints {} definition: type is "
                    'mandatory and must be "intention" or '
                    '"extensional"'.format(c_name)
                )
elif c["type"] == "intention":
if "source" in c:
src_path = c["source"] \
if pathlib.Path(c["source"]).is_absolute() \
else main_dir / c["source"]
constraints[c_name] = constraint_from_external_definition(
c_name, src_path, c["function"], dcop.all_variables
)
else:
constraints[c_name] = constraint_from_str(
c_name, c["function"], dcop.all_variables
)
elif c["type"] == "extensional":
values_def = c["values"]
default = None if "default" not in c else c["default"]
if type(c["variables"]) != list:
# specific case for constraint with a single variable
v = dcop.variable(c["variables"].strip())
values = [default] * len(v.domain)
for value, assignments_def in values_def.items():
if isinstance(assignments_def, str):
for ass_def in assignments_def.split("|"):
iv, _ = v.domain.to_domain_value(ass_def.strip())
values[iv] = value
else:
values[v.domain.index(assignments_def)] = value
constraints[c_name] = NAryMatrixRelation([v], values, name=c_name)
continue
# For constraints that depends on several variables
vars = [dcop.variable(v) for v in c["variables"]]
values = assignment_matrix(vars, default)
for value, assignments_def in values_def.items():
# can be a str like "1 2 3" or "1 2 3 | 1 3 4"
# several assignment for the same value are separated with |
assignments_def = assignments_def.split("|")
for ass_def in assignments_def:
val_position = values
vals_def = ass_def.split()
for i, val_def in enumerate(vals_def[:-1]):
iv, _ = vars[i].domain.to_domain_value(val_def.strip())
val_position = val_position[iv]
# value for the last variable of the assignment
val_def = vals_def[-1]
iv, _ = vars[-1].domain.to_domain_value(val_def.strip())
val_position[iv] = value
constraints[c_name] = NAryMatrixRelation(vars, values, name=c_name)
else:
raise ValueError(
"Error in contraints {} definition: type is mandatory "
'and must be "intention" or "intensional"'.format(c_name)
)
return constraints
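# Illustrative sketch, not part of the original module: two constraint
# definitions accepted by _build_constraints. Names, expressions and
# assignments are hypothetical; the keys and the "value: assignment | ..."
# syntax come from the code above.
#
#     constraints:
#       c_pref:
#         type: intention
#         function: 0 if v1 != v2 else 100
#       c_table:
#         type: extensional
#         variables: [v1, v2]
#         default: 10
#         values:
#           0: 1 2 | 2 1
#           5: 1 1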
def _yaml_constraints(constraints: Iterable[RelationProtocol]):
constraints_dict = {}
for r in constraints:
if hasattr(r, "expression"):
constraints_dict[r.name] = {"type": "intention", "function": r.expression}
else:
# fallback to extensional constraint
variables = [v.name for v in r.dimensions]
values = defaultdict(lambda: [])
for assignment in generate_assignment_as_dict(r.dimensions):
val = r(**assignment)
ass_str = " ".join([str(assignment[var]) for var in variables])
values[val].append(ass_str)
for val in values:
values[val] = " | ".join(values[val])
values = dict(values)
constraints_dict[r.name] = {
"type": "extensional",
"variables": variables,
"values": values,
}
return yaml.dump({"constraints": constraints_dict}, default_flow_style=False)
def _build_agents(loaded) -> Dict[str, AgentDef]:
# Read agents list, without creating AgentDef object yet.
# We need the preferences to create the AgentDef objects
agents_list = {}
if "agents" in loaded:
for a_name in loaded["agents"]:
try:
kw = loaded["agents"][a_name]
# we accept any attribute for the agent
# Most of the time it will be capacity and also preference but
# any named value is valid:
agents_list[a_name] = kw if kw else {}
except TypeError:
# means agents are given as a list and not a map:
agents_list[a_name] = {}
routes = {}
default_route = 1
if "routes" in loaded:
for a1 in loaded["routes"]:
if a1 == "default":
default_route = loaded["routes"]["default"]
continue
if a1 not in agents_list:
raise DcopInvalidFormatError("Route for unknown " "agent " + a1)
a1_routes = loaded["routes"][a1]
for a2 in a1_routes:
if a2 not in agents_list:
raise DcopInvalidFormatError("Route for unknown " "agent " + a2)
if (a2, a1) in routes or (a1, a2) in routes:
if routes[(a2, a1)] != a1_routes[a2]:
raise DcopInvalidFormatError(
"Multiple route definition r{} = {}"
" != r({}) = {}".format(
(a2, a1), routes[(a2, a1)], (a1, a2), a1_routes[a2]
)
)
routes[(a1, a2)] = a1_routes[a2]
hosting_costs = {}
default_cost = 0
default_agt_costs = {}
if "hosting_costs" in loaded:
costs = loaded["hosting_costs"]
for a in costs:
if a == "default":
default_cost = costs["default"]
continue
if a not in agents_list:
raise DcopInvalidFormatError("hosting_costs for unknown " "agent " + a)
a_costs = costs[a]
if "default" in a_costs:
default_agt_costs[a] = a_costs["default"]
if "computations" in a_costs:
for c in a_costs["computations"]:
hosting_costs[(a, c)] = a_costs["computations"][c]
# Now that we parsed all agents info, we can build the objects:
agents = {}
for a in agents_list:
d = default_cost
if a in default_agt_costs:
d = default_agt_costs[a]
p = {c: hosting_costs[b, c] for (b, c) in hosting_costs if b == a}
routes_a = {a2: v for (a1, a2), v in routes.items() if a1 == a}
routes_a.update({a1: v for (a1, a2), v in routes.items() if a2 == a})
agents[a] = AgentDef(
a,
default_hosting_cost=d,
hosting_costs=p,
default_route=default_route,
routes=routes_a,
**agents_list[a]
)
return agents
def yaml_agents(agents: List[AgentDef]) -> str:
"""
    Serialize a list of agents into a yaml string.
Parameters
----------
agents: list
a list of agents
Returns
-------
string:
        a yaml string representing the list of agents
"""
agt_dict = {}
hosting_costs = {}
routes = {}
for agt in agents:
if hasattr(agt, "capacity"):
agt_dict[agt.name] = {"capacity": agt.capacity}
else:
agt_dict[agt.name] = {}
if agt.default_hosting_cost or agt.hosting_costs:
hosting_costs[agt.name] = {
"default": agt.default_hosting_cost,
"computations": agt.hosting_costs,
}
if agt.routes:
routes[agt.name] = agt.routes
if agt.default_route is not None:
routes["default"] = agt.default_route
res = {}
if agt_dict:
res["agents"] = agt_dict
if routes:
res["routes"] = routes
if hosting_costs:
res["hosting_costs"] = hosting_costs
if res:
return yaml.dump(res, default_flow_style=False)
else:
return ""
def _build_dist_hints(loaded, dcop):
if "distribution_hints" not in loaded:
return None
loaded = loaded["distribution_hints"]
must_host, host_with = None, None
if "must_host" in loaded:
for a in loaded["must_host"]:
if a not in dcop.agents:
raise ValueError(
"Cannot use must_host with unknown agent " "{}".format(a)
)
for c in loaded["must_host"][a]:
if c not in dcop.variables and c not in dcop.constraints:
raise ValueError(
"Cannot use must_host with unknown "
"variable or constraint {}".format(c)
)
must_host = loaded["must_host"]
if "host_with" in loaded:
host_with = defaultdict(lambda: set())
for i in loaded["host_with"]:
host_with[i].update(loaded["host_with"][i])
for j in loaded["host_with"][i]:
s = {i}.union(loaded["host_with"][i])
s.remove(j)
host_with[j].update(s)
return DistributionHints(
must_host, dict(host_with) if host_with is not None else {}
)
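# Illustrative sketch, not part of the original module: a "distribution_hints"
# section matching the checks performed above. Agent, variable and constraint
# names are hypothetical.
#
#     distribution_hints:
#       must_host:
#         a1: [v1, c_pref]
#       host_with:
#         v1: [c_table]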
def str_2_domain_values(domain_str):
"""
Deserialize a domain expressed as a string.
    If all values in the domain can be interpreted as an int, the list is a
    list of int, otherwise it is a list of strings.
    :param domain_str: a string like 0..5 or A, B, C, D
:return: the list of values in the domain
"""
try:
sep_index = domain_str.index("..")
# Domain str is : [0..5]
min_d = int(domain_str[0:sep_index])
max_d = int(domain_str[sep_index + 2 :])
return list(range(min_d, max_d + 1))
except ValueError:
values = [v.strip() for v in domain_str[1:].split(",")]
try:
return [int(v) for v in values]
except ValueError:
return values
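# Illustrative sketch, not part of the original module: the ".." range form
# handled above.
#
#     str_2_domain_values("0..3")   # -> [0, 1, 2, 3]
#
# Any other string falls through to the comma-separated branch and yields a
# list of ints or of strings.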
def load_scenario_from_file(filename: str) -> Scenario:
"""
Load a scenario from a yaml file.
:param filename:
:return:
"""
with open(filename, mode="r", encoding="utf-8") as f:
content = f.read()
if content:
return load_scenario(content)
def load_scenario(scenario_str) -> Scenario:
"""
Load a scenario from a yaml string.
:param scenario_str:
:return:
"""
loaded = yaml.load(scenario_str, Loader=yaml.FullLoader)
evts = []
for evt in loaded["events"]:
id_evt = evt["id"]
if "actions" in evt:
actions = []
for a in evt["actions"]:
args = dict(a)
args.pop("type")
actions.append(EventAction(a["type"], **args))
evts.append(DcopEvent(id_evt, actions=actions))
elif "delay" in evt:
evts.append(DcopEvent(id_evt, delay=evt["delay"]))
return Scenario(evts)
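# Illustrative sketch, not part of the original module: a scenario string that
# load_scenario can parse. Event ids, delays and action types are hypothetical;
# the keys ("events", "id", "actions", "type", "delay") come from the code
# above. Any extra action key is passed to EventAction as a keyword argument.
#
#     scenario = load_scenario("""
#     events:
#       - id: e1
#         delay: 30
#       - id: e2
#         actions:
#           - type: remove_agent
#             agent: a1
#     """)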
def yaml_scenario(scenario: Scenario) -> str:
events = [_dict_event(event) for event in scenario.events]
scenario_dict = {"events": events}
return yaml.dump(scenario_dict, default_flow_style=False)
def _dict_event(event: DcopEvent) -> Dict:
evt_dict = {"id": event.id}
if event.is_delay:
evt_dict["delay"] = event.delay
else:
print(f" event {event}")
evt_dict["actions"] = [_dict_action(a) for a in event.actions]
return evt_dict
def _dict_action(action: EventAction) -> Dict:
action_dict = {"type": action.type}
action_dict.update(action.args)
return action_dict
|
the-stack_0_26668
|
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""upgradewallet RPC functional test
Test upgradewallet RPC. Download node binaries:
contrib/devtools/previous_release.py -b v0.19.1 v0.18.1 v0.17.1 v0.16.3 v0.15.2
Only v0.15.2 and v0.16.3 are required by this test. The others are used in feature_backwards_compatibility.py
"""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_is_hex_string,
)
class UpgradeWalletTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [
["-addresstype=bech32"], # current wallet version
["-usehd=1"], # v0.16.3 wallet
["-usehd=0"] # v0.15.2 wallet
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
self.skip_if_no_previous_releases()
def setup_network(self):
self.setup_nodes()
def setup_nodes(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
160300,
150200,
])
self.start_nodes()
def dumb_sync_blocks(self):
"""
Little helper to sync older wallets.
Notice that v0.15.2's regtest is hardforked, so there is
no sync for it.
v0.15.2 is only being used to test for version upgrade
and master hash key presence.
v0.16.3 is being used to test for version upgrade and balances.
Further info: https://github.com/bitcoin/bitcoin/pull/18774#discussion_r416967844
"""
node_from = self.nodes[0]
v16_3_node = self.nodes[1]
to_height = node_from.getblockcount()
height = self.nodes[1].getblockcount()
for i in range(height, to_height+1):
b = node_from.getblock(blockhash=node_from.getblockhash(i), verbose=0)
v16_3_node.submitblock(b)
assert_equal(v16_3_node.getblockcount(), to_height)
def run_test(self):
self.nodes[0].generatetoaddress(101, self.nodes[0].getnewaddress())
self.dumb_sync_blocks()
        # Sanity check the test framework:
res = self.nodes[0].getblockchaininfo()
assert_equal(res['blocks'], 101)
node_master = self.nodes[0]
v16_3_node = self.nodes[1]
v15_2_node = self.nodes[2]
# Send coins to old wallets for later conversion checks.
v16_3_wallet = v16_3_node.get_wallet_rpc('wallet.dat')
v16_3_address = v16_3_wallet.getnewaddress()
node_master.generatetoaddress(101, v16_3_address)
self.dumb_sync_blocks()
v16_3_balance = v16_3_wallet.getbalance()
self.log.info("Test upgradewallet RPC...")
# Prepare for copying of the older wallet
node_master_wallet_dir = os.path.join(node_master.datadir, "regtest/wallets")
v16_3_wallet = os.path.join(v16_3_node.datadir, "regtest/wallets/wallet.dat")
v15_2_wallet = os.path.join(v15_2_node.datadir, "regtest/wallet.dat")
self.stop_nodes()
# Copy the 0.16.3 wallet to the last Bitcoin Core version and open it:
shutil.rmtree(node_master_wallet_dir)
os.mkdir(node_master_wallet_dir)
shutil.copy(
v16_3_wallet,
node_master_wallet_dir
)
self.restart_node(0, ['-nowallet'])
node_master.loadwallet('')
wallet = node_master.get_wallet_rpc('')
old_version = wallet.getwalletinfo()["walletversion"]
# calling upgradewallet without version arguments
# should return nothing if successful
assert_equal(wallet.upgradewallet(), "")
new_version = wallet.getwalletinfo()["walletversion"]
# upgraded wallet version should be greater than older one
assert_greater_than(new_version, old_version)
# wallet should still contain the same balance
assert_equal(wallet.getbalance(), v16_3_balance)
self.stop_node(0)
# Copy the 0.15.2 wallet to the last Bitcoin Core version and open it:
shutil.rmtree(node_master_wallet_dir)
os.mkdir(node_master_wallet_dir)
shutil.copy(
v15_2_wallet,
node_master_wallet_dir
)
self.restart_node(0, ['-nowallet'])
node_master.loadwallet('')
wallet = node_master.get_wallet_rpc('')
# should have no master key hash before conversion
assert_equal('hdseedid' in wallet.getwalletinfo(), False)
# calling upgradewallet with explicit version number
# should return nothing if successful
assert_equal(wallet.upgradewallet(169900), "")
new_version = wallet.getwalletinfo()["walletversion"]
# upgraded wallet should have version 169900
assert_equal(new_version, 169900)
# after conversion master key hash should be present
assert_is_hex_string(wallet.getwalletinfo()['hdseedid'])
if __name__ == '__main__':
UpgradeWalletTest().main()
|
the-stack_0_26669
|
import os
import copy
import warnings
import torch
from nanodet.util import mkdir, DataParallel, load_model_weight, save_model, MovingAverage, AverageMeter
class Trainer:
"""
Epoch based trainer
"""
def __init__(self, rank, cfg, model, logger):
self.rank = rank # local rank for distributed training. For single gpu training, default is -1
self.cfg = cfg
self.model = model
self.logger = logger
self._init_optimizer()
self._iter = 1
self.epoch = 1
def set_device(self, batch_per_gpu, gpu_ids, device):
"""
Set model device to GPU.
:param batch_per_gpu: batch size of each gpu
:param gpu_ids: a list of gpu ids
:param device: cuda
"""
num_gpu = len(gpu_ids)
batch_sizes = [batch_per_gpu for i in range(num_gpu)]
self.logger.log('Training batch size: {}'.format(batch_per_gpu*num_gpu))
self.model = DataParallel(self.model, gpu_ids, chunk_sizes=batch_sizes).to(device)
def _init_optimizer(self):
optimizer_cfg = copy.deepcopy(self.cfg.schedule.optimizer)
name = optimizer_cfg.pop('name')
Optimizer = getattr(torch.optim, name)
self.optimizer = Optimizer(params=self.model.parameters(), **optimizer_cfg)
def _init_scheduler(self):
schedule_cfg = copy.deepcopy(self.cfg.schedule.lr_schedule)
name = schedule_cfg.pop('name')
Scheduler = getattr(torch.optim.lr_scheduler, name)
self.lr_scheduler = Scheduler(optimizer=self.optimizer, **schedule_cfg)
def run_step(self, model, meta, mode='train'):
"""
Training step including forward and backward
:param model: model to train
:param meta: a batch of input data
:param mode: train or val or test
:return: result, total loss and a dict of all losses
"""
output, loss, loss_dict = model.module.forward_train(meta)
loss = loss.mean()
if mode == 'train':
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return output, loss, loss_dict
def run_epoch(self, epoch, data_loader, mode):
"""
train or validate one epoch
:param epoch: current epoch number
:param data_loader: dataloader of train or test dataset
:param mode: train or val or test
:return: outputs and a dict of epoch average losses
"""
model = self.model
if mode == 'train':
model.train()
if self.rank > -1: # Using distributed training, need to set epoch for sampler
self.logger.log("distributed sampler set epoch at {}".format(epoch))
data_loader.sampler.set_epoch(epoch)
else:
model.eval()
torch.cuda.empty_cache()
results = {}
epoch_losses = {}
step_losses = {}
num_iters = len(data_loader)
for iter_id, meta in enumerate(data_loader):
if iter_id >= num_iters:
break
meta['img'] = meta['img'].to(device=torch.device('cuda'), non_blocking=True)
output, loss, loss_stats = self.run_step(model, meta, mode)
if mode == 'val' or mode == 'test':
dets = model.module.head.post_process(output, meta)
results[meta['img_info']['id'].cpu().numpy()[0]] = dets
for k in loss_stats:
if k not in epoch_losses:
epoch_losses[k] = AverageMeter(loss_stats[k].mean().item())
step_losses[k] = MovingAverage(loss_stats[k].mean().item(), window_size=self.cfg.log.interval)
else:
epoch_losses[k].update(loss_stats[k].mean().item())
step_losses[k].push(loss_stats[k].mean().item())
if iter_id % self.cfg.log.interval == 0:
log_msg = '{}|Epoch{}/{}|Iter{}({}/{})| lr:{:.2e}| '.format(mode, epoch, self.cfg.schedule.total_epochs,
self._iter, iter_id, num_iters, self.optimizer.param_groups[0]['lr'])
for l in step_losses:
log_msg += '{}:{:.4f}| '.format(l, step_losses[l].avg())
if mode == 'train' and self.rank < 1:
self.logger.scalar_summary('Train_loss/' + l, mode, step_losses[l].avg(), self._iter)
self.logger.log(log_msg)
if mode == 'train':
self._iter += 1
del output, loss, loss_stats
epoch_loss_dict = {k: v.avg for k, v in epoch_losses.items()}
return results, epoch_loss_dict
def run(self, train_loader, val_loader, evaluator):
"""
start running
:param train_loader:
:param val_loader:
:param evaluator:
"""
start_epoch = self.epoch
save_flag = -10
if self.cfg.schedule.warmup.steps > 0 and start_epoch == 1:
self.logger.log('Start warming up...')
self.warm_up(train_loader)
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.cfg.schedule.optimizer.lr
self._init_scheduler()
self.lr_scheduler.last_epoch = start_epoch - 1
# resume learning rate of last epoch
if start_epoch > 1:
for param_group, lr in zip(self.optimizer.param_groups, self.lr_scheduler.get_lr()):
param_group['lr'] = lr
for epoch in range(start_epoch, self.cfg.schedule.total_epochs + 1):
results, train_loss_dict = self.run_epoch(epoch, train_loader, mode='train')
self.lr_scheduler.step()
save_model(self.rank, self.model, os.path.join(self.cfg.save_dir, 'model_last.pth'), epoch, self._iter, self.optimizer)
for k, v in train_loss_dict.items():
self.logger.scalar_summary('Epoch_loss/' + k, 'train', v, epoch)
# --------evaluate----------
if self.cfg.schedule.val_intervals > 0 and epoch % self.cfg.schedule.val_intervals == 0:
with torch.no_grad():
results, val_loss_dict = self.run_epoch(self.epoch, val_loader, mode='val')
for k, v in val_loss_dict.items():
self.logger.scalar_summary('Epoch_loss/' + k, 'val', v, epoch)
eval_results = evaluator.evaluate(results, self.cfg.save_dir, epoch, self.logger, rank=self.rank)
if self.cfg.evaluator.save_key in eval_results:
metric = eval_results[self.cfg.evaluator.save_key]
if metric > save_flag:
# ------save best model--------
save_flag = metric
best_save_path = os.path.join(self.cfg.save_dir, 'model_best')
mkdir(self.rank, best_save_path)
save_model(self.rank, self.model, os.path.join(best_save_path, 'model_best.pth'), epoch,
self._iter, self.optimizer)
txt_path = os.path.join(best_save_path, "eval_results.txt")
if self.rank < 1:
with open(txt_path, "a") as f:
f.write("Epoch:{}\n".format(epoch))
for k, v in eval_results.items():
f.write("{}: {}\n".format(k, v))
else:
warnings.warn('Warning! Save_key is not in eval results! Only save model last!')
self.epoch += 1
def get_warmup_lr(self, cur_iters):
if self.cfg.schedule.warmup.name == 'constant':
warmup_lr = self.cfg.schedule.optimizer.lr * self.cfg.schedule.warmup.ratio
elif self.cfg.schedule.warmup.name == 'linear':
k = (1 - cur_iters / self.cfg.schedule.warmup.steps) * (1 - self.cfg.schedule.warmup.ratio)
warmup_lr = self.cfg.schedule.optimizer.lr * (1 - k)
elif self.cfg.schedule.warmup.name == 'exp':
k = self.cfg.schedule.warmup.ratio ** (1 - cur_iters / self.cfg.schedule.warmup.steps)
warmup_lr = self.cfg.schedule.optimizer.lr * k
else:
raise Exception('Unsupported warm up type!')
return warmup_lr
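    # Worked example (illustrative, the numbers are hypothetical): with
    # warmup.steps=500, warmup.ratio=0.1 and optimizer.lr=0.01, the 'linear'
    # branch at cur_iters=250 gives k = (1 - 250/500) * (1 - 0.1) = 0.45,
    # hence warmup_lr = 0.01 * (1 - 0.45) = 0.0055, reaching the full 0.01
    # at step 500.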
def warm_up(self, data_loader):
model = self.model
model.train()
step_losses = {}
num_iters = self.cfg.schedule.warmup.steps
cur_iter = 0
while cur_iter < num_iters:
for iter_id, batch in enumerate(data_loader):
cur_iter += 1
if cur_iter >= num_iters:
break
lr = self.get_warmup_lr(cur_iter)
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
batch['img'] = batch['img'].to(device=torch.device('cuda'), non_blocking=True)
output, loss, loss_stats = self.run_step(model, batch)
# TODO: simplify code
for k in loss_stats:
if k not in step_losses:
step_losses[k] = MovingAverage(loss_stats[k].mean().item(), window_size=self.cfg.log.interval)
else:
step_losses[k].push(loss_stats[k].mean().item())
if iter_id % self.cfg.log.interval == 0:
log_msg = '{}|Iter({}/{})| lr:{:.2e}| '.format('warmup', cur_iter, num_iters, self.optimizer.param_groups[0]['lr'])
for l in step_losses:
log_msg += '{}:{:.4f}| '.format(l, step_losses[l].avg())
self.logger.log(log_msg)
del output, loss, loss_stats
def load_model(self, cfg):
load_path = cfg.schedule.load_model
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
self.logger.log('loaded {}, epoch {}'.format(load_path, checkpoint['epoch']))
if hasattr(self.model, 'module'):
load_model_weight(self.model.module, checkpoint, self.logger)
else:
load_model_weight(self.model, checkpoint, self.logger)
def resume(self, cfg):
"""
load model and optimizer state
"""
if cfg.schedule.resume is not None:
load_path = cfg.schedule.resume
else:
load_path = os.path.join(cfg.save_dir, 'model_last.pth')
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
self.logger.log('loaded {}, epoch {}'.format(load_path, checkpoint['epoch']))
if hasattr(self.model, 'module'):
load_model_weight(self.model.module, checkpoint, self.logger)
else:
load_model_weight(self.model, checkpoint, self.logger)
if 'optimizer' in checkpoint:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.epoch = checkpoint['epoch'] + 1
self.logger.log('resumed at epoch: {}'.format(self.epoch))
if 'iter' in checkpoint:
self._iter = checkpoint['iter'] + 1
self.logger.log('resumed at steps: {}'.format(self._iter))
else:
self.logger.log('No optimizer parameters in checkpoint.')
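# Illustrative usage sketch, not part of the original module: cfg, model,
# logger, the data loaders and the evaluator are hypothetical stand-ins for
# the objects this trainer expects.
#
#     trainer = Trainer(rank=-1, cfg=cfg, model=model, logger=logger)
#     trainer.set_device(cfg.device.batchsize_per_gpu, cfg.device.gpu_ids, 'cuda')
#     if cfg.schedule.load_model:          # optional warm start
#         trainer.load_model(cfg)
#     trainer.run(train_loader, val_loader, evaluator)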
|
the-stack_0_26670
|
# Copyright (c) 2013-2014 Will Thames <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from typing import TYPE_CHECKING, Any, Dict, Union
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.utils import convert_to_boolean, get_first_cmd_arg
if TYPE_CHECKING:
from typing import Optional
from ansiblelint.file_utils import Lintable
class CommandsInsteadOfModulesRule(AnsibleLintRule):
id = 'command-instead-of-module'
shortdesc = 'Using command rather than module'
description = (
'Executing a command when there is an Ansible module is generally a bad idea'
)
severity = 'HIGH'
tags = ['command-shell', 'idiom']
version_added = 'historic'
_commands = ['command', 'shell']
_modules = {
'apt-get': 'apt-get',
'chkconfig': 'service',
'curl': 'get_url or uri',
'git': 'git',
'hg': 'hg',
'letsencrypt': 'acme_certificate',
'mktemp': 'tempfile',
'mount': 'mount',
'patch': 'patch',
'rpm': 'yum or rpm_key',
'rsync': 'synchronize',
'sed': 'template, replace or lineinfile',
'service': 'service',
'supervisorctl': 'supervisorctl',
'svn': 'subversion',
'systemctl': 'systemd',
'tar': 'unarchive',
'unzip': 'unarchive',
'wget': 'get_url or uri',
'yum': 'yum',
}
def matchtask(
self, task: Dict[str, Any], file: 'Optional[Lintable]' = None
) -> Union[bool, str]:
if task['action']['__ansible_module__'] not in self._commands:
return False
first_cmd_arg = get_first_cmd_arg(task)
if not first_cmd_arg:
return False
executable = os.path.basename(first_cmd_arg)
if executable in self._modules and convert_to_boolean(
task['action'].get('warn', True)
):
message = '{0} used in place of {1} module'
return message.format(executable, self._modules[executable])
return False
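# Illustrative sketch, not part of the original module: a task this rule would
# flag, because 'git' appears in the _modules mapping above and 'warn' is not
# set to false. The playbook content is hypothetical.
#
#     - name: clone a repository
#       command: git clone https://example.com/repo.git /opt/repo
#
# For such a task, matchtask() returns "git used in place of git module".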
|
the-stack_0_26676
|
import copy
import errno
import os
import signal
import time
import sys
from random import randint
try:
from itertools import zip_longest as izip_longest
except ImportError:
from itertools import izip_longest # NOQA
import site
from tornado import gen
from psutil import NoSuchProcess, TimeoutExpired
import zmq.utils.jsonapi as json
from zmq.eventloop import ioloop
from circus.process import Process, DEAD_OR_ZOMBIE, UNEXISTING
from circus.papa_process_proxy import PapaProcessProxy
from circus import logger
from circus import util
from circus.stream import get_stream, Redirector
from circus.stream.papa_redirector import PapaRedirector
from circus.util import parse_env_dict, resolve_name, tornado_sleep, IS_WINDOWS
from circus.util import papa
from circus.py3compat import bytestring, is_callable, b, PY2
class Watcher(object):
"""
Class managing a list of processes for a given command.
Options:
- **name**: name given to the watcher. Used to uniquely identify it.
- **cmd**: the command to run. May contain *$WID*, which will be
replaced by **wid**.
- **args**: the arguments for the command to run. Can be a list or
a string. If **args** is a string, it's splitted using
:func:`shlex.split`. Defaults to None.
- **numprocesses**: Number of processes to run.
- **working_dir**: the working directory to run the command in. If
not provided, will default to the current working directory.
- **shell**: if *True*, will run the command in the shell
environment. *False* by default. **warning: this is a
security hazard**.
- **uid**: if given, is the user id or name the command should run
with. The current uid is the default.
- **gid**: if given, is the group id or name the command should run
with. The current gid is the default.
- **send_hup**: if True, a process reload will be done by sending
the SIGHUP signal. Defaults to False.
- **stop_signal**: the signal to send when stopping the process.
Defaults to SIGTERM.
- **stop_children**: send the **stop_signal** to the children too.
Defaults to False.
- **env**: a mapping containing the environment variables the command
will run with. Optional.
- **rlimits**: a mapping containing rlimit names and values that will
be set before the command runs.
- **stdout_stream**: a mapping that defines the stream for
the process stdout. Defaults to None.
Optional. When provided, *stdout_stream* is a mapping containing up to
      four keys:
- **class**: the stream class. Defaults to
`circus.stream.FileStream`
- **filename**: the filename, if using a FileStream
- **max_bytes**: maximum file size, after which a new output file is
opened. defaults to 0 which means no maximum size (only applicable
with FileStream).
- **backup_count**: how many backups to retain when rotating files
according to the max_bytes parameter. defaults to 0 which means
no backups are made (only applicable with FileStream)
This mapping will be used to create a stream callable of the specified
class.
Each entry received by the callable is a mapping containing:
- **pid** - the process pid
- **name** - the stream name (*stderr* or *stdout*)
- **data** - the data
This is not supported on Windows.
- **stderr_stream**: a mapping that defines the stream for
the process stderr. Defaults to None.
Optional. When provided, *stderr_stream* is a mapping containing up to
      four keys:
- **class**: the stream class. Defaults to `circus.stream.FileStream`
- **filename**: the filename, if using a FileStream
- **max_bytes**: maximum file size, after which a new output file is
opened. defaults to 0 which means no maximum size (only applicable
with FileStream)
- **backup_count**: how many backups to retain when rotating files
according to the max_bytes parameter. defaults to 0 which means
no backups are made (only applicable with FileStream).
This mapping will be used to create a stream callable of the specified
class.
Each entry received by the callable is a mapping containing:
- **pid** - the process pid
- **name** - the stream name (*stderr* or *stdout*)
- **data** - the data
This is not supported on Windows.
- **priority** -- integer that defines a priority for the watcher. When
the Arbiter do some operations on all watchers, it will sort them
with this field, from the bigger number to the smallest.
(default: 0)
- **singleton** -- If True, this watcher has a single process.
(default:False)
- **use_sockets** -- If True, the processes will inherit the file
descriptors, thus can reuse the sockets opened by circusd.
(default: False)
- **on_demand** -- If True, the processes will be started only
at the first connection to the socket
(default: False)
- **copy_env** -- If True, the environment in which circus is running
      will be reproduced for the workers. This defaults to True on
Windows as you cannot run any executable without the **SYSTEMROOT**
variable. (default: False)
- **copy_path** -- If True, circusd *sys.path* is sent to the
process through *PYTHONPATH*. You must activate **copy_env** for
**copy_path** to work. (default: False)
    - **max_age**: If set, after around max_age seconds the process is
replaced with a new one. (default: 0, Disabled)
- **max_age_variance**: The maximum number of seconds that can be added to
max_age. This extra value is to avoid restarting all processes at the
same time. A process will live between max_age and
max_age + max_age_variance seconds.
- **hooks**: callback functions for hooking into the watcher startup
and shutdown process. **hooks** is a dict where each key is the hook
name and each value is a 2-tuple with the name of the callable
      or the callable itself and a boolean flag indicating if an
      exception occurring in the hook should not be ignored.
Possible values for the hook name: *before_start*, *after_start*,
*before_spawn*, *after_spawn*, *before_stop*, *after_stop*.,
*before_signal*, *after_signal* or *extended_stats*.
- **options** -- extra options for the worker. All options
found in the configuration file for instance, are passed
in this mapping -- this can be used by plugins for watcher-specific
options.
- **respawn** -- If set to False, the processes handled by a watcher will
not be respawned automatically. (default: True)
- **virtualenv** -- The root directory of a virtualenv. If provided, the
watcher will load the environment for its execution. (default: None)
- **close_child_stdout**: If True, closes the stdout after the fork.
default: False.
- **close_child_stderr**: If True, closes the stderr after the fork.
default: False.
- **use_papa**: If True, use the papa process kernel for this process.
default: False.
"""
def __init__(self, name, cmd, args=None, numprocesses=1, warmup_delay=0.,
working_dir=None, shell=False, shell_args=None, uid=None,
max_retry=5, gid=None, send_hup=False,
stop_signal=signal.SIGTERM, stop_children=False, env=None,
graceful_timeout=30.0, prereload_fn=None, rlimits=None,
executable=None, stdout_stream=None, stderr_stream=None,
priority=0, loop=None, singleton=False, use_sockets=False,
copy_env=False, copy_path=False, max_age=0,
max_age_variance=30, hooks=None, respawn=True,
autostart=True, on_demand=False, virtualenv=None,
close_child_stdout=False, close_child_stderr=False,
virtualenv_py_ver=None, use_papa=False, **options):
self.name = name
self.use_sockets = use_sockets
self.on_demand = on_demand
self.res_name = name.lower().replace(" ", "_")
self.numprocesses = int(numprocesses)
self.warmup_delay = warmup_delay
self.cmd = cmd
self.args = args
self._status = "stopped"
self.graceful_timeout = float(graceful_timeout)
self.prereload_fn = prereload_fn
self.executable = None
self.priority = priority
self.stdout_stream_conf = copy.copy(stdout_stream)
self.stderr_stream_conf = copy.copy(stderr_stream)
self.stdout_stream = get_stream(self.stdout_stream_conf)
self.stderr_stream = get_stream(self.stderr_stream_conf)
self.stream_redirector = None
self.max_retry = max_retry
self._options = options
self.singleton = singleton
self.copy_env = copy_env
self.copy_path = copy_path
self.virtualenv = virtualenv
self.virtualenv_py_ver = virtualenv_py_ver
self.max_age = int(max_age)
self.max_age_variance = int(max_age_variance)
self.ignore_hook_failure = ['before_stop', 'after_stop',
'before_signal', 'after_signal',
'extended_stats']
self.respawn = respawn
self.autostart = autostart
self.close_child_stdout = close_child_stdout
self.close_child_stderr = close_child_stderr
self.use_papa = use_papa and papa is not None
self.loop = loop or ioloop.IOLoop.instance()
if singleton and self.numprocesses not in (0, 1):
raise ValueError("Cannot have %d processes with a singleton "
" watcher" % self.numprocesses)
if IS_WINDOWS:
if self.stdout_stream or self.stderr_stream:
raise NotImplementedError("Streams are not supported"
" on Windows.")
if not copy_env and not env:
# Copy the env by default on Windows as we can't run any
# executable without some env variables
# Eventually, we could set only some required variables,
# such as SystemRoot
self.copy_env = True
self.optnames = (("numprocesses", "warmup_delay", "working_dir",
"uid", "gid", "send_hup", "stop_signal",
"stop_children", "shell", "shell_args",
"env", "max_retry", "cmd", "args",
"graceful_timeout", "executable", "use_sockets",
"priority", "copy_env", "singleton",
"stdout_stream_conf", "on_demand",
"stderr_stream_conf", "max_age", "max_age_variance",
"close_child_stdout", "close_child_stderr",
"use_papa")
+ tuple(options.keys()))
if not working_dir:
# working dir hasn't been set
working_dir = util.get_working_dir()
self.working_dir = working_dir
self.processes = {}
self.shell = shell
self.shell_args = shell_args
self.uid = uid
self.gid = gid
if self.copy_env:
self.env = os.environ.copy()
if self.copy_path:
path = os.pathsep.join(sys.path)
self.env['PYTHONPATH'] = path
if env is not None:
self.env.update(env)
else:
if self.copy_path:
raise ValueError(('copy_env and copy_path must have the '
'same value'))
self.env = env
if self.virtualenv:
util.load_virtualenv(self, py_ver=virtualenv_py_ver)
# load directories in PYTHONPATH if provided
# so if a hook is there, it can be loaded
if self.env is not None and 'PYTHONPATH' in self.env:
for path in self.env['PYTHONPATH'].split(os.pathsep):
if path in sys.path:
continue
site.addsitedir(path)
self.rlimits = rlimits
self.send_hup = send_hup
self.stop_signal = stop_signal
self.stop_children = stop_children
self.sockets = self.evpub_socket = None
self.arbiter = None
self.hooks = {}
self._resolve_hooks(hooks)
self._found_wids = []
if self.use_papa:
with papa.Papa() as p:
base_name = 'circus.{0}.*'.format(name.lower())
running = p.list_processes(base_name)
self._found_wids = [int(proc_name[len(base_name) - 1:])
for proc_name in running]
def _reload_hook(self, key, hook, ignore_error):
hook_name = key.split('.')[-1]
self._resolve_hook(hook_name, hook, ignore_error, reload_module=True)
@property
def _redirector_class(self):
return PapaRedirector if self.use_papa else Redirector
@property
def _process_class(self):
return PapaProcessProxy if self.use_papa else Process
def _reload_stream(self, key, val):
parts = key.split('.', 1)
stream_type = 'stdout' if parts[0] == 'stdout_stream' else 'stderr'
old_stream = self.stream_redirector.get_stream(stream_type) if\
self.stream_redirector else None
if stream_type == 'stdout':
self.stdout_stream_conf[parts[1]] = val
new_stream = get_stream(self.stdout_stream_conf, reload=True)
self.stdout_stream = new_stream
else:
self.stderr_stream_conf[parts[1]] = val
new_stream = get_stream(self.stderr_stream_conf, reload=True)
self.stderr_stream = new_stream
if self.stream_redirector:
self.stream_redirector.change_stream(stream_type, new_stream)
else:
self.stream_redirector = self._redirector_class(
self.stdout_stream, self.stderr_stream, loop=self.loop)
if old_stream:
if hasattr(old_stream, 'close'):
old_stream.close()
return 0
self.stream_redirector.start()
return 1
def _create_redirectors(self):
if self.stdout_stream or self.stderr_stream:
if self.stream_redirector:
self.stream_redirector.stop()
self.stream_redirector = self._redirector_class(
self.stdout_stream, self.stderr_stream, loop=self.loop)
else:
self.stream_redirector = None
def _resolve_hook(self, name, callable_or_name, ignore_failure,
reload_module=False):
if is_callable(callable_or_name):
self.hooks[name] = callable_or_name
else:
# will raise ImportError on failure
self.hooks[name] = resolve_name(callable_or_name,
reload=reload_module)
if ignore_failure:
self.ignore_hook_failure.append(name)
def _resolve_hooks(self, hooks):
"""Check the supplied hooks argument to make sure we can find
callables"""
if hooks is None:
return
for name, (callable_or_name, ignore_failure) in hooks.items():
self._resolve_hook(name, callable_or_name, ignore_failure)
@property
def pending_socket_event(self):
return self.on_demand and not self.arbiter.socket_event
@classmethod
def load_from_config(cls, config):
if 'env' in config:
config['env'] = parse_env_dict(config['env'])
cfg = config.copy()
w = cls(name=config.pop('name'), cmd=config.pop('cmd'), **config)
w._cfg = cfg
return w
@util.debuglog
def initialize(self, evpub_socket, sockets, arbiter):
self.evpub_socket = evpub_socket
self.sockets = sockets
self.arbiter = arbiter
def __len__(self):
return len(self.processes)
def notify_event(self, topic, msg):
"""Publish a message on the event publisher channel"""
name = bytestring(self.res_name)
multipart_msg = [b("watcher.%s.%s" % (name, topic)), json.dumps(msg)]
if self.evpub_socket is not None and not self.evpub_socket.closed:
self.evpub_socket.send_multipart(multipart_msg)
@util.debuglog
def reap_process(self, pid, status=None):
"""ensure that the process is killed (and not a zombie)"""
if pid not in self.processes:
return
process = self.processes.pop(pid)
timeout = 0.001
while status is None:
if IS_WINDOWS:
try:
# On Windows we can't use waitpid as it's blocking,
                    # so we use psutil's wait
status = process.wait(timeout=timeout)
except TimeoutExpired:
continue
else:
try:
_, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno == errno.EAGAIN:
time.sleep(timeout)
continue
elif e.errno == errno.ECHILD:
status = None
else:
raise
if status is None:
# nothing to do here, we do not have any child
# process running
# but we still need to send the "reap" signal.
#
# This can happen if poll() or wait() were called on
# the underlying process.
logger.debug('reaping already dead process %s [%s]',
pid, self.name)
self.notify_event(
"reap",
{"process_pid": pid,
"time": time.time(),
"exit_code": process.returncode()})
process.stop()
return
# get return code
if hasattr(os, 'WIFSIGNALED'):
exit_code = 0
if os.WIFSIGNALED(status):
                # The Python Popen object returns <-signal> in its returncode
# property if the process exited on a signal, so emulate that
# behavior here so that pubsub clients watching for reap can
# distinguish between an exit with a non-zero exit code and
# a signal'd exit. This is also consistent with the notify
# event reap message above that uses the returncode function
# (that ends up calling Popen.returncode)
exit_code = -os.WTERMSIG(status)
# process exited using exit(2) system call; return the
# integer exit(2) system call has been called with
elif os.WIFEXITED(status):
exit_code = os.WEXITSTATUS(status)
else:
# should never happen
raise RuntimeError("Unknown process exit status")
else:
# On Windows we don't have such distinction
exit_code = status
# if the process is dead or a zombie try to definitely stop it.
if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
process.stop()
logger.debug('reaping process %s [%s]', pid, self.name)
self.notify_event("reap",
{"process_pid": pid,
"time": time.time(),
"exit_code": exit_code})
@util.debuglog
def reap_processes(self):
"""Reap all the processes for this watcher.
"""
if self.is_stopped():
logger.debug('do not reap processes as the watcher is stopped')
return
# reap_process changes our dict, look through the copy of keys
for pid in list(self.processes.keys()):
self.reap_process(pid)
@gen.coroutine
@util.debuglog
def manage_processes(self):
"""Manage processes."""
if self.is_stopped():
return
# remove dead or zombie processes first
for process in list(self.processes.values()):
if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
self.processes.pop(process.pid)
if self.max_age:
yield self.remove_expired_processes()
# adding fresh processes
if len(self.processes) < self.numprocesses and not self.is_stopping():
if self.respawn:
yield self.spawn_processes()
elif not len(self.processes) and not self.on_demand:
yield self._stop()
# removing extra processes
if len(self.processes) > self.numprocesses:
processes_to_kill = []
for process in sorted(self.processes.values(),
key=lambda process: process.started,
reverse=True)[self.numprocesses:]:
if process.status in (DEAD_OR_ZOMBIE, UNEXISTING):
self.processes.pop(process.pid)
else:
processes_to_kill.append(process)
removes = yield [self.kill_process(process)
for process in processes_to_kill]
for i, process in enumerate(processes_to_kill):
if removes[i]:
self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def remove_expired_processes(self):
max_age = self.max_age + randint(0, self.max_age_variance)
expired_processes = [p for p in self.processes.values()
if p.age() > max_age]
removes = yield [self.kill_process(x) for x in expired_processes]
for i, process in enumerate(expired_processes):
if removes[i]:
self.processes.pop(process.pid)
@gen.coroutine
@util.debuglog
def reap_and_manage_processes(self):
"""Reap & manage processes."""
if self.is_stopped():
return
self.reap_processes()
yield self.manage_processes()
@gen.coroutine
@util.debuglog
def spawn_processes(self):
"""Spawn processes.
"""
# when an on_demand process dies, do not restart it until
# the next event
if self.pending_socket_event:
self._status = "stopped"
return
for i in self._found_wids:
self.spawn_process(i)
yield tornado_sleep(0)
self._found_wids = {}
for i in range(self.numprocesses - len(self.processes)):
res = self.spawn_process()
if res is False:
yield self._stop()
break
delay = self.warmup_delay
if isinstance(res, float):
delay -= (time.time() - res)
if delay < 0:
delay = 0
yield tornado_sleep(delay)
def _get_sockets_fds(self):
# XXX should be cached
if self.sockets is None:
return {}
return dict((name, sock.fileno())
for name, sock in self.sockets.items()
if sock.use_papa == self.use_papa)
def spawn_process(self, recovery_wid=None):
"""Spawn process.
Return True if ok, False if the watcher must be stopped
"""
if self.is_stopped():
return True
if not recovery_wid and not self.call_hook('before_spawn'):
return False
cmd = util.replace_gnu_args(self.cmd, env=self.env)
nb_tries = 0
# start the redirector now so we can catch any startup errors
if self.stream_redirector:
self.stream_redirector.start()
while nb_tries < self.max_retry or self.max_retry == -1:
process = None
pipe_stdout = self.stdout_stream is not None
pipe_stderr = self.stderr_stream is not None
# noinspection PyPep8Naming
ProcCls = self._process_class
try:
process = ProcCls(self.name, recovery_wid or self._nextwid,
cmd, args=self.args,
working_dir=self.working_dir,
shell=self.shell, uid=self.uid, gid=self.gid,
env=self.env, rlimits=self.rlimits,
executable=self.executable,
use_fds=self.use_sockets, watcher=self,
pipe_stdout=pipe_stdout,
pipe_stderr=pipe_stderr,
close_child_stdout=self.close_child_stdout,
close_child_stderr=self.close_child_stderr)
# stream stderr/stdout if configured
if self.stream_redirector:
self.stream_redirector.add_redirections(process)
self.processes[process.pid] = process
logger.debug('running %s process [pid %d]', self.name,
process.pid)
if not self.call_hook('after_spawn', pid=process.pid):
self.kill_process(process)
del self.processes[process.pid]
return False
# catch ValueError as well, as a misconfigured rlimit setting could
# lead to bad infinite retries here
except (OSError, ValueError) as e:
logger.warning('error in %r: %s', self.name, str(e))
if process is None:
nb_tries += 1
continue
else:
self.notify_event("spawn", {"process_pid": process.pid,
"time": process.started})
return process.started
return False
@util.debuglog
def send_signal_process(self, process, signum):
"""Send the signum signal to the process
The signal is sent to the process itself then to all the children
"""
children = None
try:
# getting the process children
children = process.children()
# sending the signal to the process itself
self.send_signal(process.pid, signum)
self.notify_event("kill", {"process_pid": process.pid,
"time": time.time()})
except NoSuchProcess:
# already dead !
if children is None:
return
# now sending the same signal to all the children
for child_pid in children:
try:
process.send_signal_child(child_pid, signum)
self.notify_event("kill", {"process_pid": child_pid,
"time": time.time()})
except NoSuchProcess:
# already dead !
pass
@gen.coroutine
@util.debuglog
def kill_process(self, process):
"""Kill process (stop_signal, graceful_timeout then SIGKILL)
"""
if process.stopping:
raise gen.Return(False)
try:
logger.debug("%s: kill process %s", self.name, process.pid)
if self.stop_children:
self.send_signal_process(process, self.stop_signal)
else:
self.send_signal(process.pid, self.stop_signal)
self.notify_event("kill", {"process_pid": process.pid,
"time": time.time()})
except NoSuchProcess:
raise gen.Return(False)
process.stopping = True
waited = 0
while waited < self.graceful_timeout:
if not process.is_alive():
break
yield tornado_sleep(0.1)
waited += 0.1
if waited >= self.graceful_timeout:
# On Windows we can't send a SIGKILL signal, but the
# process.stop function will terminate the process
# later anyway
if hasattr(signal, 'SIGKILL'):
# We are not smart anymore
self.send_signal_process(process, signal.SIGKILL)
if self.stream_redirector:
self.stream_redirector.remove_redirections(process)
process.stopping = False
process.stop()
raise gen.Return(True)
@gen.coroutine
@util.debuglog
def kill_processes(self):
"""Kill all processes (stop_signal, graceful_timeout then SIGKILL)
"""
active_processes = self.get_active_processes()
try:
yield [self.kill_process(process) for process in active_processes]
except OSError as e:
if e.errno != errno.ESRCH:
raise
@util.debuglog
def send_signal(self, pid, signum):
is_sigkill = hasattr(signal, 'SIGKILL') and signum == signal.SIGKILL
if pid in self.processes:
process = self.processes[pid]
hook_result = self.call_hook("before_signal",
pid=pid, signum=signum)
if not is_sigkill and not hook_result:
logger.debug("before_signal hook didn't return True "
"=> signal %i is not sent to %i" % (signum, pid))
else:
process.send_signal(signum)
self.call_hook("after_signal", pid=pid, signum=signum)
else:
logger.debug('process %s does not exist' % pid)
@util.debuglog
def send_signal_child(self, pid, child_id, signum):
"""Send signal to a child.
"""
process = self.processes[pid]
try:
process.send_signal_child(int(child_id), signum)
except OSError as e:
if e.errno != errno.ESRCH:
raise
@util.debuglog
def send_signal_children(self, pid, signum):
"""Send signal to all children.
"""
process = self.processes[int(pid)]
process.send_signal_children(signum)
@util.debuglog
def status(self):
return self._status
@util.debuglog
def process_info(self, pid, extended=False):
process = self.processes[int(pid)]
result = process.info()
if extended and 'extended_stats' in self.hooks:
self.hooks['extended_stats'](self, self.arbiter,
'extended_stats',
pid=pid, stats=result)
return result
@util.debuglog
def info(self, extended=False):
result = dict([(proc.pid, proc.info())
for proc in self.processes.values()])
if extended and 'extended_stats' in self.hooks:
for pid, stats in result.items():
self.hooks['extended_stats'](self, self.arbiter,
'extended_stats',
pid=pid, stats=stats)
return result
@util.synchronized("watcher_stop")
@gen.coroutine
def stop(self):
yield self._stop()
@util.debuglog
@gen.coroutine
def _stop(self, close_output_streams=False, for_shutdown=False):
if self.is_stopped():
return
self._status = "stopping"
skip = for_shutdown and self.use_papa
if not skip:
logger.debug('stopping the %s watcher' % self.name)
logger.debug('gracefully stopping processes [%s] for %ss' % (
self.name, self.graceful_timeout))
# We ignore the hook result
self.call_hook('before_stop')
yield self.kill_processes()
self.reap_processes()
# stop redirectors
if self.stream_redirector:
self.stream_redirector.stop()
self.stream_redirector = None
if close_output_streams:
if self.stdout_stream and hasattr(self.stdout_stream, 'close'):
self.stdout_stream.close()
if self.stderr_stream and hasattr(self.stderr_stream, 'close'):
self.stderr_stream.close()
# notify about the stop
if skip:
logger.info('%s left running in papa', self.name)
else:
if self.evpub_socket is not None:
self.notify_event("stop", {"time": time.time()})
self._status = "stopped"
# We ignore the hook result
self.call_hook('after_stop')
logger.info('%s stopped', self.name)
def get_active_processes(self):
"""return a list of pids of active processes (not already stopped)"""
return [p for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
def get_active_pids(self):
"""return a list of pids of active processes (not already stopped)"""
return [p.pid for p in self.processes.values()
if p.status not in (DEAD_OR_ZOMBIE, UNEXISTING)]
@property
def pids(self):
"""Returns a list of PIDs"""
return [process.pid for process in self.processes.values()]
@property
def _nextwid(self):
used_wids = set([p.wid for p in self.processes.values()])
all_wids = set(range(1, self.numprocesses * 2 + 1))
available_wids = sorted(all_wids - used_wids)
try:
return available_wids[0]
except IndexError:
raise RuntimeError("Process count > numproceses*2")
def call_hook(self, hook_name, **kwargs):
"""Call a hook function"""
hook_kwargs = {'watcher': self, 'arbiter': self.arbiter,
'hook_name': hook_name}
hook_kwargs.update(kwargs)
if hook_name in self.hooks:
try:
result = self.hooks[hook_name](**hook_kwargs)
self.notify_event("hook_success",
{"name": hook_name, "time": time.time()})
except Exception as error:
logger.exception('Hook %r failed' % hook_name)
result = hook_name in self.ignore_hook_failure
self.notify_event("hook_failure",
{"name": hook_name, "time": time.time(),
"error": str(error)})
return result
else:
return True
@util.synchronized("watcher_start")
@gen.coroutine
def start(self):
before_pids = set() if self.is_stopped() else set(self.processes)
yield self._start()
after_pids = set(self.processes)
raise gen.Return({'started': sorted(after_pids - before_pids),
'kept': sorted(after_pids & before_pids)})
@gen.coroutine
@util.debuglog
def _start(self):
"""Start.
"""
if self.pending_socket_event:
return
if not self.is_stopped():
if len(self.processes) < self.numprocesses:
self.reap_processes()
yield self.spawn_processes()
return
found_wids = len(self._found_wids)
if not self._found_wids and not self.call_hook('before_start'):
logger.debug('Aborting startup')
return
self._status = "starting"
self._create_redirectors()
self.reap_processes()
yield self.spawn_processes()
# If not self.processes, the before_spawn or after_spawn hooks have
# probably prevented startup so give up
if not self.processes or not self.call_hook('after_start'):
logger.debug('Aborting startup')
yield self._stop()
return
self._status = "active"
if found_wids:
logger.info('%s already running' % self.name)
else:
logger.info('%s started' % self.name)
self.notify_event("start", {"time": time.time()})
@util.synchronized("watcher_restart")
@gen.coroutine
def restart(self):
before_pids = set() if self.is_stopped() else set(self.processes)
yield self._restart()
after_pids = set(self.processes)
raise gen.Return({'stopped': sorted(before_pids - after_pids),
'started': sorted(after_pids - before_pids),
'kept': sorted(after_pids & before_pids)})
@gen.coroutine
@util.debuglog
def _restart(self):
yield self._stop()
yield self._start()
@util.synchronized("watcher_reload")
@gen.coroutine
def reload(self, graceful=True, sequential=False):
before_pids = set() if self.is_stopped() else set(self.processes)
yield self._reload(graceful=graceful, sequential=sequential)
after_pids = set(self.processes)
raise gen.Return({'stopped': sorted(before_pids - after_pids),
'started': sorted(after_pids - before_pids),
'kept': sorted(after_pids & before_pids)})
@gen.coroutine
@util.debuglog
def _reload(self, graceful=True, sequential=False):
""" reload
"""
if not graceful and sequential:
logger.warn("with graceful=False, sequential=True is ignored")
if self.prereload_fn is not None:
self.prereload_fn(self)
if not graceful:
yield self._restart()
return
if self.is_stopped():
yield self._start()
elif self.send_hup:
for process in self.processes.values():
logger.info("SENDING HUP to %s" % process.pid)
process.send_signal(signal.SIGHUP)
else:
if sequential:
active_processes = self.get_active_processes()
for process in active_processes:
yield self.kill_process(process)
self.reap_process(process.pid)
self.spawn_process()
yield tornado_sleep(self.warmup_delay)
else:
for i in range(self.numprocesses):
self.spawn_process()
yield self.manage_processes()
self.notify_event("reload", {"time": time.time()})
logger.info('%s reloaded', self.name)
@gen.coroutine
def set_numprocesses(self, np):
if np < 0:
np = 0
if self.singleton and np > 1:
raise ValueError('Singleton watcher has a single process')
self.numprocesses = np
yield self.manage_processes()
raise gen.Return(self.numprocesses)
@util.synchronized("watcher_incr")
@gen.coroutine
@util.debuglog
def incr(self, nb=1):
res = yield self.set_numprocesses(self.numprocesses + nb)
raise gen.Return(res)
@util.synchronized("watcher_decr")
@gen.coroutine
@util.debuglog
def decr(self, nb=1):
res = yield self.set_numprocesses(self.numprocesses - nb)
raise gen.Return(res)
@util.synchronized("watcher_set_opt")
def set_opt(self, key, val):
"""Set a watcher option.
This function set the watcher options. unknown keys are ignored.
This function return an action number:
- 0: trigger the process management
- 1: trigger a graceful reload of the processes;
"""
action = 0
if key in self._options:
self._options[key] = val
action = -1 # XXX for now does not trigger a reload
elif key == "numprocesses":
val = int(val)
if val < 0:
val = 0
if self.singleton and val > 1:
raise ValueError('Singleton watcher has a single process')
self.numprocesses = val
elif key == "warmup_delay":
self.warmup_delay = float(val)
elif key == "working_dir":
self.working_dir = val
action = 1
elif key == "uid":
self.uid = util.to_uid(val)
action = 1
elif key == "gid":
self.gid = util.to_gid(val)
action = 1
elif key == "send_hup":
self.send_hup = val
elif key == "stop_signal":
self.stop_signal = util.to_signum(val)
elif key == "stop_children":
self.stop_children = util.to_bool(val)
elif key == "shell":
self.shell = val
action = 1
elif key == "env":
if PY2 and IS_WINDOWS:
# Windows on Python 2 does not accept Unicode values
# in env dictionary
self.env = dict((b(k), b(v)) for k, v in val.iteritems())
else:
self.env = val
action = 1
elif key == "cmd":
self.cmd = val
action = 1
elif key == "args":
self.args = val
action = 1
elif key == "graceful_timeout":
self.graceful_timeout = float(val)
action = -1
elif key == "max_age":
self.max_age = int(val)
action = 1
elif key == "max_age_variance":
self.max_age_variance = int(val)
action = 1
elif (key.startswith('stdout_stream') or
key.startswith('stderr_stream')):
action = self._reload_stream(key, val)
elif key.startswith('hooks'):
val = val.split(',')
if len(val) == 2:
ignore_error = util.to_bool(val[1])
else:
ignore_error = False
hook = val[0]
self._reload_hook(key, hook, ignore_error)
action = 0
# send update event
self.notify_event("updated", {"time": time.time()})
return action
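# Editor's hedged note (illustrative, not upstream code): callers are
# expected to pair set_opt() with do_action() so the returned action
# number is actually applied, roughly:
#
#     action = watcher.set_opt("numprocesses", 4)   # -> 0 (process management)
#     yield watcher.do_action(action)               # runs manage_processes()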
@util.synchronized("watcher_do_action")
@gen.coroutine
def do_action(self, num):
# trigger needed action
if num == 0:
yield self.manage_processes()
elif not self.is_stopped():
# graceful restart
yield self._reload()
@util.debuglog
def options(self, *args):
options = []
for name in sorted(self.optnames):
if name in self._options:
options.append((name, self._options[name]))
else:
options.append((name, getattr(self, name)))
return options
def is_stopping(self):
return self._status == 'stopping'
def is_stopped(self):
return self._status == 'stopped'
def is_active(self):
return self._status == 'active'
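# --- Editor's hedged sketch (not part of the upstream module) ---------------
# call_hook() above always passes ``watcher``, ``arbiter`` and ``hook_name``
# as keyword arguments, plus hook-specific extras such as ``pid`` for the
# spawn/signal hooks, so a user-supplied hook roughly has the following
# shape (the name ``my_after_spawn`` is purely illustrative):

def my_after_spawn(watcher, arbiter, hook_name, pid, **kwargs):
    # Returning a falsy value from ``after_spawn`` makes spawn_process()
    # kill the freshly spawned process and report a failure.
    return pid is not None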
|
the-stack_0_26678
|
from .. import abc
from .. import util
machinery = util.import_importlib('importlib.machinery')
import errno
import os
import py_compile
import stat
import sys
import tempfile
from test.support import make_legacy_pyc
import unittest
import warnings
class FinderTests(abc.FinderTests):
"""For a top-level module, it should just be found directly in the
directory being searched. This is true for a directory with source
[top-level source], bytecode [top-level bc], or both [top-level both].
There is also the possibility that it is a package [top-level package], in
which case there will be a directory with the module name and an
__init__.py file. If there is a directory without an __init__.py an
ImportWarning is returned [empty dir].
For sub-modules and sub-packages, the same happens as above but only use
the tail end of the name [sub module] [sub package] [sub empty].
When there is a conflict between a package and module having the same name
in the same directory, the package wins out [package over module]. This is
so that imports of modules within the package can occur rather than trigger
an import error.
When there is a package and module with the same name, always pick the
package over the module [package over module]. This is so that imports from
the package have the possibility of succeeding.
"""
def get_finder(self, root):
loader_details = [(self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES),
(self.machinery.SourcelessFileLoader,
self.machinery.BYTECODE_SUFFIXES)]
return self.machinery.FileFinder(root, *loader_details)
def import_(self, root, module):
finder = self.get_finder(root)
return self._find(finder, module, loader_only=True)
def run_test(self, test, create=None, *, compile_=None, unlink=None):
"""Test the finding of 'test' with the creation of modules listed in
'create'.
Any names listed in 'compile_' are byte-compiled. Modules
listed in 'unlink' have their source files deleted.
"""
if create is None:
create = {test}
with util.create_modules(*create) as mapping:
if compile_:
for name in compile_:
py_compile.compile(mapping[name])
if unlink:
for name in unlink:
os.unlink(mapping[name])
try:
make_legacy_pyc(mapping[name])
except OSError as error:
# Some tests do not set compile_=True so the source
# module will not get compiled and there will be no
# PEP 3147 pyc file to rename.
if error.errno != errno.ENOENT:
raise
loader = self.import_(mapping['.root'], test)
self.assertTrue(hasattr(loader, 'load_module'))
return loader
def test_module(self):
# [top-level source]
self.run_test('top_level')
# [top-level bc]
self.run_test('top_level', compile_={'top_level'},
unlink={'top_level'})
# [top-level both]
self.run_test('top_level', compile_={'top_level'})
# [top-level package]
def test_package(self):
# Source.
self.run_test('pkg', {'pkg.__init__'})
# Bytecode.
self.run_test('pkg', {'pkg.__init__'}, compile_={'pkg.__init__'},
unlink={'pkg.__init__'})
# Both.
self.run_test('pkg', {'pkg.__init__'}, compile_={'pkg.__init__'})
# [sub module]
def test_module_in_package(self):
with util.create_modules('pkg.__init__', 'pkg.sub') as mapping:
pkg_dir = os.path.dirname(mapping['pkg.__init__'])
loader = self.import_(pkg_dir, 'pkg.sub')
self.assertTrue(hasattr(loader, 'load_module'))
# [sub package]
def test_package_in_package(self):
context = util.create_modules('pkg.__init__', 'pkg.sub.__init__')
with context as mapping:
pkg_dir = os.path.dirname(mapping['pkg.__init__'])
loader = self.import_(pkg_dir, 'pkg.sub')
self.assertTrue(hasattr(loader, 'load_module'))
# [package over modules]
def test_package_over_module(self):
name = '_temp'
loader = self.run_test(name, {'{0}.__init__'.format(name), name})
self.assertIn('__init__', loader.get_filename(name))
def test_failure(self):
with util.create_modules('blah') as mapping:
nothing = self.import_(mapping['.root'], 'sdfsadsadf')
self.assertIsNone(nothing)
def test_empty_string_for_dir(self):
# The empty string from sys.path means to search in the cwd.
finder = self.machinery.FileFinder('', (self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
with open('mod.py', 'w') as file:
file.write("# test file for importlib")
try:
loader = self._find(finder, 'mod', loader_only=True)
self.assertTrue(hasattr(loader, 'load_module'))
finally:
os.unlink('mod.py')
def test_invalidate_caches(self):
# invalidate_caches() should reset the mtime.
finder = self.machinery.FileFinder('', (self.machinery.SourceFileLoader,
self.machinery.SOURCE_SUFFIXES))
finder._path_mtime = 42
finder.invalidate_caches()
self.assertEqual(finder._path_mtime, -1)
# Regression test for http://bugs.python.org/issue14846
def test_dir_removal_handling(self):
mod = 'mod'
with util.create_modules(mod) as mapping:
finder = self.get_finder(mapping['.root'])
found = self._find(finder, 'mod', loader_only=True)
self.assertIsNotNone(found)
found = self._find(finder, 'mod', loader_only=True)
self.assertIsNone(found)
@unittest.skipUnless(sys.platform != 'win32',
'os.chmod() does not support the needed arguments under Windows')
def test_no_read_directory(self):
# Issue #16730
tempdir = tempfile.TemporaryDirectory()
original_mode = os.stat(tempdir.name).st_mode
def cleanup(tempdir):
"""Cleanup function for the temporary directory.
Since we muck with the permissions, we want to set them back to
their original values to make sure the directory can be properly
cleaned up.
"""
os.chmod(tempdir.name, original_mode)
# If this is not explicitly called then the __del__ method is used,
# but since already mucking around might as well explicitly clean
# up.
tempdir.__exit__(None, None, None)
self.addCleanup(cleanup, tempdir)
os.chmod(tempdir.name, stat.S_IWUSR | stat.S_IXUSR)
finder = self.get_finder(tempdir.name)
found = self._find(finder, 'doesnotexist')
self.assertEqual(found, self.NOT_FOUND)
@unittest.skip("TODO: RUSTPYTHON")
def test_ignore_file(self):
# If a directory got changed to a file from underneath us, then don't
# worry about looking for submodules.
with tempfile.NamedTemporaryFile() as file_obj:
finder = self.get_finder(file_obj.name)
found = self._find(finder, 'doesnotexist')
self.assertEqual(found, self.NOT_FOUND)
class FinderTestsPEP451(FinderTests):
NOT_FOUND = None
def _find(self, finder, name, loader_only=False):
spec = finder.find_spec(name)
return spec.loader if spec is not None else spec
(Frozen_FinderTestsPEP451,
Source_FinderTestsPEP451
) = util.test_both(FinderTestsPEP451, machinery=machinery)
class FinderTestsPEP420(FinderTests):
NOT_FOUND = (None, [])
def _find(self, finder, name, loader_only=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
loader_portions = finder.find_loader(name)
return loader_portions[0] if loader_only else loader_portions
(Frozen_FinderTestsPEP420,
Source_FinderTestsPEP420
) = util.test_both(FinderTestsPEP420, machinery=machinery)
class FinderTestsPEP302(FinderTests):
NOT_FOUND = None
def _find(self, finder, name, loader_only=False):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
return finder.find_module(name)
(Frozen_FinderTestsPEP302,
Source_FinderTestsPEP302
) = util.test_both(FinderTestsPEP302, machinery=machinery)
if __name__ == '__main__':
unittest.main()
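# Editor's hedged sketch (illustrative only): constructing a FileFinder by
# hand, mirroring get_finder() above; the path and module name below are
# placeholders.
#
#     from importlib import machinery
#     details = [(machinery.SourceFileLoader, machinery.SOURCE_SUFFIXES),
#                (machinery.SourcelessFileLoader, machinery.BYTECODE_SUFFIXES)]
#     finder = machinery.FileFinder("/some/dir", *details)
#     spec = finder.find_spec("top_level")   # None when nothing matches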
|
the-stack_0_26679
|
# ext/mypy/names.py
# Copyright (C) 2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Union
from mypy.nodes import ClassDef
from mypy.nodes import Expression
from mypy.nodes import FuncDef
from mypy.nodes import MemberExpr
from mypy.nodes import NameExpr
from mypy.nodes import SymbolNode
from mypy.nodes import TypeAlias
from mypy.nodes import TypeInfo
from mypy.plugin import SemanticAnalyzerPluginInterface
from mypy.types import CallableType
from mypy.types import get_proper_type
from mypy.types import Instance
from mypy.types import UnboundType
from ... import util
COLUMN: int = util.symbol("COLUMN") # type: ignore
RELATIONSHIP: int = util.symbol("RELATIONSHIP") # type: ignore
REGISTRY: int = util.symbol("REGISTRY") # type: ignore
COLUMN_PROPERTY: int = util.symbol("COLUMN_PROPERTY") # type: ignore
TYPEENGINE: int = util.symbol("TYPEENGINE") # type: ignore
MAPPED: int = util.symbol("MAPPED") # type: ignore
DECLARATIVE_BASE: int = util.symbol("DECLARATIVE_BASE") # type: ignore
DECLARATIVE_META: int = util.symbol("DECLARATIVE_META") # type: ignore
MAPPED_DECORATOR: int = util.symbol("MAPPED_DECORATOR") # type: ignore
COLUMN_PROPERTY: int = util.symbol("COLUMN_PROPERTY") # type: ignore
SYNONYM_PROPERTY: int = util.symbol("SYNONYM_PROPERTY") # type: ignore
COMPOSITE_PROPERTY: int = util.symbol("COMPOSITE_PROPERTY") # type: ignore
DECLARED_ATTR: int = util.symbol("DECLARED_ATTR") # type: ignore
MAPPER_PROPERTY: int = util.symbol("MAPPER_PROPERTY") # type: ignore
AS_DECLARATIVE: int = util.symbol("AS_DECLARATIVE") # type: ignore
AS_DECLARATIVE_BASE: int = util.symbol("AS_DECLARATIVE_BASE") # type: ignore
DECLARATIVE_MIXIN: int = util.symbol("DECLARATIVE_MIXIN") # type: ignore
_lookup: Dict[str, Tuple[int, Set[str]]] = {
"Column": (
COLUMN,
{
"sqlalchemy.sql.schema.Column",
"sqlalchemy.sql.Column",
},
),
"RelationshipProperty": (
RELATIONSHIP,
{
"sqlalchemy.orm.relationships.RelationshipProperty",
"sqlalchemy.orm.RelationshipProperty",
},
),
"registry": (
REGISTRY,
{
"sqlalchemy.orm.decl_api.registry",
"sqlalchemy.orm.registry",
},
),
"ColumnProperty": (
COLUMN_PROPERTY,
{
"sqlalchemy.orm.properties.ColumnProperty",
"sqlalchemy.orm.ColumnProperty",
},
),
"SynonymProperty": (
SYNONYM_PROPERTY,
{
"sqlalchemy.orm.descriptor_props.SynonymProperty",
"sqlalchemy.orm.SynonymProperty",
},
),
"CompositeProperty": (
COMPOSITE_PROPERTY,
{
"sqlalchemy.orm.descriptor_props.CompositeProperty",
"sqlalchemy.orm.CompositeProperty",
},
),
"MapperProperty": (
MAPPER_PROPERTY,
{
"sqlalchemy.orm.interfaces.MapperProperty",
"sqlalchemy.orm.MapperProperty",
},
),
"TypeEngine": (TYPEENGINE, {"sqlalchemy.sql.type_api.TypeEngine"}),
"Mapped": (MAPPED, {"sqlalchemy.orm.attributes.Mapped"}),
"declarative_base": (
DECLARATIVE_BASE,
{
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.orm.declarative_base",
"sqlalchemy.orm.decl_api.declarative_base",
},
),
"DeclarativeMeta": (
DECLARATIVE_META,
{
"sqlalchemy.ext.declarative.DeclarativeMeta",
"sqlalchemy.orm.DeclarativeMeta",
"sqlalchemy.orm.decl_api.DeclarativeMeta",
},
),
"mapped": (
MAPPED_DECORATOR,
{
"sqlalchemy.orm.decl_api.registry.mapped",
"sqlalchemy.orm.registry.mapped",
},
),
"as_declarative": (
AS_DECLARATIVE,
{
"sqlalchemy.ext.declarative.as_declarative",
"sqlalchemy.orm.decl_api.as_declarative",
"sqlalchemy.orm.as_declarative",
},
),
"as_declarative_base": (
AS_DECLARATIVE_BASE,
{
"sqlalchemy.orm.decl_api.registry.as_declarative_base",
"sqlalchemy.orm.registry.as_declarative_base",
},
),
"declared_attr": (
DECLARED_ATTR,
{
"sqlalchemy.orm.decl_api.declared_attr",
"sqlalchemy.orm.declared_attr",
},
),
"declarative_mixin": (
DECLARATIVE_MIXIN,
{
"sqlalchemy.orm.decl_api.declarative_mixin",
"sqlalchemy.orm.declarative_mixin",
},
),
}
def _has_base_type_id(info: TypeInfo, type_id: int) -> bool:
for mr in info.mro:
check_type_id, fullnames = _lookup.get(mr.name, (None, None))
if check_type_id == type_id:
break
else:
return False
if fullnames is None:
return False
return mr.fullname in fullnames
def _mro_has_id(mro: List[TypeInfo], type_id: int) -> bool:
for mr in mro:
check_type_id, fullnames = _lookup.get(mr.name, (None, None))
if check_type_id == type_id:
break
else:
return False
if fullnames is None:
return False
return mr.fullname in fullnames
def _type_id_for_unbound_type(
type_: UnboundType, cls: ClassDef, api: SemanticAnalyzerPluginInterface
) -> Optional[int]:
type_id = None
sym = api.lookup_qualified(type_.name, type_)
if sym is not None:
if isinstance(sym.node, TypeAlias):
target_type = get_proper_type(sym.node.target)
if isinstance(target_type, Instance):
type_id = _type_id_for_named_node(target_type.type)
elif isinstance(sym.node, TypeInfo):
type_id = _type_id_for_named_node(sym.node)
return type_id
def _type_id_for_callee(callee: Expression) -> Optional[int]:
type_id = None
if isinstance(callee, (MemberExpr, NameExpr)):
if isinstance(callee.node, FuncDef):
return _type_id_for_funcdef(callee.node)
elif isinstance(callee.node, TypeAlias):
target_type = get_proper_type(callee.node.target)
if isinstance(target_type, Instance):
type_id = _type_id_for_fullname(target_type.type.fullname)
elif isinstance(callee.node, TypeInfo):
type_id = _type_id_for_named_node(callee)
else:
type_id = None
return type_id
def _type_id_for_funcdef(node: FuncDef) -> Optional[int]:
if node.type and isinstance(node.type, CallableType):
ret_type = get_proper_type(node.type.ret_type)
if isinstance(ret_type, Instance):
return _type_id_for_fullname(ret_type.type.fullname)
return None
def _type_id_for_named_node(
node: Union[NameExpr, MemberExpr, SymbolNode]
) -> Optional[int]:
type_id, fullnames = _lookup.get(node.name, (None, None))
if type_id is None or fullnames is None:
return None
elif node.fullname in fullnames:
return type_id
else:
return None
def _type_id_for_fullname(fullname: str) -> Optional[int]:
tokens = fullname.split(".")
immediate = tokens[-1]
type_id, fullnames = _lookup.get(immediate, (None, None))
if type_id is None or fullnames is None:
return None
elif fullname in fullnames:
return type_id
else:
return None
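# --- Editor's hedged sketch (illustrative only, not upstream API) -----------
# The helpers above map a resolved name back to one of the integer symbols
# declared at the top of this module; only fully qualified names listed in
# ``_lookup`` resolve, everything else yields None.

def _example_fullname_lookup() -> None:
    assert _type_id_for_fullname("sqlalchemy.sql.schema.Column") == COLUMN
    assert _type_id_for_fullname("sqlalchemy.orm.registry.mapped") == MAPPED_DECORATOR
    assert _type_id_for_fullname("not.a.tracked.Name") is None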
|
the-stack_0_26680
|
import logging
import operator
import os
from functools import reduce
import anndata
import numpy as np
import pandas as pd
import scipy.sparse as sp_sparse
from typing import Dict, Optional
from scvi.dataset.dataset import (
DownloadableDataset,
GeneExpressionDataset,
CellMeasurement,
)
logger = logging.getLogger(__name__)
class AnnDatasetFromAnnData(GeneExpressionDataset):
"""Forms a ``GeneExpressionDataset`` from a ``anndata.AnnData`` object.
:param ad: ``anndata.AnnData`` instance.
:param batch_label: ``str`` representing AnnData obs column name for batches
:param ctype_label: ``str`` representing AnnData obs column name for cell_types
:param class_label: ``str`` representing AnnData obs column name for labels
:param use_raw: if True, copies data from .raw attribute of AnnData
"""
def __init__(
self,
ad: anndata.AnnData,
batch_label: str = "batch_indices",
ctype_label: str = "cell_types",
class_label: str = "labels",
use_raw: bool = False,
cell_measurements_col_mappings: Optional[Dict[str, str]] = None,
):
super().__init__()
(
X,
batch_indices,
labels,
gene_names,
cell_types,
obs,
obsm,
var,
_,
uns,
) = extract_data_from_anndata(
ad,
batch_label=batch_label,
ctype_label=ctype_label,
class_label=class_label,
use_raw=use_raw,
)
# Dataset API takes a dict as input
obs = obs.to_dict(orient="list")
var = var.to_dict(orient="list")
# add external cell measurements
Ys = []
if cell_measurements_col_mappings is not None:
for name, attr_name in cell_measurements_col_mappings.items():
columns = uns[attr_name]
measurement = CellMeasurement(
name=name,
data=obsm[name],
columns_attr_name=attr_name,
columns=columns,
)
Ys.append(measurement)
self.populate_from_data(
X=X,
Ys=Ys,
labels=labels,
batch_indices=batch_indices,
gene_names=gene_names,
cell_types=cell_types,
cell_attributes_dict=obs,
gene_attributes_dict=var,
)
self.filter_cells_by_count()
class DownloadableAnnDataset(DownloadableDataset):
"""Forms a ``DownloadableDataset`` from a `.h5ad` file using the ``anndata`` package.
:param filename: Name of the `.h5ad` file to save/load.
:param save_path: Location to use when saving/loading the data.
:param url: URL pointing to the data which will be downloaded
if it's not already in ``save_path``.
:param delayed_populating: Switch for delayed populating mechanism.
:param batch_label: ``str`` representing AnnData obs column name for batches
:param ctype_label: ``str`` representing AnnData obs column name for cell_types
:param class_label: ``str`` representing AnnData obs column name for labels
:param use_raw: if True, copies data from .raw attribute of AnnData
Examples:
>>> # Loading a local dataset
>>> dataset = DownloadableAnnDataset("TM_droplet_mat.h5ad", save_path = 'data/')
.. _Anndata:
http://anndata.readthedocs.io/en/latest/
"""
def __init__(
self,
filename: str = "anndataset",
save_path: str = "data/",
url: str = None,
delayed_populating: bool = False,
batch_label: str = "batch_indices",
ctype_label: str = "cell_types",
class_label: str = "labels",
use_raw: bool = False,
):
self.batch_label = batch_label
self.ctype_label = ctype_label
self.class_label = class_label
self.use_raw = use_raw
super().__init__(
urls=url,
filenames=filename,
save_path=save_path,
delayed_populating=delayed_populating,
)
def populate(self):
ad = anndata.read_h5ad(
os.path.join(self.save_path, self.filenames[0])
) # obs = cells, var = genes
# extract GeneExpressionDataset relevant attributes
# and provide access to annotations from the underlying AnnData object.
(
X,
batch_indices,
labels,
gene_names,
cell_types,
self.obs,
self.obsm,
self.var,
self.varm,
self.uns,
) = extract_data_from_anndata(
ad,
batch_label=self.batch_label,
ctype_label=self.ctype_label,
class_label=self.class_label,
use_raw=self.use_raw,
)
self.populate_from_data(
X=X,
batch_indices=batch_indices,
labels=labels,
gene_names=gene_names,
cell_types=cell_types,
)
self.filter_cells_by_count()
def extract_data_from_anndata(
ad: anndata.AnnData,
batch_label: str = "batch_indices",
ctype_label: str = "cell_types",
class_label: str = "labels",
use_raw: bool = False,
):
data, labels, batch_indices, gene_names, cell_types = None, None, None, None, None
# We use obs that will contain all the observation except those associated with
# batch_label, ctype_label and class_label.
obs = ad.obs.copy()
if use_raw:
counts = ad.raw.X
else:
counts = ad.X
# treat all possible cases according to anndata doc
if isinstance(counts, np.ndarray):
data = counts.copy()
if isinstance(counts, pd.DataFrame):
data = counts.values.copy()
if sp_sparse.issparse(counts):
# cast to dense only when the dense form stays under 1 Gb; otherwise keep sparse
if reduce(operator.mul, counts.shape) * counts.dtype.itemsize < 1e9:
logger.info("Dense size under 1Gb, casting to dense format (np.ndarray).")
data = counts.toarray()
else:
data = counts.copy()
gene_names = np.asarray(ad.var.index.values, dtype=str)
if batch_label in obs.columns:
batch_indices = obs.pop(batch_label).values
if ctype_label in obs.columns:
cell_types = obs.pop(ctype_label)
res = pd.factorize(cell_types)
labels = res[0].astype(int)
cell_types = np.array(res[1]).astype(str)
elif class_label in obs.columns:
labels = obs.pop(class_label)
return (
data,
batch_indices,
labels,
gene_names,
cell_types,
obs,
ad.obsm,
ad.var,
ad.varm,
ad.uns,
)
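# --- Editor's hedged sketch (illustrative only) ------------------------------
# Minimal round trip through AnnDatasetFromAnnData, assuming scvi and anndata
# are installed; the column names follow the defaults used above and the data
# are random placeholders.

def _example_anndataset():
    counts = np.random.randint(0, 10, size=(20, 5))
    obs = pd.DataFrame({"batch_indices": [0] * 10 + [1] * 10,
                        "cell_types": ["typeA"] * 10 + ["typeB"] * 10})
    ad = anndata.AnnData(X=counts, obs=obs)
    # batch_indices / cell_types are popped from .obs and become dataset fields
    return AnnDatasetFromAnnData(ad)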
|
the-stack_0_26681
|
import logging
import time
from nmtwizard import common
from nmtwizard.service import Service
from nmtwizard.capacity import Capacity
logger = logging.getLogger(__name__)
def _hostname(server):
if 'name' in server:
return server['name']
if server['port'] == 22:
return server['host']
return "%s:%s" % (server['host'], server['port'])
def _get_params(config, options):
params = {}
if 'server' not in options:
server_pool = config['variables']['server_pool']
if len(server_pool) > 1:
raise ValueError('server option is required to select a server and a resource')
resource = _hostname(config['variables']['server_pool'][0])
options['server'] = resource
params['server'] = options['server']
servers = {_hostname(server): server for server in config['variables']['server_pool']}
if params['server'] not in servers:
raise ValueError('server %s not in server_pool list' % params['server'])
params['gpus'] = servers[params['server']]['gpus']
params['cpus'] = servers[params['server']]['cpus']
server_cfg = servers[params['server']]
if 'login' not in server_cfg and 'login' not in options:
raise ValueError('login not found in server configuration or user options')
if 'log_dir' not in server_cfg:
raise ValueError('missing log_dir in the configuration')
params['login'] = server_cfg.get('login', options.get('login'))
params['log_dir'] = server_cfg['log_dir']
params['login_cmd'] = server_cfg.get('login_cmd')
params['port'] = server_cfg['port']
params['host'] = server_cfg['host']
params['with_nvidia_docker'] = server_cfg.get('with_nvidia_docker')
return params
class SSHService(Service):
def __init__(self, config):
for server in config['variables']['server_pool']:
if 'gpus' not in server:
server['gpus'] = []
if 'port' not in server:
server['port'] = 22
if 'ncpus' in server:
if 'cpus' in server and len(server['cpus']) != server['ncpus']:
raise ValueError("inconsistent ncpus and cpus option for server `%s`" % server)
server['cpus'] = list(range(server['ncpus']))
if 'cpus' not in server or len(server['cpus']) == 0:
raise ValueError("cpus cannot be empty for server `%s`" % server)
super().__init__(config)
server_pool = self._config['variables']['server_pool']
self._machines = {_hostname(server): server for server in server_pool}
self._resources = self._list_all_gpus()
@property
def resource_multitask(self):
return True
def _list_all_gpus(self):
gpus = []
for server in self._config['variables']['server_pool']:
for gpu in server['gpus']:
gpus.append('%s[%d]' % (_hostname(server), gpu))
return gpus
def get_server_detail(self, server, field_name):
# here, server must exist
return self._machines[server].get(field_name)
def list_resources(self):
resources = {server: Capacity(len(self._machines[server]['gpus']),
len(self._machines[server]['cpus'])) for server in self._machines}
return resources
@staticmethod
def get_resource_from_options(options):
if "server" not in options:
return "auto"
return options["server"]
def describe(self):
has_login = False
for server in self._config['variables']['server_pool']:
if 'login' in server:
has_login = True
break
desc = {}
if len(self._resources) > 1:
desc['server'] = {
"title": "server",
"type": "string",
"description": "server:gpu",
"enum": self._resources + ["auto"],
"default": "auto"
}
if not has_login:
desc['login'] = {
"type": "string",
"title": "login",
"description": "login to use to access the server"
}
return desc
def check(self, options, docker_registries_list):
params = _get_params(self._config, options)
client = self._get_client(params=params)
try:
details = common.check_environment(
client,
params['gpus'],
params['log_dir'],
docker_registries_list,
self._config.get('requirements'),
params.get('with_nvidia_docker'),
False)
finally:
client.close()
return details
def launch(self,
task_id,
options,
xpulist,
resource,
storages,
docker_config,
docker_registry,
docker_image,
docker_tag,
docker_command,
docker_files,
wait_after_launch,
auth_token,
support_statistics):
options['server'] = resource
params = _get_params(self._config, options)
client = self._get_client(params=params)
try:
callback_url = self._config.get('callback_url')
if auth_token:
callback_url = callback_url.replace("://", "://"+auth_token+":x@")
task = common.launch_task(
task_id,
client,
xpulist,
params,
docker_config,
docker_registry,
docker_image,
docker_tag,
docker_command,
docker_files,
wait_after_launch,
storages,
callback_url,
self._config.get('callback_interval'),
requirements=self._config.get("requirements"),
support_statistics=support_statistics)
finally:
client.close()
params['model'] = task['model']
params['pgid'] = task['pgid']
return params
def _get_client(self, params):
client = common.ssh_connect_with_retry(
params['host'],
params['port'],
params['login'],
pkey=self._config.get('pkey'),
key_filename=self._config.get('key_filename') or self._config.get('privateKey'),
login_cmd=params['login_cmd'])
return client
def status(self, task_id, params, get_log=True): # pylint: disable=arguments-differ
client = common.ssh_connect_with_retry(
params['host'],
params['port'],
params['login'],
pkey=self._config.get('pkey'),
key_filename=self._config.get('key_filename') or self._config.get('privateKey'),
login_cmd=params['login_cmd'])
if 'container_id' in params:
exit_status, _, _ = common.run_docker_command(
client, 'inspect -f {{.State.Status}} %s' % params['container_id'])
else:
exit_status, _, _ = common.run_command(client, 'kill -0 -%d' % params['pgid'])
if get_log:
common.update_log(task_id, client, params['log_dir'], self._config.get('callback_url'))
client.close()
if exit_status != 0:
return "dead"
return "running"
def terminate(self, params):
client = common.ssh_connect_with_retry(
params['host'],
params['port'],
params['login'],
pkey=self._config.get('pkey'),
key_filename=self._config.get('key_filename') or self._config.get('privateKey'),
login_cmd=params['login_cmd'])
if 'container_id' in params:
common.run_docker_command(client, 'rm --force %s' % params['container_id'])
time.sleep(5)
exit_status, _, stderr = common.run_command(client, 'kill -0 -%d' % params['pgid'])
if exit_status != 0:
logger.info("exist_status %d: %s", exit_status, stderr.read())
client.close()
return
exit_status, _, stderr = common.run_command(client, 'kill -9 -%d' % params['pgid'])
if exit_status != 0:
logger.info("exist_status %d: %s", exit_status, stderr.read())
client.close()
return
logger.info("successfully terminated")
client.close()
def init(config):
return SSHService(config)
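# --- Editor's hedged sketch (illustrative only) ------------------------------
# A minimal 'variables' section accepted by SSHService above; host, login and
# paths are placeholders, and real deployments add the docker/storage/callback
# settings consumed elsewhere in the service layer.
#
#     config = {
#         "variables": {
#             "server_pool": [
#                 {"host": "gpu-server-1", "port": 22, "login": "trainer",
#                  "gpus": [0, 1], "ncpus": 8, "log_dir": "/tmp/logs"},
#             ],
#         },
#         # ... plus the keys read via self._config.get(...) above
#     }
#     # SSHService(config).list_resources() -> {"gpu-server-1": Capacity(2, 8)}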
|
the-stack_0_26683
|
import asyncio
import socket
from flax.server.server import FlaxServer
from flax.types.peer_info import PeerInfo
def start_reconnect_task(server: FlaxServer, peer_info_arg: PeerInfo, log, auth: bool):
"""
Start a background task that checks connection and reconnects periodically to a peer.
"""
# If peer_info_arg is already an address, use it, otherwise resolve it here.
if peer_info_arg.is_valid():
peer_info = peer_info_arg
else:
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for _, connection in server.all_connections.items():
if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:
peer_retry = False
if peer_retry:
log.info(f"Reconnecting to peer {peer_info}")
try:
await server.start_client(peer_info, None, auth=auth)
except Exception as e:
log.info(f"Failed to connect to {peer_info} {e}")
await asyncio.sleep(3)
return asyncio.create_task(connection_check())
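# Editor's hedged sketch (illustrative only): a typical call site, assuming a
# running FlaxServer instance ``server`` and a logger ``log`` are available;
# the host name and port below are placeholders.
#
#     task = start_reconnect_task(server, PeerInfo("node.example.org", 6888),
#                                 log, auth=False)
#     ...
#     task.cancel()   # stop the reconnection loop on shutdown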
|
the-stack_0_26686
|
"""
Framework for visualisations
Copyright (C) 2016-2018 Jiri Borovec <[email protected]>
"""
import logging
import os
import matplotlib
from imsegm.utilities import ImageDimensionError
if os.environ.get('DISPLAY', '') == '' and matplotlib.rcParams['backend'] != 'agg':
print('No display found. Using non-interactive Agg backend.')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from planar import line as pl_line
from scipy import ndimage
from skimage import color, draw, segmentation
#: for blending two images define chess field size in pixels
SIZE_CHESS_FIELD = 50
#: columns from description files which marks the egg annotation by expert
COLUMNS_POSITION_EGG_ANNOT = ('ant_x', 'ant_y', 'post_x', 'post_y', 'lat_x', 'lat_y')
# http://matplotlib.org/examples/color/colormaps_reference.html
# http://htmlcolorcodes.com/
COLOR_ORANGE = '#FF5733'
COLOR_GRAY = '#7E7E7E'
COLOR_GREEN = '#1FFF00'
COLOR_YELLOW = '#FFFB00'
COLOR_PINK = '#FF00FF'
COLOR_BLUE = '#00AAFF'
COLORS = 'bgrmyck'
#: define markers for labels of positive (+1) neutral (0) and negative (-1) class
DICT_LABEL_MARKER = {
-1: ('.', COLOR_GRAY),
0: ('x', COLOR_GRAY),
1: ('.', COLOR_YELLOW),
}
DICT_LABEL_MARKER_FN_FP = {
-2: ('.', COLOR_PINK),
-1: ('.', COLOR_BLUE),
0: ('x', 'w'),
1: ('.', COLOR_YELLOW),
}
def _ellipse(r, c, r_radius, c_radius, orientation=0., shape=None):
""" temporary wrapper until release New version scikit-image v0.13
:param int r: center position in rows
:param int c: center position in columns
:param int r_radius: ellipse diam in rows
:param int c_radius: ellipse diam in columns
:param float orientation: ellipse orientation
:param tuple(int,int) shape: size of output mask
:return tuple(list(int),list(int)): indexes of filled positions
>>> img = np.zeros((10, 12), dtype=int)
>>> rr, cc = _ellipse(5, 6, 3, 5, orientation=np.deg2rad(30))
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
center = np.array([r, c])
radii = np.array([r_radius, c_radius])
# allow just rotation with in range +/- 180 degree
orientation %= np.pi
sin_alpha, cos_alpha = np.sin(orientation), np.cos(orientation)
# compute rotated radii by given rotation
r_radius_rot = abs(r_radius * cos_alpha) + c_radius * sin_alpha
c_radius_rot = r_radius * sin_alpha + abs(c_radius * cos_alpha)
# The upper_left and lower_right corners of the smallest rectangle
# containing the ellipse.
radii_rot = np.array([r_radius_rot, c_radius_rot])
upper_left = np.ceil(center - radii_rot).astype(int)
lower_right = np.floor(center + radii_rot).astype(int)
if shape is not None:
# Constrain upper_left and lower_right by shape boundary.
upper_left = np.maximum(upper_left, np.array([0, 0]))
lower_right = np.minimum(lower_right, np.array(shape[:2]) - 1)
shifted_center = center - upper_left
bounding_shape = lower_right - upper_left + 1
r_lim, c_lim = np.ogrid[0:int(bounding_shape[0]), 0:int(bounding_shape[1])]
r_org, c_org = shifted_center
r_rad, c_rad = radii
r, c = (r_lim - r_org), (c_lim - c_org)
dist_1 = ((r * cos_alpha + c * sin_alpha) / r_rad)**2
dist_2 = ((r * sin_alpha - c * cos_alpha) / c_rad)**2
rr, cc = np.nonzero((dist_1 + dist_2) <= 1)
rr.flags.writeable = True
cc.flags.writeable = True
rr += upper_left[0]
cc += upper_left[1]
return rr, cc
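# Editor's note (sketch of the underlying maths): the membership test above is
# the implicit equation of an ellipse rotated by ``orientation`` about its
# centre -- a point (r, c) taken relative to the centre is kept when
#
#     ((r * cos(a) + c * sin(a)) / r_radius) ** 2
#       + ((r * sin(a) - c * cos(a)) / c_radius) ** 2 <= 1
#
# which is exactly what ``dist_1 + dist_2 <= 1`` evaluates over the bounding box.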
def ellipse(r, c, r_radius, c_radius, orientation=0., shape=None):
""" temporary wrapper until release New version scikit-image v0.13
.. note:: Should be solved in skimage v0.13
:param int r: center position in rows
:param int c: center position in columns
:param int r_radius: ellipse diam in rows
:param int c_radius: ellipse diam in columns
:param float orientation: ellipse orientation
:param tuple(int,int) shape: size of output mask
:return tuple(list(int),list(int)): indexes of filled positions
>>> img = np.zeros((14, 20), dtype=int)
>>> rr, cc = ellipse(7, 10, 3, 9, np.deg2rad(30), img.shape)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
rr, cc = draw.ellipse(r, c, r_radius, c_radius, rotation=orientation, shape=shape)
# alternative version
# rr, cc = _ellipse(r, c, r_radius, c_radius, orientation, shape)
return rr, cc
def ellipse_perimeter(r, c, r_radius, c_radius, orientation=0., shape=None):
""" see New version scikit-image v0.14
.. note:: Should be solved in skimage v0.14
:param int r: center position in rows
:param int c: center position in columns
:param int r_radius: ellipse diam in rows
:param int c_radius: ellipse diam in columns
:param float orientation: ellipse orientation
:param tuple(int,int) shape: size of output mask
:return tuple(list(int),list(int)): indexes of filled positions
>>> img = np.zeros((14, 20), dtype=int)
>>> rr, cc = ellipse_perimeter(7, 10, 3, 9, np.deg2rad(30), img.shape)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
rr, cc = draw.ellipse_perimeter(r, c, r_radius, c_radius, orientation=-orientation, shape=shape)
return rr, cc
def norm_aplha(alpha):
""" normalise alpha in range (0, 1)
:param float alpha:
:return float:
>>> norm_aplha(0.5)
0.5
>>> norm_aplha(255)
1.0
>>> norm_aplha(-1)
0
"""
alpha = alpha / 255. if alpha > 1. else alpha
alpha = 0 if alpha < 0. else alpha
alpha = 1. if alpha > 1. else alpha
return alpha
def figure_image_adjustment(fig, img_size):
""" adjust figure as nice image without axis
:param fig: Figure
:param tuple(int,int) img_size: image size
:return Figure:
>>> fig = figure_image_adjustment(plt.figure(), (150, 200))
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
ax = fig.gca()
ax.set(xlim=[0, img_size[1]], ylim=[img_size[0], 0])
ax.axis('off')
ax.axes.get_xaxis().set_ticklabels([])
ax.axes.get_yaxis().set_ticklabels([])
fig.tight_layout(pad=0)
fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
return fig
def figure_image_segm_results(img, seg, subfig_size=9, mid_labels_alpha=0.2, mid_image_gray=True):
""" creating subfigure with original image, overlapped segmentation contours
and clean result segmentation...
it turns the sequence in vertical / horizontal according major image dim
:param ndarray img: image as background
:param ndarray seg: segmentation
:param int subfig_size: max image size
:param fool mid_image_gray: used color image as bacround in middele
:param float mid_labels_alpha: alpha for middle segmentation overlap
:return Figure:
>>> img = np.random.random((100, 150, 3))
>>> seg = np.random.randint(0, 2, (100, 150))
>>> fig = figure_image_segm_results(img, seg)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
if img.shape[:2] != seg.shape[:2]:
raise ImageDimensionError('different image %r & seg_pipe %r sizes' % (img.shape, seg.shape))
if img.ndim == 2: # for gray images of ovary
# img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
img = color.gray2rgb(img)
fig, axarr = create_figure_by_image(img.shape[:2], subfig_size, nb_subfigs=3)
axarr[0].set_title('original image')
axarr[0].imshow(img)
# visualise the 3rd label
axarr[1].set_title('original image w. segment overlap')
img_bg = color.rgb2gray(img) if mid_image_gray else img
axarr[1].imshow(img_bg, cmap=plt.cm.Greys_r)
axarr[1].imshow(seg, alpha=mid_labels_alpha, cmap=plt.cm.jet)
axarr[1].contour(seg, levels=np.unique(seg), linewidths=2, cmap=plt.cm.jet)
axarr[2].set_title('segmentation - all labels')
axarr[2].imshow(seg, cmap=plt.cm.jet)
for ax in axarr:
ax.axis('off')
ax.axes.get_xaxis().set_ticklabels([])
ax.axes.get_yaxis().set_ticklabels([])
fig.subplots_adjust(wspace=0.01, hspace=0.01)
fig.tight_layout()
return fig
def figure_overlap_annot_segm_image(annot, segm, img=None, subfig_size=9, drop_labels=None, segm_alpha=0.2):
""" figure showing overlap annotation - segmentation - image
:param ndarray annot: user annotation
:param ndarray segm: segmentation
:param ndarray img: original image
:param int subfig_size: maximal sub-figure size
:param float segm_alpha: use transparency
:param list(int) drop_labels: labels to be ignored
:return Figure:
>>> img = np.random.random((100, 150, 3))
>>> seg = np.random.randint(0, 2, (100, 150))
>>> fig = figure_overlap_annot_segm_image(seg, seg, img, drop_labels=[5])
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
norm_size = np.array(annot.shape) / float(np.max(annot.shape))
fig_size = norm_size[::-1] * subfig_size * np.array([3, 1])
fig, axarr = plt.subplots(ncols=3, figsize=fig_size)
if img is None:
img = np.ones(annot.shape)
if img.ndim == 2: # for gray images of ovary
img = color.gray2rgb(img)
axarr[0].set_title('Annotation')
axarr[0].imshow(img)
axarr[0].imshow(annot, alpha=segm_alpha)
axarr[0].contour(annot, levels=np.unique(annot), linewidths=2)
axarr[1].set_title('Segmentation')
axarr[1].imshow(img)
axarr[1].imshow(segm, alpha=segm_alpha)
axarr[1].contour(segm, levels=np.unique(segm), linewidths=2)
# visualise the 3rd label
axarr[2].set_title('difference: annot. & segment')
# axarr[2].imshow(~(annot == segm), cmap=plt.cm.Reds)
max_val = np.max(annot.astype(int))
diff = annot - segm
if drop_labels is not None:
for lb in drop_labels:
diff[annot == lb] = 0
cax = axarr[2].imshow(diff, vmin=-max_val, vmax=max_val, alpha=0.5, cmap=plt.cm.bwr)
# vals = np.linspace(-max_val, max_val, max_val * 2 + 1)
plt.colorbar(
cax,
ticks=np.linspace(-max_val, max_val, max_val * 2 + 1),
boundaries=np.linspace(-max_val - 0.5, max_val + 0.5, max_val * 2 + 2)
)
# plt.clim(-max_val - 0.5, max_val - 0.5)
# axarr[2].contour(annot, levels=np.unique(annot), linewidths=1, colors='g')
# axarr[2].contour(segm, levels=np.unique(segm), linewidths=1, colors='b')
for i in range(len(axarr)):
axarr[i].axis('off')
axarr[i].axes.get_xaxis().set_ticklabels([])
axarr[i].axes.get_yaxis().set_ticklabels([])
fig.subplots_adjust(wspace=0.01, hspace=0.01)
fig.tight_layout()
return fig
def figure_segm_graphcut_debug(images, subfig_size=9):
""" creating subfigure with slic, graph edges and results in the first row
and individual class unary terms in the second row
:param dict images: dictionary composed from name and image array
:param int subfig_size: maximal sub-figure size
:return Figure:
>>> images = {
... 'image': np.random.random((100, 150, 3)),
... 'slic': np.random.randint(0, 2, (100, 150)),
... 'slic_mean': np.random.random((100, 150, 3)),
... 'img_graph_edges': np.random.random((100, 150, 3)),
... 'img_graph_segm': np.random.random((100, 150, 3)),
... 'imgs_unary_cost': [np.random.random((100, 150, 3))],
... }
>>> fig = figure_segm_graphcut_debug(images)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
keys = ('image', 'slic', 'slic_mean', 'img_graph_edges', 'img_graph_segm', 'imgs_unary_cost')
if not all(n in images for n in keys):
raise ValueError('missing keys in debug structure %r' % tuple(images.keys()))
nb_cols = max(3, len(images['imgs_unary_cost']))
img = images['image']
if img.ndim == 2: # for gray images of ovary
img = color.gray2rgb(img)
norm_size = np.array(img.shape[:2]) / float(np.max(img.shape))
fig_size = norm_size[::-1] * subfig_size * np.array([nb_cols, 2])
fig, axarr = plt.subplots(2, nb_cols, figsize=fig_size)
img_slic = segmentation.mark_boundaries(img, images['slic'], mode='subpixel')
axarr[0, 0].set_title('SLIC')
axarr[0, 0].imshow(img_slic)
for i, k in enumerate(['img_graph_edges', 'img_graph_segm']):
axarr[0, i + 1].set_title(k)
axarr[0, i + 1].imshow(images[k])
for i, im_uc in enumerate(images['imgs_unary_cost']):
axarr[1, i].set_title('unary cost #%i' % i)
axarr[1, i].imshow(im_uc)
for j in range(2):
for i in range(nb_cols):
axarr[j, i].axis('off')
axarr[j, i].axes.get_xaxis().set_ticklabels([])
axarr[j, i].axes.get_yaxis().set_ticklabels([])
fig.subplots_adjust(left=0, right=1, top=1, bottom=0, wspace=0.05, hspace=0.05)
return fig
def create_figure_by_image(img_size, subfig_size, nb_subfigs=1, extend=0.):
""" crearting image according backround_image
:param tuple(int,int) img_size: image size
:param float subfig_size: maximal sub-figure size
:param int nb_subfigs: number of sub-figure
:param float extend: extension
:return tuple(Figure,list):
"""
norm_size = np.array(img_size) / float(np.max(img_size))
# reverse dimensions and scale by fig size
if norm_size[0] >= norm_size[1]: # horizontal
fig_size = norm_size[::-1] * subfig_size * np.array([nb_subfigs, 1])
fig_size[0] += extend * fig_size[0]
fig, axarr = plt.subplots(ncols=nb_subfigs, figsize=fig_size)
else: # vertical
fig_size = norm_size[::-1] * subfig_size * np.array([1, nb_subfigs])
fig_size[0] += extend * fig_size[0]
fig, axarr = plt.subplots(nrows=nb_subfigs, figsize=fig_size)
return fig, axarr
def figure_ellipse_fitting(img, seg, ellipses, centers, crits, fig_size=9):
""" show figure with result of the ellipse fitting
:param ndarray img: image
:param ndarray seg: segmentation
:param list(tuple(int,int,int,int,float)) ellipses: collection of ellipse parameters
ell. parameters: (x, y, height, width, orientation)
:param list(tuple(int,int)) centers: points
:param list(float) crits:
:param float fig_size: maximal figure size
:return Figure:
>>> img = np.random.random((100, 150, 3))
>>> seg = np.random.randint(0, 2, (100, 150))
>>> ells = np.random.random((3, 5)) * 25
>>> centers = np.random.random((3, 2)) * 25
>>> crits = np.random.random(3)
>>> fig = figure_ellipse_fitting(img[:, :, 0], seg, ells, centers, crits)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
if not len(ellipses) == len(centers) == len(crits):
raise ValueError(
'number of ellipses (%i) and centers (%i) and criteria (%i) should match' %
(len(ellipses), len(centers), len(crits))
)
fig, ax = create_figure_by_image(img.shape[:2], fig_size)
if img.ndim != 2:
raise ImageDimensionError('required image dimension is 2 to instead %r' % img.shape)
ax.imshow(img, cmap=plt.cm.Greys_r)
for i, params in enumerate(ellipses):
c1, c2, h, w, phi = params
rr, cc = ellipse_perimeter(int(c1), int(c2), int(h), int(w), phi)
ax.plot(cc, rr, '.', color=COLORS[i % len(COLORS)], label='#%i with crit=%d' % ((i + 1), int(crits[i])))
ax.legend(loc='lower right')
# plt.plot(centers[:, 1], centers[:, 0], 'ow')
for i in range(len(centers)):
ax.plot(centers[i, 1], centers[i, 0], 'o', color=COLORS[i % len(COLORS)])
ax.set(xlim=[0, seg.shape[1]], ylim=[seg.shape[0], 0])
ax.axis('off')
fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
return fig
def figure_annot_slic_histogram_labels(dict_label_hist, slic_size=-1, slic_regul=-1):
""" plot ration of labels assigned to each superpixel
:param dict_label_hist: dictionary of label name and histogram
:param int slic_size: used for figure title
:param float slic_regul: used for figure title
:return Figure:
>>> np.random.seed(0)
>>> dict_label_hist = {'a': np.tile([1, 0, 0, 0, 1], (25, 1)),
... 'b': np.tile([0, 1, 0, 0, 1], (30, 1))}
>>> fig = figure_annot_slic_histogram_labels(dict_label_hist)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
matrix_hist_all = np.concatenate(tuple(dict_label_hist.values()), axis=0)
lb_sums = np.sum(matrix_hist_all, axis=0)
fig = plt.figure(figsize=(10, 5))
ax = fig.gca()
for i, nb in enumerate(lb_sums):
if nb == 0:
continue
patches, bin_edges = np.histogram(matrix_hist_all[:, i], bins=50, density=True)
bins = [(a + b) / 2. for a, b in zip(bin_edges[:-1], bin_edges[1:])]
# ax.plot(bins, patches, label='label: %i' % i)
ax.semilogy(bins, patches, label='label: %i' % i)
ax.set_title(
'Histogram of label density in each segment over all annotated images\n'
' (superpixels: size=%i, regul=%f)' % (slic_size, slic_regul)
)
ax.set(xlabel='region densities', ylabel='[%]')
ax.legend()
ax.grid()
return fig
def figure_ray_feature(segm, points, ray_dist_raw=None, ray_dist=None, points_reconst=None, title=''):
""" visualise the segmentation with specific point and estimated ray dist.
:param ndarray segm: segmentation
:param [(float, float)] points: collection of points
:param list(float) ray_dist_raw:
:param list(float) ray_dist: Ray feature distances
:param ndarray points_reconst: collection of reconstructed points
:param str title: figure title
:return Figure:
.. note:: for more examples, see unittests
"""
ray_dist_raw = ray_dist_raw if ray_dist_raw is not None else []
ray_dist = ray_dist if ray_dist is not None else []
fig, axarr = plt.subplots(nrows=2, ncols=1)
if title:
axarr[0].set_title(title)
axarr[0].imshow(1 - segm, cmap='gray', interpolation='nearest')
axarr[0].plot(points[1], points[0], 'bo')
axarr[0].set(xlim=[0, segm.shape[1]], ylim=[segm.shape[0], 0])
if points_reconst is not None:
axarr[0].plot(points_reconst[:, 1], points_reconst[:, 0], 'g.')
axarr[1].plot(np.linspace(0, 360, len(ray_dist_raw)).tolist(), ray_dist_raw, 'b', label='original')
axarr[1].plot(np.linspace(0, 360, len(ray_dist)).tolist(), ray_dist, 'r', label='final')
axarr[1].set(xlabel='angles [deg]', xlim=[0, 360])
axarr[1].legend(loc=0)
axarr[1].grid()
return fig
def figure_used_samples(img, labels, slic, used_samples, fig_size=12):
""" draw used examples (superpixels)
:param ndarray img: input image for background
:param list(int) labels: labels associated for superpixels
:param ndarray slic: superpixel segmentation
:param list(bool) used_samples: used samples for training
:param int fig_size: figure size
:return Figure:
>>> img = np.random.random((50, 75, 3))
>>> labels = [-1, 0, 2]
>>> used = [1, 0, 0]
>>> seg = np.random.randint(0, 3, img.shape[:2])
>>> fig = figure_used_samples(img, labels, seg, used)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
w_samples = np.asarray(used_samples)[slic]
img = color.gray2rgb(img) if img.ndim == 2 else img
fig, axarr = create_figure_by_image(img.shape[:2], fig_size, nb_subfigs=2, extend=0.15)
axarr[0].imshow(np.asarray(labels)[slic], cmap=plt.cm.jet)
axarr[0].contour(slic, levels=np.unique(slic), colors='w', linewidths=0.5)
axarr[0].axis('off')
axarr[1].imshow(img)
axarr[1].contour(slic, levels=np.unique(slic), colors='w', linewidths=0.5)
cax = axarr[1].imshow(w_samples, cmap=plt.cm.RdYlGn, vmin=0, vmax=1, alpha=0.5)
cbar = plt.colorbar(cax, ticks=[0, 1], boundaries=[-0.5, 0.5, 1.5])
cbar.ax.set_yticklabels(['drop', 'used'])
axarr[1].axis('off')
fig.tight_layout()
return fig
def draw_color_labeling(segments, lut_labels):
""" visualise the graph cut results
:param ndarray segments: np.array<height, width>
:param list(int) lut_labels: look-up-table
:return ndarray: np.array<height, width, 3>
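Example (a tiny synthetic segmentation, illustrative only):
>>> seg = np.array([[0, 1, 2], [2, 1, 0]])
>>> draw_color_labeling(seg, [0, 1, 1]).shape
(2, 3, 3)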
"""
seg = np.asarray(lut_labels)[segments]
clrs = plt.get_cmap('jet')
lbs = np.arange(np.max(seg) + 1)
lut = clrs(lbs / float(lbs.max()))[:, :3]
img = lut[seg]
return img
def draw_graphcut_unary_cost_segments(segments, unary_cost):
""" visualise the unary cost for each class
:param ndarray segments: np.array<height, width>
:param ndarray unary_cost: np.array<nb_spx, nb_classes>
:return []: [np.array<height, width, 3>] * nb_cls
>>> seg = np.random.randint(0, 100, (100, 150))
>>> u_cost = np.random.random((100, 3))
>>> imgs = draw_graphcut_unary_cost_segments(seg, u_cost)
>>> len(imgs)
3
>>> [img.shape for img in imgs]
[(100, 150, 3), (100, 150, 3), (100, 150, 3)]
"""
clrs = plt.get_cmap('Greens')
imgs_u_cost = [None] * unary_cost.shape[-1]
for i in range(unary_cost.shape[-1]):
pw_c_norm = 1 - (unary_cost[:, i] / unary_cost.max())
lut = np.asarray([clrs(p) for p in pw_c_norm])[:, :3]
imgs_u_cost[i] = lut[segments]
return imgs_u_cost
def closest_point_on_line(start, end, point):
""" projection of the point to the line
:param list(int) start: line starting point
:param list(int) end: line ending point
:param list(int) point: point to be projected
:return list(int): point on the line
>>> closest_point_on_line([0, 0], [1, 2], [0, 2])
array([ 0.8, 1.6])
"""
start, end, point = [np.array(a) for a in [start, end, point]]
line = pl_line.Line(start, (end - start))
proj = np.array(line.project(point))
return proj
def draw_eggs_ellipse(mask_shape, pos_ant, pos_lat, pos_post, threshold_overlap=0.6):
""" from given 3 point estimate the ellipse
:param tuple(int,int) mask_shape:
:param [tuple(int,int)] pos_ant: anterior
:param [tuple(int,int)] pos_lat: latitude
:param [tuple(int,int)] pos_post: postlude
:param float threshold_overlap:
:return ndarray:
>>> pos_ant, pos_lat, pos_post = [10, 10], [20, 20], [35, 20]
>>> points = np.array([pos_ant, pos_lat, pos_post])
>>> _= plt.plot(points[:, 0], points[:, 1], 'og')
>>> mask = draw_eggs_ellipse([30, 50], [pos_ant], [pos_lat], [pos_post])
>>> mask.shape
(30, 50)
>>> _= plt.imshow(mask, alpha=0.5, interpolation='nearest')
>>> _= plt.xlim([0, mask.shape[1]]), plt.ylim([0, mask.shape[0]]), plt.grid()
>>> # plt.show()
"""
mask_eggs = np.zeros(mask_shape)
for i, (ant, lat, post) in enumerate(zip(pos_ant, pos_lat, pos_post)):
ant, lat, post = map(np.array, [ant, lat, post])
center = ant + (post - ant) / 2.
lat_proj = closest_point_on_line(ant, post, lat)
# http://stackoverflow.com/questions/433371/ellipse-bounding-a-rectangle
radius_a = (np.linalg.norm(post - ant) / 2. / np.sqrt(2)) * 1.
radius_b = (np.linalg.norm(lat - lat_proj) / np.sqrt(2)) * 1.
angle = np.arctan2(*(post - ant))
rr, cc = ellipse(
int(center[1]), int(center[0]), int(radius_a), int(radius_b), orientation=angle, shape=mask_eggs.shape
)
mask = np.zeros(mask_shape)
mask[rr, cc] = True
# mask = ndimage.morphology.binary_fill_holes(mask)
# distance = ndimage.distance_transform_edt(mask)
# probab = distance / np.max(distance)
# mask = probab >= threshold_dist
m_overlap = np.sum(np.logical_and(mask > 0, mask_eggs > 0)) / float(np.sum(mask))
if m_overlap > threshold_overlap:
logging.debug('skip egg drawing while it overlap by %f', m_overlap)
continue
mask_eggs[mask.astype(bool)] = i + 1
return mask_eggs
def parse_annot_rectangles(rows_slice):
""" parse annotation fromDF to lists
:param rows_slice: a row from a table
:return tuple: the three points
>>> import pandas as pd
>>> dict_row = dict(ant_x=1, ant_y=2, lat_x=3, lat_y=4, post_x=5, post_y=6)
>>> row = pd.DataFrame([dict_row])
>>> parse_annot_rectangles(row)
([(1, 2)], [(3, 4)], [(5, 6)])
>>> rows = pd.DataFrame([dict_row, {n: dict_row[n] + 10 for n in dict_row}])
>>> rows
ant_x ant_y lat_x lat_y post_x post_y
0 1 2 3 4 5 6
1 11 12 13 14 15 16
>>> parse_annot_rectangles(rows)
([(1, 2), (11, 12)], [(3, 4), (13, 14)], [(5, 6), (15, 16)])
"""
dict_eggs = {col: rows_slice[col] for col in COLUMNS_POSITION_EGG_ANNOT}
if all(isinstance(dict_eggs[col], str) for col in dict_eggs):
dict_eggs = {col: map(int, dict_eggs[col][1:-1].lstrip().split()) for col in dict_eggs}
pos_ant = list(zip(dict_eggs['ant_x'], dict_eggs['ant_y']))
pos_lat = list(zip(dict_eggs['lat_x'], dict_eggs['lat_y']))
pos_post = list(zip(dict_eggs['post_x'], dict_eggs['post_y']))
return pos_ant, pos_lat, pos_post
def draw_eggs_rectangle(mask_shape, pos_ant, pos_lat, pos_post):
""" from given 3 point estimate the ellipse
:param tuple(int,int) mask_shape: segmentation size
:param [tuple(int,int)] pos_ant: points
:param [tuple(int,int)] pos_lat: points
:param [tuple(int,int)] pos_post: points
:return [ndarray]:
>>> pos_ant, pos_lat, pos_post = [10, 10], [20, 20], [35, 20]
>>> points = np.array([pos_ant, pos_lat, pos_post])
>>> _= plt.plot(points[:, 0], points[:, 1], 'og')
>>> masks = draw_eggs_rectangle([30, 50], [pos_ant], [pos_lat], [pos_post])
>>> [m.shape for m in masks]
[(30, 50)]
>>> for mask in masks:
... _= plt.imshow(mask, alpha=0.5, interpolation='nearest')
>>> _= plt.xlim([0, mask.shape[1]]), plt.ylim([0, mask.shape[0]]), plt.grid()
>>> # plt.show()
"""
list_masks = []
pos_ant, pos_lat, pos_post = list(pos_ant), list(pos_lat), list(pos_post)
for ant, lat, post in zip(pos_ant, pos_lat, pos_post):
ant, lat, post = map(np.array, [ant, lat, post])
lat_proj = closest_point_on_line(ant, post, lat)
shift = lat - lat_proj
# center = ant + (post - ant) / 2.
# dist = np.linalg.norm(shift)
# angle = np.arctan2(*(post - ant))
points = np.array([ant + shift, ant - shift, post - shift, post + shift, ant + shift])
rr, cc = draw.polygon(points[:, 1], points[:, 0], shape=mask_shape)
mask = np.zeros(mask_shape)
mask[rr, cc] = True
list_masks.append(mask)
return list_masks
def merge_object_masks(masks, overlap_thr=0.7):
""" merge several mask into one multi-class segmentation
:param [ndarray] masks: collection of masks
:param float overlap_thr: threshold for overlap
:return ndarray:
>>> m1 = np.zeros((5, 6), dtype=int)
>>> m1[:4, :4] = 1
>>> m2 = np.zeros((5, 6), dtype=int)
>>> m2[2:, 2:] = 1
>>> merge_object_masks([m1, m1])
array([[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> merge_object_masks([m1, m2])
array([[1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0],
[1, 1, 2, 2, 2, 2],
[1, 1, 2, 2, 2, 2],
[0, 0, 2, 2, 2, 2]])
"""
if len(masks) <= 0:
raise ValueError('no masks are given')
mask = np.array(masks[0])
for i in range(1, len(masks)):
overlap_ratios = []
for j in range(1, int(np.max(mask) + 1)):
overlap = np.sum(np.logical_and(mask == j, masks[i] == 1))
union = np.sum(np.logical_or(mask == j, masks[i] == 1))
overlap_ratios.append(float(overlap) / float(union))
if any(r > overlap_thr for r in overlap_ratios):
logging.debug('skip egg drawing while it overlap by %r', overlap_ratios)
continue
mask[masks[i] == 1] = np.max(mask) + 1
return mask
def draw_image_segm_points(
ax,
img,
points,
labels=None,
slic=None,
color_slic='w',
lut_label_marker=DICT_LABEL_MARKER,
seg_contour=None,
):
""" on plane draw background image or segmentation, overlap with SLIC
contours, add contour of adative segmentation like annot. for centers
plot point with specific property (shape and colour) according label
:param ax: figure axis
:param ndarray img: image
:param list(tuple(int,int)) points: collection of points
:param list(int) labels: LUT labels for superpixels
:param ndarray slic: superpixel segmentation
:param str color_slic: colour of the superpixel contours
:param dict lut_label_marker: dictionary {int: (str, str)} of label and markers
:param ndarray seg_contour: segmentation contour
>>> img = np.random.randint(0, 256, (100, 100))
>>> points = np.random.randint(0, 100, (25, 2))
>>> labels = np.random.randint(0, 5, len(points))
>>> slic = np.random.randint(0, 256, (100, 100))
>>> draw_image_segm_points(plt.Figure().gca(), img, points, labels, slic)
"""
# background image or segmentation
if img.ndim == 2:
ax.imshow(img, alpha=0.3, cmap=plt.cm.gist_earth)
else:
ax.imshow(img)
if slic is not None:
ax.contour(slic, levels=np.unique(slic), alpha=0.5, colors=color_slic, linewidths=0.5)
# fig.gca().imshow(mark_boundaries(img, slic))
if seg_contour is not None and isinstance(seg_contour, np.ndarray):
if img.shape[:2] != seg_contour.shape[:2]:
raise ImageDimensionError('image size %r and segm. %r should match' % (img.shape, seg_contour.shape))
ax.contour(seg_contour, linewidths=3, levels=np.unique(seg_contour))
if labels is not None:
if len(points) != len(labels):
raise ValueError('number of points (%i) and labels (%i) should match' % (len(points), len(labels)))
for lb in lut_label_marker:
marker, clr = lut_label_marker[lb]
ax.plot(points[(labels == lb), 1], points[(labels == lb), 0], marker, color=clr)
else:
ax.plot(points[:, 1], points[:, 0], 'o', color=COLOR_ORANGE)
ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
def figure_image_segm_centres(img, segm, centers=None, cmap_contour=plt.cm.Blues):
""" visualise the input image and segmentation in common frame
:param ndarray img: image
:param ndarray segm: segmentation
:param [tuple(int,int)]|ndarray centers: list of centre points or a centre segmentation array
:param obj cmap_contour: colour map used for the segmentation contour
:return Figure:
>>> img = np.random.random((100, 150, 3))
>>> seg = np.random.randint(0, 2, (100, 150))
>>> centre = [[55, 60]]
>>> fig = figure_image_segm_centres(img, seg, centre)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
fig, ax = plt.subplots()
ax.imshow(img)
if np.sum(segm) > 0:
segm_show = segm
if segm.ndim > 2:
segm_show = np.argmax(segm, axis=2)
ax.contour(segm_show, cmap=cmap_contour, linewidths=0.5)
if isinstance(centers, list):
ax.plot(np.array(centers)[:, 1], np.array(centers)[:, 0], 'o', color=COLOR_ORANGE)
elif isinstance(centers, np.ndarray):
if img.shape[:2] != centers.shape[:2]:
raise ImageDimensionError('image size %r and centers %r should match' % (img.shape, centers.shape))
ax.contour(centers, levels=np.unique(centers), cmap=plt.cm.YlOrRd)
ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
fig.tight_layout()
return fig
def draw_graphcut_weighted_edges(segments, centers, edges, edge_weights, img_bg=None, img_alpha=0.5):
""" visualise the edges on the overlapping a background image
:param [tuple(int,int)] centers: list of centers
:param ndarray segments: np.array<height, width>
:param ndarray edges: list of edges of shape <nb_edges, 2>
:param ndarray edge_weights: weight per edge <nb_edges, 1>
:param ndarray img_bg: image background
:param float img_alpha: transparency
:return ndarray: np.array<height, width, 3>
>>> slic = np.array([[0] * 3 + [1] * 3 + [2] * 3+ [3] * 3] * 4 +
... [[4] * 3 + [5] * 3 + [6] * 3 + [7] * 3] * 4)
>>> centres = [[1, 1], [1, 4], [1, 7], [1, 10],
... [5, 1], [5, 4], [5, 7], [5, 10]]
>>> edges = [[0, 1], [1, 2], [2, 3], [0, 4], [1, 5],
... [4, 5], [2, 6], [5, 6], [3, 7], [6, 7]]
>>> img = np.random.randint(0, 256, slic.shape + (3,))
>>> edge_weights = np.ones(len(edges))
>>> edge_weights[0] = 0
>>> img = draw_graphcut_weighted_edges(slic, centres, edges, edge_weights, img_bg=img)
>>> img.shape
(8, 12, 3)
"""
if img_bg is not None:
if img_bg.ndim == 2:
# duplicate channels to be like RGB
img_bg = np.rollaxis(np.tile(img_bg, (3, 1, 1)), 0, 3)
# convert to range 0,1 so the drawing is correct
max_val = 1.
if img_bg.dtype != float:
max_val = max(255., img_bg.max())
img = img_bg.astype(float) / max_val
# make it partially transparent
img = (1. - img_alpha) + img * img_alpha
else:
img = np.zeros(segments.shape + (3, ))
clrs = plt.get_cmap('Greens')
diff = (edge_weights.max() - edge_weights.min())
if diff > 0:
edge_ratio = (edge_weights - edge_weights.min()) / diff
else:
edge_ratio = np.zeros(edge_weights.shape)
for i, edge in enumerate(edges):
n1, n2 = edge
y1, x1 = map(int, centers[n1])
y2, x2 = map(int, centers[n2])
# line = draw.line(y1, x1, y2, x2) # , shape=img.shape[:2]
# img[line] = clrs(edge_ratio[i])[:3]
# using anti-aliasing
rr, cc, val = draw.line_aa(y1, x1, y2, x2) # , shape=img.shape[:2]
color_w = np.tile(val, (3, 1)).T
img[rr, cc, :] = color_w * clrs(edge_ratio[i])[:3] + (1 - color_w) * img[rr, cc, :]
circle = draw.circle(y1, x1, radius=2, shape=img.shape[:2])
img[circle] = 1., 1., 0.
return img
def draw_rg2sp_results(ax, seg, slic, debug_rg2sp, iter_index=-1):
""" drawing Region Growing with shape prior
:param ax: figure axis
:param ndarray seg: segmentation
:param ndarray slic: superpixels
:param dict debug_rg2sp: dictionary with debug results
:param int iter_index: iteration index
:return: ax
"""
ax.imshow(debug_rg2sp['labels'][iter_index][slic], cmap=plt.cm.jet)
ax.contour(seg, levels=np.unique(seg), colors='#bfbfbf')
for centre, shift in zip(debug_rg2sp['centres'][iter_index], debug_rg2sp['shifts'][iter_index]):
rot = np.deg2rad(shift)
ax.plot(centre[1], centre[0], 'ow')
ax.arrow(
centre[1], centre[0], np.cos(rot) * 50., np.sin(rot) * 50., fc='w', ec='w', head_width=20., head_length=30.
)
ax.set(
xlim=[0, seg.shape[1]],
ylim=[seg.shape[0], 0],
title='Iteration #%i with E=%.0f' % (iter_index, round(debug_rg2sp['criteria'][iter_index]))
)
return ax
def figure_rg2sp_debug_complete(seg, slic, debug_rg2sp, iter_index=-1, max_size=5):
""" draw figure with all debug (intermediate) segmentation steps
:param ndarray seg: segmentation
:param ndarray slic: superpixels
:param debug_rg2sp: dictionary with some debug parameters
:param int iter_index: iteration index
:param int max_size: max figure size
:return Figure:
>>> seg = np.random.randint(0, 4, (100, 150))
>>> slic = np.random.randint(0, 80, (100, 150))
>>> dict_debug = {
... 'lut_data_cost': np.random.random((80, 3)),
... 'lut_shape_cost': np.random.random((15, 80, 3)),
... 'labels': np.random.randint(0, 4, (15, 80)),
... 'centres': [np.array([np.random.randint(0, 100, 80),
... np.random.randint(0, 150, 80)]).T] * 15,
... 'shifts': np.random.random((15, 3)),
... 'criteria': np.random.random(15),
... }
>>> fig = figure_rg2sp_debug_complete(seg, slic, dict_debug)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
nb_objects = debug_rg2sp['lut_data_cost'].shape[1] - 1
nb_subfigs = max(3, nb_objects)
norm_size = np.array(seg.shape[:2]) / float(np.max(seg.shape))
fig_size = np.array(norm_size)[::-1] * np.array([nb_subfigs, 2]) * max_size
fig, axarr = plt.subplots(2, nb_subfigs, figsize=fig_size)
draw_rg2sp_results(axarr[0, 0], seg, slic, debug_rg2sp, iter_index)
axarr[0, 1].plot(debug_rg2sp['criteria'])
axarr[0, 1].plot(iter_index, debug_rg2sp['criteria'][iter_index], 'og')
axarr[0, 1].set(ylabel='Energy', xlabel='iteration')
axarr[0, 1].grid()
axarr[0, 2].set_title('Data cost')
img_shape_cost = debug_rg2sp['lut_shape_cost'][iter_index][:, 0][slic]
im = axarr[0, 2].imshow(img_shape_cost, cmap=plt.cm.jet)
fig.colorbar(im, ax=axarr[0, 2])
for j in range(3):
axarr[0, j].axis('off')
for i in range(nb_objects):
axarr[1, i].set_title('Shape cost for object #%i' % i)
lut = debug_rg2sp['lut_shape_cost'][iter_index][:, i + 1]
im = axarr[1, i].imshow(lut[slic], cmap=plt.cm.bone)
fig.colorbar(im, ax=axarr[1, i])
axarr[1, i].contour(seg, levels=np.unique(seg), cmap=plt.cm.jet)
axarr[1, i].plot(debug_rg2sp['centres'][iter_index][i, 1], debug_rg2sp['centres'][iter_index][i, 0], 'or')
axarr[0, i].axis('off')
fig.subplots_adjust(left=0.01, bottom=0.01, right=0.99, top=0.96)
# fig.tight_layout()
return fig
def make_overlap_images_optical(images):
""" overlap images and show them
:param [ndarray] images: collection of images
:return ndarray: combined image
>>> im1 = np.zeros((5, 8), dtype=float)
>>> im2 = np.ones((5, 8), dtype=float)
>>> make_overlap_images_optical([im1, im2])
array([[ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[ 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]])
"""
logging.info(' make_overlap_images_optical: overlap images')
# get max dimension of the images
max_size = np.max(np.vstack(tuple([im.shape for im in images])), 0)
logging.debug('compute maximal image size: %r', max_size)
imgs_w = []
for im in images:
imgs_w.append(np.zeros(max_size, dtype=im.dtype))
# copy images to the maximal image
for i, im in enumerate(images):
imgs_w[i][:im.shape[0], :im.shape[1]] = im
# put images as backgrounds
img = imgs_w[0] / len(images)
for i in range(1, len(images)):
img = img + imgs_w[i] / len(images)
return img
def make_overlap_images_chess(images, chess_field=SIZE_CHESS_FIELD):
""" overlap images and show them
:param [ndarray] images: collection of images
:param int chess_field: size of a single chess field (in pixels)
:return ndarray: combined image
>>> im1 = np.zeros((5, 10), dtype=int)
>>> im2 = np.ones((5, 10), dtype=int)
>>> make_overlap_images_chess([im1, im2], chess_field=2)
array([[0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
logging.info(' make_overlap_images_chess: overlap images')
# get max dimension of the images
max_size = np.max(np.vstack(tuple([im.shape for im in images])), 0)
logging.debug('compute maximal image size: %r', max_size)
imgs_w = []
for im in images:
imgs_w.append(np.zeros(max_size, dtype=im.dtype))
# copy images to the maximal image
for i, im in enumerate(images):
imgs_w[i][:im.shape[0], :im.shape[1]] = im
img = np.zeros(max_size, dtype=images[0].dtype)
idx_row = 0
for i in range(int(max_size[0] / chess_field)):
idx = idx_row
for j in range(int(max_size[1] / chess_field)):
w_b = i * chess_field
if (w_b + chess_field) < max_size[0]:
w_e = w_b + chess_field
else:
w_e = max_size[0]
h_b = j * chess_field
if (h_b + chess_field) < max_size[1]:
h_e = h_b + chess_field
else:
h_e = max_size[1]
img[w_b:w_e, h_b:h_e] = imgs_w[idx][w_b:w_e, h_b:h_e]
idx = (idx + 1) % len(images)
idx_row = (idx_row + 1) % len(images)
return img
def draw_image_clusters_centers(ax, img, centres, points=None, labels_centre=None, segm=None):
""" draw imageas bacround and clusters centers
:param ax: figure axis
:param ndarray img: image
:param ndarray centres: points
:param ndarray points: optional list of all points
:param list(int) labels_centre: optional list of labels for points
:param ndarray segm: optional segmentation
>>> img = np.random.randint(0, 256, (100, 100, 3))
>>> seg = np.random.randint(0, 3, (100, 100))
>>> centres = np.random.randint(0, 100, (3, 2))
>>> points = np.random.randint(0, 100, (25, 2))
>>> labels = np.random.randint(0, 4, 25)
>>> draw_image_clusters_centers(plt.Figure().gca(), img[:, :, 0], centres, points, labels, seg)
"""
if img is not None:
img = (img / float(np.max(img)))
if img.ndim != 2:
raise ImageDimensionError('required image dimension is 2, instead got %r' % img.shape)
ax.imshow(img, cmap=plt.cm.Greys_r)
ax.set(xlim=[0, img.shape[1]], ylim=[img.shape[0], 0])
if segm is not None:
ax.imshow(segm, alpha=0.1)
ax.contour(segm)
if points is not None and len(points) > 0 and labels_centre is not None:
points = np.array(points)
for i in range(max(labels_centre) + 1):
select = points[np.asarray(labels_centre) == i]
ax.plot(select[:, 1], select[:, 0], '.')
# ax.plot(np.asarray(centres)[:, 1], np.asarray(centres)[:, 0], 'oy')
# ax.plot(np.asarray(centres)[:, 1], np.asarray(centres)[:, 0], 'xr')
if len(centres) == 0:
return
centres = np.asarray(centres)
for s, clr in [
(3e3, '#ccff33'),
(1e3, '#ff3333'),
(1e2, '#00ffff'),
]:
ax.scatter(centres[:, 1], centres[:, 0], s=s, c=clr)
ax.axes.get_xaxis().set_ticklabels([])
ax.axes.get_yaxis().set_ticklabels([])
def figure_segm_boundary_dist(segm_ref, segm, subfig_size=9):
""" visualise the boundary distances between two segmentation
:param ndarray segm_ref: reference segmentation
:param ndarray segm: estimated segmentation
:param int subfig_size: maximal sub-figure size
:return Figure:
>>> seg = np.zeros((100, 100))
>>> seg[35:80, 10:65] = 1
>>> fig = figure_segm_boundary_dist(seg, seg.T)
>>> isinstance(fig, matplotlib.figure.Figure)
True
"""
if segm_ref.shape != segm.shape:
raise ImageDimensionError('ref segm %r and segm %r should match' % (segm_ref.shape, segm.shape))
segr_boundary = segmentation.find_boundaries(segm_ref, mode='thick')
segm_boundary = segmentation.find_boundaries(segm, mode='thick')
segm_distance = ndimage.distance_transform_edt(~segm_boundary)
norm_size = np.array(segm_ref.shape[:2]) / float(np.max(segm_ref.shape))
fig_size = norm_size[::-1] * subfig_size * np.array([2, 1])
fig, axarr = plt.subplots(ncols=2, figsize=fig_size)
axarr[0].set_title('boundary distances with reference contour')
im = axarr[0].imshow(segm_distance, cmap=plt.cm.Greys)
plt.colorbar(im, ax=axarr[0])
axarr[0].contour(segm_ref, cmap=plt.cm.jet)
segm_distance[~segr_boundary] = 0
axarr[1].set_title('distance projected to ref. boundary')
im = axarr[1].imshow(segm_distance, cmap=plt.cm.Reds)
plt.colorbar(im, ax=axarr[1])
return fig
|
the-stack_0_26689
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.utils.timezone import utc
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='PageView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
('path', models.CharField(max_length=350)),
('timestamp', models.DateTimeField(default=datetime.datetime(2016, 1, 7, 2, 42, 1, 919354, tzinfo=utc))),
('user', models.ForeignKey(blank=True, null=True, to=settings.AUTH_USER_MODEL)),
],
),
]
|
the-stack_0_26691
|
from lunespy.client.transactions.reissue import ReissueToken
from lunespy.client.wallet import Account
def test_token_without_asset_id_ready_failed_successful():
"""
without an asset_id parameter:
- ReissueToken.ready should return False
- otherwise it should return True
"""
# Failed
sender = Account()
tx = ReissueToken(sender, quantity=1)
assert tx.ready == False
# Successful
tx.reissue_data['asset_id'] = '7npqMwVEAZ9yGgoRB8AwfHXEkCumWgiqdYr8yeTze7Pp'
assert tx.ready == True
def test_token_transaction_full_data():
"""
with a sender, asset_id and quantity:
- should be return all keys of offline-transaction for ReissueToken.transaction
"""
sender = Account()
offline_transaction = [
'ready',
'type',
'senderPublicKey',
'signature',
'timestamp',
'fee',
'assetId',
'reissuable',
'quantity',
]
tx = ReissueToken(sender, asset_id='test', quantity=10)
response = tx.transaction
print(response)
assert response['ready'] == True
assert list(response.keys()) == offline_transaction
|
the-stack_0_26695
|
"""This package includes all the modules related to data loading and preprocessing
To add a custom dataset class called 'dummy', you need to add a file called 'dummy_dataset.py' and define a subclass 'DummyDataset' inherited from BaseDataset.
You need to implement four functions:
-- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).
-- <__len__>: return the size of dataset.
-- <__getitem__>: get a data point from data loader.
-- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.
Now you can use the dataset class by specifying flag '--dataset_mode dummy'.
See our template dataset class 'template_dataset.py' for more details.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
"""Import the module "data/[dataset_name]_dataset.py".
In the file, the class called DatasetNameDataset() will
be instantiated. It has to be a subclass of BaseDataset,
and it is case-insensitive.
"""
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
"""Return the static method <modify_commandline_options> of the dataset class."""
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataset(opt):
"""Create a dataset given the option.
This function wraps the class CustomDatasetDataLoader.
This is the main interface between this package and 'train.py'/'test.py'
Example:
>>> from data import create_dataset
>>> dataset = create_dataset(opt)
"""
data_loader = CustomDatasetDataLoader(opt)
dataset = data_loader.load_data()
return dataset
class CustomDatasetDataLoader():
"""Wrapper class of Dataset class that performs multi-threaded data loading"""
def __init__(self, opt):
"""Initialize this class
Step 1: create a dataset instance given the name [dataset_mode]
Step 2: create a multi-threaded data loader.
"""
self.opt = opt
dataset_class = find_dataset_using_name(opt.dataset_mode)
self.dataset = dataset_class(opt)
self.eval_size = opt.eval_size if opt.model == 'egan' else 0
self.bs = opt.batch_size*(opt.D_iters + 1) + self.eval_size
print("dataset [%s] was created" % type(self.dataset).__name__)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=self.bs, # Load all data for training D and G once together.
shuffle=not opt.serial_batches,
num_workers=int(opt.num_threads))
def load_data(self):
return self
def __len__(self):
"""Return the number of data in the dataset"""
self.data_size = min(len(self.dataset), self.opt.max_dataset_size)
return self.data_size
def __iter__(self):
"""Return a batch of data"""
for i, data in enumerate(self.dataloader):
if (i+1) * self.bs >= self.data_size:
break
yield data
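# Illustrative usage (assuming 'opt' is the option namespace produced by the
# project's option parser, with dataset_mode, batch_size, D_iters, model,
# eval_size, serial_batches, num_threads and max_dataset_size set):
#
#   dataset = create_dataset(opt)
#   print('#training images = %d' % len(dataset))
#   for batch in dataset:
#       pass  # each batch bundles batch_size * (D_iters + 1) + eval_size samples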
|
the-stack_0_26697
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC.
Test importmulti by generating keys on node0, importing the scriptPubKeys and
addresses on node1 and then testing the address info for the different address
variants.
- `get_key()` and `get_multisig()` are called to generate keys on node0 and
return the privkeys, pubkeys and all variants of scriptPubKey and address.
- `test_importmulti()` is called to send an importmulti call to node1, test
success, and (if unsuccessful) test the error code and error message returned.
- `test_address()` is called to call getaddressinfo for an address on node1
and test the values returned."""
from test_framework.script import (
CScript,
OP_NOP,
)
from test_framework.test_framework import DefiTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.wallet_util import (
get_key,
get_multisig,
test_address,
)
class ImportMultiTest(DefiTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
def test_importmulti(self, req, success, error_code=None, error_message=None, warnings=None):
"""Run importmulti and assert success"""
if warnings is None:
warnings = []
result = self.nodes[1].importmulti([req])
observed_warnings = []
if 'warnings' in result[0]:
observed_warnings = result[0]['warnings']
assert_equal("\n".join(sorted(warnings)), "\n".join(sorted(observed_warnings)))
assert_equal(result[0]['success'], success)
if error_code is not None:
assert_equal(result[0]['error']['code'], error_code)
assert_equal(result[0]['error']['message'], error_message)
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
# Check only one address
assert_equal(node0_address1['ismine'], True)
# Node 1 sync test
assert_equal(self.nodes[1].getblockcount(), 1)
# Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Defi Address (implicit non-internal)
self.log.info("Should import an address")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=False)
watchonly_address = key.p2pkh_addr
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
self.test_importmulti({"scriptPubKey": {"address": "not valid address"},
"timestamp": "now"},
success=False,
error_code=-5,
error_message='Invalid address \"not valid address\"')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp,
ischange=True)
# ScriptPubKey + internal + label
self.log.info("Should not allow a label to be specified when internal is true")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"internal": True,
"label": "Example label"},
success=False,
error_code=-8,
error_message='Internal addresses should not have a label')
# Nonstandard scriptPubKey + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal flag")
nonstandardScriptPubKey = key.p2pkh_script + CScript([OP_NOP]).hex()
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Public key + !Internal(explicit)
self.log.info("Should import an address with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": False},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [key.pubkey],
"internal": True},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
# Nonstandard scriptPubKey + Public key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with public key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
self.log.info("Should not import an address with private key if is already imported")
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-4,
error_message='The wallet already contains the private key for this address or script ("' + key.p2pkh_script + '")')
# Address + Private key + watchonly
self.log.info("Should import an address with private key and with watchonly")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [key.privkey],
"watchonly": True},
success=True,
warnings=["All private keys are provided, outputs will be considered spendable. If this is intentional, do not specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [key.privkey],
"internal": True},
success=True)
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=True,
timestamp=timestamp)
# Nonstandard scriptPubKey + Private key + !internal
self.log.info("Should not import a nonstandard scriptPubKey without internal and with private key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": nonstandardScriptPubKey,
"timestamp": "now",
"keys": [key.privkey]},
success=False,
error_code=-8,
error_message='Internal must be set to true for nonstandard scriptPubKey imports.')
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=False,
ismine=False,
timestamp=None)
# P2SH address
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
isscript=True,
iswatchonly=True,
timestamp=timestamp)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr, timestamp=timestamp, iswatchonly=True, ismine=False, solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_addr,
timestamp=timestamp,
ismine=False,
iswatchonly=True,
solvable=True)
p2shunspent = self.nodes[1].listunspent(0, 999999, [multisig.p2sh_addr])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
multisig = get_multisig(self.nodes[0])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multisig.p2sh_addr, 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_addr},
"timestamp": "now",
"redeemscript": multisig.redeem_script,
"keys": multisig.privkeys[0:2],
"watchonly": True},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
iswatchonly=True,
ismine=False,
solvable=True,
timestamp=timestamp)
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with the wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"pubkeys": [wrong_key]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should import a scriptPubKey with internal and with a wrong public key as non-solvable")
key = get_key(self.nodes[0])
wrong_key = get_key(self.nodes[0]).pubkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"pubkeys": [wrong_key],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should import an address with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now",
"keys": [wrong_privkey]},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should import a scriptPubKey with internal and with a wrong private key as non-solvable")
key = get_key(self.nodes[0])
wrong_privkey = get_key(self.nodes[0]).privkey
self.test_importmulti({"scriptPubKey": key.p2pkh_script,
"timestamp": "now",
"keys": [wrong_privkey],
"internal": True},
success=True,
warnings=["Importing as non-solvable: some required keys are missing. If this is intentional, don't provide any keys, pubkeys, witnessscript, or redeemscript.", "Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
iswatchonly=True,
ismine=False,
solvable=False,
timestamp=timestamp)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
self.test_importmulti({"scriptPubKey": {"address": watchonly_address},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
test_address(self.nodes[1],
watchonly_address,
iswatchonly=True,
ismine=False,
timestamp=watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{"scriptPubKey": key.p2pkh_script}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": key.p2pkh_script,
"timestamp": ""
}])
# Import P2WPKH address as watch only
self.log.info("Should import a P2WPKH address as watch only")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=True,
solvable=False)
# Import P2WPKH address with public key but no private key
self.log.info("Should import a P2WPKH address and public key as solvable but not spendable")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2wpkh_addr,
ismine=False,
solvable=True)
# Import P2WPKH address with key and check it is spendable
self.log.info("Should import a P2WPKH address with key")
key = get_key(self.nodes[0])
self.test_importmulti({"scriptPubKey": {"address": key.p2wpkh_addr},
"timestamp": "now",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2wpkh_addr,
iswatchonly=False,
ismine=True)
# P2WSH multisig address without scripts or keys
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2wsh multisig as watch only without respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=False)
# Same P2WSH multisig address as above, but now with witnessscript + private keys
self.log.info("Should import a p2wsh with respective witness script and private keys")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2wsh_addr},
"timestamp": "now",
"witnessscript": multisig.redeem_script,
"keys": multisig.privkeys},
success=True)
test_address(self.nodes[1],
multisig.p2sh_addr,
solvable=True,
ismine=True,
sigsrequired=2)
# P2SH-P2WPKH address with no redeemscript or public or private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh without redeem script or keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=False,
ismine=False)
# P2SH-P2WPKH address + redeemscript + public key with no private key
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and pubkey as solvable")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"pubkeys": [key.pubkey]},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=False)
# P2SH-P2WPKH address + redeemscript + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh with respective redeem script and private keys")
self.test_importmulti({"scriptPubKey": {"address": key.p2sh_p2wpkh_addr},
"timestamp": "now",
"redeemscript": key.p2sh_p2wpkh_redeem_script,
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True)
# P2SH-P2WSH multisig + redeemscript with no private key
multisig = get_multisig(self.nodes[0])
self.log.info("Should import a p2sh-p2wsh with respective redeem script but no private key")
self.test_importmulti({"scriptPubKey": {"address": multisig.p2sh_p2wsh_addr},
"timestamp": "now",
"redeemscript": multisig.p2wsh_script,
"witnessscript": multisig.redeem_script},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
multisig.p2sh_p2wsh_addr,
solvable=True,
ismine=False)
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should not import a p2sh-p2wpkh address from descriptor without checksum and private key")
self.test_importmulti({"desc": "sh(wpkh(" + key.pubkey + "))",
"timestamp": "now",
"label": "Descriptor import test",
"keys": [key.privkey]},
success=False,
error_code=-5,
error_message="Missing checksum")
# Test importing of a P2SH-P2WPKH address via descriptor + private key
key = get_key(self.nodes[0])
self.log.info("Should import a p2sh-p2wpkh address from descriptor and private key")
self.test_importmulti({"desc": descsum_create("sh(wpkh(" + key.pubkey + "))"),
"timestamp": "now",
"label": "Descriptor import test",
"keys": [key.privkey]},
success=True)
test_address(self.nodes[1],
key.p2sh_p2wpkh_addr,
solvable=True,
ismine=True,
label="Descriptor import test")
# Test ranged descriptor fails if range is not specified
xpriv = "tprv8ZgxMBicQKsPeuVhWwi6wuMQGfPKi9Li5GtX35jVNknACgqe3CY4g5xgkfDDJcmtF7o1QnxWDRYw4H5P26PXq7sbcUkEqeR4fg3Kxp2tigg"
addresses = ["2N7yv4p8G8yEaPddJxY41kPihnWvs39qCMf", "2MsHxyb2JS3pAySeNUsJ7mNnurtpeenDzLA"] # hdkeypath=m/0'/0'/0' and 1'
addresses += ["bcrt1qrd3n235cj2czsfmsuvqqpr3lu6lg0ju7scl8gn", "bcrt1qfqeppuvj0ww98r6qghmdkj70tv8qpchehegrg8"] # wpkh subscripts corresponding to the above addresses
desc = "sh(wpkh(" + xpriv + "/0'/0'/*'" + "))"
self.log.info("Ranged descriptor import should fail without a specified range")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Descriptor is ranged, please specify the range')
# Test importing of a ranged descriptor with xpriv
self.log.info("Should import the ranged descriptor with specified range as solvable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now",
"range": 1},
success=True)
for address in addresses:
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": -1},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [-1, 10]},
success=False, error_code=-8, error_message='Range should be greater or equal than 0')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]},
success=False, error_code=-8, error_message='End of range is too high')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [2, 1]},
success=False, error_code=-8, error_message='Range specified as [begin,end] must not have begin after end')
self.test_importmulti({"desc": descsum_create(desc), "timestamp": "now", "range": [0, 1000001]},
success=False, error_code=-8, error_message='Range is too large')
# Test importing a descriptor containing a WIF private key
wif_priv = "cTe1f5rdT8A8DFgVWTjyPwACsDPJM9ff4QngFxUixCSvvbg1x6sh"
address = "2MuhcG52uHPknxDgmGPsV18jSHFBnnRgjPg"
desc = "sh(wpkh(" + wif_priv + "))"
self.log.info("Should import a descriptor with a WIF private key as spendable")
self.test_importmulti({"desc": descsum_create(desc),
"timestamp": "now"},
success=True)
test_address(self.nodes[1],
address,
solvable=True,
ismine=True)
# dump the private key to ensure it matches what was imported
privkey = self.nodes[1].dumpprivkey(address)
assert_equal(privkey, wif_priv)
# Test importing of a P2PKH address via descriptor
key = get_key(self.nodes[0])
self.log.info("Should import a p2pkh address from descriptor")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"timestamp": "now",
"label": "Descriptor import test"},
True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
test_address(self.nodes[1],
key.p2pkh_addr,
solvable=True,
ismine=False,
label="Descriptor import test")
# Test import fails if both desc and scriptPubKey are provided
key = get_key(self.nodes[0])
self.log.info("Import should fail if both scriptPubKey and desc are provided")
self.test_importmulti({"desc": descsum_create("pkh(" + key.pubkey + ")"),
"scriptPubKey": {"address": key.p2pkh_addr},
"timestamp": "now"},
success=False,
error_code=-8,
error_message='Both a descriptor and a scriptPubKey should not be provided.')
# Test import fails if neither desc nor scriptPubKey are present
key = get_key(self.nodes[0])
self.log.info("Import should fail if neither a descriptor nor a scriptPubKey are provided")
self.test_importmulti({"timestamp": "now"},
success=False,
error_code=-8,
error_message='Either a descriptor or scriptPubKey must be provided.')
# Test importing of a multisig via descriptor
key1 = get_key(self.nodes[0])
key2 = get_key(self.nodes[0])
self.log.info("Should import a 1-of-2 bare multisig from descriptor")
self.test_importmulti({"desc": descsum_create("multi(1," + key1.pubkey + "," + key2.pubkey + ")"),
"timestamp": "now"},
success=True,
warnings=["Some private keys are missing, outputs will be considered watchonly. If this is intentional, specify the watchonly flag."])
self.log.info("Should not treat individual keys from the imported bare multisig as watchonly")
test_address(self.nodes[1],
key1.p2pkh_addr,
ismine=False,
iswatchonly=False)
# Import pubkeys with key origin info
self.log.info("Addresses should have hd keypath and master key id after import with key origin")
pub_addr = self.nodes[1].getnewaddress()
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
pub_keypath = info['hdkeypath']
pub_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + pub_fpr + pub_keypath[1:] +"]" + pub + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(pub_import_info['pubkey'], pub)
assert_equal(pub_import_info['hdkeypath'], pub_keypath)
# Import privkeys with key origin info
priv_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(priv_addr)
priv = self.nodes[1].dumpprivkey(priv_addr)
priv_keypath = info['hdkeypath']
priv_fpr = info['hdmasterfingerprint']
result = self.nodes[0].importmulti(
[{
'desc' : descsum_create("wpkh([" + priv_fpr + priv_keypath[1:] + "]" + priv + ")"),
"timestamp": "now",
}]
)
assert result[0]['success']
priv_import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(priv_import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(priv_import_info['hdkeypath'], priv_keypath)
# Make sure the key origin info are still there after a restart
self.stop_nodes()
self.start_nodes()
import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(import_info['hdmasterfingerprint'], pub_fpr)
assert_equal(import_info['hdkeypath'], pub_keypath)
import_info = self.nodes[0].getaddressinfo(priv_addr)
assert_equal(import_info['hdmasterfingerprint'], priv_fpr)
assert_equal(import_info['hdkeypath'], priv_keypath)
# Check legacy import does not import key origin info
self.log.info("Legacy imports don't have key origin info")
pub_addr = self.nodes[1].getnewaddress()
info = self.nodes[1].getaddressinfo(pub_addr)
pub = info['pubkey']
result = self.nodes[0].importmulti(
[{
'scriptPubKey': {'address': pub_addr},
'pubkeys': [pub],
"timestamp": "now",
}]
)
assert result[0]['success']
pub_import_info = self.nodes[0].getaddressinfo(pub_addr)
assert_equal(pub_import_info['pubkey'], pub)
assert 'hdmasterfingerprint' not in pub_import_info
assert 'hdkeypath' not in pub_import_info
# Import some public keys to the keypool of a no privkey wallet
self.log.info("Adding pubkey to keypool of disableprivkey wallet")
self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True)
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 2)
newaddr1 = wrpc.getnewaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getnewaddress()
assert_equal(addr2, newaddr2)
# Import some public keys to the internal keypool of a no privkey wallet
self.log.info("Adding pubkey to internal keypool of disableprivkey wallet")
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
},
{
'desc': descsum_create('wpkh(' + pub2 + ')'),
'keypool': True,
'internal': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert result[1]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize_hd_internal"], 2)
newaddr1 = wrpc.getrawchangeaddress()
assert_equal(addr1, newaddr1)
newaddr2 = wrpc.getrawchangeaddress()
assert_equal(addr2, newaddr2)
# Import a multisig and make sure the keys don't go into the keypool
self.log.info('Imported scripts with pubkeys should not have their pubkeys go into the keypool')
addr1 = self.nodes[0].getnewaddress()
addr2 = self.nodes[0].getnewaddress()
pub1 = self.nodes[0].getaddressinfo(addr1)['pubkey']
pub2 = self.nodes[0].getaddressinfo(addr2)['pubkey']
result = wrpc.importmulti(
[{
'desc': descsum_create('wsh(multi(2,' + pub1 + ',' + pub2 + '))'),
'keypool': True,
"timestamp": "now",
}]
)
assert result[0]['success']
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
# Cannot import those pubkeys to keypool of wallet with privkeys
self.log.info("Pubkeys cannot be added to the keypool of a wallet with private keys")
wrpc = self.nodes[1].get_wallet_rpc("")
assert wrpc.getwalletinfo()['private_keys_enabled']
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh(' + pub1 + ')'),
'keypool': True,
"timestamp": "now",
}]
)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], "Keys can only be imported to the keypool when private keys are disabled")
# Make sure ranged imports import keys in order
self.log.info('Key ranges should be imported in order')
wrpc = self.nodes[1].get_wallet_rpc("noprivkeys")
assert_equal(wrpc.getwalletinfo()["keypoolsize"], 0)
assert_equal(wrpc.getwalletinfo()["private_keys_enabled"], False)
xpub = "tpubDAXcJ7s7ZwicqjprRaEWdPoHKrCS215qxGYxpusRLLmJuT69ZSicuGdSfyvyKpvUNYBW1s2U3NSrT6vrCYB9e6nZUEvrqnwXPF8ArTCRXMY"
addresses = [
'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv', # m/0'/0'/0
'bcrt1q8vprchan07gzagd5e6v9wd7azyucksq2xc76k8', # m/0'/0'/1
'bcrt1qtuqdtha7zmqgcrr26n2rqxztv5y8rafjp9lulu', # m/0'/0'/2
'bcrt1qau64272ymawq26t90md6an0ps99qkrse58m640', # m/0'/0'/3
'bcrt1qsg97266hrh6cpmutqen8s4s962aryy77jp0fg0', # m/0'/0'/4
]
result = wrpc.importmulti(
[{
'desc': descsum_create('wpkh([80002067/0h/0h]' + xpub + '/*)'),
'keypool': True,
'timestamp': 'now',
                'range': [0, 4],
}]
)
for i in range(0, 5):
addr = wrpc.getnewaddress('', 'bech32')
assert_equal(addr, addresses[i])
if __name__ == '__main__':
ImportMultiTest().main()
|
the-stack_0_26698
|
#%%
import numpy as np
import fcsparser
import os
import glob
# Define the details of the experiment.
DATE = '20190205'
atc_conc = 10 # in ng/mL
RUN_NO = 2
promoter = '28yfp'
gating_fraction = 0.4
## Hardcoded arrangement garbage.
xan_mgml = [0, 0, 0, 0.05, 0.1, 0.25, 0.5, 0.75, 1, 1.5, 2, 2.5, 3, 3.5, 4]
_strains = [['auto'], ['delta'], ['dilution'] * 13]
strains = [s for sub in _strains for s in sub]  # flatten the nested strain list
# Define directories and search pattern
src = '../../../data/flow/fcs/'
dst = '../../../data/flow/csv/'
pattern = f'RP{DATE[:4]}-{DATE[4:6]}-{DATE[6:]}_r{RUN_NO}'
# Get the names of the files.
files = np.sort(glob.glob(f'{src}{pattern}*.fcs'))
# %% Iterate through each strain and concentration.
for s, c, f in zip(strains, xan_mgml, files):
# Define the new name.
new_name = f'{DATE}_r{RUN_NO}_{promoter}_{s}_{atc_conc}ngmlATC_{c}mgmlXAN'
# Load the data using fcs parser and save to csv.
_, data = fcsparser.parse(f)
data.to_csv(f'{dst}{new_name}.csv')
# Rename the FCS file.
os.rename(f, f'{src}{new_name}.fcs')
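# Hedged sanity check (not part of the original script): the number of CSVs
# written should equal the number of (strain, concentration, file) triples
# processed above, assuming the destination folder held no matching CSVs
# beforehand.
n_processed = len(list(zip(strains, xan_mgml, files)))
assert len(glob.glob(f'{dst}{DATE}_r{RUN_NO}_{promoter}_*.csv')) == n_processed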
|
the-stack_0_26700
|
import sqlite3
from commons import get_random_age, get_random_active, get_random_bool, get_random_area_code, create_table
DB_NAME = "sqlite3_opt_batched.db"
def faker(con: sqlite3.Connection, count=100_000):
min_batch_size = 1_000_000
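    # Rows are generated and inserted in chunks of min_batch_size. Note that
    # with the default count of 100_000 the loop below runs zero times, which
    # is why main() passes a much larger count.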
con.execute('BEGIN')
for _ in range(int(count / min_batch_size)):
with_area = get_random_bool()
current_batch = []
for _ in range(min_batch_size):
age = get_random_age()
active = get_random_active()
# switch for area code
if with_area:
# random 6 digit number
area = get_random_area_code()
current_batch.append((area, age, active))
else:
current_batch.append((age, active))
if with_area:
con.executemany('INSERT INTO user VALUES (NULL,?,?,?)', current_batch)
else:
con.executemany('INSERT INTO user VALUES (NULL,NULL,?,?)', current_batch)
con.commit()
def main():
con = sqlite3.connect(DB_NAME, isolation_level=None)
con.execute('PRAGMA journal_mode = OFF;')
con.execute('PRAGMA synchronous = 0;')
con.execute('PRAGMA cache_size = 1000000;') # give it a GB
con.execute('PRAGMA locking_mode = EXCLUSIVE;')
con.execute('PRAGMA temp_store = MEMORY;')
create_table(con)
faker(con, count=100_000_000)
if __name__ == '__main__':
main()
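# Hedged sketch (not part of the original script): one way to sanity-check a
# run afterwards is to count the rows that landed in the "user" table used by
# the INSERT statements above. Assumes the same DB_NAME.
def count_rows(db_name: str = DB_NAME) -> int:
    con = sqlite3.connect(db_name)
    try:
        return con.execute('SELECT COUNT(*) FROM user').fetchone()[0]
    finally:
        con.close()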
|
the-stack_0_26701
|
import re
from collections import defaultdict
from itertools import permutations
class Person:
def __init__(self, name):
self.name = name
self.happiness_map = defaultdict(int)
def define_happiness(self, person_name, happiness):
self.happiness_map[person_name] = happiness
def get_maximum_happiness(people: list) -> int:
maximum_happiness = -1
for people_arrange in permutations(people):
happiness_delta = 0
for i in range(len(people_arrange)):
happiness_delta += people_arrange[i].happiness_map[people_arrange[(i + 1) % len(people_arrange)].name] + \
people_arrange[i].happiness_map[people_arrange[(i - 1) % len(people_arrange)].name]
if happiness_delta > maximum_happiness:
maximum_happiness = happiness_delta
return maximum_happiness
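# Minimal illustration (not part of the original solution; the names are made
# up): two guests who each gain 10 units next to the other. With only two
# people the circular indexing above counts each neighbour relation twice, so
# the optimum comes out as 40.
def _example_maximum_happiness() -> int:
    alice, bob = Person('Alice'), Person('Bob')
    alice.define_happiness('Bob', 10)
    bob.define_happiness('Alice', 10)
    return get_maximum_happiness([alice, bob])  # returns 40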
def main():
line_pattern = r'(\w+) would (\w+) (\d+) happiness units by sitting next to (\w+).'
people = list()
with open('day_13_input.txt') as f_in:
for line in f_in.readlines():
person_1, outcome, quantity, person_2 = re.findall(line_pattern, line)[0]
if person_1 not in [person.name for person in people]:
person = Person(person_1)
people.append(person)
else:
person = [person for person in people if person.name == person_1][0]
happiness = -int(quantity) if outcome == 'lose' else int(quantity)
person.define_happiness(person_2, happiness)
maximum_happiness = get_maximum_happiness(people)
print('Total change in happiness for the optimal seating arrangement is %d' % maximum_happiness)
people.append(Person('Javitronxo'))
maximum_happiness = get_maximum_happiness(people)
print('Total change in happiness for the optimal seating arrangement with myself is %d' % maximum_happiness)
if __name__ == '__main__':
main()
|
the-stack_0_26702
|
# -*- coding: utf-8 -*-
"""DNA Center Create Application Set data model.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator3E94Cb1B485B8B0E(object):
"""Create Application Set request schema definition."""
def __init__(self):
super(JSONSchemaValidator3E94Cb1B485B8B0E, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"items": {
"properties": {
"name": {
"description":
"Name",
"type": [
"string",
"null"
]
}
},
"type": [
"object",
"null"
]
},
"type": "array"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
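# Hedged usage sketch (not part of the SDK source): validating a minimal
# Create Application Set payload against the schema compiled above. The
# payload shape is inferred from the schema alone and is only an example.
def _example_validate():
    JSONSchemaValidator3E94Cb1B485B8B0E().validate([{'name': 'example-app-set'}])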
|
the-stack_0_26703
|
import json
import requests
from rotkehlchen.assets.asset import Asset
from rotkehlchen.constants.timing import DEFAULT_TIMEOUT_TUPLE
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.history.deserialization import deserialize_price
from rotkehlchen.types import Price
PRICE_API_URL = 'https://bisq.markets/api/ticker?market={symbol}_BTC'
def get_bisq_market_price(asset: Asset) -> Price:
"""
Get price for pair at bisq marketplace. Price is returned against BTC.
Can raise:
    - RemoteError: If the market doesn't exist or the request fails
- DeserializationError: If the data returned is not a valid price
"""
url = PRICE_API_URL.format(symbol=asset.symbol)
try:
response = requests.get(url, timeout=DEFAULT_TIMEOUT_TUPLE)
except requests.exceptions.RequestException as e:
raise RemoteError(f'bisq.markets request {url} failed due to {str(e)}') from e
try:
data = response.json()
except json.decoder.JSONDecodeError as e:
raise RemoteError(
f'Failed to read json response from bisq.markets. {response.text}. {str(e)}',
) from e
if 'error' in data:
raise RemoteError(f'Request data from bisq.markets {url} is not valid {data["error"]}')
try:
price = data['last']
except KeyError as e:
raise DeserializationError(
            f'Response from bisq.markets didn\'t contain expected key "last". {data}',
) from e
return deserialize_price(price)
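# Hedged usage sketch (not part of the original module): wrapping the call
# above with the error handling its docstring documents. The caller is assumed
# to already hold a valid Asset instance.
def print_btc_price(asset: Asset) -> None:
    try:
        print(f'{asset} trades at {get_bisq_market_price(asset)} BTC')
    except (RemoteError, DeserializationError) as err:
        print(f'Could not query bisq.markets: {err}')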
|
the-stack_0_26704
|
"""
Empty init file in case you choose a package besides PyTest, such as Nose, which may look for such a file.
"""
import os
# Clear any previous artifacts out of the output directory.
output_dir = 'output/'
for f in os.listdir(output_dir):
    os.remove(os.path.join(output_dir, f))
|
the-stack_0_26706
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import binascii
import itertools
import os
import pytest
from cryptography.exceptions import (
AlreadyFinalized,
InvalidSignature,
_Reasons,
)
from cryptography.hazmat.backends.interfaces import (
PEMSerializationBackend,
RSABackend,
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
padding,
rsa,
utils as asym_utils,
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateNumbers,
RSAPublicNumbers,
)
from cryptography.utils import CryptographyDeprecationWarning
from .fixtures_rsa import (
RSA_KEY_1024,
RSA_KEY_1025,
RSA_KEY_1026,
RSA_KEY_1027,
RSA_KEY_1028,
RSA_KEY_1029,
RSA_KEY_1030,
RSA_KEY_1031,
RSA_KEY_1536,
RSA_KEY_2048,
RSA_KEY_2048_ALT,
RSA_KEY_512,
RSA_KEY_512_ALT,
RSA_KEY_522,
RSA_KEY_599,
RSA_KEY_745,
RSA_KEY_768,
RSA_KEY_CORRUPTED,
)
from .utils import (
_check_rsa_private_numbers,
generate_rsa_verification_test,
skip_fips_traditional_openssl,
)
from ...doubles import (
DummyAsymmetricPadding,
DummyHashAlgorithm,
DummyKeySerializationEncryption,
)
from ...utils import (
load_nist_vectors,
load_pkcs1_vectors,
load_rsa_nist_vectors,
load_vectors_from_file,
raises_unsupported_algorithm,
)
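# Stand-in MGF object used below to exercise the UNSUPPORTED_MGF error paths
# in the PSS signing, verification, encryption and decryption tests.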
class DummyMGF(object):
_salt_length = 0
def _check_rsa_private_numbers_if_serializable(key):
if isinstance(key, rsa.RSAPrivateKeyWithSerialization):
_check_rsa_private_numbers(key.private_numbers())
def test_check_rsa_private_numbers_if_serializable():
_check_rsa_private_numbers_if_serializable("notserializable")
def _flatten_pkcs1_examples(vectors):
flattened_vectors = []
for vector in vectors:
examples = vector[0].pop("examples")
for example in examples:
merged_vector = (vector[0], vector[1], example)
flattened_vectors.append(merged_vector)
return flattened_vectors
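# _flatten_pkcs1_examples turns entries of the form (private_with_examples,
# public) into one (private, public, example) tuple per item of the "examples"
# list, so tests can be parametrized per example rather than per key.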
def _build_oaep_sha2_vectors():
base_path = os.path.join("asymmetric", "RSA", "oaep-custom")
vectors = []
hashalgs = [
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
]
for mgf1alg, oaepalg in itertools.product(hashalgs, hashalgs):
if mgf1alg.name == "sha1" and oaepalg.name == "sha1":
# We need to generate the cartesian product of the permutations
# of all the SHAs above, but SHA1/SHA1 is something we already
# tested previously and thus did not generate custom vectors for.
continue
examples = _flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join(
base_path,
"oaep-{}-{}.txt".format(mgf1alg.name, oaepalg.name),
),
load_pkcs1_vectors,
)
)
# We've loaded the files, but the loaders don't give us any information
# about the mgf1 or oaep hash algorithms. We know this info so we'll
# just add that to the end of the tuple
for private, public, vector in examples:
vectors.append((private, public, vector, mgf1alg, oaepalg))
return vectors
def _skip_pss_hash_algorithm_unsupported(backend, hash_alg):
if not backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hash_alg), salt_length=padding.PSS.MAX_LENGTH
)
):
pytest.skip(
"Does not support {} in MGF1 using PSS.".format(hash_alg.name)
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
def test_skip_pss_hash_algorithm_unsupported(backend):
with pytest.raises(pytest.skip.Exception):
_skip_pss_hash_algorithm_unsupported(backend, DummyHashAlgorithm())
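# rsa._modinv(q, p) below computes the modular inverse of q modulo p, the
# quantity RSA private keys carry as iqmp for CRT-based decryption; the test
# pins it against a known-good constant.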
def test_modular_inverse():
p = int(
"d1f9f6c09fd3d38987f7970247b85a6da84907753d42ec52bc23b745093f4fff5cff3"
"617ce43d00121a9accc0051f519c76e08cf02fc18acfe4c9e6aea18da470a2b611d2e"
"56a7b35caa2c0239bc041a53cc5875ca0b668ae6377d4b23e932d8c995fd1e58ecfd8"
"c4b73259c0d8a54d691cca3f6fb85c8a5c1baf588e898d481",
16,
)
q = int(
"d1519255eb8f678c86cfd06802d1fbef8b664441ac46b73d33d13a8404580a33a8e74"
"cb2ea2e2963125b3d454d7a922cef24dd13e55f989cbabf64255a736671f4629a47b5"
"b2347cfcd669133088d1c159518531025297c2d67c9da856a12e80222cd03b4c6ec0f"
"86c957cb7bb8de7a127b645ec9e820aa94581e4762e209f01",
16,
)
assert rsa._modinv(q, p) == int(
"0275e06afa722999315f8f322275483e15e2fb46d827b17800f99110b269a6732748f"
"624a382fa2ed1ec68c99f7fc56fb60e76eea51614881f497ba7034c17dde955f92f15"
"772f8b2b41f3e56d88b1e096cdd293eba4eae1e82db815e0fadea0c4ec971bc6fd875"
"c20e67e48c31a611e98d32c6213ae4c4d7b53023b2f80c538",
16,
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSA(object):
@pytest.mark.parametrize(
("public_exponent", "key_size"),
itertools.product(
(3, 65537),
(1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1536, 2048),
),
)
def test_generate_rsa_keys(self, backend, public_exponent, key_size):
if backend._fips_enabled:
if key_size < backend._fips_rsa_min_key_size:
pytest.skip("Key size not FIPS compliant: {}".format(key_size))
if public_exponent < backend._fips_rsa_min_public_exponent:
pytest.skip(
"Exponent not FIPS compliant: {}".format(public_exponent)
)
skey = rsa.generate_private_key(public_exponent, key_size, backend)
assert skey.key_size == key_size
_check_rsa_private_numbers_if_serializable(skey)
pkey = skey.public_key()
assert isinstance(pkey.public_numbers(), rsa.RSAPublicNumbers)
def test_generate_bad_public_exponent(self, backend):
with pytest.raises(ValueError):
rsa.generate_private_key(
public_exponent=1, key_size=2048, backend=backend
)
with pytest.raises(ValueError):
rsa.generate_private_key(
public_exponent=4, key_size=2048, backend=backend
)
with pytest.raises(ValueError):
rsa.generate_private_key(
public_exponent=65535, key_size=2048, backend=backend
)
def test_cant_generate_insecure_tiny_key(self, backend):
with pytest.raises(ValueError):
rsa.generate_private_key(
public_exponent=65537, key_size=511, backend=backend
)
with pytest.raises(ValueError):
rsa.generate_private_key(
public_exponent=65537, key_size=256, backend=backend
)
@pytest.mark.parametrize(
"pkcs1_example",
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"
),
load_pkcs1_vectors,
),
)
def test_load_pss_vect_example_keys(self, pkcs1_example):
secret, public = pkcs1_example
private_num = rsa.RSAPrivateNumbers(
p=secret["p"],
q=secret["q"],
d=secret["private_exponent"],
dmp1=secret["dmp1"],
dmq1=secret["dmq1"],
iqmp=secret["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=secret["public_exponent"], n=secret["modulus"]
),
)
_check_rsa_private_numbers(private_num)
public_num = rsa.RSAPublicNumbers(
e=public["public_exponent"], n=public["modulus"]
)
assert public_num
public_num2 = private_num.public_numbers
assert public_num2
assert public_num.n == public_num2.n
assert public_num.e == public_num2.e
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("asymmetric", "RSA", "oaep-label.txt"),
load_nist_vectors,
),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=b"label",
)
),
skip_message="Does not support RSA OAEP labels",
)
def test_oaep_label_decrypt(self, vector, backend):
private_key = serialization.load_der_private_key(
binascii.unhexlify(vector["key"]), None, backend
)
assert vector["oaepdigest"] == b"SHA512"
decrypted = private_key.decrypt(
binascii.unhexlify(vector["input"]),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA512()),
algorithm=hashes.SHA512(),
label=binascii.unhexlify(vector["oaeplabel"]),
),
)
assert vector["output"][1:-1] == decrypted
@pytest.mark.parametrize(
("msg", "label"),
[
(b"amazing encrypted msg", b"some label"),
(b"amazing encrypted msg", b""),
],
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=b"label",
)
),
skip_message="Does not support RSA OAEP labels",
)
def test_oaep_label_roundtrip(self, msg, label, backend):
private_key = RSA_KEY_2048.private_key(backend)
ct = private_key.public_key().encrypt(
msg,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=label,
),
)
pt = private_key.decrypt(
ct,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=label,
),
)
assert pt == msg
@pytest.mark.parametrize(
("enclabel", "declabel"),
[(b"label1", b"label2"), (b"label3", b""), (b"", b"label4")],
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=b"label",
)
),
skip_message="Does not support RSA OAEP labels",
)
def test_oaep_wrong_label(self, enclabel, declabel, backend):
private_key = RSA_KEY_2048.private_key(backend)
msg = b"test"
ct = private_key.public_key().encrypt(
msg,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=enclabel,
),
)
with pytest.raises(ValueError):
private_key.decrypt(
ct,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=declabel,
),
)
@pytest.mark.supported(
only_if=lambda backend: not backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA256(),
label=b"label",
)
),
skip_message="Requires backend without RSA OAEP label support",
)
def test_unsupported_oaep_label_decrypt(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=b"label",
),
)
def test_rsa_generate_invalid_backend():
pretend_backend = object()
with raises_unsupported_algorithm(_Reasons.BACKEND_MISSING_INTERFACE):
rsa.generate_private_key(65537, 2048, pretend_backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSASignature(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join("asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors,
)
),
)
def test_pkcs1v15_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"], n=private["modulus"]
),
).private_key(backend)
signature = private_key.sign(
binascii.unhexlify(example["message"]),
padding.PKCS1v15(),
hashes.SHA1(),
)
assert binascii.hexlify(signature) == example["signature"]
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"
),
load_pkcs1_vectors,
)
),
)
def test_pss_signing(self, pkcs1_example, backend):
private, public, example = pkcs1_example
private_key = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"], n=private["modulus"]
),
).private_key(backend)
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"], n=public["modulus"]
).public_key(backend)
signature = private_key.sign(
binascii.unhexlify(example["message"]),
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA1(),
)
assert len(signature) == (private_key.key_size + 7) // 8
# PSS signatures contain randomness so we can't do an exact
# signature check. Instead we'll verify that the signature created
# successfully verifies.
public_key.verify(
signature,
binascii.unhexlify(example["message"]),
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA1(),
)
@pytest.mark.parametrize(
"hash_alg",
[hashes.SHA224(), hashes.SHA256(), hashes.SHA384(), hashes.SHA512()],
)
def test_pss_signing_sha2(self, hash_alg, backend):
_skip_pss_hash_algorithm_unsupported(backend, hash_alg)
private_key = RSA_KEY_768.private_key(backend)
public_key = private_key.public_key()
pss = padding.PSS(
mgf=padding.MGF1(hash_alg), salt_length=padding.PSS.MAX_LENGTH
)
msg = b"testing signature"
signature = private_key.sign(msg, pss, hash_alg)
public_key.verify(signature, msg, pss, hash_alg)
@pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA512())
and backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
)
),
skip_message="Does not support SHA512.",
)
def test_pss_minimum_key_size_for_digest(self, backend):
private_key = RSA_KEY_522.private_key(backend)
private_key.sign(
b"no failure",
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA512(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512.",
)
def test_pss_signing_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.sign(
b"msg",
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA512(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
def test_pss_signing_salt_length_too_long(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.sign(
b"failure coming",
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()), salt_length=1000000
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_use_after_finalize(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.warns(CryptographyDeprecationWarning):
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(b"sign me")
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.finalize()
with pytest.raises(AlreadyFinalized):
signer.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.sign(b"msg", DummyAsymmetricPadding(), hashes.SHA1())
def test_padding_incorrect_type(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(TypeError):
private_key.sign(b"msg", "notpadding", hashes.SHA1())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS.",
)
def test_unsupported_pss_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
private_key.sign(
b"msg",
padding.PSS(
mgf=DummyMGF(), salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_pkcs1_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_599.private_key(backend)
with pytest.raises(ValueError):
private_key.sign(
b"failure coming", padding.PKCS1v15(), hashes.SHA512()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_pkcs1_minimum_key_size(self, backend):
private_key = RSA_KEY_745.private_key(backend)
private_key.sign(b"no failure", padding.PKCS1v15(), hashes.SHA512())
def test_sign(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
pkcs = padding.PKCS1v15()
algorithm = hashes.SHA1()
signature = private_key.sign(message, pkcs, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, pkcs, algorithm)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS.",
)
def test_prehashed_sign(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
signature = private_key.sign(digest, pss, prehashed_alg)
public_key = private_key.public_key()
public_key.verify(signature, message, pss, hashes.SHA1())
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(
hashes.BLAKE2s(digest_size=32)
),
skip_message="Does not support BLAKE2s",
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS.",
)
def test_unsupported_hash(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=0)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_HASH):
private_key.sign(message, pss, hashes.BLAKE2s(32))
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS.",
)
def test_prehashed_digest_mismatch(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA512(), backend)
h.update(message)
digest = h.finalize()
pss = padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
with pytest.raises(ValueError):
private_key.sign(digest, pss, prehashed_alg)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_prehashed_unsupported_in_signer_ctx(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(TypeError), pytest.warns(
CryptographyDeprecationWarning
):
private_key.signer(
padding.PKCS1v15(), asym_utils.Prehashed(hashes.SHA1())
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_prehashed_unsupported_in_verifier_ctx(self, backend):
public_key = RSA_KEY_512.private_key(backend).public_key()
with pytest.raises(TypeError), pytest.warns(
CryptographyDeprecationWarning
):
public_key.verifier(
b"0" * 64,
padding.PKCS1v15(),
asym_utils.Prehashed(hashes.SHA1()),
)
def test_corrupted_private_key(self, backend):
with pytest.raises(ValueError):
serialization.load_pem_private_key(
RSA_KEY_CORRUPTED, password=None, backend=backend
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAVerification(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join("asymmetric", "RSA", "pkcs1v15sign-vectors.txt"),
load_pkcs1_vectors,
)
),
)
def test_pkcs1v15_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"], n=public["modulus"]
).public_key(backend)
public_key.verify(
binascii.unhexlify(example["signature"]),
binascii.unhexlify(example["message"]),
padding.PKCS1v15(),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_invalid_pkcs1v15_signature_wrong_data(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
signature = private_key.sign(
b"sign me", padding.PKCS1v15(), hashes.SHA1()
)
with pytest.raises(InvalidSignature):
public_key.verify(
signature, b"incorrect data", padding.PKCS1v15(), hashes.SHA1()
)
def test_invalid_signature_sequence_removed(self, backend):
"""
This test comes from wycheproof
"""
key_der = binascii.unhexlify(
b"30820122300d06092a864886f70d01010105000382010f003082010a02820101"
b"00a2b451a07d0aa5f96e455671513550514a8a5b462ebef717094fa1fee82224"
b"e637f9746d3f7cafd31878d80325b6ef5a1700f65903b469429e89d6eac88450"
b"97b5ab393189db92512ed8a7711a1253facd20f79c15e8247f3d3e42e46e48c9"
b"8e254a2fe9765313a03eff8f17e1a029397a1fa26a8dce26f490ed81299615d9"
b"814c22da610428e09c7d9658594266f5c021d0fceca08d945a12be82de4d1ece"
b"6b4c03145b5d3495d4ed5411eb878daf05fd7afc3e09ada0f1126422f590975a"
b"1969816f48698bcbba1b4d9cae79d460d8f9f85e7975005d9bc22c4e5ac0f7c1"
b"a45d12569a62807d3b9a02e5a530e773066f453d1f5b4c2e9cf7820283f742b9"
b"d50203010001"
)
sig = binascii.unhexlify(
b"498209f59a0679a1f926eccf3056da2cba553d7ab3064e7c41ad1d739f038249"
b"f02f5ad12ee246073d101bc3cdb563e8b6be61562056422b7e6c16ad53deb12a"
b"f5de744197753a35859833f41bb59c6597f3980132b7478fd0b95fd27dfad64a"
b"20fd5c25312bbd41a85286cd2a83c8df5efa0779158d01b0747ff165b055eb28"
b"80ea27095700a295593196d8c5922cf6aa9d7e29b5056db5ded5eb20aeb31b89"
b"42e26b15a5188a4934cd7e39cfe379a197f49a204343a493452deebca436ee61"
b"4f4daf989e355544489f7e69ffa8ccc6a1e81cf0ab33c3e6d7591091485a6a31"
b"bda3b33946490057b9a3003d3fd9daf7c4778b43fd46144d945d815f12628ff4"
)
public_key = serialization.load_der_public_key(key_der, backend)
with pytest.raises(InvalidSignature):
public_key.verify(
sig,
binascii.unhexlify(b"313233343030"),
padding.PKCS1v15(),
hashes.SHA256(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_invalid_pkcs1v15_signature_wrong_key(self, backend):
private_key = RSA_KEY_512.private_key(backend)
private_key2 = RSA_KEY_512_ALT.private_key(backend)
public_key = private_key2.public_key()
msg = b"sign me"
signature = private_key.sign(msg, padding.PKCS1v15(), hashes.SHA1())
with pytest.raises(InvalidSignature):
public_key.verify(
signature, msg, padding.PKCS1v15(), hashes.SHA1()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=20)
),
skip_message="Does not support PSS.",
)
@pytest.mark.parametrize(
"pkcs1_example",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "pss-vect.txt"
),
load_pkcs1_vectors,
)
),
)
def test_pss_verification(self, pkcs1_example, backend):
private, public, example = pkcs1_example
public_key = rsa.RSAPublicNumbers(
e=public["public_exponent"], n=public["modulus"]
).public_key(backend)
public_key.verify(
binascii.unhexlify(example["signature"]),
binascii.unhexlify(example["message"]),
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()), salt_length=20
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
def test_invalid_pss_signature_wrong_data(self, backend):
public_key = rsa.RSAPublicNumbers(
n=int(
b"dffc2137d5e810cde9e4b4612f5796447218bab913b3fa98bdf7982e4fa6"
b"ec4d6653ef2b29fb1642b095befcbea6decc178fb4bed243d3c3592c6854"
b"6af2d3f3",
16,
),
e=65537,
).public_key(backend)
signature = binascii.unhexlify(
b"0e68c3649df91c5bc3665f96e157efa75b71934aaa514d91e94ca8418d100f45"
b"6f05288e58525f99666bab052adcffdf7186eb40f583bd38d98c97d3d524808b"
)
with pytest.raises(InvalidSignature):
public_key.verify(
signature,
b"incorrect data",
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
def test_invalid_pss_signature_wrong_key(self, backend):
signature = binascii.unhexlify(
b"3a1880165014ba6eb53cc1449d13e5132ebcc0cfd9ade6d7a2494a0503bd0826"
b"f8a46c431e0d7be0ca3e453f8b2b009e2733764da7927cc6dbe7a021437a242e"
)
public_key = rsa.RSAPublicNumbers(
n=int(
b"381201f4905d67dfeb3dec131a0fbea773489227ec7a1448c3109189ac68"
b"5a95441be90866a14c4d2e139cd16db540ec6c7abab13ffff91443fd46a8"
b"960cbb7658ded26a5c95c86f6e40384e1c1239c63e541ba221191c4dd303"
b"231b42e33c6dbddf5ec9a746f09bf0c25d0f8d27f93ee0ae5c0d723348f4"
b"030d3581e13522e1",
16,
),
e=65537,
).public_key(backend)
with pytest.raises(InvalidSignature):
public_key.verify(
signature,
b"sign me",
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
def test_invalid_pss_signature_data_too_large_for_modulus(self, backend):
# 2048 bit PSS signature
signature = binascii.unhexlify(
b"58750fc3d2f560d1f3e37c8e28bc8da6d3e93f5d58f8becd25b1c931eea30fea"
b"54cb17d44b90104a0aacb7fe9ffa2a59c5788435911d63de78178d21eb875ccd"
b"0b07121b641ed4fe6bcb1ca5060322765507b4f24bdba8a698a8e4e07e6bf2c4"
b"7a736abe5a912e85cd32f648f3e043b4385e8b612dcce342c5fddf18c524deb5"
b"6295b95f6dfa759b2896b793628a90f133e74c1ff7d3af43e3f7ee792df2e5b6"
b"a19e996ac3676884354899a437b3ae4e3ac91976c336c332a3b1db0d172b19cb"
b"40ad3d871296cfffb3c889ce74a179a3e290852c35d59525afe4b39dc907fad2"
b"ac462c50a488dca486031a3dc8c4cdbbc53e9f71d64732e1533a5d1249b833ce"
)
# 1024 bit key
public_key = RSA_KEY_1024.private_key(backend).public_key()
with pytest.raises(InvalidSignature):
public_key.verify(
signature,
b"sign me",
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_use_after_finalize(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
signature = private_key.sign(
b"sign me", padding.PKCS1v15(), hashes.SHA1()
)
with pytest.warns(CryptographyDeprecationWarning):
verifier = public_key.verifier(
signature, padding.PKCS1v15(), hashes.SHA1()
)
verifier.update(b"sign me")
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.verify()
with pytest.raises(AlreadyFinalized):
verifier.update(b"more data")
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
public_key.verify(
b"sig", b"msg", DummyAsymmetricPadding(), hashes.SHA1()
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_signature_not_bytes(self, backend):
public_key = RSA_KEY_512.public_numbers.public_key(backend)
signature = 1234
with pytest.raises(TypeError), pytest.warns(
CryptographyDeprecationWarning
):
public_key.verifier(signature, padding.PKCS1v15(), hashes.SHA1())
def test_padding_incorrect_type(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with pytest.raises(TypeError):
public_key.verify(b"sig", b"msg", "notpadding", hashes.SHA1())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=0)
),
skip_message="Does not support PSS.",
)
def test_unsupported_pss_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
public_key.verify(
b"sig",
b"msg",
padding.PSS(
mgf=DummyMGF(), salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA1(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA512()),
skip_message="Does not support SHA512.",
)
def test_pss_verify_digest_too_large_for_key_size(self, backend):
private_key = RSA_KEY_512.private_key(backend)
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = private_key.public_key()
with pytest.raises(ValueError):
public_key.verify(
signature,
b"msg doesn't matter",
padding.PSS(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
),
hashes.SHA512(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS.",
)
def test_pss_verify_salt_length_too_long(self, backend):
signature = binascii.unhexlify(
b"8b9a3ae9fb3b64158f3476dd8d8a1f1425444e98940e0926378baa9944d219d8"
b"534c050ef6b19b1bdc6eb4da422e89161106a6f5b5cc16135b11eb6439b646bd"
)
public_key = rsa.RSAPublicNumbers(
n=int(
b"d309e4612809437548b747d7f9eb9cd3340f54fe42bb3f84a36933b0839c"
b"11b0c8b7f67e11f7252370161e31159c49c784d4bc41c42a78ce0f0b40a3"
b"ca8ffb91",
16,
),
e=65537,
).public_key(backend)
with pytest.raises(InvalidSignature):
public_key.verify(
signature,
b"sign me",
padding.PSS(
mgf=padding.MGF1(
algorithm=hashes.SHA1(),
),
salt_length=1000000,
),
hashes.SHA1(),
)
def test_verify(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
pkcs = padding.PKCS1v15()
algorithm = hashes.SHA1()
signature = private_key.sign(message, pkcs, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, pkcs, algorithm)
def test_prehashed_verify(self, backend):
private_key = RSA_KEY_512.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
prehashed_alg = asym_utils.Prehashed(hashes.SHA1())
pkcs = padding.PKCS1v15()
signature = private_key.sign(message, pkcs, hashes.SHA1())
public_key = private_key.public_key()
public_key.verify(signature, digest, pkcs, prehashed_alg)
def test_prehashed_digest_mismatch(self, backend):
public_key = RSA_KEY_512.private_key(backend).public_key()
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
data = h.finalize()
prehashed_alg = asym_utils.Prehashed(hashes.SHA512())
pkcs = padding.PKCS1v15()
with pytest.raises(ValueError):
public_key.verify(b"\x00" * 64, data, pkcs, prehashed_alg)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPSSMGF1Verification(object):
test_rsa_pss_mgf1_sha1 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS using MGF1 with SHA1.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"],
),
)
)
test_rsa_pss_mgf1_sha224 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA224()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS using MGF1 with SHA224.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"],
),
)
)
test_rsa_pss_mgf1_sha256 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS using MGF1 with SHA256.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"],
),
)
)
test_rsa_pss_mgf1_sha384 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA384()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS using MGF1 with SHA384.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"],
),
)
)
test_rsa_pss_mgf1_sha512 = pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PSS(
mgf=padding.MGF1(hashes.SHA512()),
salt_length=padding.PSS.MAX_LENGTH,
)
),
skip_message="Does not support PSS using MGF1 with SHA512.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGenPSS_186-2.rsp",
"SigGenPSS_186-3.rsp",
"SigVerPSS_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PSS(
mgf=padding.MGF1(
algorithm=hash_alg,
),
salt_length=params["salt_length"],
),
)
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAPKCS1Verification(object):
test_rsa_pkcs1v15_verify_sha1 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA1())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA1 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha224 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA224())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA224 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha256 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA256())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA256 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha384 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA384())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA384 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha512 = pytest.mark.supported(
only_if=lambda backend: (
backend.hash_supported(hashes.SHA512())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA512 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
class TestPSS(object):
def test_calculate_max_pss_salt_length(self):
with pytest.raises(TypeError):
padding.calculate_max_pss_salt_length(object(), hashes.SHA256())
def test_invalid_salt_length_not_integer(self):
with pytest.raises(TypeError):
padding.PSS(
mgf=padding.MGF1(hashes.SHA1()), salt_length=b"not_a_length"
)
def test_invalid_salt_length_negative_integer(self):
with pytest.raises(ValueError):
padding.PSS(mgf=padding.MGF1(hashes.SHA1()), salt_length=-1)
def test_valid_pss_parameters(self):
algorithm = hashes.SHA1()
salt_length = algorithm.digest_size
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=salt_length)
assert pss._mgf == mgf
assert pss._salt_length == salt_length
def test_valid_pss_parameters_maximum(self):
algorithm = hashes.SHA1()
mgf = padding.MGF1(algorithm)
pss = padding.PSS(mgf=mgf, salt_length=padding.PSS.MAX_LENGTH)
assert pss._mgf == mgf
assert pss._salt_length == padding.PSS.MAX_LENGTH
class TestMGF1(object):
def test_invalid_hash_algorithm(self):
with pytest.raises(TypeError):
padding.MGF1(b"not_a_hash")
def test_valid_mgf1_parameters(self):
algorithm = hashes.SHA1()
mgf = padding.MGF1(algorithm)
assert mgf._algorithm == algorithm
class TestOAEP(object):
def test_invalid_algorithm(self):
mgf = padding.MGF1(hashes.SHA1())
with pytest.raises(TypeError):
padding.OAEP(mgf=mgf, algorithm=b"", label=None)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSADecryption(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join("asymmetric", "RSA", "pkcs1v15crypt-vectors.txt"),
load_pkcs1_vectors,
)
),
)
def test_decrypt_pkcs1v15_vectors(self, vector, backend):
private, public, example = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"], n=private["modulus"]
),
).private_key(backend)
ciphertext = binascii.unhexlify(example["encryption"])
assert len(ciphertext) == (skey.key_size + 7) // 8
message = skey.decrypt(ciphertext, padding.PKCS1v15())
assert message == binascii.unhexlify(example["message"])
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
private_key.decrypt(b"0" * 64, DummyAsymmetricPadding())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_decrypt_invalid_decrypt(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(b"\x00" * 64, padding.PKCS1v15())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_decrypt_ciphertext_too_large(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with pytest.raises(ValueError):
private_key.decrypt(b"\x00" * 65, padding.PKCS1v15())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
def test_decrypt_ciphertext_too_small(self, backend):
private_key = RSA_KEY_512.private_key(backend)
ct = binascii.unhexlify(
b"50b4c14136bd198c2f3c3ed243fce036e168d56517984a263cd66492b80804f1"
b"69d210f2b9bdfb48b12f9ea05009c77da257cc600ccefe3a6283789d8ea0"
)
with pytest.raises(ValueError):
private_key.decrypt(ct, padding.PKCS1v15())
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
)
),
skip_message="Does not support OAEP.",
)
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join(
"asymmetric", "RSA", "pkcs-1v2-1d2-vec", "oaep-vect.txt"
),
load_pkcs1_vectors,
)
),
)
def test_decrypt_oaep_vectors(self, vector, backend):
private, public, example = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"], n=private["modulus"]
),
).private_key(backend)
message = skey.decrypt(
binascii.unhexlify(example["encryption"]),
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
assert message == binascii.unhexlify(example["message"])
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA224()),
algorithm=hashes.SHA224(),
label=None,
)
),
skip_message=(
"Does not support OAEP using SHA224 MGF1 and SHA224 hash."
),
)
@pytest.mark.parametrize("vector", _build_oaep_sha2_vectors())
def test_decrypt_oaep_sha2_vectors(self, vector, backend):
private, public, example, mgf1_alg, hash_alg = vector
skey = rsa.RSAPrivateNumbers(
p=private["p"],
q=private["q"],
d=private["private_exponent"],
dmp1=private["dmp1"],
dmq1=private["dmq1"],
iqmp=private["iqmp"],
public_numbers=rsa.RSAPublicNumbers(
e=private["public_exponent"], n=private["modulus"]
),
).private_key(backend)
message = skey.decrypt(
binascii.unhexlify(example["encryption"]),
padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1_alg),
algorithm=hash_alg,
label=None,
),
)
assert message == binascii.unhexlify(example["message"])
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
)
),
skip_message="Does not support OAEP.",
)
def test_invalid_oaep_decryption(self, backend):
# More recent versions of OpenSSL may raise different errors.
# This test triggers a failure and confirms that we properly handle
# it.
private_key = RSA_KEY_512.private_key(backend)
ciphertext = private_key.public_key().encrypt(
b"secure data",
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
private_key_alt = RSA_KEY_512_ALT.private_key(backend)
with pytest.raises(ValueError):
private_key_alt.decrypt(
ciphertext,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
)
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
)
),
skip_message="Does not support OAEP.",
)
    def test_invalid_oaep_decryption_data_too_large_for_modulus(self, backend):
key = RSA_KEY_2048_ALT.private_key(backend)
ciphertext = (
b"\xb1ph\xc0\x0b\x1a|\xe6\xda\xea\xb5\xd7%\x94\x07\xf96\xfb\x96"
b"\x11\x9b\xdc4\xea.-\x91\x80\x13S\x94\x04m\xe9\xc5/F\x1b\x9b:\\"
b"\x1d\x04\x16ML\xae\xb32J\x01yuA\xbb\x83\x1c\x8f\xf6\xa5\xdbp\xcd"
b"\nx\xc7\xf6\x15\xb2/\xdcH\xae\xe7\x13\x13by\r4t\x99\x0fc\x1f\xc1"
b"\x1c\xb1\xdd\xc5\x08\xd1\xee\xa1XQ\xb8H@L5v\xc3\xaf\xf2\r\x97"
b"\xed\xaa\xe7\xf1\xd4xai\xd3\x83\xd9\xaa9\xbfx\xe1\x87F \x01\xff"
b"L\xccv}ae\xb3\xfa\xf2B\xb8\xf9\x04H\x94\x85\xcb\x86\xbb\\ghx!W31"
b"\xc7;t\na_E\xc2\x16\xb0;\xa1\x18\t\x1b\xe1\xdb\x80>)\x15\xc6\x12"
b"\xcb\xeeg`\x8b\x9b\x1b\x05y4\xb0\x84M6\xcd\xa1\x827o\xfd\x96\xba"
b"Z#\x8d\xae\x01\xc9\xf2\xb6\xde\x89{8&eQ\x1e8\x03\x01#?\xb66\\"
b"\xad.\xe9\xfa!\x95 c{\xcaz\xe0*\tP\r\x91\x9a)B\xb5\xadN\xf4$\x83"
b"\t\xb5u\xab\x19\x99"
)
with pytest.raises(ValueError):
key.decrypt(
ciphertext,
padding.OAEP(
algorithm=hashes.SHA1(),
mgf=padding.MGF1(hashes.SHA1()),
label=None,
),
)
def test_unsupported_oaep_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
private_key.decrypt(
b"0" * 64,
padding.OAEP(
mgf=DummyMGF(), algorithm=hashes.SHA1(), label=None
),
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSAEncryption(object):
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
)
),
skip_message="Does not support OAEP.",
)
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(
RSA_KEY_1024,
RSA_KEY_1025,
RSA_KEY_1026,
RSA_KEY_1027,
RSA_KEY_1028,
RSA_KEY_1029,
RSA_KEY_1030,
RSA_KEY_1031,
RSA_KEY_1536,
RSA_KEY_2048,
),
[
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
)
],
),
)
def test_rsa_encrypt_oaep(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
pt = b"encrypt me!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == (public_key.key_size + 7) // 8
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA256()),
algorithm=hashes.SHA512(),
label=None,
)
),
skip_message=(
"Does not support OAEP using SHA256 MGF1 and SHA512 hash."
),
)
@pytest.mark.parametrize(
("mgf1hash", "oaephash"),
itertools.product(
[
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
],
[
hashes.SHA1(),
hashes.SHA224(),
hashes.SHA256(),
hashes.SHA384(),
hashes.SHA512(),
],
),
)
def test_rsa_encrypt_oaep_sha2(self, mgf1hash, oaephash, backend):
pad = padding.OAEP(
mgf=padding.MGF1(algorithm=mgf1hash),
algorithm=oaephash,
label=None,
)
private_key = RSA_KEY_2048.private_key(backend)
pt = b"encrypt me using sha2 hashes!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == (public_key.key_size + 7) // 8
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.supported(
only_if=lambda backend: backend.rsa_padding_supported(
padding.PKCS1v15()
),
skip_message="Does not support PKCS1v1.5.",
)
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(
RSA_KEY_1024,
RSA_KEY_1025,
RSA_KEY_1026,
RSA_KEY_1027,
RSA_KEY_1028,
RSA_KEY_1029,
RSA_KEY_1030,
RSA_KEY_1031,
RSA_KEY_1536,
RSA_KEY_2048,
),
[padding.PKCS1v15()],
),
)
def test_rsa_encrypt_pkcs1v15(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
pt = b"encrypt me!"
public_key = private_key.public_key()
ct = public_key.encrypt(pt, pad)
assert ct != pt
assert len(ct) == (public_key.key_size + 7) // 8
recovered_pt = private_key.decrypt(ct, pad)
assert recovered_pt == pt
@pytest.mark.parametrize(
("key_data", "pad"),
itertools.product(
(
RSA_KEY_1024,
RSA_KEY_1025,
RSA_KEY_1026,
RSA_KEY_1027,
RSA_KEY_1028,
RSA_KEY_1029,
RSA_KEY_1030,
RSA_KEY_1031,
RSA_KEY_1536,
RSA_KEY_2048,
),
(
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None,
),
padding.PKCS1v15(),
),
),
)
def test_rsa_encrypt_key_too_small(self, key_data, pad, backend):
private_key = key_data.private_key(backend)
public_key = private_key.public_key()
# Slightly smaller than the key size but not enough for padding.
with pytest.raises(ValueError):
public_key.encrypt(b"\x00" * (private_key.key_size // 8 - 1), pad)
# Larger than the key size.
with pytest.raises(ValueError):
public_key.encrypt(b"\x00" * (private_key.key_size // 8 + 5), pad)
def test_unsupported_padding(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_PADDING):
public_key.encrypt(b"somedata", DummyAsymmetricPadding())
with pytest.raises(TypeError):
public_key.encrypt(b"somedata", padding=object())
def test_unsupported_oaep_mgf(self, backend):
private_key = RSA_KEY_512.private_key(backend)
public_key = private_key.public_key()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MGF):
public_key.encrypt(
b"ciphertext",
padding.OAEP(
mgf=DummyMGF(), algorithm=hashes.SHA1(), label=None
),
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
class TestRSANumbers(object):
def test_rsa_public_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
assert public_numbers.e == 1
assert public_numbers.n == 15
def test_rsa_private_numbers(self):
public_numbers = rsa.RSAPublicNumbers(e=1, n=15)
private_numbers = rsa.RSAPrivateNumbers(
p=3,
q=5,
d=1,
dmp1=1,
dmq1=1,
iqmp=2,
public_numbers=public_numbers,
)
assert private_numbers.p == 3
assert private_numbers.q == 5
assert private_numbers.d == 1
assert private_numbers.dmp1 == 1
assert private_numbers.dmq1 == 1
assert private_numbers.iqmp == 2
assert private_numbers.public_numbers == public_numbers
def test_rsa_private_numbers_create_key(self, backend):
private_key = RSA_KEY_1024.private_key(backend)
assert private_key
def test_rsa_public_numbers_create_key(self, backend):
public_key = RSA_KEY_1024.public_numbers.public_key(backend)
assert public_key
public_key = rsa.RSAPublicNumbers(n=10, e=3).public_key(backend)
assert public_key
def test_public_numbers_invalid_types(self):
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=None, n=15)
with pytest.raises(TypeError):
rsa.RSAPublicNumbers(e=1, n=None)
@pytest.mark.parametrize(
("p", "q", "d", "dmp1", "dmq1", "iqmp", "public_numbers"),
[
(None, 5, 1, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, None, 1, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, None, 1, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, None, 1, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, None, 2, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, 1, None, rsa.RSAPublicNumbers(e=1, n=15)),
(3, 5, 1, 1, 1, 2, None),
],
)
def test_private_numbers_invalid_types(
self, p, q, d, dmp1, dmq1, iqmp, public_numbers
):
with pytest.raises(TypeError):
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=public_numbers,
)
@pytest.mark.parametrize(
("e", "n"),
[
(7, 2), # modulus < 3
(1, 15), # public_exponent < 3
(17, 15), # public_exponent > modulus
(14, 15), # public_exponent not odd
],
)
def test_invalid_public_numbers_argument_values(self, e, n, backend):
# Start with public_exponent=7, modulus=15. Then change one value at a
# time to test the bounds.
with pytest.raises(ValueError):
rsa.RSAPublicNumbers(e=e, n=n).public_key(backend)
@pytest.mark.parametrize(
("p", "q", "d", "dmp1", "dmq1", "iqmp", "e", "n"),
[
(3, 11, 3, 1, 3, 2, 7, 2), # modulus < 3
(3, 11, 3, 1, 3, 2, 7, 35), # modulus != p * q
(37, 11, 3, 1, 3, 2, 7, 33), # p > modulus
(3, 37, 3, 1, 3, 2, 7, 33), # q > modulus
(3, 11, 3, 35, 3, 2, 7, 33), # dmp1 > modulus
(3, 11, 3, 1, 35, 2, 7, 33), # dmq1 > modulus
(3, 11, 3, 1, 3, 35, 7, 33), # iqmp > modulus
(3, 11, 37, 1, 3, 2, 7, 33), # d > modulus
(3, 11, 3, 1, 3, 2, 1, 33), # public_exponent < 3
(3, 11, 3, 1, 3, 35, 65537, 33), # public_exponent > modulus
(3, 11, 3, 1, 3, 2, 6, 33), # public_exponent is not odd
(3, 11, 3, 2, 3, 2, 7, 33), # dmp1 is not odd
(3, 11, 3, 1, 4, 2, 7, 33), # dmq1 is not odd
],
)
def test_invalid_private_numbers_argument_values(
self, p, q, d, dmp1, dmq1, iqmp, e, n, backend
):
# Start with p=3, q=11, private_exponent=3, public_exponent=7,
# modulus=33, dmp1=1, dmq1=3, iqmp=2. Then change one value at
# a time to test the bounds.
with pytest.raises(ValueError):
rsa.RSAPrivateNumbers(
p=p,
q=q,
d=d,
dmp1=dmp1,
dmq1=dmq1,
iqmp=iqmp,
public_numbers=rsa.RSAPublicNumbers(e=e, n=n),
).private_key(backend)
def test_public_number_repr(self):
num = RSAPublicNumbers(1, 1)
assert repr(num) == "<RSAPublicNumbers(e=1, n=1)>"
class TestRSANumbersEquality(object):
def test_public_numbers_eq(self):
num = RSAPublicNumbers(1, 2)
num2 = RSAPublicNumbers(1, 2)
assert num == num2
def test_public_numbers_ne(self):
num = RSAPublicNumbers(1, 2)
assert num != RSAPublicNumbers(2, 2)
assert num != RSAPublicNumbers(1, 3)
assert num != object()
def test_private_numbers_eq(self):
pub = RSAPublicNumbers(1, 2)
num = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub)
pub2 = RSAPublicNumbers(1, 2)
num2 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub2)
assert num == num2
def test_private_numbers_ne(self):
pub = RSAPublicNumbers(1, 2)
num = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, pub)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 7, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 4, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 5, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 4, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 3, 3, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
2, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 6, RSAPublicNumbers(2, 2)
)
assert num != RSAPrivateNumbers(
1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 3)
)
assert num != object()
def test_public_numbers_hash(self):
pub1 = RSAPublicNumbers(3, 17)
pub2 = RSAPublicNumbers(3, 17)
pub3 = RSAPublicNumbers(7, 21)
assert hash(pub1) == hash(pub2)
assert hash(pub1) != hash(pub3)
def test_private_numbers_hash(self):
priv1 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2))
priv2 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 2))
priv3 = RSAPrivateNumbers(1, 2, 3, 4, 5, 6, RSAPublicNumbers(1, 3))
assert hash(priv1) == hash(priv2)
assert hash(priv1) != hash(priv3)
class TestRSAPrimeFactorRecovery(object):
@pytest.mark.parametrize(
"vector",
_flatten_pkcs1_examples(
load_vectors_from_file(
os.path.join("asymmetric", "RSA", "pkcs1v15crypt-vectors.txt"),
load_pkcs1_vectors,
)
),
)
def test_recover_prime_factors(self, vector):
private, public, example = vector
p, q = rsa.rsa_recover_prime_factors(
private["modulus"],
private["public_exponent"],
private["private_exponent"],
)
# Unfortunately there is no convention on which prime should be p
# and which one q. The function we use always makes p > q, but the
# NIST vectors are not so consistent. Accordingly, we verify we've
# recovered the proper (p, q) by sorting them and asserting on that.
assert sorted([p, q]) == sorted([private["p"], private["q"]])
assert p > q
def test_invalid_recover_prime_factors(self):
with pytest.raises(ValueError):
rsa.rsa_recover_prime_factors(34, 3, 7)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestRSAPrivateKeySerialization(object):
@pytest.mark.parametrize(
("fmt", "password"),
itertools.product(
[
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.PrivateFormat.PKCS8,
],
[
b"s",
b"longerpassword",
b"!*$&(@#$*&($T@%_somesymbols",
b"\x01" * 1000,
],
),
)
def test_private_bytes_encrypted_pem(self, backend, fmt, password):
skip_fips_traditional_openssl(backend, fmt)
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
serialization.Encoding.PEM,
fmt,
serialization.BestAvailableEncryption(password),
)
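        # Round-trip: load the encrypted PEM back and check the private numbers match.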
loaded_key = serialization.load_pem_private_key(
serialized, password, backend
)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(serialization.Encoding.Raw, serialization.PrivateFormat.PKCS8),
(serialization.Encoding.DER, serialization.PrivateFormat.Raw),
(serialization.Encoding.Raw, serialization.PrivateFormat.Raw),
(serialization.Encoding.X962, serialization.PrivateFormat.PKCS8),
],
)
def test_private_bytes_rejects_invalid(self, encoding, fmt, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(encoding, fmt, serialization.NoEncryption())
@pytest.mark.parametrize(
("fmt", "password"),
[
[serialization.PrivateFormat.PKCS8, b"s"],
[serialization.PrivateFormat.PKCS8, b"longerpassword"],
[serialization.PrivateFormat.PKCS8, b"!*$&(@#$*&($T@%_somesymbol"],
[serialization.PrivateFormat.PKCS8, b"\x01" * 1000],
],
)
def test_private_bytes_encrypted_der(self, backend, fmt, password):
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
serialization.Encoding.DER,
fmt,
serialization.BestAvailableEncryption(password),
)
loaded_key = serialization.load_der_private_key(
serialized, password, backend
)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.parametrize(
("encoding", "fmt", "loader_func"),
[
[
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.load_der_private_key,
],
[
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.load_pem_private_key,
],
[
serialization.Encoding.DER,
serialization.PrivateFormat.PKCS8,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_unencrypted(
self, backend, encoding, fmt, loader_func
):
key = RSA_KEY_2048.private_key(backend)
serialized = key.private_bytes(
encoding, fmt, serialization.NoEncryption()
)
loaded_key = loader_func(serialized, None, backend)
loaded_priv_num = loaded_key.private_numbers()
priv_num = key.private_numbers()
assert loaded_priv_num == priv_num
@pytest.mark.skip_fips(
reason="Traditional OpenSSL key format is not supported in FIPS mode."
)
@pytest.mark.parametrize(
("key_path", "encoding", "loader_func"),
[
[
os.path.join(
"asymmetric",
"Traditional_OpenSSL_Serialization",
"testrsa.pem",
),
serialization.Encoding.PEM,
serialization.load_pem_private_key,
],
[
os.path.join("asymmetric", "DER_Serialization", "testrsa.der"),
serialization.Encoding.DER,
serialization.load_der_private_key,
],
],
)
def test_private_bytes_traditional_openssl_unencrypted(
self, backend, key_path, encoding, loader_func
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, None, backend)
serialized = key.private_bytes(
encoding,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.NoEncryption(),
)
assert serialized == key_bytes
def test_private_bytes_traditional_der_encrypted_invalid(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.DER,
serialization.PrivateFormat.TraditionalOpenSSL,
serialization.BestAvailableEncryption(b"password"),
)
def test_private_bytes_invalid_encoding(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
"notencoding",
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption(),
)
def test_private_bytes_invalid_format(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
"invalidformat",
serialization.NoEncryption(),
)
def test_private_bytes_invalid_encryption_algorithm(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(TypeError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
"notanencalg",
)
def test_private_bytes_unsupported_encryption_type(self, backend):
key = RSA_KEY_2048.private_key(backend)
with pytest.raises(ValueError):
key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.TraditionalOpenSSL,
DummyKeySerializationEncryption(),
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=PEMSerializationBackend)
class TestRSAPEMPublicKeySerialization(object):
@pytest.mark.parametrize(
("key_path", "loader_func", "encoding", "format"),
[
(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.PKCS1,
),
(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.der"),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
),
(
os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
serialization.load_pem_public_key,
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
(
os.path.join(
"asymmetric",
"DER_Serialization",
"unenc-rsa-pkcs8.pub.der",
),
serialization.load_der_public_key,
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
],
)
def test_public_bytes_match(
self, key_path, loader_func, encoding, format, backend
):
key_bytes = load_vectors_from_file(
key_path, lambda pemfile: pemfile.read(), mode="rb"
)
key = loader_func(key_bytes, backend)
serialized = key.public_bytes(encoding, format)
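        # The serialized form must match the reference vector byte-for-byte.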
assert serialized == key_bytes
def test_public_bytes_openssh(self, backend):
key_bytes = load_vectors_from_file(
os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
lambda pemfile: pemfile.read(),
mode="rb",
)
key = serialization.load_pem_public_key(key_bytes, backend)
ssh_bytes = key.public_bytes(
serialization.Encoding.OpenSSH, serialization.PublicFormat.OpenSSH
)
assert ssh_bytes == (
b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC7JHoJfg6yNzLMOWet8Z49a4KD"
b"0dCspMAYvo2YAMB7/wdEycocujbhJ2n/seONi+5XqTqqFkM5VBl8rmkkFPZk/7x0"
b"xmdsTPECSWnHK+HhoaNDFPR3j8jQhVo1laxiqcEhAHegi5cwtFosuJAvSKAFKEvy"
b"D43si00DQnXWrYHAEQ=="
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.PEM, serialization.PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.DER, serialization.PublicFormat.OpenSSH
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.PKCS1,
)
with pytest.raises(ValueError):
key.public_bytes(
serialization.Encoding.OpenSSH,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
def test_public_bytes_invalid_encoding(self, backend):
key = RSA_KEY_2048.private_key(backend).public_key()
with pytest.raises(TypeError):
key.public_bytes("notencoding", serialization.PublicFormat.PKCS1)
def test_public_bytes_invalid_format(self, backend):
key = RSA_KEY_2048.private_key(backend).public_key()
with pytest.raises(TypeError):
key.public_bytes(serialization.Encoding.PEM, "invalidformat")
@pytest.mark.parametrize(
("encoding", "fmt"),
[
(
serialization.Encoding.Raw,
serialization.PublicFormat.SubjectPublicKeyInfo,
),
(serialization.Encoding.Raw, serialization.PublicFormat.PKCS1),
]
+ list(
itertools.product(
[
serialization.Encoding.Raw,
serialization.Encoding.X962,
serialization.Encoding.PEM,
serialization.Encoding.DER,
],
[
serialization.PublicFormat.Raw,
serialization.PublicFormat.UncompressedPoint,
serialization.PublicFormat.CompressedPoint,
],
)
),
)
def test_public_bytes_rejects_invalid(self, encoding, fmt, backend):
key = RSA_KEY_2048.private_key(backend).public_key()
with pytest.raises(ValueError):
key.public_bytes(encoding, fmt)
|
the-stack_0_26707
|
#!/usr/bin/env python3
def test_sat():
#
# [<<< table of contents](index.html)
#
# ---
#
# Constraint satisfier
# ====================
#
#
#
from bruhat.render.sat import Variable, Solver, System
x = Variable('x')
y = Variable('y')
z = Variable('z')
items = [
x+y >= 1.,
x+z == 5,
y >= 3.
]
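    # Solve the set of linear constraints over x, y and z.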
solver = Solver(items)
result = solver.solve()
print(result)
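    # The System interface allocates variables, accumulates constraints,
    # and exposes the solved assignment via indexing.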
system = System()
v = system.get_var()
u = system.get_var()
w = system.get_var()
system.add(v+u+w == 3.)
system.solve()
print(system[v] + system[u] + system[w])
if __name__ == "__main__":
    test_sat()
|
the-stack_0_26708
|
import socket
import threading
from threading import Thread
from queue import Queue
host = ''
port = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
s.listen(3)
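# Connections accepted on port 8888 are dispatched to the worker-thread pool below.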
class ThreadPoolManger():
"""线程池管理器"""
def __init__(self, thread_num):
        # Initialize parameters
self.work_queue = Queue()
self.thread_num = thread_num
self.__init_threading_pool(self.thread_num)
def __init_threading_pool(self, thread_num):
        # Initialize the pool by creating the requested number of worker threads
for i in range(thread_num):
thread = ThreadManger(self.work_queue)
thread.start()
def add_job(self, func, *args):
        # Put a job on the queue for a blocked worker to pick up;
        # the arguments are the target callable and its parameters
self.work_queue.put((func, args))
class ThreadManger(Thread):
"""定义线程类,继承threading.Thread"""
def __init__(self, work_queue):
Thread.__init__(self)
self.work_queue = work_queue
self.daemon = True
def run(self):
        # Main loop of the worker thread
while True:
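            # Block until a job is available, run it, then mark it done.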
target, args = self.work_queue.get()
target(*args)
self.work_queue.task_done()
# Create a thread pool with 4 worker threads
thread_pool = ThreadPoolManger(4)
# Handle an HTTP request; this demo simply returns 200 with "hello world"
def handle_request(conn_socket):
recv_data = conn_socket.recv(1024)
print("++", recv_data)
    reply = 'HTTP/1.1 200 OK\r\n\r\n'
    reply += 'hello world'
    print('thread %s is running' % threading.current_thread().name)
conn_socket.send(reply.encode('utf8'))
conn_socket.close()
# Loop forever, accepting client connections
while True:
    # Block until a client connects
conn_socket, addr = s.accept()
    # Hand the socket to handle_request and let the thread pool assign a worker to run it
thread_pool.add_job(handle_request, *(conn_socket, ))
s.close()
|
the-stack_0_26709
|
# -*- coding: utf-8 -*-
"""
--------------------------------------
@File : model_constant.py
@Author : maixiaochai
@Email : [email protected]
@Created on : 2020/5/10 15:47
--------------------------------------
"""
# Permission levels
USER_COMMON = 0
USER_BLACK = 1
USER_VIP = 2
ADMIN_NONE = 0
ADMIN_COMMON = 2
# Common messages
MSG_403 = "Permission denied."
|